From 032356566e4ebd3f6ea3834c670273745c4bf435 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Mon, 13 Mar 2023 14:07:36 +0100 Subject: [PATCH 01/14] chore(sdk): Rewrite a small part of the code to make it clearer. --- .../src/sliding_sync/list/request_generator.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index 76281e0b5..ff05512cf 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -65,17 +65,13 @@ impl SlidingSyncListRequestGenerator { batch_size: u32, limit: Option, ) -> v4::SyncRequestList { - let calculated_end = start + batch_size; + let maximum_end = start + batch_size; - let mut end = match limit { - Some(limit) => min(limit, calculated_end), - _ => calculated_end, - }; + let mut end = limit.map(|limit| min(limit, maximum_end)).unwrap_or(maximum_end); - end = match self.list.rooms_count() { - Some(total_room_count) => min(end, total_room_count - 1), - _ => end, - }; + if let Some(rooms_count) = self.list.rooms_count() { + end = min(end, rooms_count - 1); + } self.make_request_for_ranges(vec![(start.into(), end.into())]) } From c6c259144ae85c3adc9a510bbc61e26aa78ca4e1 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Mon, 13 Mar 2023 14:29:12 +0100 Subject: [PATCH 02/14] chore(sdk): Rename enum variants to be consistent across all the modules. --- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 8 +++++--- .../src/sliding_sync/list/request_generator.rs | 14 +++++++------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index af7c706d3..b0ce9344d 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -380,14 +380,16 @@ impl SlidingSyncList { pub(super) fn request_generator(&self) -> SlidingSyncListRequestGenerator { match &self.sync_mode { SlidingSyncMode::PagingFullSync => { - SlidingSyncListRequestGenerator::new_with_paging_syncup(self.clone()) + SlidingSyncListRequestGenerator::new_with_paging_full_sync(self.clone()) } SlidingSyncMode::GrowingFullSync => { - SlidingSyncListRequestGenerator::new_with_growing_syncup(self.clone()) + SlidingSyncListRequestGenerator::new_with_growing_full_sync(self.clone()) } - SlidingSyncMode::Selective => SlidingSyncListRequestGenerator::new_live(self.clone()), + SlidingSyncMode::Selective => { + SlidingSyncListRequestGenerator::new_selective(self.clone()) + } } } } diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index ff05512cf..54f407ed8 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -9,7 +9,7 @@ use super::{Error, SlidingSyncList, SlidingSyncState}; enum GeneratorKind { GrowingFullSync { position: u32, batch_size: u32, limit: Option, live: bool }, PagingFullSync { position: u32, batch_size: u32, limit: Option, live: bool }, - Live, + Selective, } pub(in super::super) struct SlidingSyncListRequestGenerator { @@ -19,7 +19,7 @@ pub(in super::super) struct SlidingSyncListRequestGenerator { } impl SlidingSyncListRequestGenerator { - pub(super) fn new_with_paging_syncup(list: SlidingSyncList) -> Self { + pub(super) fn 
new_with_paging_full_sync(list: SlidingSyncList) -> Self {
         let batch_size = list.batch_size;
         let limit = list.limit;
         let position = list
@@ -37,7 +37,7 @@ impl SlidingSyncListRequestGenerator {
         }
     }
 
-    pub(super) fn new_with_growing_syncup(list: SlidingSyncList) -> Self {
+    pub(super) fn new_with_growing_full_sync(list: SlidingSyncList) -> Self {
         let batch_size = list.batch_size;
         let limit = list.limit;
         let position = list
@@ -55,8 +55,8 @@ impl SlidingSyncListRequestGenerator {
         }
     }
 
-    pub(super) fn new_live(list: SlidingSyncList) -> Self {
-        Self { list, ranges: Default::default(), kind: GeneratorKind::Live }
+    pub(super) fn new_selective(list: SlidingSyncList) -> Self {
+        Self { list, ranges: Default::default(), kind: GeneratorKind::Selective }
     }
 
     fn prefetch_request(
@@ -158,7 +158,7 @@ impl SlidingSyncListRequestGenerator {
             }
         }
 
-        GeneratorKind::Live => {
+        GeneratorKind::Selective => {
             Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
                 *state = SlidingSyncState::Live;
             });
@@ -174,7 +174,7 @@ impl Iterator for SlidingSyncListRequestGenerator {
         match self.kind {
             GeneratorKind::PagingFullSync { live: true, .. }
             | GeneratorKind::GrowingFullSync { live: true, .. }
-            | GeneratorKind::Live => {
+            | GeneratorKind::Selective => {
                 let ranges = self.list.ranges.read().unwrap().clone();
 
                 Some(self.make_request_for_ranges(ranges))

From a2dcfa905fe47880ceddf47f6c7bd4d89f441552 Mon Sep 17 00:00:00 2001
From: Ivan Enderlin
Date: Wed, 15 Mar 2023 16:57:29 +0100
Subject: [PATCH 03/14] fix(sdk): Fix, test, and clean up `SlidingSyncList`.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch should ideally be split into multiple smaller ones, but life
is life. The main purpose of this patch is to fix and to test
`SlidingSyncListRequestGenerator`.

This quest has led me to rename multiple fields in `SlidingSyncList` and
`SlidingSyncListBuilder`, like:

* `rooms_count` becomes `maximum_number_of_rooms`: it's not something
  the _client_ counts, but a maximum number given by the server,
* `batch_size` becomes `full_sync_batch_size`, so that it now
  emphasizes that it's about full-sync only,
* `limit` becomes `full_sync_maximum_number_of_rooms_to_fetch`, so that
  it now also emphasizes that it's about full-sync only _and_ what the
  limit is about!

This quest has continued with the renaming of the `SlidingSyncMode`
variants. After a discussion with the ElementX team, we've agreed on
the following renamings:

* `Cold` becomes `NotLoaded`,
* `Preload` becomes `Preloaded`,
* `CatchingUp` becomes `PartiallyLoaded`,
* `Live` becomes `FullyLoaded`.

Finally, _le plat de résistance_. In `SlidingSyncListRequestGenerator`,
the `make_request_for_ranges` method has been renamed to
`build_request`, and it no longer takes a `&mut self` but a simpler
`&self`! It didn't make sense to me that something that makes/builds a
request was modifying `Self`. Because the type of
`SlidingSyncListRequestGenerator::ranges` has changed, all ranges now
have a consistent type (within this module at least). Consequently,
this method no longer needs to do a type conversion.

Still on the same type, the `update_state` method is much better
documented, and the off-by-one errors on range bounds are now all
fixed. The creation of new ranges happens in a new dedicated pure
function, `create_range`. It returns an `Option` because it may be
impossible to compute a range (previously, invalid ranges were
considered valid). It's used in the `Iterator` implementation.
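For the record, here is a minimal sketch of what `create_range` computes,
simplified for illustration (it uses plain `u32` instead of `ruma::UInt`,
but the bounds stay inclusive, exactly like in the real implementation in
the diff below):

    fn create_range(
        start: u32,
        desired_size: u32,
        maximum_number_of_rooms_to_fetch: Option<u32>,
        maximum_number_of_rooms: Option<u32>,
    ) -> Option<(u32, u32)> {
        // The end is `start + desired_size`, possibly capped by the
        // user-defined limit, and by the maximum number of rooms the
        // server knows about.
        let mut end = start + desired_size;

        if let Some(maximum) = maximum_number_of_rooms_to_fetch {
            end = end.min(maximum);
        }

        if let Some(maximum) = maximum_number_of_rooms {
            end = end.min(maximum);
        }

        // Bounds are inclusive, so subtract 1…
        end = end.saturating_sub(1);

        // … and a reversed range means there is nothing left to request.
        (start <= end).then_some((start, end))
    }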
This `Iterator` implementation contains a liiiittle bit more code, but
at least now we understand what it does, and it's clear which
`range_start` and `desired_size` we calculate. By the way, the
`prefetch_request` method has been removed: it's not a prefetch, it's a
regular request; it was calculating the range. But now there is
`create_range`, and since it's pure, we can unit test it!

_Pour le dessert_, this patch adds multiple tests. This is now possible
because of the previous refactoring. First off, we test `create_range`
in many configurations. It's pretty clear to understand, and since it's
core to `SlidingSyncListRequestGenerator`, I'm pretty happy with how it
ends. Second, we test paging-, growing- and selective-mode with a new
macro: `assert_request_and_response`, which allows us to “send”
requests and to “receive” responses. The design of `SlidingSync` makes
it possible to mimic requests and responses, that's great. We don't
really care about the responses here, but we care about the requests'
`ranges`, and about the `SlidingSyncList.state` after a response is
received. It also helps to see how ranges behave when the state is
`PartiallyLoaded` or `FullyLoaded`.
---
 bindings/matrix-sdk-ffi/src/sliding_sync.rs   |  10 +-
 crates/matrix-sdk/src/sliding_sync/builder.rs |   5 +-
 .../src/sliding_sync/list/builder.rs          |  52 +-
 .../matrix-sdk/src/sliding_sync/list/mod.rs   | 134 ++--
 .../sliding_sync/list/request_generator.rs    | 643 +++++++++++++++---
 crates/matrix-sdk/src/sliding_sync/mod.rs     |  68 +-
 labs/jack-in/src/client/mod.rs                |   7 +-
 labs/jack-in/src/client/state.rs              |   2 +-
 .../sliding-sync-integration-test/src/lib.rs  |  32 +-
 9 files changed, 739 insertions(+), 214 deletions(-)

diff --git a/bindings/matrix-sdk-ffi/src/sliding_sync.rs b/bindings/matrix-sdk-ffi/src/sliding_sync.rs
index 4a70a2695..440066b4c 100644
--- a/bindings/matrix-sdk-ffi/src/sliding_sync.rs
+++ b/bindings/matrix-sdk-ffi/src/sliding_sync.rs
@@ -448,19 +448,19 @@ impl SlidingSyncListBuilder {
     pub fn batch_size(self: Arc<Self>, batch_size: u32) -> Arc<Self> {
         let mut builder = unwrap_or_clone_arc(self);
-        builder.inner = builder.inner.batch_size(batch_size);
+        builder.inner = builder.inner.full_sync_batch_size(batch_size);
         Arc::new(builder)
     }
 
     pub fn room_limit(self: Arc<Self>, limit: u32) -> Arc<Self> {
         let mut builder = unwrap_or_clone_arc(self);
-        builder.inner = builder.inner.limit(limit);
+        builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(limit);
         Arc::new(builder)
     }
 
     pub fn no_room_limit(self: Arc<Self>) -> Arc<Self> {
         let mut builder = unwrap_or_clone_arc(self);
-        builder.inner = builder.inner.limit(None);
+        builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(None);
         Arc::new(builder)
     }
 
@@ -556,7 +556,7 @@ impl SlidingSyncList {
         &self,
         observer: Box,
     ) -> Arc<TaskHandle> {
-        let mut rooms_count_stream = self.inner.rooms_count_stream();
+        let mut rooms_count_stream = self.inner.maximum_number_of_rooms_stream();
 
         Arc::new(TaskHandle::new(RUNTIME.spawn(async move {
             loop {
@@ -598,7 +598,7 @@ impl SlidingSyncList {
 
     /// Total of rooms matching the filter
     pub fn current_room_count(&self) -> Option<u32> {
-        self.inner.rooms_count()
+        self.inner.maximum_number_of_rooms()
     }
 
     /// The current timeline limit
diff --git a/crates/matrix-sdk/src/sliding_sync/builder.rs b/crates/matrix-sdk/src/sliding_sync/builder.rs
index 114787487..027c1f98c 100644
--- a/crates/matrix-sdk/src/sliding_sync/builder.rs
+++ b/crates/matrix-sdk/src/sliding_sync/builder.rs
@@ -248,8 +248,9 @@ impl SlidingSyncBuilder {
             {
                 trace!(name, "frozen for list found");
 
-                let FrozenSlidingSyncList
{ rooms_count, rooms_list, rooms } = frozen_list; - list.set_from_cold(rooms_count, rooms_list); + let FrozenSlidingSyncList { maximum_number_of_rooms, rooms_list, rooms } = + frozen_list; + list.set_from_cold(maximum_number_of_rooms, rooms_list); for (key, frozen_room) in rooms.into_iter() { rooms_found.entry(key).or_insert_with(|| { diff --git a/crates/matrix-sdk/src/sliding_sync/list/builder.rs b/crates/matrix-sdk/src/sliding_sync/list/builder.rs index 772e4d6fc..42e629e24 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/builder.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/builder.rs @@ -22,14 +22,13 @@ pub struct SlidingSyncListBuilder { sync_mode: SlidingSyncMode, sort: Vec, required_state: Vec<(StateEventType, String)>, - batch_size: u32, + full_sync_batch_size: u32, + full_sync_maximum_number_of_rooms_to_fetch: Option, send_updates_for_items: bool, - limit: Option, filters: Option, timeline_limit: Option, name: Option, state: SlidingSyncState, - rooms_count: Option, rooms_list: Vector, ranges: Vec<(UInt, UInt)>, } @@ -43,14 +42,13 @@ impl SlidingSyncListBuilder { (StateEventType::RoomEncryption, "".to_owned()), (StateEventType::RoomTombstone, "".to_owned()), ], - batch_size: 20, + full_sync_batch_size: 20, + full_sync_maximum_number_of_rooms_to_fetch: None, send_updates_for_items: false, - limit: None, filters: None, timeline_limit: None, name: None, state: SlidingSyncState::default(), - rooms_count: None, rooms_list: Vector::new(), ranges: Vec::new(), } @@ -79,9 +77,20 @@ impl SlidingSyncListBuilder { self } - /// How many rooms request at a time when doing a full-sync catch up. - pub fn batch_size(mut self, value: u32) -> Self { - self.batch_size = value; + /// When doing a full-sync, this method defines the value by which ranges of + /// rooms will be extended. + pub fn full_sync_batch_size(mut self, value: u32) -> Self { + self.full_sync_batch_size = value; + self + } + + /// When doing a full-sync, this method defines the total limit of rooms to + /// load (it can be useful for gigantic account). + pub fn full_sync_maximum_number_of_rooms_to_fetch( + mut self, + value: impl Into>, + ) -> Self { + self.full_sync_maximum_number_of_rooms_to_fetch = value.into(); self } @@ -92,12 +101,6 @@ impl SlidingSyncListBuilder { self } - /// How many rooms request a total hen doing a full-sync catch up. - pub fn limit(mut self, value: impl Into>) -> Self { - self.limit = value.into(); - self - } - /// Any filters to apply to the query. pub fn filters(mut self, value: Option) -> Self { self.filters = value; @@ -123,31 +126,31 @@ impl SlidingSyncListBuilder { self } - /// Set the ranges to fetch + /// Set the ranges to fetch. pub fn ranges>(mut self, range: Vec<(U, U)>) -> Self { self.ranges = range.into_iter().map(|(a, b)| (a.into(), b.into())).collect(); self } - /// Set a single range fetch + /// Set a single range fetch. pub fn set_range>(mut self, from: U, to: U) -> Self { self.ranges = vec![(from.into(), to.into())]; self } - /// Set the ranges to fetch + /// Set the ranges to fetch. pub fn add_range>(mut self, from: U, to: U) -> Self { self.ranges.push((from.into(), to.into())); self } - /// Set the ranges to fetch + /// Set the ranges to fetch. pub fn reset_ranges(mut self) -> Self { - self.ranges = Default::default(); + self.ranges.clear(); self } - /// Build the list + /// Build the list. 
pub fn build(self) -> Result { let mut rooms_list = ObservableVector::new(); rooms_list.append(self.rooms_list); @@ -156,14 +159,15 @@ impl SlidingSyncListBuilder { sync_mode: self.sync_mode, sort: self.sort, required_state: self.required_state, - batch_size: self.batch_size, + full_sync_batch_size: self.full_sync_batch_size, send_updates_for_items: self.send_updates_for_items, - limit: self.limit, + full_sync_maximum_number_of_rooms_to_fetch: self + .full_sync_maximum_number_of_rooms_to_fetch, filters: self.filters, timeline_limit: Arc::new(StdRwLock::new(Observable::new(self.timeline_limit))), name: self.name.ok_or(Error::BuildMissingField("name"))?, state: Arc::new(StdRwLock::new(Observable::new(self.state))), - rooms_count: Arc::new(StdRwLock::new(Observable::new(self.rooms_count))), + maximum_number_of_rooms: Arc::new(StdRwLock::new(Observable::new(None))), rooms_list: Arc::new(StdRwLock::new(rooms_list)), ranges: Arc::new(StdRwLock::new(Observable::new(self.ranges))), is_cold: Arc::new(AtomicBool::new(false)), diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index b0ce9344d..8bd338f6b 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -51,17 +51,19 @@ pub struct SlidingSyncList { /// Required states to return per room required_state: Vec<(StateEventType, String)>, - /// How many rooms request at a time when doing a full-sync catch up - batch_size: u32, + /// When doing a full-sync, the ranges of rooms to load are extended by this + /// `full_sync_batch_size` size. + full_sync_batch_size: u32, + + /// When doing a full-sync, it is possible to limit the total number of + /// rooms to load by using this field. + full_sync_maximum_number_of_rooms_to_fetch: Option, /// Whether the list should send `UpdatedAt`-Diff signals for rooms - /// that have changed + /// that have changed. send_updates_for_items: bool, - /// How many rooms request a total hen doing a full-sync catch up - limit: Option, - - /// Any filters to apply to the query + /// Any filters to apply to the query. filters: Option, /// The maximum number of timeline events to query for @@ -70,16 +72,20 @@ pub struct SlidingSyncList { /// Name of this list to easily recognize them pub name: String, - /// The state this list is in + /// The state this list is in. state: Arc>>, - /// The total known number of rooms, - rooms_count: Arc>>>, + /// The total number of rooms that is possible to interact with for the + /// given list. It's not the total rooms that have been fetched. The + /// server tells the client that it's possible to fetch this amount of + /// rooms maximum. Since this number can change according to the list + /// filters, it's observable. + maximum_number_of_rooms: Arc>>>, - /// The rooms in order + /// The rooms in order. rooms_list: Arc>>, - /// The ranges windows of the list + /// The ranges windows of the list. 
#[allow(clippy::type_complexity)] // temporarily ranges: Arc>>>, @@ -95,12 +101,15 @@ pub struct SlidingSyncList { impl SlidingSyncList { pub(crate) fn set_from_cold( &mut self, - rooms_count: Option, + maximum_number_of_rooms: Option, rooms_list: Vector, ) { - Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preload); + Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preloaded); self.is_cold.store(true, Ordering::SeqCst); - Observable::set(&mut self.rooms_count.write().unwrap(), rooms_count); + Observable::set( + &mut self.maximum_number_of_rooms.write().unwrap(), + maximum_number_of_rooms, + ); let mut lock = self.rooms_list.write().unwrap(); lock.clear(); @@ -119,7 +128,7 @@ impl SlidingSyncList { .sync_mode(self.sync_mode.clone()) .sort(self.sort.clone()) .required_state(self.required_state.clone()) - .batch_size(self.batch_size) + .full_sync_batch_size(self.full_sync_batch_size) .ranges(self.ranges.read().unwrap().clone()) } @@ -195,14 +204,15 @@ impl SlidingSyncList { ObservableVector::subscribe(&self.rooms_list.read().unwrap()) } - /// Get the current rooms count. - pub fn rooms_count(&self) -> Option { - **self.rooms_count.read().unwrap() + /// Get the maximum number of rooms. See [`Self::maximum_number_of_rooms`] + /// to learn more. + pub fn maximum_number_of_rooms(&self) -> Option { + **self.maximum_number_of_rooms.read().unwrap() } /// Get a stream of rooms count. - pub fn rooms_count_stream(&self) -> impl Stream> { - Observable::subscribe(&self.rooms_count.read().unwrap()) + pub fn maximum_number_of_rooms_stream(&self) -> impl Stream> { + Observable::subscribe(&self.maximum_number_of_rooms.read().unwrap()) } /// Find the current valid position of the room in the list `room_list`. @@ -283,26 +293,32 @@ impl SlidingSyncList { #[instrument(skip(self, ops), fields(name = self.name, ops_count = ops.len()))] pub(super) fn handle_response( &self, - rooms_count: u32, + maximum_number_of_rooms: u32, ops: &Vec, - ranges: &Vec<(usize, usize)>, - rooms: &Vec, + ranges: &Vec<(UInt, UInt)>, + updated_rooms: &Vec, ) -> Result { - let current_rooms_count = **self.rooms_count.read().unwrap(); + let ranges = ranges + .iter() + .map(|(start, end)| ((*start).try_into().unwrap(), (*end).try_into().unwrap())) + .collect::>(); - if current_rooms_count.is_none() - || current_rooms_count == Some(0) + let current_maximum_number_of_rooms = **self.maximum_number_of_rooms.read().unwrap(); + + if current_maximum_number_of_rooms.is_none() + || current_maximum_number_of_rooms == Some(0) || self.is_cold.load(Ordering::SeqCst) { debug!("first run, replacing rooms list"); // first response, we do that slightly differently let mut rooms_list = ObservableVector::new(); - rooms_list - .append(iter::repeat(RoomListEntry::Empty).take(rooms_count as usize).collect()); + rooms_list.append( + iter::repeat(RoomListEntry::Empty).take(maximum_number_of_rooms as usize).collect(), + ); // then we apply it - room_ops(&mut rooms_list, ops, ranges)?; + room_ops(&mut rooms_list, ops, &ranges)?; { let mut lock = self.rooms_list.write().unwrap(); @@ -310,7 +326,10 @@ impl SlidingSyncList { lock.append(rooms_list.into_inner()); } - Observable::set(&mut self.rooms_count.write().unwrap(), Some(rooms_count)); + Observable::set( + &mut self.maximum_number_of_rooms.write().unwrap(), + Some(maximum_number_of_rooms), + ); self.is_cold.store(false, Ordering::SeqCst); return Ok(true); @@ -318,7 +337,7 @@ impl SlidingSyncList { debug!("regular update"); - let mut missing = rooms_count + let mut missing 
= maximum_number_of_rooms
             .checked_sub(self.rooms_list.read().unwrap().len() as u32)
             .unwrap_or_default();
         let mut changed = false;
@@ -339,7 +358,7 @@ impl SlidingSyncList {
             let mut rooms_list = self.rooms_list.write().unwrap();
 
             if !ops.is_empty() {
-                room_ops(&mut rooms_list, ops, ranges)?;
+                room_ops(&mut rooms_list, ops, &ranges)?;
                 changed = true;
             } else {
                 debug!("no rooms operations found");
             }
         }
 
         {
-            let mut lock = self.rooms_count.write().unwrap();
+            let mut lock = self.maximum_number_of_rooms.write().unwrap();
 
-            if **lock != Some(rooms_count) {
-                Observable::set(&mut lock, Some(rooms_count));
+            if **lock != Some(maximum_number_of_rooms) {
+                Observable::set(&mut lock, Some(maximum_number_of_rooms));
                 changed = true;
             }
         }
 
-        if self.send_updates_for_items && !rooms.is_empty() {
-            let found_lists = self.find_rooms_in_list(rooms);
+        if self.send_updates_for_items && !updated_rooms.is_empty() {
+            let found_lists = self.find_rooms_in_list(updated_rooms);
 
             if !found_lists.is_empty() {
                 debug!("room details found");
@@ -396,8 +415,8 @@ impl SlidingSyncList {
 
 #[derive(Serialize, Deserialize)]
 pub(super) struct FrozenSlidingSyncList {
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub(super) rooms_count: Option<u32>,
+    #[serde(default, rename = "rooms_count", skip_serializing_if = "Option::is_none")]
+    pub(super) maximum_number_of_rooms: Option<u32>,
     #[serde(default, skip_serializing_if = "Vector::is_empty")]
    pub(super) rooms_list: Vector<RoomListEntry>,
     #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
@@ -428,7 +447,7 @@ impl FrozenSlidingSyncList {
         }
 
         FrozenSlidingSyncList {
-            rooms_count: **source_list.rooms_count.read().unwrap(),
+            maximum_number_of_rooms: **source_list.maximum_number_of_rooms.read().unwrap(),
             rooms_list,
             rooms,
         }
@@ -601,24 +620,31 @@ fn room_ops(
 
 /// The state the [`SlidingSyncList`] is in.
 ///
-/// The lifetime of a SlidingSync usually starts at a `Preload`, getting a fast
-/// response for the first given number of Rooms, then switches into
-/// `CatchingUp` during which the list fetches the remaining rooms, usually in
-/// order, some times in batches. Once that is ready, it switches into `Live`.
+/// The lifetime of a `SlidingSync` usually starts at `NotLoaded`, getting a
+/// fast response for the first given number of rooms, then switches into
+/// `PartiallyLoaded` during which the list fetches the remaining rooms, usually
+/// in order, sometimes in batches. Once that is ready, it switches into
+/// `FullyLoaded`.
 ///
 /// If the client has been offline for a while, though, the SlidingSync might
-/// return back to `CatchingUp` at any point.
+/// return back to `PartiallyLoaded` at any point.
 #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub enum SlidingSyncState {
-    /// Hasn't started yet
+    /// Sliding Sync has not started to load anything yet.
     #[default]
-    Cold,
-    /// We are quickly preloading a preview of the most important rooms
-    Preload,
-    /// We are trying to load all remaining rooms, might be in batches
-    CatchingUp,
-    /// We are all caught up and now only sync the live responses.
-    Live,
+    #[serde(rename = "Cold")]
+    NotLoaded,
+    /// Sliding Sync has been preloaded, i.e. restored from a cache for example.
+    #[serde(rename = "Preload")]
+    Preloaded,
+    /// Updates are received from the loaded rooms, and new rooms are being
+    /// fetched in the background.
+    #[serde(rename = "CatchingUp")]
+    PartiallyLoaded,
+    /// Updates are received for all the loaded rooms, and all rooms have been
+    /// loaded!
+    #[serde(rename = "Live")]
+    FullyLoaded,
 }
 
 /// The mode by which the [`SlidingSyncList`] fetches the data.
diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs
index 54f407ed8..f3c1cbd3f 100644
--- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs
+++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs
@@ -1,100 +1,157 @@
+//! The logic to generate Sliding Sync list requests.
+//!
+//! Depending on the [`SlidingSyncMode`], the generated requests aren't the
+//! same.
+//!
+//! In [`SlidingSyncMode::Selective`], it's pretty straightforward:
+//!
+//! * There is a set of ranges,
+//! * Each request asks to load the particular ranges.
+//!
+//! In [`SlidingSyncMode::PagingFullSync`]:
+//!
+//! * There is a `batch_size`,
+//! * Each request asks to load a new successive range containing exactly
+//!   `batch_size` rooms.
+//!
+//! In [`SlidingSyncMode::GrowingFullSync`]:
+//!
+//! * There is a `batch_size`,
+//! * Each request asks to load a new range, always starting from 0, but where
+//!   the end is incremented by `batch_size` every time.
+//!
+//! The number of rooms to load is capped by the
+//! [`SlidingSyncList::maximum_number_of_rooms`], i.e. the real number of
+//! rooms it is possible to load. This value comes from the server.
+//!
+//! The number of rooms to load can _also_ be capped by the
+//! [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`], i.e. a
+//! user-specified limit representing the maximum number of rooms the user
+//! actually wants to load.
 
 use std::cmp::min;
 
 use eyeball::unique::Observable;
 use ruma::{api::client::sync::sync_events::v4, assign, OwnedRoomId, UInt};
-use tracing::{error, instrument, trace};
+use tracing::{error, instrument};
 
 use super::{Error, SlidingSyncList, SlidingSyncState};
 
+/// The kind of request generator.
+#[derive(Debug)]
 enum GeneratorKind {
-    GrowingFullSync { position: u32, batch_size: u32, limit: Option<u32>, live: bool },
-    PagingFullSync { position: u32, batch_size: u32, limit: Option<u32>, live: bool },
+    // Growing-mode (see [`SlidingSyncMode`]).
+    GrowingFullSync {
+        // Number of fetched rooms.
+        number_of_fetched_rooms: u32,
+        // Size of the batch, used to grow the range to fetch more rooms.
+        batch_size: u32,
+        // Maximum number of rooms to fetch (see
+        // [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]).
+        maximum_number_of_rooms_to_fetch: Option<u32>,
+        // Whether all rooms have been loaded.
+        fully_loaded: bool,
+    },
+
+    // Paging-mode (see [`SlidingSyncMode`]).
+    PagingFullSync {
+        // Number of fetched rooms.
+        number_of_fetched_rooms: u32,
+        // Size of the batch, used to grow the range to fetch more rooms.
+        batch_size: u32,
+        // Maximum number of rooms to fetch (see
+        // [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]).
+        maximum_number_of_rooms_to_fetch: Option<u32>,
+        // Whether all rooms have been loaded.
+        fully_loaded: bool,
+    },
+
+    // Selective-mode (see [`SlidingSyncMode`]).
     Selective,
 }
 
+/// A request generator for [`SlidingSyncList`].
+#[derive(Debug)]
 pub(in super::super) struct SlidingSyncListRequestGenerator {
+    /// The parent [`SlidingSyncList`] object that has created this request
+    /// generator.
     list: SlidingSyncList,
-    ranges: Vec<(usize, usize)>,
+    /// The current ranges used by this request generator.
+    ranges: Vec<(UInt, UInt)>,
+    /// The kind of request generator.
kind: GeneratorKind,
 }
 
 impl SlidingSyncListRequestGenerator {
+    /// Create a new request generator configured for paging-mode.
     pub(super) fn new_with_paging_full_sync(list: SlidingSyncList) -> Self {
-        let batch_size = list.batch_size;
-        let limit = list.limit;
-        let position = list
+        let batch_size = list.full_sync_batch_size;
+        let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch;
+        // If a range exists, let's assume it has been used to load existing rooms. So
+        // let's start from the end of the range. It can be useful when we resume a sync
+        // for example. Otherwise, let's use the default value.
+        let number_of_fetched_rooms = list
             .ranges
             .read()
             .unwrap()
             .first()
-            .map(|(_start, end)| u32::try_from(*end).unwrap())
+            .map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1))
             .unwrap_or_default();
 
         Self {
             list,
-            ranges: Default::default(),
-            kind: GeneratorKind::PagingFullSync { position, batch_size, limit, live: false },
+            ranges: Vec::new(),
+            kind: GeneratorKind::PagingFullSync {
+                number_of_fetched_rooms,
+                batch_size,
+                maximum_number_of_rooms_to_fetch,
+                fully_loaded: false,
+            },
         }
     }
 
+    /// Create a new request generator configured for growing-mode.
     pub(super) fn new_with_growing_full_sync(list: SlidingSyncList) -> Self {
-        let batch_size = list.batch_size;
-        let limit = list.limit;
-        let position = list
+        let batch_size = list.full_sync_batch_size;
+        let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch;
+        // If a range exists, let's assume it has been used to load existing rooms. So
+        // let's start from the end of the range. It can be useful when we resume a sync
+        // for example. Otherwise, let's use the default value.
+        let number_of_fetched_rooms = list
             .ranges
             .read()
             .unwrap()
             .first()
-            .map(|(_start, end)| u32::try_from(*end).unwrap())
+            .map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1))
             .unwrap_or_default();
 
         Self {
             list,
-            ranges: Default::default(),
-            kind: GeneratorKind::GrowingFullSync { position, batch_size, limit, live: false },
+            ranges: Vec::new(),
+            kind: GeneratorKind::GrowingFullSync {
+                number_of_fetched_rooms,
+                batch_size,
+                maximum_number_of_rooms_to_fetch,
+                fully_loaded: false,
+            },
         }
     }
 
+    /// Create a new request generator configured for selective-mode.
     pub(super) fn new_selective(list: SlidingSyncList) -> Self {
-        Self { list, ranges: Default::default(), kind: GeneratorKind::Selective }
+        Self { list, ranges: Vec::new(), kind: GeneratorKind::Selective }
     }
 
-    fn prefetch_request(
-        &mut self,
-        start: u32,
-        batch_size: u32,
-        limit: Option<u32>,
-    ) -> v4::SyncRequestList {
-        let maximum_end = start + batch_size;
-
-        let mut end = limit.map(|limit| min(limit, maximum_end)).unwrap_or(maximum_end);
-
-        if let Some(rooms_count) = self.list.rooms_count() {
-            end = min(end, rooms_count - 1);
-        }
-
-        self.make_request_for_ranges(vec![(start.into(), end.into())])
-    }
-
-    #[instrument(skip(self), fields(name = self.list.name))]
-    fn make_request_for_ranges(&mut self, ranges: Vec<(UInt, UInt)>) -> v4::SyncRequestList {
+    /// Build a [`SyncRequestList`][v4::SyncRequestList].
+ #[instrument(skip(self), fields(name = self.list.name, ranges = ?&self.ranges))] + fn build_request(&self) -> v4::SyncRequestList { let sort = self.list.sort.clone(); let required_state = self.list.required_state.clone(); let timeline_limit = **self.list.timeline_limit.read().unwrap(); let filters = self.list.filters.clone(); - self.ranges = ranges - .iter() - .map(|(a, b)| { - ( - usize::try_from(*a).expect("range is a valid u32"), - usize::try_from(*b).expect("range is a valid u32"), - ) - }) - .collect(); - assign!(v4::SyncRequestList::default(), { - ranges: ranges, + ranges: self.ranges.clone(), room_details: assign!(v4::RoomDetailsConfig::default(), { required_state, timeline_limit, @@ -104,67 +161,151 @@ impl SlidingSyncListRequestGenerator { }) } + // Handle the response from the server. #[instrument(skip_all, fields(name = self.list.name, rooms_count, has_ops = !ops.is_empty()))] pub(in super::super) fn handle_response( &mut self, - rooms_count: u32, + maximum_number_of_rooms: u32, ops: &Vec, - rooms: &Vec, + updated_rooms: &Vec, ) -> Result { - let response = self.list.handle_response(rooms_count, ops, &self.ranges, rooms)?; - self.update_state(rooms_count.saturating_sub(1)); // index is 0 based, count is 1 based + let response = + self.list.handle_response(maximum_number_of_rooms, ops, &self.ranges, updated_rooms)?; + + self.update_state(maximum_number_of_rooms); Ok(response) } - fn update_state(&mut self, max_index: u32) { - let Some((_start, range_end)) = self.ranges.first() else { - error!("Why don't we have any ranges?"); + /// Update the state of the generator. + fn update_state(&mut self, maximum_number_of_rooms: u32) { + let Some(range_end) = self.ranges.first().map(|(_start, end)| u32::try_from(*end).unwrap()) else { + error!(name = self.list.name, "The request generator must have a range."); return; }; - let end = if &(max_index as usize) < range_end { max_index } else { *range_end as u32 }; - - trace!(end, max_index, range_end, name = self.list.name, "updating state"); - match &mut self.kind { - GeneratorKind::PagingFullSync { position, live, limit, .. } - | GeneratorKind::GrowingFullSync { position, live, limit, .. } => { - let max = limit.map(|limit| min(limit, max_index)).unwrap_or(max_index); + GeneratorKind::PagingFullSync { + number_of_fetched_rooms, + fully_loaded, + maximum_number_of_rooms_to_fetch, + .. + } + | GeneratorKind::GrowingFullSync { + number_of_fetched_rooms, + fully_loaded, + maximum_number_of_rooms_to_fetch, + .. + } => { + // Calculate the maximum bound for the range. + // At this step, the server has given us a maximum number of rooms for this + // list. That's our `range_maximum`. + let mut range_maximum = maximum_number_of_rooms; - trace!(end, max, name = self.list.name, "updating state"); + // But maybe the user has defined a maximum number of rooms to fetch? In this + // case, let's take the minimum of the two. + if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch { + range_maximum = min(range_maximum, *maximum_number_of_rooms_to_fetch); + } - if end >= max { - // Switching to live mode. + // Finally, ranges are inclusive! + range_maximum = range_maximum.saturating_sub(1); - trace!(name = self.list.name, "going live"); + // Now, we know what the maximum bound for the range is. - self.list.set_range(0, max); - *position = max; - *live = true; + // The current range hasn't reached its maximum, let's continue. + if range_end < range_maximum { + // Update the _list range_ to cover from 0 to `range_end`. 
+                // The list range is different from the request generator (this) range.
+                self.list.set_range(0, range_end);
 
+                // Update the number of fetched rooms forward. Do not forget that ranges are
+                // inclusive, so let's add 1.
+                *number_of_fetched_rooms = range_end.saturating_add(1);
+
+                // The list is still not fully loaded.
+                *fully_loaded = false;
+
+                // Finally, let's update the list's state.
                 Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
-                    *state = SlidingSyncState::Live;
+                    *state = SlidingSyncState::PartiallyLoaded;
                 });
-            } else {
-                *position = end;
-                *live = false;
-                self.list.set_range(0, end);
+            }
+            // Otherwise, the current range has reached its maximum: the list is now
+            // fully loaded.
+            else {
+                // The range is covering the entire list, from 0 to its maximum.
+                self.list.set_range(0, range_maximum);
 
+                // The number of fetched rooms is set to the maximum too.
+                *number_of_fetched_rooms = range_maximum;
+
+                // And we update the `fully_loaded` marker.
+                *fully_loaded = true;
+
+                // Finally, let's update the list's state.
                 Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
-                    *state = SlidingSyncState::CatchingUp;
+                    *state = SlidingSyncState::FullyLoaded;
                 });
             }
         }
 
         GeneratorKind::Selective => {
+            // Selective mode always loads everything.
             Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
-                *state = SlidingSyncState::Live;
+                *state = SlidingSyncState::FullyLoaded;
             });
         }
     }
 }
+
+    #[cfg(test)]
+    fn is_fully_loaded(&self) -> bool {
+        match self.kind {
+            GeneratorKind::PagingFullSync { fully_loaded, .. }
+            | GeneratorKind::GrowingFullSync { fully_loaded, .. } => fully_loaded,
+            GeneratorKind::Selective => true,
+        }
+    }
+}
+
+fn create_range(
+    start: u32,
+    desired_size: u32,
+    maximum_number_of_rooms_to_fetch: Option<u32>,
+    maximum_number_of_rooms: Option<u32>,
+) -> Option<(UInt, UInt)> {
+    // Calculate the range.
+    // The `start` bound is given. Let's calculate the `end` bound.
+
+    // The `end`, by default, is `start` + `desired_size`.
+    let mut end = start + desired_size;
+
+    // But maybe the user has defined a maximum number of rooms to fetch? In this
+    // case, take the minimum of the two.
+    if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch {
+        end = min(end, maximum_number_of_rooms_to_fetch);
+    }
+
+    // But there is more! The server can tell us the maximum number of rooms
+    // fulfilling a particular list. For example, if the server says there are 42
+    // rooms for a particular list, with a `start` of 40 and a `batch_size` of 20,
+    // the range must be capped to `[40; 41]`; the range `[40; 59]` would be invalid
+    // and could be rejected by the server.
+    if let Some(maximum_number_of_rooms) = maximum_number_of_rooms {
+        end = min(end, maximum_number_of_rooms);
+    }
+
+    // Finally, because the bounds of the range are inclusive, 1 is subtracted.
+    end = end.saturating_sub(1);
+
+    // Make sure `start` is not greater than `end`. It can happen if `start` is
+    // greater than `maximum_number_of_rooms_to_fetch` or `maximum_number_of_rooms`.
+    if start > end {
+        return None;
+    }
+
+    Some((start.into(), end.into()))
+}
 
 impl Iterator for SlidingSyncListRequestGenerator {
@@ -172,21 +313,357 @@ impl Iterator for SlidingSyncListRequestGenerator {
     fn next(&mut self) -> Option<Self::Item> {
         match self.kind {
-            GeneratorKind::PagingFullSync { live: true, .. }
-            | GeneratorKind::GrowingFullSync { live: true, .. }
+            // Cases where all rooms have been fully loaded.
+            GeneratorKind::PagingFullSync { fully_loaded: true, ..
} + | GeneratorKind::GrowingFullSync { fully_loaded: true, .. } | GeneratorKind::Selective => { - let ranges = self.list.ranges.read().unwrap().clone(); + // Let's copy all the ranges from the parent `SlidingSyncList`, and build a + // request for them. + self.ranges = self.list.ranges.read().unwrap().clone(); - Some(self.make_request_for_ranges(ranges)) + // Here we go. + Some(self.build_request()) } - GeneratorKind::PagingFullSync { position, batch_size, limit, .. } => { - Some(self.prefetch_request(position, batch_size, limit)) + GeneratorKind::PagingFullSync { + number_of_fetched_rooms, + batch_size, + maximum_number_of_rooms_to_fetch, + .. + } => { + // In paging-mode, range starts at the number of fetched rooms. Since ranges are + // inclusive, and since the number of fetched rooms starts at 1, + // not at 0, there is no need to add 1 here. + let range_start = number_of_fetched_rooms; + let range_desired_size = batch_size; + + // Create a new range, and use it as the current set of ranges. + self.ranges = vec![create_range( + range_start, + range_desired_size, + maximum_number_of_rooms_to_fetch, + self.list.maximum_number_of_rooms(), + )?]; + + // Here we go. + Some(self.build_request()) } - GeneratorKind::GrowingFullSync { position, batch_size, limit, .. } => { - Some(self.prefetch_request(0, position + batch_size, limit)) + GeneratorKind::GrowingFullSync { + number_of_fetched_rooms, + batch_size, + maximum_number_of_rooms_to_fetch, + .. + } => { + // In growing-mode, range always starts from 0. However, the end is growing by + // adding `batch_size` to the previous number of fetched rooms. + let range_start = 0; + let range_desired_size = number_of_fetched_rooms.saturating_add(batch_size); + + self.ranges = vec![create_range( + range_start, + range_desired_size, + maximum_number_of_rooms_to_fetch, + self.list.maximum_number_of_rooms(), + )?]; + + // Here we go. + Some(self.build_request()) } } } } + +#[cfg(test)] +mod tests { + use ruma::uint; + + use super::*; + + #[test] + fn test_create_range_from() { + // From 0, we want 100 items. + assert_eq!(create_range(0, 100, None, None), Some((uint!(0), uint!(99)))); + + // From 100, we want 100 items. + assert_eq!(create_range(100, 100, None, None), Some((uint!(100), uint!(199)))); + + // From 0, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50. + assert_eq!(create_range(0, 100, Some(50), None), Some((uint!(0), uint!(49)))); + + // From 49, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50. There is 1 item to load. + assert_eq!(create_range(49, 100, Some(50), None), Some((uint!(49), uint!(49)))); + + // From 50, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50. + assert_eq!(create_range(50, 100, Some(50), None), None); + + // From 0, we want 100 items, but there is a maximum number of rooms defined at + // 50. + assert_eq!(create_range(0, 100, None, Some(50)), Some((uint!(0), uint!(49)))); + + // From 49, we want 100 items, but there is a maximum number of rooms defined at + // 50. There is 1 item to load. + assert_eq!(create_range(49, 100, None, Some(50)), Some((uint!(49), uint!(49)))); + + // From 50, we want 100 items, but there is a maximum number of rooms defined at + // 50. + assert_eq!(create_range(50, 100, None, Some(50)), None); + + // From 0, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 75, and a maximum number of rooms defined at 50. 
+ assert_eq!(create_range(0, 100, Some(75), Some(50)), Some((uint!(0), uint!(49)))); + + // From 0, we want 100 items, but there is a maximum number of rooms to fetch + // defined at 50, and a maximum number of rooms defined at 75. + assert_eq!(create_range(0, 100, Some(50), Some(75)), Some((uint!(0), uint!(49)))); + } + + macro_rules! assert_request_and_response { + ( + list = $list:ident, + generator = $generator:ident, + maximum_number_of_rooms = $maximum_number_of_rooms:expr, + $( + next => { + ranges = $( [ $range_start:literal ; $range_end:literal ] ),+ , + is_fully_loaded = $is_fully_loaded:expr, + list_state = $list_state:ident, + } + ),* + $(,)* + ) => { + // That's the initial state. + assert_eq!($list.state(), SlidingSyncState::NotLoaded); + + $( + { + let request = $generator.next().unwrap(); + + assert_eq!(request.ranges, [ $( (uint!( $range_start ), uint!( $range_end )) ),* ]); + + // Fake a response. + let _ = $generator.handle_response($maximum_number_of_rooms, &vec![], &vec![]); + + // Now, Sliding Sync has started to load rooms. + assert_eq!($generator.is_fully_loaded(), $is_fully_loaded); + assert_eq!($list.state(), SlidingSyncState::$list_state); + } + )* + }; + } + + #[test] + fn test_generator_paging_full_sync() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::PagingFullSync) + .name("testing") + .full_sync_batch_size(10) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [10; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms is reached! + next => { + ranges = [20; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 24], // the range starts at 0 now! + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_paging_full_sync_with_a_maximum_number_of_rooms_to_fetch() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::PagingFullSync) + .name("testing") + .full_sync_batch_size(10) + .full_sync_maximum_number_of_rooms_to_fetch(22) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [10; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms to fetch is reached! + next => { + ranges = [20; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 21], // the range starts at 0 now! 
+ is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_growing_full_sync() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::GrowingFullSync) + .name("testing") + .full_sync_batch_size(10) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [0; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms is reached! + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 24], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_growing_full_sync_with_a_maximum_number_of_rooms_to_fetch() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::GrowingFullSync) + .name("testing") + .full_sync_batch_size(10) + .full_sync_maximum_number_of_rooms_to_fetch(22) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + next => { + ranges = [0; 9], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + next => { + ranges = [0; 19], + is_fully_loaded = false, + list_state = PartiallyLoaded, + }, + // The maximum number of rooms is reached! + next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 21], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + }; + } + + #[test] + fn test_generator_selective() { + let list = SlidingSyncList::builder() + .sync_mode(crate::SlidingSyncMode::Selective) + .name("testing") + .ranges(vec![(0u32, 10), (42, 153)]) + .build() + .unwrap(); + let mut generator = list.request_generator(); + + assert_request_and_response! { + list = list, + generator = generator, + maximum_number_of_rooms = 25, + // The maximum number of rooms is reached directly! + next => { + ranges = [0; 10], [42; 153], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + // Now it's fully loaded, so the same request must be produced everytime. + next => { + ranges = [0; 10], [42; 153], + is_fully_loaded = true, + list_state = FullyLoaded, + }, + next => { + ranges = [0; 10], [42; 153], + is_fully_loaded = true, + list_state = FullyLoaded, + } + }; + } +} diff --git a/crates/matrix-sdk/src/sliding_sync/mod.rs b/crates/matrix-sdk/src/sliding_sync/mod.rs index 6de238261..73bbbf04e 100644 --- a/crates/matrix-sdk/src/sliding_sync/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/mod.rs @@ -110,15 +110,15 @@ //! copy can be retrieved by calling `SlidingSync::list()`, providing the name //! of the list. Next to the configuration settings (like name and //! `timeline_limit`), the list provides the stateful -//! [`rooms_count`](SlidingSyncList::rooms_count), +//! 
[`maximum_number_of_rooms`](SlidingSyncList::maximum_number_of_rooms), //! [`rooms_list`](SlidingSyncList::rooms_list) and //! [`state`](SlidingSyncList::state): //! -//! - `rooms_count` is the number of rooms _total_ there were found matching -//! the filters given. -//! - `rooms_list` is a vector of `rooms_count` [`RoomListEntry`]'s at the -//! current state. `RoomListEntry`'s only hold `the room_id` if given, the -//! [Rooms API](#rooms) holds the actual information about each room +//! - `maximum_number_of_rooms` is the number of rooms _total_ there were found +//! matching the filters given. +//! - `rooms_list` is a vector of `maximum_number_of_rooms` [`RoomListEntry`]'s +//! at the current state. `RoomListEntry`'s only hold `the room_id` if given, +//! the [Rooms API](#rooms) holds the actual information about each room //! - `state` is a [`SlidingSyncMode`] signalling meta information about the //! list and its stateful data — whether this is the state loaded from local //! cache, whether the [full sync](#helper-lists) is in progress or whether @@ -171,11 +171,11 @@ //! //! ### Room List Entries //! -//! As the room list of each list is a vec of the `rooms_count` len but a room -//! may only know of a subset of entries for sure at any given time, these -//! entries are wrapped in [`RoomListEntry`][]. This type, in close proximity to -//! the [specification][MSC], can be either `Empty`, `Filled` or `Invalidated`, -//! signaling the state of each entry position. +//! As the room list of each list is a vec of the `maximum_number_of_rooms` len +//! but a room may only know of a subset of entries for sure at any given time, +//! these entries are wrapped in [`RoomListEntry`][]. This type, in close +//! proximity to the [specification][MSC], can be either `Empty`, `Filled` or +//! `Invalidated`, signaling the state of each entry position. //! - `Empty` we don't know what sits here at this position in the list. //! - `Filled`: there is this `room_id` at this position. //! - `Invalidated` in that sense means that we _knew_ what was here before, but @@ -429,8 +429,9 @@ //! ## Caching //! //! All room data, for filled but also _invalidated_ rooms, including the entire -//! timeline events as well as all list `room_lists` and `rooms_count` are held -//! in memory (unless one `pop`s the list out). +//! timeline events as well as all list `room_lists` and +//! `maximum_number_of_rooms` are held in memory (unless one `pop`s the list +//! out). //! //! This is a purely in-memory cache layer though. If one wants Sliding Sync to //! persist and load from cold (storage) cache, one needs to set its key with @@ -511,8 +512,8 @@ //! .required_state(vec![ //! (StateEventType::RoomEncryption, "".to_owned()) //! ]) // only want to know if the room is encrypted -//! .batch_size(50) // grow the window by 50 items at a time -//! .limit(500) // only sync up the top 500 rooms +//! .full_sync_batch_size(50) // grow the window by 50 items at a time +//! .full_sync_maximum_number_of_rooms_to_fetch(500) // only sync up the top 500 rooms //! .build()?; //! //! let active_list = SlidingSyncList::builder() @@ -538,7 +539,7 @@ //! //! let active_list = sliding_sync.list(&active_list_name).unwrap(); //! let list_state_stream = active_list.state_stream(); -//! let list_count_stream = active_list.rooms_count_stream(); +//! let list_count_stream = active_list.maximum_number_of_rooms_stream(); //! let list_stream = active_list.rooms_list_stream(); //! //! tokio::spawn(async move { @@ -585,7 +586,6 @@ //! # }); //! ``` //! 
-//! //! [MSC]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 //! [proxy]: https://github.com/matrix-org/sliding-sync //! [ruma-types]: https://docs.rs/ruma/latest/ruma/api/client/sync/sync_events/v4/index.html @@ -930,7 +930,7 @@ impl SlidingSync { } let update_summary = { - let mut rooms = Vec::new(); + let mut updated_rooms = Vec::new(); let mut rooms_map = self.inner.rooms.write().unwrap(); for (room_id, mut room_data) in sliding_sync_response.rooms.into_iter() { @@ -963,22 +963,26 @@ impl SlidingSync { ); } - rooms.push(room_id); + updated_rooms.push(room_id); } let mut updated_lists = Vec::new(); for (name, updates) in sliding_sync_response.lists { - let Some(generator) = list_generators.get_mut(&name) else { + let Some(list_generator) = list_generators.get_mut(&name) else { error!("Response for list `{name}` - unknown to us; skipping"); continue }; - let count: u32 = + let maximum_number_of_rooms: u32 = updates.count.try_into().expect("the list total count convertible into u32"); - if generator.handle_response(count, &updates.ops, &rooms)? { + if list_generator.handle_response( + maximum_number_of_rooms, + &updates.ops, + &updated_rooms, + )? { updated_lists.push(name.clone()); } } @@ -988,7 +992,7 @@ impl SlidingSync { self.update_to_device_since(to_device.next_batch); } - UpdateSummary { lists: updated_lists, rooms } + UpdateSummary { lists: updated_lists, rooms: updated_rooms } }; Ok(update_summary) @@ -1296,7 +1300,7 @@ pub struct UpdateSummary { #[cfg(test)] mod test { use assert_matches::assert_matches; - use ruma::room_id; + use ruma::{room_id, uint}; use serde_json::json; use wiremock::MockServer; @@ -1325,7 +1329,13 @@ mod test { })) .unwrap(); - list.handle_response(10u32, &vec![full_window_update], &vec![(0, 9)], &vec![]).unwrap(); + list.handle_response( + 10u32, + &vec![full_window_update], + &vec![(uint!(0), uint!(9))], + &vec![], + ) + .unwrap(); let a02 = room_id!("!A00002:matrix.example").to_owned(); let a05 = room_id!("!A00005:matrix.example").to_owned(); @@ -1347,7 +1357,13 @@ mod test { })) .unwrap(); - list.handle_response(10u32, &vec![update], &vec![(0, 3), (8, 9)], &vec![]).unwrap(); + list.handle_response( + 10u32, + &vec![update], + &vec![(uint!(0), uint!(3)), (uint!(8), uint!(9))], + &vec![], + ) + .unwrap(); assert_eq!(list.find_room_in_list(room_id!("!A00002:matrix.example")), Some(2)); assert_eq!(list.find_room_in_list(room_id!("!A00005:matrix.example")), None); diff --git a/labs/jack-in/src/client/mod.rs b/labs/jack-in/src/client/mod.rs index d411211ed..79b180140 100644 --- a/labs/jack-in/src/client/mod.rs +++ b/labs/jack-in/src/client/mod.rs @@ -21,11 +21,12 @@ pub async fn run_client( .timeline_limit(10u32) .sync_mode(config.full_sync_mode.into()); if let Some(size) = config.batch_size { - full_sync_view_builder = full_sync_view_builder.batch_size(size); + full_sync_view_builder = full_sync_view_builder.full_sync_batch_size(size); } if let Some(limit) = config.limit { - full_sync_view_builder = full_sync_view_builder.limit(limit); + full_sync_view_builder = + full_sync_view_builder.full_sync_maximum_number_of_rooms_to_fetch(limit); } if let Some(limit) = config.timeline_limit { full_sync_view_builder = full_sync_view_builder.timeline_limit(limit); @@ -66,7 +67,7 @@ pub async fn run_client( let state = view.state(); ssync_state.set_view_state(state.clone()); - if state == SlidingSyncState::Live { + if state == SlidingSyncState::FullyLoaded { info!("Reached live sync"); break; } diff --git a/labs/jack-in/src/client/state.rs 
b/labs/jack-in/src/client/state.rs index 5f60af891..32ad09253 100644 --- a/labs/jack-in/src/client/state.rs +++ b/labs/jack-in/src/client/state.rs @@ -135,7 +135,7 @@ impl SlidingSyncState { } pub fn total_rooms_count(&self) -> Option { - self.view.rooms_count() + self.view.maximum_number_of_rooms() } pub fn set_first_render_now(&mut self) { diff --git a/testing/sliding-sync-integration-test/src/lib.rs b/testing/sliding-sync-integration-test/src/lib.rs index 90bd8b354..c8ec677db 100644 --- a/testing/sliding-sync-integration-test/src/lib.rs +++ b/testing/sliding-sync-integration-test/src/lib.rs @@ -133,7 +133,7 @@ mod tests { // Get the list to all rooms to check the list' state. let list = sync.list("init_list").context("list `init_list` isn't found")?; - assert_eq!(list.state(), SlidingSyncState::Cold); + assert_eq!(list.state(), SlidingSyncState::NotLoaded); // Send the request and wait for a response. let update_summary = stream @@ -142,7 +142,7 @@ mod tests { .context("No room summary found, loop ended unsuccessfully")??; // Check the state has switched to `Live`. - assert_eq!(list.state(), SlidingSyncState::Live); + assert_eq!(list.state(), SlidingSyncState::FullyLoaded); // One room has received an update. assert_eq!(update_summary.rooms.len(), 1); @@ -543,7 +543,7 @@ mod tests { let full = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .batch_size(10u32) + .full_sync_batch_size(10u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("full") .build()?; @@ -552,8 +552,8 @@ mod tests { let list = sync_proxy.list("sliding").context("but we just added that list!")?; let full_list = sync_proxy.list("full").context("but we just added that list!")?; - assert_eq!(list.state(), SlidingSyncState::Cold, "list isn't cold"); - assert_eq!(full_list.state(), SlidingSyncState::Cold, "full isn't cold"); + assert_eq!(list.state(), SlidingSyncState::NotLoaded, "list isn't cold"); + assert_eq!(full_list.state(), SlidingSyncState::NotLoaded, "full isn't cold"); let stream = sync_proxy.stream(); pin_mut!(stream); @@ -564,8 +564,8 @@ mod tests { // we only heard about the ones we had asked for assert_eq!(room_summary.rooms.len(), 11); - assert_eq!(list.state(), SlidingSyncState::Live, "list isn't live"); - assert_eq!(full_list.state(), SlidingSyncState::CatchingUp, "full isn't preloading"); + assert_eq!(list.state(), SlidingSyncState::FullyLoaded, "list isn't live"); + assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); // doing another two requests 0-20; 0-21 should bring full live, too let _room_summary = @@ -574,7 +574,7 @@ mod tests { let rooms_list = full_list.rooms_list::(); assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::>()); - assert_eq!(full_list.state(), SlidingSyncState::Live, "full isn't live yet"); + assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't live yet"); Ok(()) } @@ -844,7 +844,7 @@ mod tests { .build()?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .limit(100) + .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -867,7 +867,7 @@ mod tests { sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. 
let stream = sync_proxy.stream(); pin_mut!(stream); - while growing_sync.state() != SlidingSyncState::Live { + while growing_sync.state() != SlidingSyncState::FullyLoaded { // we wait until growing sync is all done, too println!("awaiting"); let _room_summary = stream @@ -902,7 +902,7 @@ mod tests { let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .batch_size(10u32) + .full_sync_batch_size(10u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -954,7 +954,7 @@ mod tests { let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .batch_size(10u32) + .full_sync_batch_size(10u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -1014,7 +1014,7 @@ mod tests { print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .limit(100) + .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -1096,7 +1096,7 @@ mod tests { print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .limit(100) + .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -1111,7 +1111,7 @@ mod tests { let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. let stream = sync_proxy.stream(); pin_mut!(stream); - while list.state() != SlidingSyncState::Live { + while list.state() != SlidingSyncState::FullyLoaded { // we wait until growing sync is all done, too println!("awaiting"); let _room_summary = stream @@ -1142,7 +1142,7 @@ mod tests { let summary = room_summary?; // we only heard about the ones we had asked for if summary.lists.iter().any(|s| s == "growing") - && list.rooms_count().unwrap_or_default() == 32 + && list.maximum_number_of_rooms().unwrap_or_default() == 32 { if seen { // once we saw 32, we give it another loop to catch up! From c0f84bb8daab3832aed7760cab7b4da09d33db18 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 15 Mar 2023 17:54:39 +0100 Subject: [PATCH 04/14] test(sdk): Fix tests and improve speed. To improve the speed, we simply reduce the number of rooms involved in the test. --- .../sliding-sync-integration-test/src/lib.rs | 96 +++++++++++-------- 1 file changed, 55 insertions(+), 41 deletions(-) diff --git a/testing/sliding-sync-integration-test/src/lib.rs b/testing/sliding-sync-integration-test/src/lib.rs index c8ec677db..c7a870bed 100644 --- a/testing/sliding-sync-integration-test/src/lib.rs +++ b/testing/sliding-sync-integration-test/src/lib.rs @@ -67,7 +67,7 @@ impl From<&RoomListEntry> for RoomListEntryEasy { #[cfg(test)] mod tests { use std::{ - iter::repeat, + iter::{once, repeat}, time::{Duration, Instant}, }; @@ -558,7 +558,8 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - // exactly one poll! + // Exactly one poll! + // Ranges are 0-10 for selective list, and 0-9 for growing list.
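+ // (Both bounds are inclusive: the growing list was built with `full_sync_batch_size(10u32)`, so its first window, 0-9, already covers ten rooms.)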
let room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??; @@ -567,14 +568,30 @@ mod tests { assert_eq!(list.state(), SlidingSyncState::FullyLoaded, "list isn't live"); assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); - // doing another two requests 0-20; 0-21 should bring full live, too + // Another poll! + // Ranges are 0-10 for selective list, and 0-19 for growing list. let _room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??; let rooms_list = full_list.rooms_list::<RoomListEntryEasy>(); + assert_eq!( + rooms_list, + repeat(RoomListEntryEasy::Filled) + .take(20) + .chain(once(RoomListEntryEasy::Empty)) + .collect::<Vec<_>>() + ); + assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); + + // One last poll, and we should get all rooms loaded. + let _room_summary = + stream.next().await.context("No room summary found, loop ended unsuccessfully")??; + + let rooms_list = full_list.rooms_list::<RoomListEntryEasy>(); + assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::<Vec<_>>()); - assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't live yet"); + assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't fully loaded"); Ok(()) } @@ -899,10 +916,10 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn growing_sync_keeps_going() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; + let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(10u32) + .full_sync_batch_size(5u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -913,9 +930,9 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - // we have 50 and catch up in batches of 10. so let's get over to 20. + // we have 20 and catch up in batches of 5. so let's get over to 15. - for _n in 0..2 { + for _ in 0..=2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let _summary = room_summary?; } @@ -925,25 +942,20 @@ mod tests { let collection_simple = list.rooms_list::<RoomListEntryEasy>(); assert_eq!( collection_simple, repeat(RoomListEntryEasy::Filled) - .take(21) - .chain(repeat(RoomListEntryEasy::Empty).take(29)) + .take(15) + .chain(repeat(RoomListEntryEasy::Empty).take(5)) .collect::<Vec<_>>() ); - // we have 50 and catch up in batches of 10. let's go two more, see it grow. - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - } + // we have 20 and catch up in batches of 5. let's go one more, see it grow.
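+ // (Arithmetic check: the three polls above, with a batch of 5, filled rooms 0-14; this extra poll grows the window to 0-19, i.e. all 20.)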
+ let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; let collection_simple = list.rooms_list::<RoomListEntryEasy>(); assert_eq!( collection_simple, - repeat(RoomListEntryEasy::Filled) - .take(41) - .chain(repeat(RoomListEntryEasy::Empty).take(9)) - .collect::<Vec<_>>() + repeat(RoomListEntryEasy::Filled).take(20).collect::<Vec<_>>() ); Ok(()) } @@ -951,10 +963,10 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn growing_sync_keeps_going_after_restart() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; + let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?; let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) - .full_sync_batch_size(10u32) + .full_sync_batch_size(5u32) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -965,9 +977,9 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - // we have 50 and catch up in batches of 10. so let's get over to 20. + // we have 20 and catch up in batches of 5. so let's get over to 15. - for _n in 0..2 { + for _ in 0..=2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let _summary = room_summary?; } @@ -980,19 +992,17 @@ mod tests { } else { acc }), - 21 + 15 ); - // we have 50 and catch up in batches of 10. Let's make sure the restart - // continues + // we have 20 and catch up in batches of 5. Let's make sure the restart + // continues. let stream = sync_proxy.stream(); pin_mut!(stream); - for _n in 0..2 { - let room_summary = stream.next().await.context("sync has closed unexpectedly")?; - let _summary = room_summary?; - } + let room_summary = stream.next().await.context("sync has closed unexpectedly")?; + let _summary = room_summary?; let collection_simple = list.rooms_list::<RoomListEntryEasy>(); @@ -1002,7 +1012,7 @@ mod tests { } else { acc }), - 41 + 20 ); Ok(()) } @@ -1010,10 +1020,11 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn continue_on_reset() -> anyhow::Result<()> { - let (_client, sync_proxy_builder) = random_setup_with_rooms(30).await?; + let (_client, sync_proxy_builder) = random_setup_with_rooms(10).await?; print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(5u32) .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") .build()?; @@ -1030,9 +1041,10 @@ mod tests { let stream = sync_proxy.stream(); pin_mut!(stream); - for _n in 0..2 { + for _ in 0..=2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let summary = room_summary?; + if summary.lists.iter().any(|s| s == "growing") { break; } } @@ -1046,14 +1058,14 @@ mod tests { } else { acc }), - 21 + 5 ); // force the pos to be invalid and thus this being reset internally sync_proxy.set_pos("100".to_owned()); let mut error_seen = false; - for _n in 0..2 { + for _ in 0..2 { let summary = match stream.next().await { Some(Ok(e)) => e, Some(Err(e)) => { if error_seen { break; } error_seen = true; } None => anyhow::bail!("Stream ended unexpectedly."), }; + // we only heard about the ones we had asked for if summary.lists.iter().any(|s| s == "growing") { break; } @@ -1084,7 +1097,7 @@ mod tests { } else { acc }), - 30 + 10 ); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn
noticing_new_rooms_in_growing() -> anyhow::Result<()> { - let (client, sync_proxy_builder) = random_setup_with_rooms(30).await?; + let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?; print!("setup took its time"); let growing_sync = SlidingSyncList::builder() .sync_mode(SlidingSyncMode::GrowingFullSync) + .full_sync_batch_size(10u32) .full_sync_maximum_number_of_rooms_to_fetch(100) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .name("growing") @@ -1128,7 +1142,7 @@ mod tests { } else { acc }), - 30 + 20 ); // all found. let's add two more. @@ -1142,10 +1156,10 @@ mod tests { let summary = room_summary?; // we only heard about the ones we had asked for if summary.lists.iter().any(|s| s == "growing") - && list.maximum_number_of_rooms().unwrap_or_default() == 32 + && list.maximum_number_of_rooms().unwrap_or_default() == 22 { if seen { - // once we saw 32, we give it another loop to catch up! + // once we saw 22, we give it another loop to catch up! break; } else { seen = true; @@ -1161,7 +1175,7 @@ mod tests { } else { acc }), - 32 + 22 ); Ok(()) From 7a2b1e6e1fca3b4b3844ea721e73960a9cb85324 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 15 Mar 2023 17:59:11 +0100 Subject: [PATCH 05/14] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/request_generator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index f3c1cbd3f..b2fa7934a 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -3,7 +3,7 @@ //! Depending on the [`SlidingSyncMode`], the generated requests aren't the //! same. //! -//! In [`SlidingSyncMode::Selective`], it's pretty straighforward: +//! In [`SlidingSyncMode::Selective`], it's pretty straightforward: //! //! * There is set of ranges, //! * Each request asks to load the particular ranges. From ca5cabb7e18cbc2d0e205c5f993815d2654075fa Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Wed, 15 Mar 2023 18:08:19 +0100 Subject: [PATCH 06/14] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/request_generator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index b2fa7934a..3d4736f2c 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -296,7 +296,7 @@ fn create_range( end = min(end, maximum_number_of_rooms); } - // Finally, because the bounds of the range are inclusive, 1 is substracted. + // Finally, because the bounds of the range are inclusive, 1 is subtracted. end = end.saturating_sub(1); // Make sure `start` is smaller than `end`. It can happen if `start` is greater From 9d5a1fda3c02d301a2afbe92afc7793ebd2f59ab Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:11:45 +0100 Subject: [PATCH 07/14] doc(sdk): Fix typos or improve. 
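To make the difference between the two full-sync modes concrete, here is a minimal, self-contained sketch. It is illustrative only: `FullSyncKind` and `next_range` are names invented for this example, not the SDK's actual `SlidingSyncListRequestGenerator`. It assumes inclusive range bounds and a non-zero `batch_size`:

    /// Which full-sync strategy is used (invented for this example).
    #[derive(Clone, Copy)]
    enum FullSyncKind {
        Paging,
        Growing,
    }

    /// Computes the next inclusive `(start, end)` range to request, given
    /// how many rooms have already been fetched.
    fn next_range(kind: FullSyncKind, already_fetched: u32, batch_size: u32) -> (u32, u32) {
        let end = already_fetched + batch_size - 1;
        match kind {
            // Paging walks the list page by page: 0..=19, 20..=39, 40..=59 etc.
            FullSyncKind::Paging => (already_fetched, end),
            // Growing re-requests from 0 with an ever larger window:
            // 0..=19, 0..=39, 0..=59 etc.
            FullSyncKind::Growing => (0, end),
        }
    }

    fn main() {
        assert_eq!(next_range(FullSyncKind::Paging, 20, 20), (20, 39));
        assert_eq!(next_range(FullSyncKind::Growing, 20, 20), (0, 39));
    }

In both modes the real generator additionally clamps the end of the range to the server-reported maximum number of rooms, as the `min(end, maximum_number_of_rooms)` logic above shows.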
--- .../src/sliding_sync/list/builder.rs | 2 +- .../matrix-sdk/src/sliding_sync/list/mod.rs | 33 ++++++++++--------- .../sliding_sync/list/request_generator.rs | 7 ++-- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/builder.rs b/crates/matrix-sdk/src/sliding_sync/list/builder.rs index 42e629e24..f38022b54 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/builder.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/builder.rs @@ -85,7 +85,7 @@ impl SlidingSyncListBuilder { } /// When doing a full-sync, this method defines the total limit of rooms to - /// load (it can be useful for gigantic account). + /// load (it can be useful for gigantic accounts). pub fn full_sync_maximum_number_of_rooms_to_fetch( mut self, value: impl Into<Option<u32>>, ) -> ... diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index 8bd338f6b..cef5d232f 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -25,7 +25,7 @@ use super::{Error, FrozenSlidingSyncRoom, SlidingSyncRoom}; use crate::Result; /// Holding a specific filtered list within the concept of sliding sync. -/// Main entrypoint to the SlidingSync +/// Main entrypoint to the `SlidingSync`: /// /// ```no_run /// # use futures::executor::block_on; @@ -76,10 +76,12 @@ pub struct SlidingSyncList { state: Arc<StdRwLock<Observable<SlidingSyncState>>>, /// The total number of rooms that is possible to interact with for the - /// given list. It's not the total rooms that have been fetched. The - /// server tells the client that it's possible to fetch this amount of - /// rooms maximum. Since this number can change according to the list - /// filters, it's observable. + /// given list. + /// + /// It's not the total rooms that have been fetched. The server tells the + /// client that it's possible to fetch this amount of rooms maximum. + /// Since this number can change according to the list filters, it's + /// observable. maximum_number_of_rooms: Arc<StdRwLock<Observable<Option<u32>>>>, /// The rooms in order. @@ -620,14 +622,13 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSync` usually starts at a `Loading`, getting a -/// fast response for the first given number of rooms, then switches into -/// `PartiallyLoaded` during which the list fetches the remaining rooms, usually -/// in order, some times in batches. Once that is ready, it switches into -/// `Live`. +/// The lifetime of a `SlidingSyncList` ususally starts at `NotLoaded` or +/// `Preloaded` (if it is restored from a cache). When loading rooms in a list, +/// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or +/// `FullyLoaded`. The lifetime of a `SlidingSync` usually starts at a /// -/// If the client has been offline for a while, though, the SlidingSync might -/// return back to `PartiallyLoaded` at any point. +/// If the client has been offline for a while, though, the `SlidingSyncList` +/// might return back to `PartiallyLoaded` at any point. #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SlidingSyncState { /// Sliding Sync has not started to load anything yet. #[default] #[serde(rename = "Cold")] NotLoaded, /// Sliding Sync has been preloaded, i.e. restored from a catch for example. #[serde(rename = "Preload")] Preloaded, /// Updates are received from the loaded rooms, and new rooms are being @@ -647,18 +648,18 @@ pub enum SlidingSyncState { FullyLoaded, } -/// The mode by which the the [`SlidingSyncList`] is in fetching the data. +/// How a [`SlidingSyncList`] fetches the data.
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SlidingSyncMode { /// Fully sync all rooms in the background, page by page of `batch_size`, - /// like `0..20`, `21..40`, 41..60` etc. assuming the `batch_size` is 20. + /// like `0..19`, `20..39`, `40..59` etc. assuming the `batch_size` is 20. #[serde(alias = "FullSync")] PagingFullSync, /// Fully sync all rooms in the background, with a growing window of - /// `batch_size`, like `0..20`, `0..40`, `0..60` etc. assuming the + /// `batch_size`, like `0..19`, `0..39`, `0..59` etc. assuming the /// `batch_size` is 20. GrowingFullSync, - /// Only sync the specific windows defined + /// Only sync the specific defined windows/ranges. #[default] Selective, } diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index 3d4736f2c..abe193894 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -1,11 +1,11 @@ -//! The logic to generate Sliding Sync list request. +//! The logic to generate Sliding Sync list requests. //! //! Depending on the [`SlidingSyncMode`], the generated requests aren't the //! same. //! //! In [`SlidingSyncMode::Selective`], it's pretty straightforward: //! -//! * There is set of ranges, +//! * There is a set of ranges, //! * Each request asks to load the particular ranges. //! //! In [`SlidingSyncMode::PagingFullSync`]: @@ -232,7 +232,8 @@ impl SlidingSyncListRequestGenerator { *state = SlidingSyncState::PartiallyLoaded; }); } - // Otherwise the current range has reached its maximum, we switched to `Live` mode. + // Otherwise the current range has reached its maximum, we switched to `FullyLoaded` + // mode. else { // The range is covering the entire list, from 0 to its maximum. self.list.set_range(0, range_maximum); From 51abedda59c0ac254c6b4f8954e3f8e26c7e1cb7 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:21:11 +0100 Subject: [PATCH 08/14] feat(ffi): Update `SlidingSyncState` variants. --- bindings/matrix-sdk-ffi/src/api.udl | 17 ++++++++++------- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/bindings/matrix-sdk-ffi/src/api.udl b/bindings/matrix-sdk-ffi/src/api.udl index 833040183..e27b7a384 100644 --- a/bindings/matrix-sdk-ffi/src/api.udl +++ b/bindings/matrix-sdk-ffi/src/api.udl @@ -32,14 +32,17 @@ callback interface SlidingSyncObserver { }; enum SlidingSyncState { - /// Hasn't started yet - "Cold", - /// We are quickly preloading a preview of the most important rooms - "Preload", + /// Sliding Sync has not started to load anything yet. + "NotLoaded", + /// Sliding Sync has been preloaded, i.e. restored from a cache for example. + "Preloaded", /// We are trying to load all remaining rooms, might be in batches - "CatchingUp", - /// We are all caught up and now only sync the live responses. - "Live", + /// Updates are received from the loaded rooms, and new rooms are being fetched + /// in background + "PartiallyLoaded", + /// Updates are received for all the loaded rooms, and all rooms have been + /// loaded!
+ "FullyLoaded", }; enum SlidingSyncMode { diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index cef5d232f..98f8b4d3f 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -635,7 +635,7 @@ pub enum SlidingSyncState { #[default] #[serde(rename = "Cold")] NotLoaded, - /// Sliding Sync has been preloaded, i.e. restored from a catch for example. + /// Sliding Sync has been preloaded, i.e. restored from a cache for example. #[serde(rename = "Preload")] Preloaded, /// Updates are received from the loaded rooms, and new rooms are being From 07c366983de53d34ae5f133c9ff8ede30fb829e8 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:24:05 +0100 Subject: [PATCH 09/14] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index 98f8b4d3f..ca4594063 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -622,7 +622,7 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSyncList` ususally starts at `NotLoaded` or +/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or /// `Preloaded` (if it is restored from a cache). When loading rooms in a list, /// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or /// `FullyLoaded`. The lifetime of a `SlidingSync` usually starts at a From c8bad4b86e39a6a8866593fdc962f8505e141b90 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 08:55:25 +0100 Subject: [PATCH 10/14] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index ca4594063..9859fd424 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -622,7 +622,7 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or +/// The lifetime of a `SlidingSyncList` usuaslly starts at `NotLoaded` or /// `Preloaded` (if it is restored from a cache). When loading rooms in a list, /// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or /// `FullyLoaded`. The lifetime of a `SlidingSync` usually starts at a From 5e23bd09bc3bda0144db66cc34fc905386d4a4f9 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 09:01:14 +0100 Subject: [PATCH 11/14] feat(ci): `aarch64-apple-ios` should work on rustc stable. --- .github/workflows/bindings_ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bindings_ci.yml b/.github/workflows/bindings_ci.yml index 1c6e22fce..f4969ec59 100644 --- a/.github/workflows/bindings_ci.yml +++ b/.github/workflows/bindings_ci.yml @@ -209,7 +209,7 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install Rust - uses: dtolnay/rust-toolchain@nightly + uses: dtolnay/rust-toolchain@stable - name: Install aarch64-apple-ios target run: rustup target install aarch64-apple-ios From 1933fe7a8f0f3e6ea6e253f1d7de6bcefac7de36 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 11:04:18 +0100 Subject: [PATCH 12/14] doc(sdk): Fix a typo. 
--- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index 9859fd424..ca4594063 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -622,7 +622,7 @@ fn room_ops( /// The state the [`SlidingSyncList`] is in. /// -/// The lifetime of a `SlidingSyncList` usuaslly starts at `NotLoaded` or +/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or /// `Preloaded` (if it is restored from a cache). When loading rooms in a list, /// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or /// `FullyLoaded`. The lifetime of a `SlidingSync` usually starts at a From 5f06dc8229edc6cdcc2ccc74d852f097a6fd91ed Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 11:53:01 +0100 Subject: [PATCH 13/14] doc(sdk): Fix a typo. --- crates/matrix-sdk/src/sliding_sync/list/request_generator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs index abe193894..7fa7b285b 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/request_generator.rs @@ -441,6 +441,7 @@ mod tests { $( { + // Generate a new request. let request = $generator.next().unwrap(); assert_eq!(request.ranges, [ $( (uint!( $range_start ), uint!( $range_end )) ),* ]); @@ -448,7 +449,6 @@ mod tests { // Fake a response. let _ = $generator.handle_response($maximum_number_of_rooms, &vec![], &vec![]); - // Now, Sliding Sync has started to load rooms. assert_eq!($generator.is_fully_loaded(), $is_fully_loaded); assert_eq!($list.state(), SlidingSyncState::$list_state); } From b39a224be82efdd3b4b3433ef5554319f93d4855 Mon Sep 17 00:00:00 2001 From: Ivan Enderlin Date: Thu, 16 Mar 2023 17:03:25 +0100 Subject: [PATCH 14/14] doc(sdk): Use the Rust range notation to avoid ambiguity. --- crates/matrix-sdk/src/sliding_sync/list/mod.rs | 4 ++-- testing/sliding-sync-integration-test/src/lib.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/matrix-sdk/src/sliding_sync/list/mod.rs b/crates/matrix-sdk/src/sliding_sync/list/mod.rs index ca4594063..5f7079520 100644 --- a/crates/matrix-sdk/src/sliding_sync/list/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/list/mod.rs @@ -652,11 +652,11 @@ pub enum SlidingSyncState { #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SlidingSyncMode { /// Fully sync all rooms in the background, page by page of `batch_size`, - /// like `0..19`, `20..39`, `40..59` etc. assuming the `batch_size` is 20. + /// like `0..=19`, `20..=39`, `40..=59` etc. assuming the `batch_size` is 20. #[serde(alias = "FullSync")] PagingFullSync, /// Fully sync all rooms in the background, with a growing window of - /// `batch_size`, like `0..19`, `0..39`, `0..59` etc. assuming the + /// `batch_size`, like `0..=19`, `0..=39`, `0..=59` etc. assuming the /// `batch_size` is 20. GrowingFullSync, /// Only sync the specific defined windows/ranges.
diff --git a/testing/sliding-sync-integration-test/src/lib.rs b/testing/sliding-sync-integration-test/src/lib.rs index c7a870bed..d8d10b783 100644 --- a/testing/sliding-sync-integration-test/src/lib.rs +++ b/testing/sliding-sync-integration-test/src/lib.rs @@ -559,7 +559,7 @@ mod tests { pin_mut!(stream); // Exactly one poll! - // Ranges are 0-10 for selective list, and 0-9 for growing list. + // Ranges are 0..=9 for selective list, and 0..=9 for growing list. let room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??; @@ -569,7 +569,7 @@ mod tests { assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't preloading"); // Another poll! - // Ranges are 0-10 for selective list, and 0-19 for growing list. + // Ranges are 0..=10 for selective list, and 0..=19 for growing list. let _room_summary = stream.next().await.context("No room summary found, loop ended unsuccessfully")??;
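Context for the notation change, with a quick self-contained check (illustrative only): sliding-sync ranges are inclusive on both ends (the generator subtracts 1 from the computed end for exactly this reason), which is what Rust's `..=` spells out, whereas the old `0..19`-style comments could be misread as half-open ranges.

    fn main() {
        // The inclusive range 0..=19 covers 20 rooms,
        assert_eq!((0u32..=19).count(), 20);
        // while the half-open range 0..19 covers only 19.
        assert_eq!((0u32..19).count(), 19);
    }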