fix(sdk): Fix, test, and clean up `SlidingSyncListRequestGenerator`

Ivan Enderlin 2023-03-16 17:04:05 +01:00 committed by GitHub
commit 2d56f550aa
11 changed files with 820 additions and 278 deletions

View File

@@ -209,7 +209,7 @@ jobs:
        repo-token: ${{ secrets.GITHUB_TOKEN }}
    - name: Install Rust
-     uses: dtolnay/rust-toolchain@nightly
+     uses: dtolnay/rust-toolchain@stable
    - name: Install aarch64-apple-ios target
      run: rustup target install aarch64-apple-ios

View File

@@ -32,14 +32,17 @@ callback interface SlidingSyncObserver {
 };

 enum SlidingSyncState {
-    /// Hasn't started yet
-    "Cold",
-    /// We are quickly preloading a preview of the most important rooms
-    "Preload",
-    /// We are trying to load all remaining rooms, might be in batches
-    "CatchingUp",
-    /// We are all caught up and now only sync the live responses.
-    "Live",
+    /// Sliding Sync has not started to load anything yet.
+    "NotLoaded",
+    /// Sliding Sync has been preloaded, i.e. restored from a cache for example.
+    "Preloaded",
+    /// Updates are received from the loaded rooms, and new rooms are being fetched
+    /// in background
+    "PartiallyLoaded",
+    /// Updates are received for all the loaded rooms, and all rooms have been
+    /// loaded!
+    "FullyLoaded",
 };

 enum SlidingSyncMode {

View File

@@ -447,19 +447,19 @@ impl SlidingSyncListBuilder {
     pub fn batch_size(self: Arc<Self>, batch_size: u32) -> Arc<Self> {
         let mut builder = unwrap_or_clone_arc(self);
-        builder.inner = builder.inner.batch_size(batch_size);
+        builder.inner = builder.inner.full_sync_batch_size(batch_size);
         Arc::new(builder)
     }

     pub fn room_limit(self: Arc<Self>, limit: u32) -> Arc<Self> {
         let mut builder = unwrap_or_clone_arc(self);
-        builder.inner = builder.inner.limit(limit);
+        builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(limit);
         Arc::new(builder)
     }

     pub fn no_room_limit(self: Arc<Self>) -> Arc<Self> {
         let mut builder = unwrap_or_clone_arc(self);
-        builder.inner = builder.inner.limit(None);
+        builder.inner = builder.inner.full_sync_maximum_number_of_rooms_to_fetch(None);
         Arc::new(builder)
     }
@@ -555,7 +555,7 @@ impl SlidingSyncList {
         &self,
         observer: Box<dyn SlidingSyncListRoomsCountObserver>,
     ) -> Arc<TaskHandle> {
-        let mut rooms_count_stream = self.inner.rooms_count_stream();
+        let mut rooms_count_stream = self.inner.maximum_number_of_rooms_stream();

         Arc::new(TaskHandle::new(RUNTIME.spawn(async move {
             loop {
@@ -597,7 +597,7 @@ impl SlidingSyncList {
     /// Total of rooms matching the filter
     pub fn current_room_count(&self) -> Option<u32> {
-        self.inner.rooms_count()
+        self.inner.maximum_number_of_rooms()
     }

     /// The current timeline limit

View File

@@ -248,8 +248,9 @@ impl SlidingSyncBuilder {
                 {
                     trace!(name, "frozen for list found");

-                    let FrozenSlidingSyncList { rooms_count, rooms_list, rooms } = frozen_list;
-                    list.set_from_cold(rooms_count, rooms_list);
+                    let FrozenSlidingSyncList { maximum_number_of_rooms, rooms_list, rooms } =
+                        frozen_list;
+                    list.set_from_cold(maximum_number_of_rooms, rooms_list);

                     for (key, frozen_room) in rooms.into_iter() {
                         rooms_found.entry(key).or_insert_with(|| {

View File

@@ -22,14 +22,13 @@ pub struct SlidingSyncListBuilder {
     sync_mode: SlidingSyncMode,
     sort: Vec<String>,
     required_state: Vec<(StateEventType, String)>,
-    batch_size: u32,
+    full_sync_batch_size: u32,
+    full_sync_maximum_number_of_rooms_to_fetch: Option<u32>,
     send_updates_for_items: bool,
-    limit: Option<u32>,
     filters: Option<v4::SyncRequestListFilters>,
     timeline_limit: Option<UInt>,
     name: Option<String>,
     state: SlidingSyncState,
-    rooms_count: Option<u32>,
     rooms_list: Vector<RoomListEntry>,
     ranges: Vec<(UInt, UInt)>,
 }
@@ -43,14 +42,13 @@ impl SlidingSyncListBuilder {
                 (StateEventType::RoomEncryption, "".to_owned()),
                 (StateEventType::RoomTombstone, "".to_owned()),
             ],
-            batch_size: 20,
+            full_sync_batch_size: 20,
+            full_sync_maximum_number_of_rooms_to_fetch: None,
             send_updates_for_items: false,
-            limit: None,
             filters: None,
             timeline_limit: None,
             name: None,
             state: SlidingSyncState::default(),
-            rooms_count: None,
             rooms_list: Vector::new(),
             ranges: Vec::new(),
         }
@@ -79,9 +77,20 @@ impl SlidingSyncListBuilder {
         self
     }

-    /// How many rooms request at a time when doing a full-sync catch up.
-    pub fn batch_size(mut self, value: u32) -> Self {
-        self.batch_size = value;
+    /// When doing a full-sync, this method defines the value by which ranges of
+    /// rooms will be extended.
+    pub fn full_sync_batch_size(mut self, value: u32) -> Self {
+        self.full_sync_batch_size = value;
+        self
+    }
+
+    /// When doing a full-sync, this method defines the total limit of rooms to
+    /// load (it can be useful for gigantic accounts).
+    pub fn full_sync_maximum_number_of_rooms_to_fetch(
+        mut self,
+        value: impl Into<Option<u32>>,
+    ) -> Self {
+        self.full_sync_maximum_number_of_rooms_to_fetch = value.into();
         self
     }
@@ -92,12 +101,6 @@ impl SlidingSyncListBuilder {
         self
     }

-    /// How many rooms request a total hen doing a full-sync catch up.
-    pub fn limit(mut self, value: impl Into<Option<u32>>) -> Self {
-        self.limit = value.into();
-        self
-    }
-
     /// Any filters to apply to the query.
     pub fn filters(mut self, value: Option<v4::SyncRequestListFilters>) -> Self {
         self.filters = value;
@@ -123,31 +126,31 @@ impl SlidingSyncListBuilder {
         self
     }

-    /// Set the ranges to fetch
+    /// Set the ranges to fetch.
     pub fn ranges<U: Into<UInt>>(mut self, range: Vec<(U, U)>) -> Self {
         self.ranges = range.into_iter().map(|(a, b)| (a.into(), b.into())).collect();
         self
     }

-    /// Set a single range fetch
+    /// Set a single range fetch.
     pub fn set_range<U: Into<UInt>>(mut self, from: U, to: U) -> Self {
         self.ranges = vec![(from.into(), to.into())];
         self
     }

-    /// Set the ranges to fetch
+    /// Set the ranges to fetch.
     pub fn add_range<U: Into<UInt>>(mut self, from: U, to: U) -> Self {
         self.ranges.push((from.into(), to.into()));
         self
     }

-    /// Set the ranges to fetch
+    /// Set the ranges to fetch.
     pub fn reset_ranges(mut self) -> Self {
-        self.ranges = Default::default();
+        self.ranges.clear();
         self
     }

-    /// Build the list
+    /// Build the list.
     pub fn build(self) -> Result<SlidingSyncList> {
         let mut rooms_list = ObservableVector::new();
         rooms_list.append(self.rooms_list);
@@ -156,14 +159,15 @@ impl SlidingSyncListBuilder {
             sync_mode: self.sync_mode,
             sort: self.sort,
             required_state: self.required_state,
-            batch_size: self.batch_size,
+            full_sync_batch_size: self.full_sync_batch_size,
             send_updates_for_items: self.send_updates_for_items,
-            limit: self.limit,
+            full_sync_maximum_number_of_rooms_to_fetch: self
+                .full_sync_maximum_number_of_rooms_to_fetch,
             filters: self.filters,
             timeline_limit: Arc::new(StdRwLock::new(Observable::new(self.timeline_limit))),
             name: self.name.ok_or(Error::BuildMissingField("name"))?,
             state: Arc::new(StdRwLock::new(Observable::new(self.state))),
-            rooms_count: Arc::new(StdRwLock::new(Observable::new(self.rooms_count))),
+            maximum_number_of_rooms: Arc::new(StdRwLock::new(Observable::new(None))),
             rooms_list: Arc::new(StdRwLock::new(rooms_list)),
             ranges: Arc::new(StdRwLock::new(Observable::new(self.ranges))),
             is_cold: Arc::new(AtomicBool::new(false)),
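
At a call site, the renamed builder methods read as follows. This is a minimal sketch adapted from the crate-level documentation later in this diff; the list name and the two sizes are illustrative values, and the snippet assumes a surrounding function returning `Result`:

    let list = SlidingSyncList::builder()
        .name("full-sync")
        .sync_mode(SlidingSyncMode::PagingFullSync)
        // Extend the requested range by 50 rooms per request.
        .full_sync_batch_size(50)
        // Never load more than the top 500 rooms.
        .full_sync_maximum_number_of_rooms_to_fetch(500)
        .build()?;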

View File

@@ -25,7 +25,7 @@ use super::{Error, FrozenSlidingSyncRoom, SlidingSyncRoom};
 use crate::Result;

 /// Holding a specific filtered list within the concept of sliding sync.
-/// Main entrypoint to the SlidingSync
+/// Main entrypoint to the `SlidingSync`:
 ///
 /// ```no_run
 /// # use futures::executor::block_on;
@@ -51,17 +51,19 @@ pub struct SlidingSyncList {
     /// Required states to return per room
     required_state: Vec<(StateEventType, String)>,

-    /// How many rooms request at a time when doing a full-sync catch up
-    batch_size: u32,
+    /// When doing a full-sync, the ranges of rooms to load are extended by this
+    /// `full_sync_batch_size` size.
+    full_sync_batch_size: u32,
+
+    /// When doing a full-sync, it is possible to limit the total number of
+    /// rooms to load by using this field.
+    full_sync_maximum_number_of_rooms_to_fetch: Option<u32>,

     /// Whether the list should send `UpdatedAt`-Diff signals for rooms
-    /// that have changed
+    /// that have changed.
     send_updates_for_items: bool,

-    /// How many rooms request a total hen doing a full-sync catch up
-    limit: Option<u32>,
-
-    /// Any filters to apply to the query
+    /// Any filters to apply to the query.
     filters: Option<v4::SyncRequestListFilters>,

     /// The maximum number of timeline events to query for
@@ -70,16 +72,22 @@ pub struct SlidingSyncList {
     /// Name of this list to easily recognize them
     pub name: String,

-    /// The state this list is in
+    /// The state this list is in.
     state: Arc<StdRwLock<Observable<SlidingSyncState>>>,

-    /// The total known number of rooms,
-    rooms_count: Arc<StdRwLock<Observable<Option<u32>>>>,
+    /// The total number of rooms that is possible to interact with for the
+    /// given list.
+    ///
+    /// It's not the total rooms that have been fetched. The server tells the
+    /// client that it's possible to fetch this amount of rooms maximum.
+    /// Since this number can change according to the list filters, it's
+    /// observable.
+    maximum_number_of_rooms: Arc<StdRwLock<Observable<Option<u32>>>>,

-    /// The rooms in order
+    /// The rooms in order.
     rooms_list: Arc<StdRwLock<ObservableVector<RoomListEntry>>>,

-    /// The ranges windows of the list
+    /// The ranges windows of the list.
     #[allow(clippy::type_complexity)] // temporarily
     ranges: Arc<StdRwLock<Observable<Vec<(UInt, UInt)>>>>,
@@ -95,12 +103,15 @@ pub struct SlidingSyncList {
 impl SlidingSyncList {
     pub(crate) fn set_from_cold(
         &mut self,
-        rooms_count: Option<u32>,
+        maximum_number_of_rooms: Option<u32>,
         rooms_list: Vector<RoomListEntry>,
     ) {
-        Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preload);
+        Observable::set(&mut self.state.write().unwrap(), SlidingSyncState::Preloaded);
         self.is_cold.store(true, Ordering::SeqCst);
-        Observable::set(&mut self.rooms_count.write().unwrap(), rooms_count);
+        Observable::set(
+            &mut self.maximum_number_of_rooms.write().unwrap(),
+            maximum_number_of_rooms,
+        );

         let mut lock = self.rooms_list.write().unwrap();
         lock.clear();
@@ -119,7 +130,7 @@ impl SlidingSyncList {
             .sync_mode(self.sync_mode.clone())
             .sort(self.sort.clone())
             .required_state(self.required_state.clone())
-            .batch_size(self.batch_size)
+            .full_sync_batch_size(self.full_sync_batch_size)
             .ranges(self.ranges.read().unwrap().clone())
     }
@@ -195,14 +206,15 @@ impl SlidingSyncList {
         ObservableVector::subscribe(&self.rooms_list.read().unwrap())
     }

-    /// Get the current rooms count.
-    pub fn rooms_count(&self) -> Option<u32> {
-        **self.rooms_count.read().unwrap()
+    /// Get the maximum number of rooms. See [`Self::maximum_number_of_rooms`]
+    /// to learn more.
+    pub fn maximum_number_of_rooms(&self) -> Option<u32> {
+        **self.maximum_number_of_rooms.read().unwrap()
     }

     /// Get a stream of rooms count.
-    pub fn rooms_count_stream(&self) -> impl Stream<Item = Option<u32>> {
-        Observable::subscribe(&self.rooms_count.read().unwrap())
+    pub fn maximum_number_of_rooms_stream(&self) -> impl Stream<Item = Option<u32>> {
+        Observable::subscribe(&self.maximum_number_of_rooms.read().unwrap())
     }

     /// Find the current valid position of the room in the list `room_list`.
@@ -283,26 +295,32 @@ impl SlidingSyncList {
     #[instrument(skip(self, ops), fields(name = self.name, ops_count = ops.len()))]
     pub(super) fn handle_response(
         &self,
-        rooms_count: u32,
+        maximum_number_of_rooms: u32,
         ops: &Vec<v4::SyncOp>,
-        ranges: &Vec<(usize, usize)>,
-        rooms: &Vec<OwnedRoomId>,
+        ranges: &Vec<(UInt, UInt)>,
+        updated_rooms: &Vec<OwnedRoomId>,
     ) -> Result<bool, Error> {
-        let current_rooms_count = **self.rooms_count.read().unwrap();
+        let ranges = ranges
+            .iter()
+            .map(|(start, end)| ((*start).try_into().unwrap(), (*end).try_into().unwrap()))
+            .collect::<Vec<(usize, usize)>>();

-        if current_rooms_count.is_none()
-            || current_rooms_count == Some(0)
+        let current_maximum_number_of_rooms = **self.maximum_number_of_rooms.read().unwrap();
+
+        if current_maximum_number_of_rooms.is_none()
+            || current_maximum_number_of_rooms == Some(0)
             || self.is_cold.load(Ordering::SeqCst)
         {
             debug!("first run, replacing rooms list");

             // first response, we do that slightly differently
             let mut rooms_list = ObservableVector::new();
-            rooms_list
-                .append(iter::repeat(RoomListEntry::Empty).take(rooms_count as usize).collect());
+            rooms_list.append(
+                iter::repeat(RoomListEntry::Empty).take(maximum_number_of_rooms as usize).collect(),
+            );

             // then we apply it
-            room_ops(&mut rooms_list, ops, ranges)?;
+            room_ops(&mut rooms_list, ops, &ranges)?;

             {
                 let mut lock = self.rooms_list.write().unwrap();
@@ -310,7 +328,10 @@ impl SlidingSyncList {
                 lock.append(rooms_list.into_inner());
             }

-            Observable::set(&mut self.rooms_count.write().unwrap(), Some(rooms_count));
+            Observable::set(
+                &mut self.maximum_number_of_rooms.write().unwrap(),
+                Some(maximum_number_of_rooms),
+            );
             self.is_cold.store(false, Ordering::SeqCst);

             return Ok(true);
@@ -318,7 +339,7 @@ impl SlidingSyncList {
         debug!("regular update");

-        let mut missing = rooms_count
+        let mut missing = maximum_number_of_rooms
             .checked_sub(self.rooms_list.read().unwrap().len() as u32)
             .unwrap_or_default();
         let mut changed = false;
@@ -339,7 +360,7 @@ impl SlidingSyncList {
             let mut rooms_list = self.rooms_list.write().unwrap();

             if !ops.is_empty() {
-                room_ops(&mut rooms_list, ops, ranges)?;
+                room_ops(&mut rooms_list, ops, &ranges)?;
                 changed = true;
             } else {
                 debug!("no rooms operations found");
@@ -347,16 +368,16 @@ impl SlidingSyncList {
         }

         {
-            let mut lock = self.rooms_count.write().unwrap();
+            let mut lock = self.maximum_number_of_rooms.write().unwrap();

-            if **lock != Some(rooms_count) {
-                Observable::set(&mut lock, Some(rooms_count));
+            if **lock != Some(maximum_number_of_rooms) {
+                Observable::set(&mut lock, Some(maximum_number_of_rooms));
                 changed = true;
             }
         }

-        if self.send_updates_for_items && !rooms.is_empty() {
-            let found_lists = self.find_rooms_in_list(rooms);
+        if self.send_updates_for_items && !updated_rooms.is_empty() {
+            let found_lists = self.find_rooms_in_list(updated_rooms);

             if !found_lists.is_empty() {
                 debug!("room details found");
@@ -380,22 +401,24 @@ impl SlidingSyncList {
     pub(super) fn request_generator(&self) -> SlidingSyncListRequestGenerator {
         match &self.sync_mode {
             SlidingSyncMode::PagingFullSync => {
-                SlidingSyncListRequestGenerator::new_with_paging_syncup(self.clone())
+                SlidingSyncListRequestGenerator::new_with_paging_full_sync(self.clone())
             }
             SlidingSyncMode::GrowingFullSync => {
-                SlidingSyncListRequestGenerator::new_with_growing_syncup(self.clone())
+                SlidingSyncListRequestGenerator::new_with_growing_full_sync(self.clone())
             }
-            SlidingSyncMode::Selective => SlidingSyncListRequestGenerator::new_live(self.clone()),
+            SlidingSyncMode::Selective => {
+                SlidingSyncListRequestGenerator::new_selective(self.clone())
+            }
         }
     }
 }

 #[derive(Serialize, Deserialize)]
 pub(super) struct FrozenSlidingSyncList {
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub(super) rooms_count: Option<u32>,
+    #[serde(default, rename = "rooms_count", skip_serializing_if = "Option::is_none")]
+    pub(super) maximum_number_of_rooms: Option<u32>,
     #[serde(default, skip_serializing_if = "Vector::is_empty")]
     pub(super) rooms_list: Vector<RoomListEntry>,
     #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
@@ -426,7 +449,7 @@ impl FrozenSlidingSyncList {
         }

         FrozenSlidingSyncList {
-            rooms_count: **source_list.rooms_count.read().unwrap(),
+            maximum_number_of_rooms: **source_list.maximum_number_of_rooms.read().unwrap(),
             rooms_list,
             rooms,
         }
@@ -599,38 +622,44 @@ fn room_ops(
 /// The state the [`SlidingSyncList`] is in.
 ///
-/// The lifetime of a SlidingSync usually starts at a `Preload`, getting a fast
-/// response for the first given number of Rooms, then switches into
-/// `CatchingUp` during which the list fetches the remaining rooms, usually in
-/// order, some times in batches. Once that is ready, it switches into `Live`.
+/// The lifetime of a `SlidingSyncList` usually starts at `NotLoaded` or
+/// `Preloaded` (if it is restored from a cache). When loading rooms in a list,
+/// depending of the [`SlidingSyncMode`], it moves to `PartiallyLoaded` or
+/// `FullyLoaded`.
 ///
-/// If the client has been offline for a while, though, the SlidingSync might
-/// return back to `CatchingUp` at any point.
+/// If the client has been offline for a while, though, the `SlidingSyncList`
+/// might return back to `PartiallyLoaded` at any point.
 #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub enum SlidingSyncState {
-    /// Hasn't started yet
+    /// Sliding Sync has not started to load anything yet.
     #[default]
-    Cold,
-    /// We are quickly preloading a preview of the most important rooms
-    Preload,
-    /// We are trying to load all remaining rooms, might be in batches
-    CatchingUp,
-    /// We are all caught up and now only sync the live responses.
-    Live,
+    #[serde(rename = "Cold")]
+    NotLoaded,
+    /// Sliding Sync has been preloaded, i.e. restored from a cache for example.
+    #[serde(rename = "Preload")]
+    Preloaded,
+    /// Updates are received from the loaded rooms, and new rooms are being
+    /// fetched in the background.
+    #[serde(rename = "CatchingUp")]
+    PartiallyLoaded,
+    /// Updates are received for all the loaded rooms, and all rooms have been
+    /// loaded!
+    #[serde(rename = "Live")]
+    FullyLoaded,
 }

-/// The mode by which the the [`SlidingSyncList`] is in fetching the data.
+/// How a [`SlidingSyncList`] fetches the data.
 #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub enum SlidingSyncMode {
     /// Fully sync all rooms in the background, page by page of `batch_size`,
-    /// like `0..20`, `21..40`, 41..60` etc. assuming the `batch_size` is 20.
+    /// like `0..=19`, `20..=39`, `40..=59` etc. assuming the `batch_size` is 20.
     #[serde(alias = "FullSync")]
     PagingFullSync,
     /// Fully sync all rooms in the background, with a growing window of
-    /// `batch_size`, like `0..20`, `0..40`, `0..60` etc. assuming the
+    /// `batch_size`, like `0..=19`, `0..=39`, `0..=59` etc. assuming the
     /// `batch_size` is 20.
     GrowingFullSync,
-    /// Only sync the specific windows defined
+    /// Only sync the specific defined windows/ranges.
     #[default]
     Selective,
 }
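
The rename maps the old states one-to-one (Cold → NotLoaded, Preload → Preloaded, CatchingUp → PartiallyLoaded, Live → FullyLoaded), and the `#[serde(rename = ...)]` attributes keep the old strings on the wire, so previously written caches still deserialize. A minimal sketch of that compatibility, assuming `serde_json` as the serializer:

    // The new variant names still (de)serialize as the old strings.
    let json = serde_json::to_string(&SlidingSyncState::FullyLoaded).unwrap();
    assert_eq!(json, r#""Live""#);

    let state: SlidingSyncState = serde_json::from_str(r#""CatchingUp""#).unwrap();
    assert_eq!(state, SlidingSyncState::PartiallyLoaded);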

View File

@@ -1,104 +1,157 @@
+//! The logic to generate Sliding Sync list requests.
+//!
+//! Depending on the [`SlidingSyncMode`], the generated requests aren't the
+//! same.
+//!
+//! In [`SlidingSyncMode::Selective`], it's pretty straightforward:
+//!
+//! * There is a set of ranges,
+//! * Each request asks to load the particular ranges.
+//!
+//! In [`SlidingSyncMode::PagingFullSync`]:
+//!
+//! * There is a `batch_size`,
+//! * Each request asks to load a new successive range containing exactly
+//!   `batch_size` rooms.
+//!
+//! In [`SlidingSyncMode::GrowingFullSync]:
+//!
+//! * There is a `batch_size`,
+//! * Each request asks to load a new range, always starting from 0, but where
+//!   the end is incremented by `batch_size` everytime.
+//!
+//! The number of rooms to load is capped by the
+//! [`SlidingSyncList::maximum_number_of_rooms`], i.e. the real number of
+//! rooms it is possible to load. This value comes from the server.
+//!
+//! The number of rooms to load can _also_ be capped by the
+//! [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`], i.e. a
+//! user-specified limit representing the maximum number of rooms the user
+//! actually wants to load.
+
 use std::cmp::min;

 use eyeball::unique::Observable;
 use ruma::{api::client::sync::sync_events::v4, assign, OwnedRoomId, UInt};
-use tracing::{error, instrument, trace};
+use tracing::{error, instrument};

 use super::{Error, SlidingSyncList, SlidingSyncState};

+/// The kind of request generator.
+#[derive(Debug)]
 enum GeneratorKind {
-    GrowingFullSync { position: u32, batch_size: u32, limit: Option<u32>, live: bool },
-    PagingFullSync { position: u32, batch_size: u32, limit: Option<u32>, live: bool },
-    Live,
+    // Growing-mode (see [`SlidingSyncMode`]).
+    GrowingFullSync {
+        // Number of fetched rooms.
+        number_of_fetched_rooms: u32,
+
+        // Size of the batch, used to grow the range to fetch more rooms.
+        batch_size: u32,
+
+        // Maximum number of rooms to fetch (see
+        // [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]).
+        maximum_number_of_rooms_to_fetch: Option<u32>,
+
+        // Whether all rooms have been loaded.
+        fully_loaded: bool,
+    },
+
+    // Paging-mode (see [`SlidingSyncMode`]).
+    PagingFullSync {
+        // Number of fetched rooms.
+        number_of_fetched_rooms: u32,
+
+        // Size of the batch, used to grow the range to fetch more rooms.
+        batch_size: u32,
+
+        // Maximum number of rooms to fetch (see
+        // [`SlidingSyncList::full_sync_maximum_number_of_rooms_to_fetch`]).
+        maximum_number_of_rooms_to_fetch: Option<u32>,
+
+        // Whether all romms have been loaded.
+        fully_loaded: bool,
+    },
+
+    // Selective-mode (see [`SlidingSyncMode`]).
+    Selective,
 }

+/// A request generator for [`SlidingSyncList`].
+#[derive(Debug)]
 pub(in super::super) struct SlidingSyncListRequestGenerator {
+    /// The parent [`SlidingSyncList`] object that has created this request
+    /// generator.
     list: SlidingSyncList,
-    ranges: Vec<(usize, usize)>,
+
+    /// The current range used by this request generator.
+    ranges: Vec<(UInt, UInt)>,
+
+    /// The kind of request generator.
     kind: GeneratorKind,
 }

 impl SlidingSyncListRequestGenerator {
-    pub(super) fn new_with_paging_syncup(list: SlidingSyncList) -> Self {
-        let batch_size = list.batch_size;
-        let limit = list.limit;
-        let position = list
+    /// Create a new request generator configured for paging-mode.
+    pub(super) fn new_with_paging_full_sync(list: SlidingSyncList) -> Self {
+        let batch_size = list.full_sync_batch_size;
+        let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch;
+
+        // If a range exists, let's consider it's been used to load existing room. So
+        // let's start from the end of the range. It can be useful when we resume a sync
+        // for example. Otherwise let's use the default value.
+        let number_of_fetched_rooms = list
             .ranges
             .read()
             .unwrap()
             .first()
-            .map(|(_start, end)| u32::try_from(*end).unwrap())
+            .map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1))
             .unwrap_or_default();

         Self {
             list,
-            ranges: Default::default(),
-            kind: GeneratorKind::PagingFullSync { position, batch_size, limit, live: false },
+            ranges: Vec::new(),
+            kind: GeneratorKind::PagingFullSync {
+                number_of_fetched_rooms,
+                batch_size,
+                maximum_number_of_rooms_to_fetch,
+                fully_loaded: false,
+            },
         }
     }

-    pub(super) fn new_with_growing_syncup(list: SlidingSyncList) -> Self {
-        let batch_size = list.batch_size;
-        let limit = list.limit;
-        let position = list
+    /// Create a new request generator configured for growing-mode.
+    pub(super) fn new_with_growing_full_sync(list: SlidingSyncList) -> Self {
+        let batch_size = list.full_sync_batch_size;
+        let maximum_number_of_rooms_to_fetch = list.full_sync_maximum_number_of_rooms_to_fetch;
+
+        // If a range exists, let's consider it's been used to load existing room. So
+        // let's start from the end of the range. It can be useful when we resume a sync
+        // for example. Otherwise let's use the default value.
+        let number_of_fetched_rooms = list
             .ranges
             .read()
             .unwrap()
             .first()
-            .map(|(_start, end)| u32::try_from(*end).unwrap())
+            .map(|(_start, end)| u32::try_from(*end).unwrap().saturating_add(1))
             .unwrap_or_default();

         Self {
             list,
-            ranges: Default::default(),
-            kind: GeneratorKind::GrowingFullSync { position, batch_size, limit, live: false },
+            ranges: Vec::new(),
+            kind: GeneratorKind::GrowingFullSync {
+                number_of_fetched_rooms,
+                batch_size,
+                maximum_number_of_rooms_to_fetch,
+                fully_loaded: false,
+            },
         }
     }

-    pub(super) fn new_live(list: SlidingSyncList) -> Self {
-        Self { list, ranges: Default::default(), kind: GeneratorKind::Live }
+    /// Create a new request generator configured for selective-mode.
+    pub(super) fn new_selective(list: SlidingSyncList) -> Self {
+        Self { list, ranges: Vec::new(), kind: GeneratorKind::Selective }
     }

-    fn prefetch_request(
-        &mut self,
-        start: u32,
-        batch_size: u32,
-        limit: Option<u32>,
-    ) -> v4::SyncRequestList {
-        let calculated_end = start + batch_size;
-        let mut end = match limit {
-            Some(limit) => min(limit, calculated_end),
-            _ => calculated_end,
-        };
-
-        end = match self.list.rooms_count() {
-            Some(total_room_count) => min(end, total_room_count - 1),
-            _ => end,
-        };
-
-        self.make_request_for_ranges(vec![(start.into(), end.into())])
-    }
-
-    #[instrument(skip(self), fields(name = self.list.name))]
-    fn make_request_for_ranges(&mut self, ranges: Vec<(UInt, UInt)>) -> v4::SyncRequestList {
+    /// Build a [`SyncRequestList`][v4::SyncRequestList].
+    #[instrument(skip(self), fields(name = self.list.name, ranges = ?&self.ranges))]
+    fn build_request(&self) -> v4::SyncRequestList {
         let sort = self.list.sort.clone();
         let required_state = self.list.required_state.clone();
         let timeline_limit = **self.list.timeline_limit.read().unwrap();
         let filters = self.list.filters.clone();

-        self.ranges = ranges
-            .iter()
-            .map(|(a, b)| {
-                (
-                    usize::try_from(*a).expect("range is a valid u32"),
-                    usize::try_from(*b).expect("range is a valid u32"),
-                )
-            })
-            .collect();
-
         assign!(v4::SyncRequestList::default(), {
-            ranges: ranges,
+            ranges: self.ranges.clone(),
             room_details: assign!(v4::RoomDetailsConfig::default(), {
                 required_state,
                 timeline_limit,
@@ -108,67 +161,152 @@ impl SlidingSyncListRequestGenerator {
         })
     }

+    // Handle the response from the server.
     #[instrument(skip_all, fields(name = self.list.name, rooms_count, has_ops = !ops.is_empty()))]
     pub(in super::super) fn handle_response(
         &mut self,
-        rooms_count: u32,
+        maximum_number_of_rooms: u32,
         ops: &Vec<v4::SyncOp>,
-        rooms: &Vec<OwnedRoomId>,
+        updated_rooms: &Vec<OwnedRoomId>,
     ) -> Result<bool, Error> {
-        let response = self.list.handle_response(rooms_count, ops, &self.ranges, rooms)?;
-        self.update_state(rooms_count.saturating_sub(1)); // index is 0 based, count is 1 based
+        let response =
+            self.list.handle_response(maximum_number_of_rooms, ops, &self.ranges, updated_rooms)?;
+        self.update_state(maximum_number_of_rooms);

         Ok(response)
     }

-    fn update_state(&mut self, max_index: u32) {
-        let Some((_start, range_end)) = self.ranges.first() else {
-            error!("Why don't we have any ranges?");
+    /// Update the state of the generator.
+    fn update_state(&mut self, maximum_number_of_rooms: u32) {
+        let Some(range_end) = self.ranges.first().map(|(_start, end)| u32::try_from(*end).unwrap()) else {
+            error!(name = self.list.name, "The request generator must have a range.");
             return;
         };

-        let end = if &(max_index as usize) < range_end { max_index } else { *range_end as u32 };
-
-        trace!(end, max_index, range_end, name = self.list.name, "updating state");
-
         match &mut self.kind {
-            GeneratorKind::PagingFullSync { position, live, limit, .. }
-            | GeneratorKind::GrowingFullSync { position, live, limit, .. } => {
-                let max = limit.map(|limit| min(limit, max_index)).unwrap_or(max_index);
-
-                trace!(end, max, name = self.list.name, "updating state");
-
-                if end >= max {
-                    // Switching to live mode.
-                    trace!(name = self.list.name, "going live");
-
-                    self.list.set_range(0, max);
-                    *position = max;
-                    *live = true;
+            GeneratorKind::PagingFullSync {
+                number_of_fetched_rooms,
+                fully_loaded,
+                maximum_number_of_rooms_to_fetch,
+                ..
+            }
+            | GeneratorKind::GrowingFullSync {
+                number_of_fetched_rooms,
+                fully_loaded,
+                maximum_number_of_rooms_to_fetch,
+                ..
+            } => {
+                // Calculate the maximum bound for the range.
+                // At this step, the server has given us a maximum number of rooms for this
+                // list. That's our `range_maximum`.
+                let mut range_maximum = maximum_number_of_rooms;
+
+                // But maybe the user has defined a maximum number of rooms to fetch? In this
+                // case, let's take the minimum of the two.
+                if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch {
+                    range_maximum = min(range_maximum, *maximum_number_of_rooms_to_fetch);
+                }
+
+                // Finally, ranges are inclusive!
+                range_maximum = range_maximum.saturating_sub(1);
+
+                // Now, we know what the maximum bound for the range is.
+
+                // The current range hasn't reached its maximum, let's continue.
+                if range_end < range_maximum {
+                    // Update the _list range_ to cover from 0 to `range_end`.
+                    // The list range is different from the request generator (this) range.
+                    self.list.set_range(0, range_end);
+
+                    // Update the number of fetched rooms forward. Do not forget that ranges are
+                    // inclusive, so let's add 1.
+                    *number_of_fetched_rooms = range_end.saturating_add(1);
+
+                    // The list is still not fully loaded.
+                    *fully_loaded = false;

+                    // Finally, let's update the list' state.
                     Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
-                        *state = SlidingSyncState::Live;
+                        *state = SlidingSyncState::PartiallyLoaded;
                     });
-                } else {
-                    *position = end;
-                    *live = false;
-                    self.list.set_range(0, end);
+                }
+                // Otherwise the current range has reached its maximum, we switched to `FullyLoaded`
+                // mode.
+                else {
+                    // The range is covering the entire list, from 0 to its maximum.
+                    self.list.set_range(0, range_maximum);
+
+                    // The number of fetched rooms is set to the maximum too.
+                    *number_of_fetched_rooms = range_maximum;
+
+                    // And we update the `fully_loaded` marker.
+                    *fully_loaded = true;

+                    // Finally, let's update the list' state.
                     Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
-                        *state = SlidingSyncState::CatchingUp;
+                        *state = SlidingSyncState::FullyLoaded;
                     });
                 }
             }

-            GeneratorKind::Live => {
+            GeneratorKind::Selective => {
+                // Selective mode always loads everything.
                 Observable::update_eq(&mut self.list.state.write().unwrap(), |state| {
-                    *state = SlidingSyncState::Live;
+                    *state = SlidingSyncState::FullyLoaded;
                 });
             }
         }
     }
+
+    #[cfg(test)]
+    fn is_fully_loaded(&self) -> bool {
+        match self.kind {
+            GeneratorKind::PagingFullSync { fully_loaded, .. }
+            | GeneratorKind::GrowingFullSync { fully_loaded, .. } => fully_loaded,
+            GeneratorKind::Selective => true,
+        }
+    }
+}
+
+fn create_range(
+    start: u32,
+    desired_size: u32,
+    maximum_number_of_rooms_to_fetch: Option<u32>,
+    maximum_number_of_rooms: Option<u32>,
+) -> Option<(UInt, UInt)> {
+    // Calculate the range.
+    // The `start` bound is given. Let's calculate the `end` bound.
+
+    // The `end`, by default, is `start` + `desired_size`.
+    let mut end = start + desired_size;
+
+    // But maybe the user has defined a maximum number of rooms to fetch? In this
+    // case, take the minimum of the two.
+    if let Some(maximum_number_of_rooms_to_fetch) = maximum_number_of_rooms_to_fetch {
+        end = min(end, maximum_number_of_rooms_to_fetch);
+    }
+
+    // But there is more! The server can tell us what is the maximum number of rooms
+    // fulfilling a particular list. For example, if the server says there is 42
+    // rooms for a particular list, with a `start` of 40 and a `batch_size` of 20,
+    // the range must be capped to `[40; 46]`; the range `[40; 60]` would be invalid
+    // and could be rejected by the server.
+    if let Some(maximum_number_of_rooms) = maximum_number_of_rooms {
+        end = min(end, maximum_number_of_rooms);
+    }
+
+    // Finally, because the bounds of the range are inclusive, 1 is subtracted.
+    end = end.saturating_sub(1);
+
+    // Make sure `start` is smaller than `end`. It can happen if `start` is greater
+    // than `maximum_number_of_rooms_to_fetch` or `maximum_number_of_rooms`.
+    if start > end {
+        return None;
+    }
+
+    Some((start.into(), end.into()))
 }
@@ -176,21 +314,357 @@ impl Iterator for SlidingSyncListRequestGenerator {
     fn next(&mut self) -> Option<Self::Item> {
         match self.kind {
-            GeneratorKind::PagingFullSync { live: true, .. }
-            | GeneratorKind::GrowingFullSync { live: true, .. }
-            | GeneratorKind::Live => {
-                let ranges = self.list.ranges.read().unwrap().clone();
+            // Cases where all rooms have been fully loaded.
+            GeneratorKind::PagingFullSync { fully_loaded: true, .. }
+            | GeneratorKind::GrowingFullSync { fully_loaded: true, .. }
+            | GeneratorKind::Selective => {
+                // Let's copy all the ranges from the parent `SlidingSyncList`, and build a
+                // request for them.
+                self.ranges = self.list.ranges.read().unwrap().clone();

-                Some(self.make_request_for_ranges(ranges))
+                // Here we go.
+                Some(self.build_request())
             }

-            GeneratorKind::PagingFullSync { position, batch_size, limit, .. } => {
-                Some(self.prefetch_request(position, batch_size, limit))
+            GeneratorKind::PagingFullSync {
+                number_of_fetched_rooms,
+                batch_size,
+                maximum_number_of_rooms_to_fetch,
+                ..
+            } => {
+                // In paging-mode, range starts at the number of fetched rooms. Since ranges are
+                // inclusive, and since the number of fetched rooms starts at 1,
+                // not at 0, there is no need to add 1 here.
+                let range_start = number_of_fetched_rooms;
+                let range_desired_size = batch_size;
+
+                // Create a new range, and use it as the current set of ranges.
+                self.ranges = vec![create_range(
+                    range_start,
+                    range_desired_size,
+                    maximum_number_of_rooms_to_fetch,
+                    self.list.maximum_number_of_rooms(),
+                )?];
+
+                // Here we go.
+                Some(self.build_request())
             }

-            GeneratorKind::GrowingFullSync { position, batch_size, limit, .. } => {
-                Some(self.prefetch_request(0, position + batch_size, limit))
+            GeneratorKind::GrowingFullSync {
+                number_of_fetched_rooms,
+                batch_size,
+                maximum_number_of_rooms_to_fetch,
+                ..
+            } => {
+                // In growing-mode, range always starts from 0. However, the end is growing by
+                // adding `batch_size` to the previous number of fetched rooms.
+                let range_start = 0;
+                let range_desired_size = number_of_fetched_rooms.saturating_add(batch_size);
+
+                // Create a new range, and use it as the current set of ranges.
+                self.ranges = vec![create_range(
+                    range_start,
+                    range_desired_size,
+                    maximum_number_of_rooms_to_fetch,
+                    self.list.maximum_number_of_rooms(),
+                )?];
+
+                // Here we go.
+                Some(self.build_request())
             }
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use ruma::uint;
+
+    use super::*;
+
+    #[test]
+    fn test_create_range_from() {
+        // From 0, we want 100 items.
+        assert_eq!(create_range(0, 100, None, None), Some((uint!(0), uint!(99))));
+
+        // From 100, we want 100 items.
+        assert_eq!(create_range(100, 100, None, None), Some((uint!(100), uint!(199))));
+
+        // From 0, we want 100 items, but there is a maximum number of rooms to fetch
+        // defined at 50.
+        assert_eq!(create_range(0, 100, Some(50), None), Some((uint!(0), uint!(49))));
+
+        // From 49, we want 100 items, but there is a maximum number of rooms to fetch
+        // defined at 50. There is 1 item to load.
+        assert_eq!(create_range(49, 100, Some(50), None), Some((uint!(49), uint!(49))));
+
+        // From 50, we want 100 items, but there is a maximum number of rooms to fetch
+        // defined at 50.
+        assert_eq!(create_range(50, 100, Some(50), None), None);
+
+        // From 0, we want 100 items, but there is a maximum number of rooms defined at
+        // 50.
+        assert_eq!(create_range(0, 100, None, Some(50)), Some((uint!(0), uint!(49))));
+
+        // From 49, we want 100 items, but there is a maximum number of rooms defined at
+        // 50. There is 1 item to load.
+        assert_eq!(create_range(49, 100, None, Some(50)), Some((uint!(49), uint!(49))));
+
+        // From 50, we want 100 items, but there is a maximum number of rooms defined at
+        // 50.
+        assert_eq!(create_range(50, 100, None, Some(50)), None);
+
+        // From 0, we want 100 items, but there is a maximum number of rooms to fetch
+        // defined at 75, and a maximum number of rooms defined at 50.
+        assert_eq!(create_range(0, 100, Some(75), Some(50)), Some((uint!(0), uint!(49))));
+
+        // From 0, we want 100 items, but there is a maximum number of rooms to fetch
+        // defined at 50, and a maximum number of rooms defined at 75.
+        assert_eq!(create_range(0, 100, Some(50), Some(75)), Some((uint!(0), uint!(49))));
+    }
+
+    macro_rules! assert_request_and_response {
+        (
+            list = $list:ident,
+            generator = $generator:ident,
+            maximum_number_of_rooms = $maximum_number_of_rooms:expr,
+            $(
+                next => {
+                    ranges = $( [ $range_start:literal ; $range_end:literal ] ),+ ,
+                    is_fully_loaded = $is_fully_loaded:expr,
+                    list_state = $list_state:ident,
+                }
+            ),*
+            $(,)*
+        ) => {
+            // That's the initial state.
+            assert_eq!($list.state(), SlidingSyncState::NotLoaded);
+
+            $(
+                {
+                    // Generate a new request.
+                    let request = $generator.next().unwrap();
+
+                    assert_eq!(request.ranges, [ $( (uint!( $range_start ), uint!( $range_end )) ),* ]);
+
+                    // Fake a response.
+                    let _ = $generator.handle_response($maximum_number_of_rooms, &vec![], &vec![]);
+
+                    assert_eq!($generator.is_fully_loaded(), $is_fully_loaded);
+                    assert_eq!($list.state(), SlidingSyncState::$list_state);
+                }
+            )*
+        };
+    }
+
+    #[test]
+    fn test_generator_paging_full_sync() {
+        let list = SlidingSyncList::builder()
+            .sync_mode(crate::SlidingSyncMode::PagingFullSync)
+            .name("testing")
+            .full_sync_batch_size(10)
+            .build()
+            .unwrap();
+        let mut generator = list.request_generator();
+
+        assert_request_and_response! {
+            list = list,
+            generator = generator,
+            maximum_number_of_rooms = 25,
+            next => {
+                ranges = [0; 9],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            next => {
+                ranges = [10; 19],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            // The maximum number of rooms is reached!
+            next => {
+                ranges = [20; 24],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            // Now it's fully loaded, so the same request must be produced everytime.
+            next => {
+                ranges = [0; 24], // the range starts at 0 now!
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            next => {
+                ranges = [0; 24],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+        };
+    }
+
+    #[test]
+    fn test_generator_paging_full_sync_with_a_maximum_number_of_rooms_to_fetch() {
+        let list = SlidingSyncList::builder()
+            .sync_mode(crate::SlidingSyncMode::PagingFullSync)
+            .name("testing")
+            .full_sync_batch_size(10)
+            .full_sync_maximum_number_of_rooms_to_fetch(22)
+            .build()
+            .unwrap();
+        let mut generator = list.request_generator();
+
+        assert_request_and_response! {
+            list = list,
+            generator = generator,
+            maximum_number_of_rooms = 25,
+            next => {
+                ranges = [0; 9],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            next => {
+                ranges = [10; 19],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            // The maximum number of rooms to fetch is reached!
+            next => {
+                ranges = [20; 21],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            // Now it's fully loaded, so the same request must be produced everytime.
+            next => {
+                ranges = [0; 21], // the range starts at 0 now!
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            next => {
+                ranges = [0; 21],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+        };
+    }
+
+    #[test]
+    fn test_generator_growing_full_sync() {
+        let list = SlidingSyncList::builder()
+            .sync_mode(crate::SlidingSyncMode::GrowingFullSync)
+            .name("testing")
+            .full_sync_batch_size(10)
+            .build()
+            .unwrap();
+        let mut generator = list.request_generator();
+
+        assert_request_and_response! {
+            list = list,
+            generator = generator,
+            maximum_number_of_rooms = 25,
+            next => {
+                ranges = [0; 9],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            next => {
+                ranges = [0; 19],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            // The maximum number of rooms is reached!
+            next => {
+                ranges = [0; 24],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            // Now it's fully loaded, so the same request must be produced everytime.
+            next => {
+                ranges = [0; 24],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            next => {
+                ranges = [0; 24],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+        };
+    }
+
+    #[test]
+    fn test_generator_growing_full_sync_with_a_maximum_number_of_rooms_to_fetch() {
+        let list = SlidingSyncList::builder()
+            .sync_mode(crate::SlidingSyncMode::GrowingFullSync)
+            .name("testing")
+            .full_sync_batch_size(10)
+            .full_sync_maximum_number_of_rooms_to_fetch(22)
+            .build()
+            .unwrap();
+        let mut generator = list.request_generator();
+
+        assert_request_and_response! {
+            list = list,
+            generator = generator,
+            maximum_number_of_rooms = 25,
+            next => {
+                ranges = [0; 9],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            next => {
+                ranges = [0; 19],
+                is_fully_loaded = false,
+                list_state = PartiallyLoaded,
+            },
+            // The maximum number of rooms is reached!
+            next => {
+                ranges = [0; 21],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            // Now it's fully loaded, so the same request must be produced everytime.
+            next => {
+                ranges = [0; 21],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            next => {
+                ranges = [0; 21],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+        };
+    }
+
+    #[test]
+    fn test_generator_selective() {
+        let list = SlidingSyncList::builder()
+            .sync_mode(crate::SlidingSyncMode::Selective)
+            .name("testing")
+            .ranges(vec![(0u32, 10), (42, 153)])
+            .build()
+            .unwrap();
+        let mut generator = list.request_generator();
+
+        assert_request_and_response! {
+            list = list,
+            generator = generator,
+            maximum_number_of_rooms = 25,
+            // The maximum number of rooms is reached directly!
+            next => {
+                ranges = [0; 10], [42; 153],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            // Now it's fully loaded, so the same request must be produced everytime.
+            next => {
+                ranges = [0; 10], [42; 153],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            },
+            next => {
+                ranges = [0; 10], [42; 153],
+                is_fully_loaded = true,
+                list_state = FullyLoaded,
+            }
+        };
+    }
+}
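
Because `create_range` is a pure function, the request progression of the two full-sync modes can be reproduced outside the SDK. A self-contained sketch mirroring the semantics above, simplified to plain `u32` bounds instead of `UInt`:

    use std::cmp::min;

    // Same capping rules as `create_range` above: cap by the user limit, then by
    // the server's maximum, then make the bounds inclusive.
    fn create_range(
        start: u32,
        desired_size: u32,
        max_to_fetch: Option<u32>,
        max_rooms: Option<u32>,
    ) -> Option<(u32, u32)> {
        let mut end = start + desired_size;
        if let Some(max) = max_to_fetch {
            end = min(end, max);
        }
        if let Some(max) = max_rooms {
            end = min(end, max);
        }
        end = end.saturating_sub(1); // bounds are inclusive
        if start > end {
            return None;
        }
        Some((start, end))
    }

    fn main() {
        // Paging-mode, batch_size = 10, server reports 25 rooms: 0..=9, 10..=19, 20..=24.
        let mut fetched = 0;
        while let Some((start, end)) = create_range(fetched, 10, None, Some(25)) {
            println!("paging range: {start}..={end}");
            fetched = end + 1;
        }
        // Growing-mode would instead grow the desired size each turn: 0..=9, 0..=19, 0..=24.
    }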

View File

@@ -110,15 +110,15 @@
 //! copy can be retrieved by calling `SlidingSync::list()`, providing the name
 //! of the list. Next to the configuration settings (like name and
 //! `timeline_limit`), the list provides the stateful
-//! [`rooms_count`](SlidingSyncList::rooms_count),
+//! [`maximum_number_of_rooms`](SlidingSyncList::maximum_number_of_rooms),
 //! [`rooms_list`](SlidingSyncList::rooms_list) and
 //! [`state`](SlidingSyncList::state):
 //!
-//! - `rooms_count` is the number of rooms _total_ there were found matching
-//!   the filters given.
-//! - `rooms_list` is a vector of `rooms_count` [`RoomListEntry`]'s at the
-//!   current state. `RoomListEntry`'s only hold `the room_id` if given, the
-//!   [Rooms API](#rooms) holds the actual information about each room
+//! - `maximum_number_of_rooms` is the number of rooms _total_ there were found
+//!   matching the filters given.
+//! - `rooms_list` is a vector of `maximum_number_of_rooms` [`RoomListEntry`]'s
+//!   at the current state. `RoomListEntry`'s only hold `the room_id` if given,
+//!   the [Rooms API](#rooms) holds the actual information about each room
 //! - `state` is a [`SlidingSyncMode`] signalling meta information about the
 //!   list and its stateful data — whether this is the state loaded from local
 //!   cache, whether the [full sync](#helper-lists) is in progress or whether
@@ -171,11 +171,11 @@
 //!
 //! ### Room List Entries
 //!
-//! As the room list of each list is a vec of the `rooms_count` len but a room
-//! may only know of a subset of entries for sure at any given time, these
-//! entries are wrapped in [`RoomListEntry`][]. This type, in close proximity to
-//! the [specification][MSC], can be either `Empty`, `Filled` or `Invalidated`,
-//! signaling the state of each entry position.
+//! As the room list of each list is a vec of the `maximum_number_of_rooms` len
+//! but a room may only know of a subset of entries for sure at any given time,
+//! these entries are wrapped in [`RoomListEntry`][]. This type, in close
+//! proximity to the [specification][MSC], can be either `Empty`, `Filled` or
+//! `Invalidated`, signaling the state of each entry position.
 //! - `Empty` we don't know what sits here at this position in the list.
 //! - `Filled`: there is this `room_id` at this position.
 //! - `Invalidated` in that sense means that we _knew_ what was here before, but
@@ -429,8 +429,9 @@
 //! ## Caching
 //!
 //! All room data, for filled but also _invalidated_ rooms, including the entire
-//! timeline events as well as all list `room_lists` and `rooms_count` are held
-//! in memory (unless one `pop`s the list out).
+//! timeline events as well as all list `room_lists` and
+//! `maximum_number_of_rooms` are held in memory (unless one `pop`s the list
+//! out).
 //!
 //! This is a purely in-memory cache layer though. If one wants Sliding Sync to
 //! persist and load from cold (storage) cache, one needs to set its key with
@@ -511,8 +512,8 @@
 //!     .required_state(vec![
 //!         (StateEventType::RoomEncryption, "".to_owned())
 //!     ]) // only want to know if the room is encrypted
-//!     .batch_size(50) // grow the window by 50 items at a time
-//!     .limit(500) // only sync up the top 500 rooms
+//!     .full_sync_batch_size(50) // grow the window by 50 items at a time
+//!     .full_sync_maximum_number_of_rooms_to_fetch(500) // only sync up the top 500 rooms
 //!     .build()?;
 //!
 //! let active_list = SlidingSyncList::builder()
@@ -538,7 +539,7 @@
 //!
 //! let active_list = sliding_sync.list(&active_list_name).unwrap();
 //! let list_state_stream = active_list.state_stream();
-//! let list_count_stream = active_list.rooms_count_stream();
+//! let list_count_stream = active_list.maximum_number_of_rooms_stream();
 //! let list_stream = active_list.rooms_list_stream();
 //!
 //! tokio::spawn(async move {
@@ -585,7 +586,6 @@
 //! # });
 //! ```
 //!
-//!
 //! [MSC]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575
 //! [proxy]: https://github.com/matrix-org/sliding-sync
 //! [ruma-types]: https://docs.rs/ruma/latest/ruma/api/client/sync/sync_events/v4/index.html
@@ -930,7 +930,7 @@ impl SlidingSync {
         }

         let update_summary = {
-            let mut rooms = Vec::new();
+            let mut updated_rooms = Vec::new();
             let mut rooms_map = self.inner.rooms.write().unwrap();

             for (room_id, mut room_data) in sliding_sync_response.rooms.into_iter() {
@@ -963,22 +963,26 @@ impl SlidingSync {
                     );
                 }

-                rooms.push(room_id);
+                updated_rooms.push(room_id);
             }

             let mut updated_lists = Vec::new();

             for (name, updates) in sliding_sync_response.lists {
-                let Some(generator) = list_generators.get_mut(&name) else {
+                let Some(list_generator) = list_generators.get_mut(&name) else {
                     error!("Response for list `{name}` - unknown to us; skipping");
                     continue
                 };

-                let count: u32 =
+                let maximum_number_of_rooms: u32 =
                     updates.count.try_into().expect("the list total count convertible into u32");

-                if generator.handle_response(count, &updates.ops, &rooms)? {
+                if list_generator.handle_response(
+                    maximum_number_of_rooms,
+                    &updates.ops,
+                    &updated_rooms,
+                )? {
                     updated_lists.push(name.clone());
                 }
             }
@@ -988,7 +992,7 @@ impl SlidingSync {
                 self.update_to_device_since(to_device.next_batch);
             }

-            UpdateSummary { lists: updated_lists, rooms }
+            UpdateSummary { lists: updated_lists, rooms: updated_rooms }
         };

         Ok(update_summary)
@@ -1292,7 +1296,7 @@ pub struct UpdateSummary {
 #[cfg(test)]
 mod test {
     use assert_matches::assert_matches;
-    use ruma::room_id;
+    use ruma::{room_id, uint};
     use serde_json::json;
     use wiremock::MockServer;
@@ -1321,7 +1325,13 @@ mod test {
         }))
         .unwrap();

-        list.handle_response(10u32, &vec![full_window_update], &vec![(0, 9)], &vec![]).unwrap();
+        list.handle_response(
+            10u32,
+            &vec![full_window_update],
+            &vec![(uint!(0), uint!(9))],
+            &vec![],
+        )
+        .unwrap();

         let a02 = room_id!("!A00002:matrix.example").to_owned();
         let a05 = room_id!("!A00005:matrix.example").to_owned();
@@ -1343,7 +1353,13 @@ mod test {
         }))
         .unwrap();

-        list.handle_response(10u32, &vec![update], &vec![(0, 3), (8, 9)], &vec![]).unwrap();
+        list.handle_response(
+            10u32,
+            &vec![update],
+            &vec![(uint!(0), uint!(3)), (uint!(8), uint!(9))],
+            &vec![],
+        )
+        .unwrap();

         assert_eq!(list.find_room_in_list(room_id!("!A00002:matrix.example")), Some(2));
         assert_eq!(list.find_room_in_list(room_id!("!A00005:matrix.example")), None);
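
The three `RoomListEntry` states documented in this module (`Empty`, `Filled`, `Invalidated`) are typically consumed with a plain match. A sketch; the exact import path depends on the SDK version:

    use matrix_sdk::RoomListEntry;

    fn describe(entry: &RoomListEntry) -> String {
        match entry {
            // Nothing is known about this position in the list yet.
            RoomListEntry::Empty => "empty".to_owned(),
            // The server told us which room sits at this position.
            RoomListEntry::Filled(room_id) => format!("room {room_id}"),
            // We knew what was here before, but it has left the sliding windows.
            RoomListEntry::Invalidated(room_id) => format!("stale room {room_id}"),
        }
    }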

View File

@@ -21,11 +21,12 @@ pub async fn run_client(
         .timeline_limit(10u32)
         .sync_mode(config.full_sync_mode.into());

     if let Some(size) = config.batch_size {
-        full_sync_view_builder = full_sync_view_builder.batch_size(size);
+        full_sync_view_builder = full_sync_view_builder.full_sync_batch_size(size);
     }
     if let Some(limit) = config.limit {
-        full_sync_view_builder = full_sync_view_builder.limit(limit);
+        full_sync_view_builder =
+            full_sync_view_builder.full_sync_maximum_number_of_rooms_to_fetch(limit);
     }
     if let Some(limit) = config.timeline_limit {
         full_sync_view_builder = full_sync_view_builder.timeline_limit(limit);
@ -66,7 +67,7 @@ pub async fn run_client(
let state = view.state(); let state = view.state();
ssync_state.set_view_state(state.clone()); ssync_state.set_view_state(state.clone());
if state == SlidingSyncState::Live { if state == SlidingSyncState::FullyLoaded {
info!("Reached live sync"); info!("Reached live sync");
break; break;
} }
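
A hypothetical predicate (our name, not the example's) that makes the new terminal state explicit; the `while state != FullyLoaded` loops in the tests further down are this check written inline:

    // Only `FullyLoaded` means every room is known and only live updates
    // remain; any other variant implies more rooms are still being fetched.
    fn keep_polling(state: &SlidingSyncState) -> bool {
        !matches!(state, SlidingSyncState::FullyLoaded)
    }
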


@ -135,7 +135,7 @@ impl SlidingSyncState {
} }
pub fn total_rooms_count(&self) -> Option<u32> { pub fn total_rooms_count(&self) -> Option<u32> {
self.view.rooms_count() self.view.maximum_number_of_rooms()
} }
pub fn set_first_render_now(&mut self) { pub fn set_first_render_now(&mut self) {


@ -67,7 +67,7 @@ impl From<&RoomListEntry> for RoomListEntryEasy {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::{ use std::{
iter::repeat, iter::{once, repeat},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
@ -133,7 +133,7 @@ mod tests {
// Get the list of all rooms to check the list's state. // Get the list of all rooms to check the list's state.
let list = sync.list("init_list").context("list `init_list` isn't found")?; let list = sync.list("init_list").context("list `init_list` isn't found")?;
assert_eq!(list.state(), SlidingSyncState::Cold); assert_eq!(list.state(), SlidingSyncState::NotLoaded);
// Send the request and wait for a response. // Send the request and wait for a response.
let update_summary = stream let update_summary = stream
@ -142,7 +142,7 @@ mod tests {
.context("No room summary found, loop ended unsuccessfully")??; .context("No room summary found, loop ended unsuccessfully")??;
// Check the state has switched to `Live`. // Check the state has switched to `FullyLoaded`.
assert_eq!(list.state(), SlidingSyncState::Live); assert_eq!(list.state(), SlidingSyncState::FullyLoaded);
// One room has received an update. // One room has received an update.
assert_eq!(update_summary.rooms.len(), 1); assert_eq!(update_summary.rooms.len(), 1);
@ -543,7 +543,7 @@ mod tests {
let full = SlidingSyncList::builder() let full = SlidingSyncList::builder()
.sync_mode(SlidingSyncMode::GrowingFullSync) .sync_mode(SlidingSyncMode::GrowingFullSync)
.batch_size(10u32) .full_sync_batch_size(10u32)
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()])
.name("full") .name("full")
.build()?; .build()?;
@ -552,29 +552,46 @@ mod tests {
let list = sync_proxy.list("sliding").context("but we just added that list!")?; let list = sync_proxy.list("sliding").context("but we just added that list!")?;
let full_list = sync_proxy.list("full").context("but we just added that list!")?; let full_list = sync_proxy.list("full").context("but we just added that list!")?;
assert_eq!(list.state(), SlidingSyncState::Cold, "list isn't cold"); assert_eq!(list.state(), SlidingSyncState::NotLoaded, "list isn't in the NotLoaded state");
assert_eq!(full_list.state(), SlidingSyncState::Cold, "full isn't cold"); assert_eq!(full_list.state(), SlidingSyncState::NotLoaded, "full isn't in the NotLoaded state");
let stream = sync_proxy.stream(); let stream = sync_proxy.stream();
pin_mut!(stream); pin_mut!(stream);
// exactly one poll! // Exactly one poll!
// Ranges are 0..=9 for selective list, and 0..=9 for growing list.
let room_summary = let room_summary =
stream.next().await.context("No room summary found, loop ended unsuccessfully")??; stream.next().await.context("No room summary found, loop ended unsuccessfully")??;
// we only heard about the ones we had asked for // we only heard about the ones we had asked for
assert_eq!(room_summary.rooms.len(), 11); assert_eq!(room_summary.rooms.len(), 11);
assert_eq!(list.state(), SlidingSyncState::Live, "list isn't live"); assert_eq!(list.state(), SlidingSyncState::FullyLoaded, "list isn't fully loaded");
assert_eq!(full_list.state(), SlidingSyncState::CatchingUp, "full isn't preloading"); assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't partially loaded");
// doing another two requests 0-20; 0-21 should bring full live, too // Another poll!
// Ranges are 0..=10 for selective list, and 0..=19 for growing list.
let _room_summary = let _room_summary =
stream.next().await.context("No room summary found, loop ended unsuccessfully")??; stream.next().await.context("No room summary found, loop ended unsuccessfully")??;
let rooms_list = full_list.rooms_list::<RoomListEntryEasy>(); let rooms_list = full_list.rooms_list::<RoomListEntryEasy>();
assert_eq!(
rooms_list,
repeat(RoomListEntryEasy::Filled)
.take(20)
.chain(once(RoomListEntryEasy::Empty))
.collect::<Vec<_>>()
);
assert_eq!(full_list.state(), SlidingSyncState::PartiallyLoaded, "full isn't partially loaded");
// One last poll, and we should get all rooms loaded.
let _room_summary =
stream.next().await.context("No room summary found, loop ended unsecessfully")??;
let rooms_list = full_list.rooms_list::<RoomListEntryEasy>();
assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::<Vec<_>>()); assert_eq!(rooms_list, repeat(RoomListEntryEasy::Filled).take(21).collect::<Vec<_>>());
assert_eq!(full_list.state(), SlidingSyncState::Live, "full isn't live yet"); assert_eq!(full_list.state(), SlidingSyncState::FullyLoaded, "full isn't fully loaded");
Ok(()) Ok(())
} }
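
The filled-entry counts asserted in this test follow from one invariant of growing mode: the requested window widens by the batch size on every poll, capped at the list's total. A sketch of that arithmetic, with a helper name of our own:

    // With 21 rooms and a batch size of 10, successive polls fill 10, 20,
    // then all 21 entries, matching the `repeat(Filled)` assertions above.
    fn expected_filled(batch_size: u32, polls: u32, total: u32) -> u32 {
        (batch_size * polls).min(total)
    }

    assert_eq!(expected_filled(10, 2, 21), 20);
    assert_eq!(expected_filled(10, 3, 21), 21);
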
@ -844,7 +861,7 @@ mod tests {
.build()?; .build()?;
let growing_sync = SlidingSyncList::builder() let growing_sync = SlidingSyncList::builder()
.sync_mode(SlidingSyncMode::GrowingFullSync) .sync_mode(SlidingSyncMode::GrowingFullSync)
.limit(100) .full_sync_maximum_number_of_rooms_to_fetch(100)
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()])
.name("growing") .name("growing")
.build()?; .build()?;
@ -867,7 +884,7 @@ mod tests {
sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully.
let stream = sync_proxy.stream(); let stream = sync_proxy.stream();
pin_mut!(stream); pin_mut!(stream);
while growing_sync.state() != SlidingSyncState::Live { while growing_sync.state() != SlidingSyncState::FullyLoaded {
// we wait until growing sync is all done, too // we wait until growing sync is all done, too
println!("awaiting"); println!("awaiting");
let _room_summary = stream let _room_summary = stream
@ -899,10 +916,10 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn growing_sync_keeps_going() -> anyhow::Result<()> { async fn growing_sync_keeps_going() -> anyhow::Result<()> {
let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?;
let growing_sync = SlidingSyncList::builder() let growing_sync = SlidingSyncList::builder()
.sync_mode(SlidingSyncMode::GrowingFullSync) .sync_mode(SlidingSyncMode::GrowingFullSync)
.batch_size(10u32) .full_sync_batch_size(5u32)
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()])
.name("growing") .name("growing")
.build()?; .build()?;
@ -913,9 +930,9 @@ mod tests {
let stream = sync_proxy.stream(); let stream = sync_proxy.stream();
pin_mut!(stream); pin_mut!(stream);
// we have 50 and catch up in batches of 10. so let's get over to 20. // we have 20 and catch up in batches of 5, so three polls get us to 15.
for _n in 0..2 { for _ in 0..=2 {
let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let room_summary = stream.next().await.context("sync has closed unexpectedly")?;
let _summary = room_summary?; let _summary = room_summary?;
} }
@ -925,25 +942,20 @@ mod tests {
assert_eq!( assert_eq!(
collection_simple, collection_simple,
repeat(RoomListEntryEasy::Filled) repeat(RoomListEntryEasy::Filled)
.take(21) .take(15)
.chain(repeat(RoomListEntryEasy::Empty).take(29)) .chain(repeat(RoomListEntryEasy::Empty).take(5))
.collect::<Vec<_>>() .collect::<Vec<_>>()
); );
// we have 50 and catch up in batches of 10. let's go two more, see it grow. // we have 20 and catch up in batches of 5. let's go one more and see it grow.
for _n in 0..2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?;
let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let _summary = room_summary?;
let _summary = room_summary?;
}
let collection_simple = list.rooms_list::<RoomListEntryEasy>(); let collection_simple = list.rooms_list::<RoomListEntryEasy>();
assert_eq!( assert_eq!(
collection_simple, collection_simple,
repeat(RoomListEntryEasy::Filled) repeat(RoomListEntryEasy::Filled).take(20).collect::<Vec<_>>()
.take(41)
.chain(repeat(RoomListEntryEasy::Empty).take(9))
.collect::<Vec<_>>()
); );
Ok(()) Ok(())
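
One subtlety in the updated loops: the bound changed from `0..2` to `0..=2`, and an inclusive range runs one extra iteration, which is why three polls at a batch size of 5 leave exactly 15 of the 20 rooms filled:

    // `..` is exclusive, `..=` is inclusive: one more pass than before.
    assert_eq!((0..2).count(), 2);
    assert_eq!((0..=2).count(), 3);
    // Three polls at batch size 5, capped at 20 rooms, fill 15 entries.
    assert_eq!((3u32 * 5).min(20), 15);
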
@ -951,10 +963,10 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn growing_sync_keeps_going_after_restart() -> anyhow::Result<()> { async fn growing_sync_keeps_going_after_restart() -> anyhow::Result<()> {
let (_client, sync_proxy_builder) = random_setup_with_rooms(50).await?; let (_client, sync_proxy_builder) = random_setup_with_rooms(20).await?;
let growing_sync = SlidingSyncList::builder() let growing_sync = SlidingSyncList::builder()
.sync_mode(SlidingSyncMode::GrowingFullSync) .sync_mode(SlidingSyncMode::GrowingFullSync)
.batch_size(10u32) .full_sync_batch_size(5u32)
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()])
.name("growing") .name("growing")
.build()?; .build()?;
@ -965,9 +977,9 @@ mod tests {
let stream = sync_proxy.stream(); let stream = sync_proxy.stream();
pin_mut!(stream); pin_mut!(stream);
// we have 50 and catch up in batches of 10. so let's get over to 20. // we have 20 and catch up in batches of 5, so three polls get us to 15.
for _n in 0..2 { for _ in 0..=2 {
let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let room_summary = stream.next().await.context("sync has closed unexpectedly")?;
let _summary = room_summary?; let _summary = room_summary?;
} }
@ -980,19 +992,17 @@ mod tests {
} else { } else {
acc acc
}), }),
21 15
); );
// we have 50 and catch up in batches of 10. Let's make sure the restart // we have 20 and catch up in batches of 5. Let's make sure the restart
// continues // continues.
let stream = sync_proxy.stream(); let stream = sync_proxy.stream();
pin_mut!(stream); pin_mut!(stream);
for _n in 0..2 { let room_summary = stream.next().await.context("sync has closed unexpectedly")?;
let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let _summary = room_summary?;
let _summary = room_summary?;
}
let collection_simple = list.rooms_list::<RoomListEntryEasy>(); let collection_simple = list.rooms_list::<RoomListEntryEasy>();
@ -1002,7 +1012,7 @@ mod tests {
} else { } else {
acc acc
}), }),
41 20
); );
Ok(()) Ok(())
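
The `fold` used in these assertions simply counts `Filled` entries; an equivalent, more direct spelling over the same `collection_simple`:

    // Count the `Filled` entries without the explicit accumulator.
    let filled = collection_simple
        .iter()
        .filter(|entry| matches!(**entry, RoomListEntryEasy::Filled))
        .count();
    assert_eq!(filled, 20);
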
@ -1010,11 +1020,12 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn continue_on_reset() -> anyhow::Result<()> { async fn continue_on_reset() -> anyhow::Result<()> {
let (_client, sync_proxy_builder) = random_setup_with_rooms(30).await?; let (_client, sync_proxy_builder) = random_setup_with_rooms(10).await?;
print!("setup took its time"); print!("setup took its time");
let growing_sync = SlidingSyncList::builder() let growing_sync = SlidingSyncList::builder()
.sync_mode(SlidingSyncMode::GrowingFullSync) .sync_mode(SlidingSyncMode::GrowingFullSync)
.limit(100) .full_sync_batch_size(5u32)
.full_sync_maximum_number_of_rooms_to_fetch(100)
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()])
.name("growing") .name("growing")
.build()?; .build()?;
@ -1030,9 +1041,10 @@ mod tests {
let stream = sync_proxy.stream(); let stream = sync_proxy.stream();
pin_mut!(stream); pin_mut!(stream);
for _n in 0..2 { for _ in 0..=2 {
let room_summary = stream.next().await.context("sync has closed unexpectedly")?; let room_summary = stream.next().await.context("sync has closed unexpectedly")?;
let summary = room_summary?; let summary = room_summary?;
if summary.lists.iter().any(|s| s == "growing") { if summary.lists.iter().any(|s| s == "growing") {
break; break;
} }
@ -1046,14 +1058,14 @@ mod tests {
} else { } else {
acc acc
}), }),
21 5
); );
// force the pos to be invalid, and thus force an internal reset // force the pos to be invalid, and thus force an internal reset
sync_proxy.set_pos("100".to_owned()); sync_proxy.set_pos("100".to_owned());
let mut error_seen = false; let mut error_seen = false;
for _n in 0..2 { for _ in 0..2 {
let summary = match stream.next().await { let summary = match stream.next().await {
Some(Ok(e)) => e, Some(Ok(e)) => e,
Some(Err(e)) => { Some(Err(e)) => {
@ -1068,6 +1080,7 @@ mod tests {
} }
None => anyhow::bail!("Stream ended unexpectedly."), None => anyhow::bail!("Stream ended unexpectedly."),
}; };
// we only heard about the ones we had asked for // we only heard about the ones we had asked for
if summary.lists.iter().any(|s| s == "growing") { if summary.lists.iter().any(|s| s == "growing") {
break; break;
@ -1084,7 +1097,7 @@ mod tests {
} else { } else {
acc acc
}), }),
30 10
); );
Ok(()) Ok(())
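
The error-tolerant loop above (expect at most one failure, caused by the invalidated `pos`, then resume) condenses into a generic helper. A sketch assuming only that the stream yields `Result` items:

    use std::pin::Pin;

    use futures_util::{Stream, StreamExt};

    // Polls until an `Ok` update arrives, tolerating a single error along
    // the way (the expected reset after an invalid `pos`); a second error
    // is treated as fatal.
    async fn next_update_tolerating_one_reset<S, T, E>(
        mut stream: Pin<&mut S>,
    ) -> Result<Option<T>, E>
    where
        S: Stream<Item = Result<T, E>>,
    {
        let mut error_seen = false;
        while let Some(update) = stream.next().await {
            match update {
                Ok(summary) => return Ok(Some(summary)),
                Err(_) if !error_seen => error_seen = true,
                Err(e) => return Err(e),
            }
        }
        Ok(None) // the stream ended without a post-reset update
    }
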
@ -1092,11 +1105,12 @@ mod tests {
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn noticing_new_rooms_in_growing() -> anyhow::Result<()> { async fn noticing_new_rooms_in_growing() -> anyhow::Result<()> {
let (client, sync_proxy_builder) = random_setup_with_rooms(30).await?; let (client, sync_proxy_builder) = random_setup_with_rooms(20).await?;
print!("setup took its time"); print!("setup took its time");
let growing_sync = SlidingSyncList::builder() let growing_sync = SlidingSyncList::builder()
.sync_mode(SlidingSyncMode::GrowingFullSync) .sync_mode(SlidingSyncMode::GrowingFullSync)
.limit(100) .full_sync_batch_size(10u32)
.full_sync_maximum_number_of_rooms_to_fetch(100)
.sort(vec!["by_recency".to_owned(), "by_name".to_owned()]) .sort(vec!["by_recency".to_owned(), "by_name".to_owned()])
.name("growing") .name("growing")
.build()?; .build()?;
@ -1111,7 +1125,7 @@ mod tests {
let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully. let list = sync_proxy.list("growing").context("but we just added that list!")?; // let's catch it up fully.
let stream = sync_proxy.stream(); let stream = sync_proxy.stream();
pin_mut!(stream); pin_mut!(stream);
while list.state() != SlidingSyncState::Live { while list.state() != SlidingSyncState::FullyLoaded {
// we wait until growing sync is all done, too // we wait until growing sync is all done, too
println!("awaiting"); println!("awaiting");
let _room_summary = stream let _room_summary = stream
@ -1128,7 +1142,7 @@ mod tests {
} else { } else {
acc acc
}), }),
30 20
); );
// all found. let's add two more. // all found. let's add two more.
@ -1142,10 +1156,10 @@ mod tests {
let summary = room_summary?; let summary = room_summary?;
// we only heard about the ones we had asked for // we only heard about the ones we had asked for
if summary.lists.iter().any(|s| s == "growing") if summary.lists.iter().any(|s| s == "growing")
&& list.rooms_count().unwrap_or_default() == 32 && list.maximum_number_of_rooms().unwrap_or_default() == 22
{ {
if seen { if seen {
// once we saw 32, we give it another loop to catch up! // once we saw 22, we give it another loop to catch up!
break; break;
} else { } else {
seen = true; seen = true;
@ -1161,7 +1175,7 @@ mod tests {
} else { } else {
acc acc
}), }),
32 22
); );
Ok(()) Ok(())
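
The exit condition of the loop above (stop only once the target count of 22 has been observed twice, giving the list one extra poll to fill in the freshly added entries) condenses to a small helper; the names here are ours:

    // First sighting of the target flips `seen`; the second one stops.
    fn should_stop(seen: &mut bool, current: Option<u32>, target: u32) -> bool {
        if current.unwrap_or_default() == target {
            if *seen {
                return true;
            }
            *seen = true;
        }
        false
    }

    let mut seen = false;
    assert!(!should_stop(&mut seen, Some(22), 22));
    assert!(should_stop(&mut seen, Some(22), 22));
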