diff --git a/crates/matrix-sdk-common/src/linked_chunk/mod.rs b/crates/matrix-sdk-common/src/linked_chunk/mod.rs index 9f63ea4f6ed..92be5fc5a51 100644 --- a/crates/matrix-sdk-common/src/linked_chunk/mod.rs +++ b/crates/matrix-sdk-common/src/linked_chunk/mod.rs @@ -1260,6 +1260,12 @@ impl Chunk { !self.is_gap() } + /// Is this the definitive first chunk, even in the presence of + /// lazy-loading? + pub fn is_definitive_head(&self) -> bool { + self.previous.is_none() && self.lazy_previous.is_none() + } + /// Check whether this current chunk is the first chunk. fn is_first_chunk(&self) -> bool { self.previous.is_none() diff --git a/crates/matrix-sdk/src/event_cache/pagination.rs b/crates/matrix-sdk/src/event_cache/pagination.rs index 9401b391bc3..2efaf8288f9 100644 --- a/crates/matrix-sdk/src/event_cache/pagination.rs +++ b/crates/matrix-sdk/src/event_cache/pagination.rs @@ -98,6 +98,7 @@ impl RoomPagination { // Try to load one chunk backwards. If it returns events, no need to reach the // network! + match self.inner.state.write().await.load_more_events_backwards().await? { LoadMoreEventsBackwardsOutcome::Gap => { // continue, let's resolve this gap! @@ -107,7 +108,11 @@ impl RoomPagination { return Ok(Some(BackPaginationOutcome { reached_start: true, events: vec![] })) } - LoadMoreEventsBackwardsOutcome::Events(events, sync_timeline_events_diffs) => { + LoadMoreEventsBackwardsOutcome::Events { + events, + sync_timeline_events_diffs, + reached_start, + } => { if !sync_timeline_events_diffs.is_empty() { let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents { diffs: sync_timeline_events_diffs, @@ -116,7 +121,7 @@ impl RoomPagination { } return Ok(Some(BackPaginationOutcome { - reached_start: false, + reached_start, // This is a backwards pagination. `BackPaginationOutcome` expects events to // be in “reverse order”. 
events: events.into_iter().rev().collect(), @@ -261,6 +266,20 @@ impl RoomPagination { }) .await?; + // There could be an inconsistency between the network (which thinks we hit the + // start of the timeline) and the disk (which has the initial empty + // chunks), so tweak the `reached_start` value so that it reflects the disk + // state in priority instead. + let reached_start = { + // There are no gaps. + !state.events().chunks().any(|chunk| chunk.is_gap()) && + // The first chunk has no predecessor. + state.events() + .chunks() + .next() + .map_or(reached_start, |chunk| chunk.is_definitive_head()) + }; + let backpagination_outcome = BackPaginationOutcome { events, reached_start }; if !sync_timeline_events_diffs.is_empty() { diff --git a/crates/matrix-sdk/src/event_cache/room/mod.rs b/crates/matrix-sdk/src/event_cache/room/mod.rs index 94e91b43e87..1674423d1c5 100644 --- a/crates/matrix-sdk/src/event_cache/room/mod.rs +++ b/crates/matrix-sdk/src/event_cache/room/mod.rs @@ -534,7 +534,11 @@ pub(super) enum LoadMoreEventsBackwardsOutcome { StartOfTimeline, /// Events have been inserted. - Events(Vec<TimelineEvent>, Vec<VectorDiff<TimelineEvent>>), + Events { + events: Vec<TimelineEvent>, + sync_timeline_events_diffs: Vec<VectorDiff<TimelineEvent>>, + reached_start: bool, + }, } // Use a private module to hide `events` to this parent module. @@ -727,7 +731,13 @@ mod private { let events = match &new_first_chunk.content { ChunkContent::Gap(_) => None, - ChunkContent::Items(events) => Some(events.clone()), + ChunkContent::Items(events) => { + // We've reached the start on disk if and only if there was no chunk prior to + // the one we just loaded. 
+ let reached_start = new_first_chunk.previous.is_none(); + + Some((events.clone(), reached_start)) + } }; if let Err(err) = self.events.insert_new_chunk_as_first(new_first_chunk) { @@ -749,9 +759,11 @@ mod private { Ok(match events { None => LoadMoreEventsBackwardsOutcome::Gap, - Some(events) => { - LoadMoreEventsBackwardsOutcome::Events(events, updates_as_vector_diffs) - } + Some((events, reached_start)) => LoadMoreEventsBackwardsOutcome::Events { + events, + sync_timeline_events_diffs: updates_as_vector_diffs, + reached_start, + }, }) } @@ -1862,8 +1874,6 @@ mod tests { #[cfg(not(target_arch = "wasm32"))] // This uses the cross-process lock, so needs time support. #[async_test] async fn test_shrink_to_last_chunk() { - use std::ops::Not as _; - use eyeball_im::VectorDiff; use crate::{assert_let_timeout, event_cache::RoomEventCacheUpdate}; @@ -1930,7 +1940,7 @@ mod tests { let outcome = room_event_cache.pagination().run_backwards_once(20).await.unwrap(); assert_eq!(outcome.events.len(), 1); assert_eq!(outcome.events[0].event_id().as_deref(), Some(evid1)); - assert!(outcome.reached_start.not()); + assert!(outcome.reached_start); // We also get an update about the loading from the store. assert_let_timeout!( @@ -1974,6 +1984,6 @@ mod tests { let outcome = room_event_cache.pagination().run_backwards_once(20).await.unwrap(); assert_eq!(outcome.events.len(), 1); assert_eq!(outcome.events[0].event_id().as_deref(), Some(evid1)); - assert!(outcome.reached_start.not()); + assert!(outcome.reached_start); } } diff --git a/crates/matrix-sdk/tests/integration/event_cache.rs b/crates/matrix-sdk/tests/integration/event_cache.rs index 827fd99d27e..eef45b60d26 100644 --- a/crates/matrix-sdk/tests/integration/event_cache.rs +++ b/crates/matrix-sdk/tests/integration/event_cache.rs @@ -1189,7 +1189,14 @@ async fn test_no_gap_stored_after_deduplicated_sync() { // only contains the events returned by the sync. 
// // The first back-pagination will hit the network, and let us know we've reached - // the end of the room. + // the end of the room, but! we do have the default initial events chunk in + // storage. + + let outcome = pagination.run_backwards_once(20).await.unwrap(); + assert!(outcome.reached_start.not()); + assert!(outcome.events.is_empty()); + + // Loading the default initial event chunk. let outcome = pagination.run_backwards_once(20).await.unwrap(); assert!(outcome.reached_start); assert!(outcome.events.is_empty()); @@ -1222,14 +1229,10 @@ async fn test_no_gap_stored_after_deduplicated_sync() { // // The sync was limited, which unloaded the linked chunk, and reloaded only the // final events chunk. - // - // There's an empty events chunk at the start of *every* linked chunk, so the - // next pagination will return it, and since the chunk is lazily loaded, the - // pagination doesn't know *yet* it's reached the start of the linked chunk. let outcome = pagination.run_backwards_once(20).await.unwrap(); assert!(outcome.events.is_empty()); - assert!(!outcome.reached_start); + assert!(outcome.reached_start); { let (events, _) = room_event_cache.subscribe().await; @@ -1238,13 +1241,6 @@ async fn test_no_gap_stored_after_deduplicated_sync() { assert_event_matches_msg(&events[2], "sup"); assert_eq!(events.len(), 3); } - - // Now, lazy-loading notices we've reached the start of the chunk, and reports - // it as such. 
- - let outcome = pagination.run_backwards_once(20).await.unwrap(); - assert!(outcome.events.is_empty()); - assert!(outcome.reached_start); } #[async_test] @@ -1336,7 +1332,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() { let pagination = room_event_cache.pagination(); let outcome = pagination.run_backwards_once(20).await.unwrap(); - assert!(outcome.reached_start); + assert!(outcome.reached_start.not()); assert!(outcome.events.is_empty()); assert!(stream.is_empty()); @@ -1373,13 +1369,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() { // we shouldn't have to, since it is useless; all events were deduplicated // from the previous pagination. - // Instead, we're lazy-loading an empty events chunks. - let outcome = pagination.run_backwards_once(20).await.unwrap(); - assert!(outcome.reached_start.not()); - assert!(outcome.events.is_empty()); - assert!(stream.is_empty()); - - // And finally hit the start of the timeline. + // Instead, we're lazy-loading the empty initial events chunks. let outcome = pagination.run_backwards_once(20).await.unwrap(); assert!(outcome.reached_start); assert!(outcome.events.is_empty()); @@ -1948,9 +1938,8 @@ async fn test_lazy_loading() { assert_event_id!(outcome.events[4], "$ev0_1"); assert_event_id!(outcome.events[5], "$ev0_0"); - // The start of the timeline isn't reached yet. What we know for the moment is - // that we get new events. - assert!(outcome.reached_start.not()); + // This was the start of the timeline \o/ + assert!(outcome.reached_start); // Let's check the stream for the last time. let update = updates_stream.recv().await.unwrap(); @@ -1978,20 +1967,7 @@ async fn test_lazy_loading() { assert_event_id!(event, "$ev0_5"); }); }); - - assert!(updates_stream.is_empty()); } - // Final pagination? Oh yeah; - // This time, the first chunk is loaded and there is nothing else to do, no gap, - // nothing. We've reached the start of the timeline! 
- { - let outcome = room_event_cache.pagination().run_backwards_until(1).await.unwrap(); - - // No events, hmmm… - assert!(outcome.events.is_empty()); - - // … that's because the start of the timeline is finally reached! - assert!(outcome.reached_start); - } + assert!(updates_stream.is_empty()); }