Skip to content

Commit

Permalink
fix(event cache): override reached_start when there's a mismatch between network and disk
Browse files Browse the repository at this point in the history

It could be that we have a mismatch between network and disk, after
running a back-pagination:

- the network indicates the start of the timeline, i.e. there is no
  previous-batch token
- but in the persisted storage, we do have an initial empty events chunk

Because of this, we could have weird transitions from "I've reached the
start of the room" to "I haven't actually reached it", if calling the
`run_backwards()` method manually.

This patch rewrites the logic when returning `reached_start`, so that
it's more precise:

- when reloading an events chunk from disk, rely on the previous chunk
property to indicate whether we've reached the start of the timeline,
thus avoiding unnecessary calls to back-paginations.
- after resolving a gap via the network, override the result of
`reached_start` with a boolean that indicates 1. there are no more gaps
and 2. there's no previous chunk (actual previous or lazily-loaded).

In the future, we should consider NOT having empty events chunks, if we
can.
  • Loading branch information
bnjbvr committed Feb 24, 2025
1 parent 39c6481 commit f3f37a3
Show file tree
Hide file tree
Showing 4 changed files with 60 additions and 49 deletions.
6 changes: 6 additions & 0 deletions crates/matrix-sdk-common/src/linked_chunk/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1260,6 +1260,12 @@ impl<const CAPACITY: usize, Item, Gap> Chunk<CAPACITY, Item, Gap> {
!self.is_gap()
}

/// Returns `true` when nothing precedes this chunk: neither an in-memory
/// previous chunk, nor one known only through lazy-loading.
///
/// In other words, this chunk is the true head of the linked chunk, even
/// if earlier chunks might otherwise still be loadable from storage.
pub fn is_definitive_head(&self) -> bool {
    !(self.previous.is_some() || self.lazy_previous.is_some())
}

/// Check whether this current chunk is the first chunk.
fn is_first_chunk(&self) -> bool {
self.previous.is_none()
Expand Down
23 changes: 21 additions & 2 deletions crates/matrix-sdk/src/event_cache/pagination.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@ impl RoomPagination {

// Try to load one chunk backwards. If it returns events, no need to reach the
// network!

match self.inner.state.write().await.load_more_events_backwards().await? {
LoadMoreEventsBackwardsOutcome::Gap => {
// continue, let's resolve this gap!
Expand All @@ -107,7 +108,11 @@ impl RoomPagination {
return Ok(Some(BackPaginationOutcome { reached_start: true, events: vec![] }))
}

LoadMoreEventsBackwardsOutcome::Events(events, sync_timeline_events_diffs) => {
LoadMoreEventsBackwardsOutcome::Events {
events,
sync_timeline_events_diffs,
reached_start,
} => {
if !sync_timeline_events_diffs.is_empty() {
let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents {
diffs: sync_timeline_events_diffs,
Expand All @@ -116,7 +121,7 @@ impl RoomPagination {
}

return Ok(Some(BackPaginationOutcome {
reached_start: false,
reached_start,
// This is a backwards pagination. `BackPaginationOutcome` expects events to
// be in “reverse order”.
events: events.into_iter().rev().collect(),
Expand Down Expand Up @@ -261,6 +266,20 @@ impl RoomPagination {
})
.await?;

// There could be an inconsistency between the network (which thinks we hit the
// start of the timeline) and the disk (which has the initial empty
// chunks), so tweak the `reached_start` value so that it reflects the disk
// state in priority instead.
let reached_start = {
// There's no gaps.
!state.events().chunks().any(|chunk| chunk.is_gap()) &&
// The first chunk has no predecessor.
state.events()
.chunks()
.next()
.map_or(reached_start, |chunk| chunk.is_definitive_head())
};

let backpagination_outcome = BackPaginationOutcome { events, reached_start };

if !sync_timeline_events_diffs.is_empty() {
Expand Down
28 changes: 19 additions & 9 deletions crates/matrix-sdk/src/event_cache/room/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -534,7 +534,11 @@ pub(super) enum LoadMoreEventsBackwardsOutcome {
StartOfTimeline,

/// Events have been inserted.
Events(Vec<TimelineEvent>, Vec<VectorDiff<TimelineEvent>>),
Events {
events: Vec<TimelineEvent>,
sync_timeline_events_diffs: Vec<VectorDiff<TimelineEvent>>,
reached_start: bool,
},
}

// Use a private module to hide `events` to this parent module.
Expand Down Expand Up @@ -727,7 +731,13 @@ mod private {

let events = match &new_first_chunk.content {
ChunkContent::Gap(_) => None,
ChunkContent::Items(events) => Some(events.clone()),
ChunkContent::Items(events) => {
// We've reached the start on disk, if and only if, there was no chunk prior to
// the one we just loaded.
let reached_start = new_first_chunk.previous.is_none();

Some((events.clone(), reached_start))
}
};

if let Err(err) = self.events.insert_new_chunk_as_first(new_first_chunk) {
Expand All @@ -749,9 +759,11 @@ mod private {

Ok(match events {
None => LoadMoreEventsBackwardsOutcome::Gap,
Some(events) => {
LoadMoreEventsBackwardsOutcome::Events(events, updates_as_vector_diffs)
}
Some((events, reached_start)) => LoadMoreEventsBackwardsOutcome::Events {
events,
sync_timeline_events_diffs: updates_as_vector_diffs,
reached_start,
},
})
}

Expand Down Expand Up @@ -1862,8 +1874,6 @@ mod tests {
#[cfg(not(target_arch = "wasm32"))] // This uses the cross-process lock, so needs time support.
#[async_test]
async fn test_shrink_to_last_chunk() {
use std::ops::Not as _;

use eyeball_im::VectorDiff;

use crate::{assert_let_timeout, event_cache::RoomEventCacheUpdate};
Expand Down Expand Up @@ -1930,7 +1940,7 @@ mod tests {
let outcome = room_event_cache.pagination().run_backwards_once(20).await.unwrap();
assert_eq!(outcome.events.len(), 1);
assert_eq!(outcome.events[0].event_id().as_deref(), Some(evid1));
assert!(outcome.reached_start.not());
assert!(outcome.reached_start);

// We also get an update about the loading from the store.
assert_let_timeout!(
Expand Down Expand Up @@ -1974,6 +1984,6 @@ mod tests {
let outcome = room_event_cache.pagination().run_backwards_once(20).await.unwrap();
assert_eq!(outcome.events.len(), 1);
assert_eq!(outcome.events[0].event_id().as_deref(), Some(evid1));
assert!(outcome.reached_start.not());
assert!(outcome.reached_start);
}
}
52 changes: 14 additions & 38 deletions crates/matrix-sdk/tests/integration/event_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1189,7 +1189,14 @@ async fn test_no_gap_stored_after_deduplicated_sync() {
// only contains the events returned by the sync.
//
// The first back-pagination will hit the network, and let us know we've reached
// the end of the room.
// the end of the room, but! we do have the default initial events chunk in
// storage.

let outcome = pagination.run_backwards_once(20).await.unwrap();
assert!(outcome.reached_start.not());
assert!(outcome.events.is_empty());

// Loading the default initial event chunk.
let outcome = pagination.run_backwards_once(20).await.unwrap();
assert!(outcome.reached_start);
assert!(outcome.events.is_empty());
Expand Down Expand Up @@ -1222,14 +1229,10 @@ async fn test_no_gap_stored_after_deduplicated_sync() {
//
// The sync was limited, which unloaded the linked chunk, and reloaded only the
// final events chunk.
//
// There's an empty events chunk at the start of *every* linked chunk, so the
// next pagination will return it, and since the chunk is lazily loaded, the
// pagination doesn't know *yet* it's reached the start of the linked chunk.

let outcome = pagination.run_backwards_once(20).await.unwrap();
assert!(outcome.events.is_empty());
assert!(!outcome.reached_start);
assert!(outcome.reached_start);

{
let (events, _) = room_event_cache.subscribe().await;
Expand All @@ -1238,13 +1241,6 @@ async fn test_no_gap_stored_after_deduplicated_sync() {
assert_event_matches_msg(&events[2], "sup");
assert_eq!(events.len(), 3);
}

// Now, lazy-loading notices we've reached the start of the chunk, and reports
// it as such.

let outcome = pagination.run_backwards_once(20).await.unwrap();
assert!(outcome.events.is_empty());
assert!(outcome.reached_start);
}

#[async_test]
Expand Down Expand Up @@ -1336,7 +1332,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() {
let pagination = room_event_cache.pagination();

let outcome = pagination.run_backwards_once(20).await.unwrap();
assert!(outcome.reached_start);
assert!(outcome.reached_start.not());
assert!(outcome.events.is_empty());
assert!(stream.is_empty());

Expand Down Expand Up @@ -1373,13 +1369,7 @@ async fn test_no_gap_stored_after_deduplicated_backpagination() {
// we shouldn't have to, since it is useless; all events were deduplicated
// from the previous pagination.

// Instead, we're lazy-loading an empty events chunks.
let outcome = pagination.run_backwards_once(20).await.unwrap();
assert!(outcome.reached_start.not());
assert!(outcome.events.is_empty());
assert!(stream.is_empty());

// And finally hit the start of the timeline.
// Instead, we're lazy-loading the empty initial events chunks.
let outcome = pagination.run_backwards_once(20).await.unwrap();
assert!(outcome.reached_start);
assert!(outcome.events.is_empty());
Expand Down Expand Up @@ -1948,9 +1938,8 @@ async fn test_lazy_loading() {
assert_event_id!(outcome.events[4], "$ev0_1");
assert_event_id!(outcome.events[5], "$ev0_0");

// The start of the timeline isn't reached yet. What we know for the moment is
// that we get new events.
assert!(outcome.reached_start.not());
// This was the start of the timeline \o/
assert!(outcome.reached_start);

// Let's check the stream for the last time.
let update = updates_stream.recv().await.unwrap();
Expand Down Expand Up @@ -1978,20 +1967,7 @@ async fn test_lazy_loading() {
assert_event_id!(event, "$ev0_5");
});
});

assert!(updates_stream.is_empty());
}

// Final pagination? Oh yeah;
// This time, the first chunk is loaded and there is nothing else to do, no gap,
// nothing. We've reached the start of the timeline!
{
let outcome = room_event_cache.pagination().run_backwards_until(1).await.unwrap();

// No events, hmmm…
assert!(outcome.events.is_empty());

// … that's because the start of the timeline is finally reached!
assert!(outcome.reached_start);
}
assert!(updates_stream.is_empty());
}

0 comments on commit f3f37a3

Please sign in to comment.