Implement specified non-blocking semantics for sliding-sync.

Simplify sliding-sync watch loop and bounds.

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk
2025-10-22 20:53:04 +00:00
parent 4010fc62bc
commit 0397bb8237
5 changed files with 42 additions and 35 deletions

View File

@@ -13,8 +13,7 @@ ARG mrsdk_profile="release"
 ARG mrsdk_target_share="${MRSDK_TARGET_DIR}/${sys_name}/${sys_version}/${rust_target}/${rust_toolchain}/${mrsdk_profile}/_shared_cache"
 ARG mrsdk_test_args=""
 ARG mrsdk_test_opts=""
-#TODO!!!
-ARG mrsdk_skip_list="--skip test_history_share_on_invite_pin_violation --skip test_room_notification_count"
+ARG mrsdk_skip_list=""
 WORKDIR /
 COPY --link --from=input . .

View File

@@ -107,13 +107,12 @@ pub(crate) async fn sync_events_v5_route(
 		.map(Duration::as_millis)
 		.map(TryInto::try_into)
 		.flat_ok()
-		.unwrap_or(services.config.client_sync_timeout_default)
-		.max(services.config.client_sync_timeout_min)
-		.min(services.config.client_sync_timeout_max);
-
-	let stop_at = Instant::now()
-		.checked_add(Duration::from_millis(timeout))
-		.expect("configuration must limit maximum timeout");
+		.map(|timeout: u64| {
+			timeout
+				.max(services.config.client_sync_timeout_min)
+				.min(services.config.client_sync_timeout_max)
+		})
+		.unwrap_or(0);
 
 	let conn_key = into_connection_key(sender_user, sender_device, request.conn_id.as_deref());
 	let conn_val = since
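
The behavioral change in the hunk above: a request that omits `timeout` used to fall back to `client_sync_timeout_default` and block, whereas it now resolves to 0 milliseconds, i.e. return immediately, matching the specified non-blocking sliding-sync semantics; only an explicitly supplied timeout is clamped into the configured window. A minimal sketch of the new behavior, with plain arguments standing in for the request and config plumbing:

use std::time::Duration;

// Clamp an optional client-supplied timeout into the configured window.
// Absence of a timeout now means "return immediately" (0 ms) instead of
// falling back to a blocking default.
fn effective_timeout(requested: Option<Duration>, min_ms: u64, max_ms: u64) -> u64 {
	requested
		.map(|d| u64::try_from(d.as_millis()).unwrap_or(u64::MAX))
		.map(|ms| ms.max(min_ms).min(max_ms))
		.unwrap_or(0)
}

fn main() {
	assert_eq!(effective_timeout(None, 100, 30_000), 0);
	assert_eq!(effective_timeout(Some(Duration::from_millis(5)), 100, 30_000), 100);
	assert_eq!(effective_timeout(Some(Duration::from_secs(60)), 100, 30_000), 30_000);
}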
@@ -147,7 +146,7 @@ pub(crate) async fn sync_events_v5_route(
 	// Update parameters regardless of replay or advance
 	conn.next_batch = services.globals.wait_pending().await?;
-	conn.globalsince = since;
+	conn.globalsince = since.min(conn.next_batch);
 	conn.update_cache(request);
 	conn.update_rooms_prologue(advancing);
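
Clamping here maintains the loop's invariant `globalsince <= next_batch` (checked by the `debug_assert!` further down) even when a client replays a `pos` token ahead of the server's current counter. A toy check of the bound, using bare u64s for the counters:

fn main() {
	let next_batch: u64 = 100;
	for since in [50_u64, 100, 250] {
		let globalsince = since.min(next_batch);
		assert!(globalsince <= next_batch); // invariant the loop debug_asserts
	}
}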
@@ -159,6 +158,10 @@ pub(crate) async fn sync_events_v5_route(
 		extensions: Default::default(),
 	};
 
+	let stop_at = Instant::now()
+		.checked_add(Duration::from_millis(timeout))
+		.expect("configuration must limit maximum timeout");
+
 	let sync_info = SyncInfo { services, sender_user, sender_device };
 
 	loop {
@@ -167,21 +170,20 @@ pub(crate) async fn sync_events_v5_route(
 		debug_assert!(
 		);
 
-		let window;
-		(window, response.lists) = selector(&mut conn, sync_info).boxed().await;
-
-		let watch_rooms = window.keys().map(AsRef::as_ref).stream();
-		let watchers = services
-			.sync
-			.watch(sender_user, sender_device, watch_rooms);
+		let watchers = services.sync.watch(
+			sender_user,
+			sender_device,
+			services.state_cache.rooms_joined(sender_user),
+		);
 
+		let window;
 		conn.next_batch = services.globals.wait_pending().await?;
 		(window, response.lists) = selector(&mut conn, sync_info).boxed().await;
 
 		if conn.globalsince < conn.next_batch {
-			let rooms =
-				handle_rooms(sync_info, &conn, &window).map_ok(|rooms| response.rooms = rooms);
+			let rooms = handle_rooms(sync_info, &conn, &window)
+				.map_ok(|response_rooms| response.rooms = response_rooms);
 
 			let extensions = handle_extensions(sync_info, &conn, &window)
-				.map_ok(|extensions| response.extensions = extensions);
+				.map_ok(|response_extensions| response.extensions = response_extensions);
 
 			try_join(rooms, extensions).boxed().await?;
@@ -194,7 +196,13 @@ pub(crate) async fn sync_events_v5_route(
 			}
 		}
 
-		if timeout_at(stop_at, watchers).await.is_err() || services.server.is_stopping() {
+		if timeout == 0
+			|| services.server.is_stopping()
+			|| timeout_at(stop_at, watchers)
+				.boxed()
+				.await
+				.is_err()
+		{
 			response.pos = conn.next_batch.to_string().into();
 			trace!(conn.globalsince, conn.next_batch, "timeout; empty response {response:?}");
 			return Ok(response);
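
With `timeout == 0` the reordered `||` chain short-circuits before the watcher future is ever awaited, so the first pass through the loop returns the (possibly empty) response immediately. A sketch of that exit condition, assuming tokio's `timeout_at` in place of the server's own timing utility and a stand-in watcher future:

use std::time::Duration;
use tokio::time::{sleep, timeout_at, Instant};

#[tokio::main]
async fn main() {
	let timeout_ms: u64 = 0; // client sent no timeout: non-blocking
	let stop_at = Instant::now() + Duration::from_millis(timeout_ms);

	// Stand-in for the real watcher future (never ready here).
	let watchers = sleep(Duration::from_secs(3600));

	// The first operand short-circuits, so `watchers` is never polled.
	let expired = timeout_ms == 0 || timeout_at(stop_at, watchers).await.is_err();
	assert!(expired); // the handler returns the response immediately
}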

View File

@@ -99,11 +99,15 @@ pub(super) async fn handle(
 		.flat_ok()
 		.unwrap_or_else(|| (Vec::new(), true, PduCount::default()));
 
+	let required_state = required_state
+		.into_iter()
+		.filter(|_| !timeline_pdus.is_empty())
+		.collect::<Vec<_>>();
+
 	let prev_batch = timeline_pdus
 		.first()
 		.map(at!(0))
 		.map(PduCount::into_unsigned)
+		.or_else(|| roomsince.ne(&0).then_some(roomsince))
 		.as_ref()
 		.map(ToString::to_string);
@@ -152,14 +156,6 @@ pub(super) async fn handle(
 		.map(|sender| (StateEventType::RoomMember, StateKey::from_str(sender.as_str())))
 		.stream();
 
-	let timeline = timeline_pdus
-		.iter()
-		.stream()
-		.filter_map(|item| ignored_filter(services, item.clone(), sender_user))
-		.map(at!(1))
-		.map(Event::into_format)
-		.collect();
-
 	let wildcard_state = required_state
 		.iter()
 		.filter(|(_, state_key)| state_key == "*")
@@ -253,6 +249,14 @@ pub(super) async fn handle(
 		.user
 		.last_notification_read(sender_user, room_id);
 
+	let timeline = timeline_pdus
+		.iter()
+		.stream()
+		.filter_map(|item| ignored_filter(services, item.clone(), sender_user))
+		.map(at!(1))
+		.map(Event::into_format)
+		.collect();
+
 	let meta = join3(room_name, room_avatar, is_dm);
 	let events = join4(timeline, num_live, required_state, invite_state);
 	let member_counts = join(joined_count, invited_count);
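
Two related adjustments in this file: `required_state` is now emptied whenever the timeline window is empty, and `prev_batch` falls back to a nonzero `roomsince` instead of going absent, so clients can still back-paginate from an empty window. A sketch of the fallback chain, with bare u64 counts standing in for `PduCount` and a hypothetical helper mirroring the Option chain in the diff:

fn prev_batch(timeline_first: Option<u64>, roomsince: u64) -> Option<String> {
	timeline_first
		.or_else(|| roomsince.ne(&0).then_some(roomsince))
		.as_ref()
		.map(ToString::to_string)
}

fn main() {
	assert_eq!(prev_batch(Some(42), 7), Some("42".to_owned())); // timeline wins
	assert_eq!(prev_batch(None, 7), Some("7".to_owned()));      // fall back to roomsince
	assert_eq!(prev_batch(None, 0), None);                      // initial sync: no token
}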

View File

@@ -174,7 +174,6 @@ async fn match_lists_for_room(
 			services
 				.user
 				.last_notification_read(sender_user, &room_id)
-				.map(|count| count.min(conn.next_batch))
 		})
 		.into();
@@ -183,23 +182,19 @@ async fn match_lists_for_room(
 			services
 				.read_receipt
 				.last_privateread_update(sender_user, &room_id)
-				.map(|count| count.min(conn.next_batch))
 		})
 		.into();
 
 	let last_receipt: OptionFuture<_> = matched
-		.and_is(false) // masked out, maybe unnecessary
 		.then(|| {
 			services
 				.read_receipt
 				.last_receipt_count(&room_id, sender_user.into(), None)
 				.unwrap_or_default()
-				.map(|count| count.min(conn.next_batch))
 		})
 		.into();
 
 	let last_account: OptionFuture<_> = matched
-		.and_is(false) // masked out, maybe unnecessary
 		.then(|| {
 			services
 				.account_data
@@ -236,6 +231,7 @@ async fn match_lists_for_room(
 			]
 			.into_iter()
 			.map(Option::unwrap_or_default)
+			.filter(|count| conn.next_batch.ge(count))
 			.max()
 			.unwrap_or_default(),
 		})
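
The bounds change here replaces clamping with filtering: previously each activity counter was capped at `conn.next_batch` via `.map(|count| count.min(conn.next_batch))`, which could make activity from beyond the batch look like activity at the batch boundary; now any counter greater than `next_batch` is discarded before taking the max. A toy comparison with plain u64 counters:

fn last_activity(counts: [u64; 3], next_batch: u64) -> u64 {
	counts
		.into_iter()
		.filter(|count| next_batch.ge(count)) // drop counts past the batch
		.max()
		.unwrap_or_default()
}

fn main() {
	// A counter newer than next_batch (90 > 80) no longer masquerades as 80.
	assert_eq!(last_activity([10, 50, 90], 80), 50);
	// The old clamping reported 80 for the same input.
	assert_eq!([10_u64, 50, 90].map(|c| c.min(80)).into_iter().max().unwrap(), 80);
}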

View File

@@ -50,7 +50,7 @@ pub struct Connection {
 	pub next_batch: u64,
 }
 
-#[derive(Clone, Debug, Default)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct Room {
 	pub roomsince: u64,
 	pub last_batch: u64,
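
Deriving `Copy` for the per-room cache entry is sound as long as every field is itself `Copy`; the fields visible in this hunk are plain `u64`s. A minimal sketch (only the fields shown in the diff; the real struct may carry more):

#[derive(Clone, Copy, Debug, Default)]
struct Room {
	roomsince: u64,
	last_batch: u64,
}

fn main() {
	let a = Room::default();
	let b = a; // bitwise copy; `a` stays usable, no clone() needed
	assert_eq!(a.roomsince, b.roomsince);
	assert_eq!(a.last_batch, b.last_batch);
}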