fix: ensure aggregate presence change flushes queue

This commit is contained in:
Jared L
2026-01-21 16:50:23 +11:00
parent 0f547115af
commit 7ec9d7f5aa
2 changed files with 32 additions and 0 deletions

View File

@@ -98,6 +98,7 @@ impl Service {
refresh_window_ms: Option<u64>,
) -> Result {
let now = tuwunel_core::utils::millis_since_unix_epoch();
// 1) Capture per-device presence snapshot for aggregation.
debug!(
?user_id,
?device_key,
@@ -118,6 +119,7 @@ impl Service {
)
.await;
// 2) Compute the aggregated presence across all devices.
let aggregated = self
.device_presence
.aggregate(user_id, now, self.idle_timeout, self.offline_timeout)
@@ -131,17 +133,23 @@ impl Service {
"Presence aggregate computed"
);
// 3) Load the last persisted presence to decide whether to skip or merge.
let last_presence = self.db.get_presence(user_id).await;
let (last_count, last_event) = match last_presence {
| Ok((count, event)) => (Some(count), Some(event)),
| Err(_) => (None, None),
};
let last_state = last_event
.as_ref()
.map(|event| event.content.presence.clone());
let state_changed = match &last_event {
| Some(event) => event.content.presence != aggregated.state,
| None => true,
};
// 4) For rapid pings with no state change, skip writes and reschedule.
if !state_changed
&& let Some((count, last_last_active_ago)) =
Self::refresh_skip_decision(refresh_window_ms, last_event.as_ref(), last_count)
@@ -163,6 +171,25 @@ impl Service {
return Ok(());
}
// 5) If we just transitioned away from online, flush suppressed pushes.
if matches!(last_state, Some(PresenceState::Online))
&& aggregated.state != PresenceState::Online
{
debug!(
?user_id,
from = ?PresenceState::Online,
to = ?aggregated.state,
"Presence went inactive; flushing suppressed pushes"
);
self.services
.sending
.schedule_flush_suppressed_for_user(
user_id.to_owned(),
"presence->inactive (aggregate)",
);
}
// 6) Persist the aggregated presence, preserving last non-empty status.
let fallback_status = last_event
.and_then(|event| event.content.status_msg)
.filter(|msg| !msg.is_empty());

View File

@@ -938,6 +938,7 @@ impl Service {
return;
}
let mut sent = 0_usize;
debug!(?user_id, pushkey, rooms = rooms.len(), "Flushing suppressed pushes ({reason})");
for (room_id, pdu_ids) in rooms {
@@ -984,9 +985,13 @@ impl Service {
requeued,
"Failed to send suppressed push notification"
);
} else {
sent = sent.saturating_add(1);
}
}
}
debug!(?user_id, pushkey, sent, "Flushed suppressed push notifications");
}
async fn flush_suppressed_for_pushkey(