From 5bc2863721df52ab2a72e1bf3206297cdb0bf62e Mon Sep 17 00:00:00 2001
From: Jared L <48422312+lhjt@users.noreply.github.com>
Date: Wed, 21 Jan 2026 06:27:18 +1100
Subject: [PATCH] docs: add rustdocs for presence aggregation and suppression

---
 src/service/presence/aggregate.rs | 13 +++++++++++++
 src/service/pusher/suppressed.rs  | 10 ++++++++++
 2 files changed, 23 insertions(+)

diff --git a/src/service/presence/aggregate.rs b/src/service/presence/aggregate.rs
index 208af1b4..7d93749a 100644
--- a/src/service/presence/aggregate.rs
+++ b/src/service/presence/aggregate.rs
@@ -1,3 +1,9 @@
+//! Presence aggregation across devices.
+//!
+//! This module keeps per-device presence snapshots and computes a single
+//! user-level presence view. Aggregation applies idle/offline thresholds,
+//! favors higher-ranked states, and prunes stale devices to cap memory.
+
 use std::collections::HashMap;
 
 use ruma::{OwnedDeviceId, OwnedUserId, UInt, UserId, presence::PresenceState};
@@ -35,12 +41,15 @@ pub(crate) struct PresenceAggregator {
 }
 
 impl PresenceAggregator {
+	/// Create a new, empty aggregator.
 	pub(crate) fn new() -> Self { Self::default() }
 
+	/// Clear all tracked device state.
 	pub(crate) async fn clear(&self) {
 		self.inner.write().await.clear();
 	}
 
+	/// Update presence state for a single device.
 	pub(crate) async fn update(
 		&self,
 		user_id: &UserId,
@@ -76,6 +85,10 @@
 		}
 	}
 
+	/// Aggregate per-device state into a single presence snapshot.
+	///
+	/// Prunes devices that have not updated within the offline timeout to keep
+	/// the map bounded.
 	pub(crate) async fn aggregate(
 		&self,
 		user_id: &UserId,
diff --git a/src/service/pusher/suppressed.rs b/src/service/pusher/suppressed.rs
index e7c467d1..34b3e385 100644
--- a/src/service/pusher/suppressed.rs
+++ b/src/service/pusher/suppressed.rs
@@ -1,3 +1,8 @@
+//! Deferred push suppression queues.
+//!
+//! Stores suppressed push events in memory until they can be flushed. This is
+//! intentionally in-memory only: suppressed events are discarded on restart.
+
 use std::collections::{HashMap, VecDeque};
 use std::sync::Mutex;
 
@@ -50,6 +55,7 @@ impl SuppressedQueue {
 	}
 }
 
+/// Enqueue a PDU for later push delivery when suppression is active.
 #[implement(super::Service)]
 pub fn queue_suppressed_push(
 	&self,
@@ -123,6 +129,7 @@ pub fn queue_suppressed_push(
 	true
 }
 
+/// Take and remove all suppressed PDUs for a given user + pushkey.
 #[implement(super::Service)]
 pub fn take_suppressed_for_pushkey(
 	&self,
@@ -149,6 +156,7 @@ pub fn take_suppressed_for_pushkey(
 		.collect()
 }
 
+/// Take and remove all suppressed PDUs for a given user across all pushkeys.
 #[implement(super::Service)]
 pub fn take_suppressed_for_user(
 	&self,
@@ -172,6 +180,7 @@ pub fn take_suppressed_for_user(
 		.collect()
 }
 
+/// Clear suppressed PDUs for a specific room (across all pushkeys).
 #[implement(super::Service)]
 pub fn clear_suppressed_room(&self, user_id: &UserId, room_id: &RoomId) -> usize {
 	let mut inner = self.suppressed.lock();
@@ -196,6 +205,7 @@ pub fn clear_suppressed_room(&self, user_id: &UserId, room_id: &RoomId) -> usize
 	removed
 }
 
+/// Clear suppressed PDUs for a specific pushkey.
#[implement(super::Service)]
 pub fn clear_suppressed_pushkey(&self, user_id: &UserId, pushkey: &str) -> usize {
 	let mut inner = self.suppressed.lock();