chore: address clippy warnings

Jared L
2026-01-21 06:32:10 +11:00
parent 5bc2863721
commit a91b01f9a2
4 changed files with 22 additions and 20 deletions

View File

@@ -50,6 +50,7 @@ impl PresenceAggregator {
     }
 
     /// Update presence state for a single device.
+    #[allow(clippy::too_many_arguments)]
     pub(crate) async fn update(
         &self,
         user_id: &UserId,
@@ -68,7 +69,7 @@ impl PresenceAggregator {
             | Some(ago) => now_ms.saturating_sub(ago.into()),
         };
 
-        let entry = devices.entry(device_key).or_insert(DevicePresence {
+        let entry = devices.entry(device_key).or_insert_with(|| DevicePresence {
             state: state.clone(),
             currently_active: currently_active.unwrap_or(false),
             last_active_ts,
@@ -130,10 +131,11 @@ impl PresenceAggregator {
                 best_state = effective_state.clone();
             }
 
-            if effective_state == PresenceState::Online || effective_state == PresenceState::Busy {
-                if device.currently_active && last_active_age < idle_timeout_ms {
-                    any_currently_active = true;
-                }
+            if (effective_state == PresenceState::Online || effective_state == PresenceState::Busy)
+                && device.currently_active
+                && last_active_age < idle_timeout_ms
+            {
+                any_currently_active = true;
             }
 
             if let Some(msg) = device.status_msg.as_ref().filter(|msg| !msg.is_empty()) {
@@ -221,7 +223,6 @@ fn state_rank(state: &PresenceState) -> u8 {
         | PresenceState::Busy => 3,
         | PresenceState::Online => 2,
         | PresenceState::Unavailable => 1,
-        | PresenceState::Offline => 0,
         | _ => 0,
     }
 }
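
Notes on this file: the hunks above add `#[allow(clippy::too_many_arguments)]` to `update`, swap the eager `or_insert` for a lazy `or_insert_with`, collapse the nested currently-active check into one condition, and drop the `Offline` arm that duplicated the wildcard's 0. The `or_insert` change is presumably the fix for `clippy::or_fun_call`; a minimal standalone sketch of the difference, with a hypothetical `DevicePresence` type and key rather than the real ones:

use std::collections::HashMap;

struct DevicePresence {
    state: String,
    last_active_ts: u64,
}

fn main() {
    let mut devices: HashMap<&str, DevicePresence> = HashMap::new();

    // Eager: the DevicePresence value (including the allocation in to_owned)
    // is built even when the key already exists.
    devices.entry("alice/phone").or_insert(DevicePresence {
        state: "online".to_owned(),
        last_active_ts: 0,
    });

    // Lazy: the closure only runs when the entry is vacant.
    devices
        .entry("alice/phone")
        .or_insert_with(|| DevicePresence { state: "idle".to_owned(), last_active_ts: 5 });

    let entry = &devices["alice/phone"];
    println!("{} at {}", entry.state, entry.last_active_ts);
}

Deferring construction only pays off when the entry is usually already present, but it costs nothing and silences the lint either way.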

View File

@@ -73,11 +73,10 @@ impl crate::Service for Service {
                 continue;
             };
 
-            if let Some((current_count, _)) = timer_handles.get(&user_id) {
-                if *current_count != count {
-                    trace!(?user_id, count, current_count, "Skipping stale presence timer");
-                    continue;
-                }
+            if let Some((current_count, _)) = timer_handles.get(&user_id)
+                && *current_count != count {
+                trace!(?user_id, count, current_count, "Skipping stale presence timer");
+                continue;
             }
 
             timer_handles.remove(&user_id);
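
The hunk above folds the stale-timer check into a single `if` with a let chain, presumably what newer clippy's `collapsible_if` suggests now that let chains are available (they require edition 2024). A minimal sketch of the rewrite, using stand-in names and a plain `HashMap` instead of the service's timer state:

use std::collections::HashMap;

fn main() {
    let timer_handles: HashMap<&str, u64> = HashMap::from([("@alice:example.org", 3)]);
    let user_id = "@alice:example.org";
    let count = 2u64;

    // Before: two nested ifs.
    if let Some(current_count) = timer_handles.get(user_id) {
        if *current_count != count {
            println!("skipping stale timer (nested)");
        }
    }

    // After: one condition chained with &&, same behaviour, one less indent level.
    if let Some(current_count) = timer_handles.get(user_id)
        && *current_count != count
    {
        println!("skipping stale timer (let chain)");
    }
}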

View File

@@ -79,6 +79,7 @@ impl Service {
         expected_count != current_count
     }
 
+    #[allow(clippy::too_many_arguments)]
     async fn apply_device_presence_update(
         &self,
         user_id: &UserId,
@@ -134,8 +135,8 @@ impl Service {
             | None => true,
         };
 
-        if !state_changed {
-            if let Some((count, last_last_active_ago)) = Self::refresh_skip_decision(
+        if !state_changed
+            && let Some((count, last_last_active_ago)) = Self::refresh_skip_decision(
                 refresh_window_ms,
                 last_event.as_ref(),
                 last_count,
@@ -156,7 +157,6 @@ impl Service {
             );
             return Ok(());
         }
-        }
 
         let fallback_status = last_event
             .and_then(|event| event.content.status_msg)
@@ -304,9 +304,8 @@ impl Service {
         user_id: &OwnedUserId,
         expected_count: u64,
     ) -> Result {
-        let (current_count, presence) = match self.db.get_presence_raw(user_id).await {
-            | Ok(presence) => presence,
-            | Err(_) => return Ok(()),
+        let Ok((current_count, presence)) = self.db.get_presence_raw(user_id).await else {
+            return Ok(());
         };
 
         if Self::timer_is_stale(expected_count, current_count) {
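
The last hunk replaces a `match` whose only job was to early-return on `Err` with let-else, the shape `clippy::manual_let_else` asks for; the earlier hunks collapse an if/if-let pair into a let chain and add another `too_many_arguments` allow. A small self-contained sketch of the let-else pattern, with a hypothetical `get_presence` standing in for the real `db.get_presence_raw` call:

// get_presence is hypothetical; it only exists to make the example runnable.
fn get_presence(user_id: &str) -> Result<(u64, String), ()> {
    if user_id.is_empty() { Err(()) } else { Ok((7, "online".to_owned())) }
}

fn handle(user_id: &str) -> Result<(), ()> {
    // Before: a match that exists only to early-return on the error case.
    let (_count, _presence) = match get_presence(user_id) {
        Ok(presence) => presence,
        Err(_) => return Ok(()),
    };

    // After: let-else keeps the happy path flat and returns on the miss.
    let Ok((count, presence)) = get_presence(user_id) else {
        return Ok(());
    };
    println!("count={count}, presence={presence}");
    Ok(())
}

fn main() {
    handle("@alice:example.org").unwrap();
}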

View File

@@ -15,6 +15,9 @@ const SUPPRESSED_MAX_EVENTS_PER_ROOM: usize = 512;
 const SUPPRESSED_MAX_EVENTS_PER_PUSHKEY: usize = 4096;
 const SUPPRESSED_MAX_ROOMS_PER_PUSHKEY: usize = 256;
 
+type SuppressedRooms = Vec<(OwnedRoomId, Vec<RawPduId>)>;
+type SuppressedPushes = Vec<(String, SuppressedRooms)>;
+
 #[derive(Default)]
 pub(super) struct SuppressedQueue {
     inner: Mutex<HashMap<OwnedUserId, HashMap<String, PushkeyQueue>>>,
@@ -38,7 +41,7 @@ impl SuppressedQueue {
     ) -> std::sync::MutexGuard<'_, HashMap<OwnedUserId, HashMap<String, PushkeyQueue>>> {
         self.inner
             .lock()
-            .unwrap_or_else(|e| e.into_inner())
+            .unwrap_or_else(std::sync::PoisonError::into_inner)
     }
 
     fn drain_room(queue: VecDeque<SuppressedEvent>) -> Vec<RawPduId> {
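
The closure `|e| e.into_inner()` only forwards to a single method, so the method path can be passed instead; this is the shape the pedantic `clippy::redundant_closure_for_method_calls` lint flags. A short sketch of the same idiom on a plain `Mutex`, where `PoisonError::into_inner` recovers the guard even if a previous holder of the lock panicked:

use std::sync::{Mutex, PoisonError};

fn main() {
    let lock = Mutex::new(vec![1, 2, 3]);

    // Closure form: works, but the closure adds nothing.
    let guard = lock.lock().unwrap_or_else(|e| e.into_inner());
    drop(guard);

    // Method-path form: identical behaviour, slightly less noise.
    let guard = lock.lock().unwrap_or_else(PoisonError::into_inner);
    println!("{:?}", *guard);
}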
@@ -86,7 +89,7 @@ pub fn queue_suppressed_push(
     let queue = push_entry
         .rooms
         .entry(room_id.to_owned())
-        .or_insert_with(VecDeque::new);
+        .or_default();
 
     if queue
         .back()
@@ -161,7 +164,7 @@ pub fn take_suppressed_for_pushkey(
 pub fn take_suppressed_for_user(
     &self,
     user_id: &UserId,
-) -> Vec<(String, Vec<(OwnedRoomId, Vec<RawPduId>)>)> {
+) -> SuppressedPushes {
     let mut inner = self.suppressed.lock();
     let Some(user_entry) = inner.remove(user_id) else {
         return Vec::new();
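
The remaining hunks replace `or_insert_with(VecDeque::new)` with `or_default()` and let the `take_suppressed_for_user` signature use the `SuppressedPushes` alias introduced at the top of the file, presumably to satisfy `clippy::type_complexity`. A sketch of the alias change with simplified stand-in types (`RoomId` and `PduId` below are not the real `OwnedRoomId` and `RawPduId`):

type RoomId = String;
type PduId = u64;

type SuppressedRooms = Vec<(RoomId, Vec<PduId>)>;
type SuppressedPushes = Vec<(String, SuppressedRooms)>;

// Before: the signature spells out the whole nested type.
fn take_all_verbose() -> Vec<(String, Vec<(RoomId, Vec<PduId>)>)> {
    Vec::new()
}

// After: same type, readable at the call site and reusable across the module.
fn take_all() -> SuppressedPushes {
    Vec::new()
}

fn main() {
    assert_eq!(take_all_verbose().len(), take_all().len());
}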