chore: address clippy warnings
@@ -50,6 +50,7 @@ impl PresenceAggregator {
 	}

 	/// Update presence state for a single device.
+	#[allow(clippy::too_many_arguments)]
 	pub(crate) async fn update(
 		&self,
 		user_id: &UserId,
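
The attribute opts this one function out of clippy's default seven-parameter limit rather than reshaping its signature. A minimal sketch, not from this codebase, using a hypothetical describe function:

// Hypothetical example: clippy::too_many_arguments fires above seven
// parameters by default; the attribute silences it for this function only.
#[allow(clippy::too_many_arguments)]
fn describe(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> u64 {
	u64::from(a) + u64::from(b) + u64::from(c) + u64::from(d)
		+ u64::from(e) + u64::from(f) + u64::from(g) + u64::from(h)
}

fn main() {
	assert_eq!(describe(1, 1, 1, 1, 1, 1, 1, 1), 8);
}

The alternative the lint prefers is grouping related arguments into a struct; the allow is the lighter-touch choice here.
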
@@ -68,7 +69,7 @@ impl PresenceAggregator {
 			| Some(ago) => now_ms.saturating_sub(ago.into()),
 		};

-		let entry = devices.entry(device_key).or_insert(DevicePresence {
+		let entry = devices.entry(device_key).or_insert_with(|| DevicePresence {
 			state: state.clone(),
 			currently_active: currently_active.unwrap_or(false),
 			last_active_ts,
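
`or_insert` evaluates its argument eagerly even when the key is already present, while `or_insert_with` takes a closure that only runs on a vacant entry, which is the pattern clippy's or_fun_call lint pushes toward. A small self-contained sketch with hypothetical types instead of DevicePresence:

use std::collections::HashMap;

fn main() {
	let mut devices: HashMap<&str, Vec<u64>> = HashMap::new();
	devices.insert("phone", vec![1, 2, 3]);

	// Eager: the default Vec is constructed even though "phone" already exists.
	devices.entry("phone").or_insert(vec![0]);

	// Lazy: the closure only runs when the entry is vacant.
	let entry = devices.entry("laptop").or_insert_with(|| vec![0]);
	entry.push(4);

	assert_eq!(devices["phone"], vec![1, 2, 3]);
	assert_eq!(devices["laptop"], vec![0, 4]);
}
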
@@ -130,10 +131,11 @@ impl PresenceAggregator {
 				best_state = effective_state.clone();
 			}

-			if effective_state == PresenceState::Online || effective_state == PresenceState::Busy {
-				if device.currently_active && last_active_age < idle_timeout_ms {
-					any_currently_active = true;
-				}
-			}
+			if (effective_state == PresenceState::Online || effective_state == PresenceState::Busy)
+				&& device.currently_active
+				&& last_active_age < idle_timeout_ms
+			{
+				any_currently_active = true;
+			}

 			if let Some(msg) = device.status_msg.as_ref().filter(|msg| !msg.is_empty()) {
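
Folding the nested `if` into one flat condition (the collapsible_if pattern) keeps the behaviour identical, because the inner block has no `else` and does nothing besides the assignment. A reduced sketch with made-up booleans, not taken from the codebase:

fn main() {
	let (online, active, recently_seen) = (true, true, false);
	let mut any_currently_active = false;

	// Before: nested ifs, the shape clippy::collapsible_if flags.
	if online {
		if active && recently_seen {
			any_currently_active = true;
		}
	}

	// After: one flat condition with the same truth table.
	if online && active && recently_seen {
		any_currently_active = true;
	}

	assert!(!any_currently_active);
}
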
@@ -221,7 +223,6 @@ fn state_rank(state: &PresenceState) -> u8 {
 		| PresenceState::Busy => 3,
 		| PresenceState::Online => 2,
 		| PresenceState::Unavailable => 1,
-		| PresenceState::Offline => 0,
 		| _ => 0,
 	}
 }
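
Dropping the arm whose body duplicates the wildcard is the usual fix for clippy's match_same_arms; the `_ => 0` arm already covers that case. A standalone sketch with a hypothetical State enum standing in for PresenceState:

// Hypothetical enum; the real PresenceState comes from ruma.
enum State {
	Busy,
	Online,
	Unavailable,
	Offline,
}

fn rank(state: &State) -> u8 {
	match state {
		State::Busy => 3,
		State::Online => 2,
		State::Unavailable => 1,
		// An explicit `State::Offline => 0` arm would have the same body as
		// the wildcard below, which clippy's match_same_arms lint flags.
		_ => 0,
	}
}

fn main() {
	assert_eq!(rank(&State::Busy), 3);
	assert_eq!(rank(&State::Online), 2);
	assert_eq!(rank(&State::Unavailable), 1);
	assert_eq!(rank(&State::Offline), 0);
}
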
@@ -73,11 +73,10 @@ impl crate::Service for Service {
 					continue;
 				};

-				if let Some((current_count, _)) = timer_handles.get(&user_id) {
-					if *current_count != count {
-						trace!(?user_id, count, current_count, "Skipping stale presence timer");
-						continue;
-					}
-				}
+				if let Some((current_count, _)) = timer_handles.get(&user_id)
+					&& *current_count != count {
+					trace!(?user_id, count, current_count, "Skipping stale presence timer");
+					continue;
+				}

 				timer_handles.remove(&user_id);
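
The rewritten guard uses a let-chain (`if let … && condition`), stabilized for the Rust 2024 edition, so the early `continue` fires only when the lookup succeeds and the stored count is stale, without an extra nesting level. A minimal sketch with a hypothetical map, assuming a toolchain where let-chains are stable:

use std::collections::HashMap;

fn main() {
	let mut timer_handles: HashMap<String, (u64, &str)> = HashMap::new();
	timer_handles.insert("@alice:example.org".to_owned(), (7, "handle"));

	let user_id = "@alice:example.org".to_owned();
	let count = 5_u64;

	// Let-chain: both the pattern match and the comparison must hold.
	if let Some((current_count, _)) = timer_handles.get(&user_id)
		&& *current_count != count
	{
		println!("skipping stale timer: stored {current_count}, got {count}");
		return;
	}

	println!("timer is current");
}
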
@@ -79,6 +79,7 @@ impl Service {
 		expected_count != current_count
 	}

+	#[allow(clippy::too_many_arguments)]
 	async fn apply_device_presence_update(
 		&self,
 		user_id: &UserId,
@@ -134,8 +135,8 @@ impl Service {
 			| None => true,
 		};

-		if !state_changed {
-			if let Some((count, last_last_active_ago)) = Self::refresh_skip_decision(
+		if !state_changed
+			&& let Some((count, last_last_active_ago)) = Self::refresh_skip_decision(
 				refresh_window_ms,
 				last_event.as_ref(),
 				last_count,
@@ -156,7 +157,6 @@ impl Service {
 				);
 				return Ok(());
 			}
-		}

 		let fallback_status = last_event
 			.and_then(|event| event.content.status_msg)
@@ -304,9 +304,8 @@ impl Service {
 		user_id: &OwnedUserId,
 		expected_count: u64,
 	) -> Result {
-		let (current_count, presence) = match self.db.get_presence_raw(user_id).await {
-			| Ok(presence) => presence,
-			| Err(_) => return Ok(()),
+		let Ok((current_count, presence)) = self.db.get_presence_raw(user_id).await else {
+			return Ok(());
 		};

 		if Self::timer_is_stale(expected_count, current_count) {
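
Replacing the two-arm `match` with `let … else` is the manual_let_else shape clippy suggests: the success value is bound directly and the failure path becomes a diverging `else` block. A short sketch using a hypothetical, non-async stand-in for the lookup:

// Hypothetical stand-in for a fallible presence lookup.
fn get_presence_raw(user_id: &str) -> Result<(u64, String), ()> {
	if user_id.is_empty() {
		return Err(());
	}
	Ok((42, format!("presence for {user_id}")))
}

fn check(user_id: &str) -> Result<(), ()> {
	// let-else: bind on success, bail out early on failure.
	let Ok((current_count, presence)) = get_presence_raw(user_id) else {
		return Ok(());
	};

	println!("count {current_count}: {presence}");
	Ok(())
}

fn main() {
	check("@alice:example.org").unwrap();
	check("").unwrap();
}
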
@@ -15,6 +15,9 @@ const SUPPRESSED_MAX_EVENTS_PER_ROOM: usize = 512;
 const SUPPRESSED_MAX_EVENTS_PER_PUSHKEY: usize = 4096;
 const SUPPRESSED_MAX_ROOMS_PER_PUSHKEY: usize = 256;

+type SuppressedRooms = Vec<(OwnedRoomId, Vec<RawPduId>)>;
+type SuppressedPushes = Vec<(String, SuppressedRooms)>;
+
 #[derive(Default)]
 pub(super) struct SuppressedQueue {
 	inner: Mutex<HashMap<OwnedUserId, HashMap<String, PushkeyQueue>>>,
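
The two aliases keep signatures that return the nested Vec<(String, Vec<(OwnedRoomId, Vec<RawPduId>)>)> shape readable and quiet clippy's type_complexity lint; callers see SuppressedPushes instead of the full spelling. A sketch with plain String placeholders instead of the Matrix ID types:

// Placeholder aliases mirroring the shape; String stands in for
// OwnedRoomId and RawPduId from the real code.
type SuppressedRooms = Vec<(String, Vec<String>)>;
type SuppressedPushes = Vec<(String, SuppressedRooms)>;

fn take_all() -> SuppressedPushes {
	vec![(
		"pushkey-1".to_owned(),
		vec![("!room:example.org".to_owned(), vec!["$event1".to_owned()])],
	)]
}

fn main() {
	for (pushkey, rooms) in take_all() {
		for (room_id, pdu_ids) in rooms {
			println!("{pushkey}: {room_id} has {} suppressed events", pdu_ids.len());
		}
	}
}
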
@@ -38,7 +41,7 @@ impl SuppressedQueue {
 	) -> std::sync::MutexGuard<'_, HashMap<OwnedUserId, HashMap<String, PushkeyQueue>>> {
 		self.inner
 			.lock()
-			.unwrap_or_else(|e| e.into_inner())
+			.unwrap_or_else(std::sync::PoisonError::into_inner)
 	}

 	fn drain_room(queue: VecDeque<SuppressedEvent>) -> Vec<RawPduId> {
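
Passing the function path std::sync::PoisonError::into_inner instead of the closure |e| e.into_inner() is the redundant-closure style fix; behaviour is unchanged, a poisoned mutex still yields its guard rather than panicking. A compact sketch with a hypothetical Queue type:

use std::collections::HashMap;
use std::sync::{Mutex, MutexGuard, PoisonError};

struct Queue {
	inner: Mutex<HashMap<String, u64>>,
}

impl Queue {
	// Recover the guard even if another thread panicked while holding the lock.
	fn lock(&self) -> MutexGuard<'_, HashMap<String, u64>> {
		self.inner
			.lock()
			.unwrap_or_else(PoisonError::into_inner)
	}
}

fn main() {
	let queue = Queue { inner: Mutex::new(HashMap::new()) };
	queue.lock().insert("key".to_owned(), 1);
	assert_eq!(queue.lock().get("key"), Some(&1));
}
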
@@ -86,7 +89,7 @@ pub fn queue_suppressed_push(
 		let queue = push_entry
 			.rooms
 			.entry(room_id.to_owned())
-			.or_insert_with(VecDeque::new);
+			.or_default();

 		if queue
 			.back()
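
Entry::or_default() is shorthand for or_insert_with(Default::default), so spelling out VecDeque::new adds nothing and clippy nudges toward the shorter form. A minimal sketch:

use std::collections::{HashMap, VecDeque};

fn main() {
	let mut rooms: HashMap<String, VecDeque<u64>> = HashMap::new();

	// or_default() inserts VecDeque::default() (an empty queue) on a vacant entry.
	let queue = rooms.entry("!room:example.org".to_owned()).or_default();
	queue.push_back(1);

	assert_eq!(rooms["!room:example.org"].len(), 1);
}
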
@@ -161,7 +164,7 @@ pub fn take_suppressed_for_pushkey(
 	pub fn take_suppressed_for_user(
 		&self,
 		user_id: &UserId,
-	) -> Vec<(String, Vec<(OwnedRoomId, Vec<RawPduId>)>)> {
+	) -> SuppressedPushes {
 		let mut inner = self.suppressed.lock();
 		let Some(user_entry) = inner.remove(user_id) else {
 			return Vec::new();