Optimize reference graph container value type for topological_sort.

Optimize initial container capacity estimates.

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk
2026-02-14 10:23:47 +00:00
parent b7ea9714e8
commit 1bd4ab0601
15 changed files with 192 additions and 155 deletions

View File

@@ -1,7 +1,4 @@
use std::{
collections::{HashMap, HashSet},
iter::once,
};
use std::{collections::HashMap, iter::once};
use futures::{FutureExt, StreamExt, stream::FuturesOrdered};
use ruma::{
@@ -50,7 +47,7 @@ where
while let Some((prev_event_id, mut outlier)) = todo_outlier_stack.next().await {
let Some((pdu, mut json_opt)) = outlier.pop() else {
// Fetch and handle failed
graph.insert(prev_event_id.clone(), HashSet::new());
graph.insert(prev_event_id.clone(), Default::default());
continue;
};
@@ -59,7 +56,7 @@ where
let limit = self.services.server.config.max_fetch_prev_events;
if amount > limit {
debug_warn!(?limit, "Max prev event limit reached!");
graph.insert(prev_event_id.clone(), HashSet::new());
graph.insert(prev_event_id.clone(), Default::default());
continue;
}
@@ -74,7 +71,7 @@ where
let Some(json) = json_opt else {
// Get json failed, so this was not fetched over federation
graph.insert(prev_event_id.clone(), HashSet::new());
graph.insert(prev_event_id.clone(), Default::default());
continue;
};
@@ -104,7 +101,7 @@ where
);
} else {
// Time based check failed
graph.insert(prev_event_id.clone(), HashSet::new());
graph.insert(prev_event_id.clone(), Default::default());
}
eventid_info.insert(prev_event_id.clone(), (pdu, json));

View File

@@ -110,11 +110,24 @@ fn is_backed_off(&self, event_id: &EventId, range: Range<Duration>) -> bool {
}
/// Check whether a PDU with the given event id is already present in the
/// timeline store.
///
/// Delegates to `timeline.pdu_exists`; returns `true` when the event is
/// known locally. Instrumented as a trace-level span named "exists" with
/// the event id recorded as a span field and the boolean result captured
/// via `ret`.
#[implement(Service)]
#[tracing::instrument(
name = "exists",
level = "trace",
ret(level = "trace"),
skip_all,
fields(%event_id)
)]
async fn event_exists(&self, event_id: &EventId) -> bool {
self.services.timeline.pdu_exists(event_id).await
}
/// Fetch a PDU by event id from the timeline service.
///
/// Delegates to `timeline.get_pdu`. Instrumented as a trace-level span
/// named "fetch" with the event id recorded as a span field.
///
/// # Errors
/// Propagates the error from `timeline.get_pdu` — presumably when the
/// event is not present in the local store (TODO confirm: no federation
/// fallback is visible here).
#[implement(Service)]
#[tracing::instrument(
name = "fetch",
level = "trace",
skip_all,
fields(%event_id)
)]
async fn event_fetch(&self, event_id: &EventId) -> Result<PduEvent> {
self.services.timeline.get_pdu(event_id).await
}

View File

@@ -103,15 +103,10 @@ pub async fn resolve_state(
}
#[implement(super::Service)]
#[tracing::instrument(
name = "resolve",
level = "debug",
skip_all,
fields(%room_id),
)]
#[tracing::instrument(name = "resolve", level = "debug", skip_all)]
pub(super) async fn state_resolution<StateSets, AuthSets>(
&self,
room_id: &RoomId,
_room_id: &RoomId,
room_version: &RoomVersionId,
state_sets: StateSets,
auth_chains: AuthSets,

View File

@@ -22,8 +22,8 @@ use crate::rooms::{
#[tracing::instrument(
name = "upgrade",
level = "debug",
skip_all,
ret(level = "debug")
ret(level = "debug"),
skip_all
)]
pub(super) async fn upgrade_outlier_to_timeline_pdu(
&self,