Refactor to async closures.

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk
2025-07-08 11:08:58 +00:00
parent cca0f20148
commit c8d35cca57
36 changed files with 78 additions and 80 deletions

View File

@@ -213,7 +213,7 @@ impl Service {
self.db
.id_appserviceregistrations
.keys()
.and_then(move |id: &str| async move {
.and_then(async move |id: &str| {
Ok((id.to_owned(), self.get_db_registration(id).await?))
})
}

View File

@@ -146,7 +146,7 @@ async fn get_auth_chain_outer(
let chunk_cache: Vec<_> = chunk
.into_iter()
.try_stream()
.broad_and_then(|(shortid, event_id)| async move {
.broad_and_then(async |(shortid, event_id)| {
if let Ok(cached) = self
.get_cached_eventid_authchain(&[shortid])
.await

View File

@@ -3,7 +3,7 @@ use std::{
iter::once,
};
use futures::{FutureExt, future};
use futures::FutureExt;
use ruma::{
CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName,
int, uint,
@@ -110,7 +110,7 @@ where
}
}
let event_fetch = |event_id| {
let event_fetch = async |event_id: OwnedEventId| {
let origin_server_ts = eventid_info
.get(&event_id)
.map_or_else(|| uint!(0), |info| info.0.origin_server_ts().get());
@@ -118,7 +118,7 @@ where
// This return value is the key used for sorting events,
// events are then sorted by power level, time,
// and lexically by event_id.
future::ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts)))
Ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts)))
};
let sorted = state_res::lexicographical_topological_sort(&graph, &event_fetch)

View File

@@ -128,7 +128,7 @@ where
new_state
.into_iter()
.stream()
.broad_then(|((event_type, state_key), event_id)| async move {
.broad_then(async |((event_type, state_key), event_id)| {
self.services
.short
.get_or_create_shortstatekey(&event_type, &state_key)

View File

@@ -167,7 +167,7 @@ where
.prev_events()
.any(is_equal_to!(event_id))
})
.broad_filter_map(|event_id| async move {
.broad_filter_map(async |event_id| {
// Only keep those extremities that were not referenced yet
self.services
.pdu_metadata

View File

@@ -84,9 +84,9 @@ impl Data {
.ready_take_while(move |key| key.starts_with(&target.to_be_bytes()))
.map(|to_from| u64_from_u8(&to_from[8..16]))
.map(PduCount::from_unsigned)
.wide_filter_map(move |shorteventid| async move {
.map(move |shorteventid| (user_id, shortroomid, shorteventid))
.wide_filter_map(async |(user_id, shortroomid, shorteventid)| {
let pdu_id: RawPduId = PduId { shortroomid, shorteventid }.into();
let mut pdu = self
.services
.timeline

View File

@@ -112,7 +112,7 @@ pub async fn search_pdus<'a>(
let pdus = pdu_ids
.into_iter()
.stream()
.wide_filter_map(move |result_pdu_id: RawPduId| async move {
.wide_filter_map(async |result_pdu_id: RawPduId| {
self.services
.timeline
.get_pdu_from_id(&result_pdu_id)
@@ -121,7 +121,7 @@ pub async fn search_pdus<'a>(
})
.ready_filter(|pdu| !pdu.is_redacted())
.ready_filter(move |pdu| filter.matches(pdu))
.wide_filter_map(move |pdu| async move {
.wide_filter_map(async |pdu| {
self.services
.state_accessor
.user_can_see_event(query.user_id?, pdu.room_id(), pdu.event_id())
@@ -164,7 +164,7 @@ async fn search_pdu_ids_query_room(
) -> Vec<Vec<RawPduId>> {
tokenize(&query.criteria.search_term)
.stream()
.wide_then(|word| async move {
.wide_then(async |word| {
self.search_pdu_ids_query_words(shortroomid, &word)
.collect::<Vec<_>>()
.await

View File

@@ -270,7 +270,7 @@ fn get_space_child_events<'a>(
.map(IterStream::stream)
.map(StreamExt::flatten)
.flatten_stream()
.broad_filter_map(move |(state_key, event_id): (_, OwnedEventId)| async move {
.broad_filter_map(async move |(state_key, event_id): (_, OwnedEventId)| {
self.services
.timeline
.get_pdu(&event_id)

View File

@@ -338,7 +338,7 @@ pub fn state_full_pdus(
.short
.multi_get_eventid_from_short(short_ids)
.ready_filter_map(Result::ok)
.broad_filter_map(move |event_id: OwnedEventId| async move {
.broad_filter_map(async |event_id: OwnedEventId| {
self.services
.timeline
.get_pdu(&event_id)

View File

@@ -197,7 +197,7 @@ where
.map(ToOwned::to_owned)
// Don't notify the sender of their own events, and don't send from ignored users
.ready_filter(|user| *user != pdu.sender())
.filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(pdu.sender(), &recipient_user).await).then_some(recipient_user) })
.filter_map(async |recipient_user| self.services.users.user_is_ignored(pdu.sender(), &recipient_user).await.eq(&false).then_some(recipient_user))
.collect()
.await;

View File

@@ -95,7 +95,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
.stream(),
)
.ready_filter(|server_name| !self.services.globals.server_is_ours(server_name))
.filter_map(|server_name| async move {
.filter_map(async |server_name| {
self.services
.state_cache
.server_in_room(&server_name, room_id)

View File

@@ -205,12 +205,12 @@ impl Service {
let user_ids: Vec<_> = typing_indicators
.into_keys()
.stream()
.filter_map(|typing_user_id| async move {
(!self
.services
.filter_map(async |typing_user_id| {
self.services
.users
.user_is_ignored(&typing_user_id, sender_user)
.await)
.await
.eq(&false)
.then_some(typing_user_id)
})
.collect()

View File

@@ -492,7 +492,7 @@ impl Service {
.state_cache
.server_rooms(server_name)
.map(ToOwned::to_owned)
.broad_filter_map(|room_id| async move {
.broad_filter_map(async |room_id| {
let receipt_map = self
.select_edus_receipts_room(&room_id, since, max_edu_count, &mut num)
.await;

View File

@@ -173,7 +173,7 @@ impl Services {
pub async fn clear_cache(&self) {
self.services()
.for_each(|service| async move {
.for_each(async |service| {
service.clear_cache().await;
})
.await;
@@ -182,7 +182,7 @@ impl Services {
pub async fn memory_usage(&self) -> Result<String> {
self.services()
.map(Ok)
.try_fold(String::new(), |mut out, service| async move {
.try_fold(String::new(), async |mut out, service| {
service.memory_usage(&mut out).await?;
Ok(out)
})