State-reset and security mitigations.

Upgrade Ruma to the current upstream revision.

The following changes are included but intentionally left inactive, to be enabled in a later commit:

- Hydra backports not default.
- Room version 12 not default.
- Room version 12 not listed as stable.

Do not enable them manually, or you may irreversibly corrupt your database.

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk
2025-06-29 03:33:29 +00:00
parent 2c6dd78502
commit 628597c318
134 changed files with 14961 additions and 4935 deletions

View File

@@ -13,7 +13,7 @@ use ruma::{
name::RoomNameEventContent,
power_levels::RoomPowerLevelsEventContent,
preview_url::RoomPreviewUrlsEventContent,
topic::RoomTopicEventContent,
topic::{RoomTopicEventContent, TopicContentBlock},
},
};
use tuwunel_core::{Result, pdu::PduBuilder};
@@ -41,8 +41,8 @@ pub async fn create_server_user(services: &Services) -> Result {
/// Users in this room are considered admins by tuwunel, and the room can be
/// used to issue admin commands by talking to the server user inside it.
pub async fn create_admin_room(services: &Services) -> Result {
let room_id = RoomId::new(services.globals.server_name());
let room_version = &services.config.default_room_version;
let room_id = RoomId::new_v1(services.globals.server_name());
let room_version = RoomVersionId::V11;
let _short_id = services
.rooms
@@ -183,6 +183,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
.timeline
.build_and_append_pdu(
PduBuilder::state(String::new(), &RoomTopicEventContent {
topic_block: TopicContentBlock::default(),
topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://github.com/matrix-construct/tuwunel/", services.config.server_name),
}),
server_user,

View File

@@ -8,7 +8,8 @@ use ruma::{
CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId,
api::{
EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
client::error::Error as RumaError, federation::authentication::XMatrix,
SupportedVersions, client::error::Error as RumaError,
federation::authentication::XMatrix,
},
serde::Base64,
};
@@ -79,6 +80,7 @@ where
.resolver
.get_actual_dest(dest)
.await?;
let request = into_http_request::<T>(&actual, request)?;
let request = self.prepare(dest, request)?;
self.perform::<T>(dest, &actual, request, client)
@@ -298,9 +300,13 @@ where
{
const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_11];
const SATIR: SendAccessToken<'_> = SendAccessToken::IfRequired(EMPTY);
let supported = SupportedVersions {
versions: VERSIONS.into(),
features: Default::default(),
};
let http_request = request
.try_into_http_request::<Vec<u8>>(actual.string().as_str(), SATIR, &VERSIONS)
.try_into_http_request::<Vec<u8>>(actual.string().as_str(), SATIR, &supported)
.map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?;
Ok(http_request)

View File

@@ -534,10 +534,10 @@ async fn fix_referencedevents_missing_sep(services: &Services) -> Result {
}
async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result {
use ruma::identifiers_validation::MAX_BYTES;
use ruma::identifiers_validation::ID_MAX_BYTES;
use tuwunel_core::arrayvec::ArrayString;
type ArrayId = ArrayString<MAX_BYTES>;
type ArrayId = ArrayString<ID_MAX_BYTES>;
type Key<'a> = (&'a RoomId, u64, &'a UserId);
warn!("Fixing undeleted entries in readreceiptid_readreceipt...");

View File

@@ -6,17 +6,14 @@ use ipaddress::IPAddress;
use ruma::{
DeviceId, OwnedDeviceId, RoomId, UInt, UserId,
api::{
IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, SupportedVersions,
client::push::{Pusher, PusherKind, set_pusher},
push_gateway::send_event_notification::{
self,
v1::{Device, Notification, NotificationCounts, NotificationPriority},
},
},
events::{
AnySyncTimelineEvent, StateEventType, TimelineEventType,
room::power_levels::RoomPowerLevelsEventContent,
},
events::{AnySyncTimelineEvent, TimelineEventType, room::power_levels::RoomPowerLevels},
push::{
Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak,
},
@@ -202,12 +199,16 @@ impl Service {
T: OutgoingRequest + Debug + Send,
{
const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_0];
let supported = SupportedVersions {
versions: VERSIONS.into(),
features: Default::default(),
};
let dest = dest.replace(self.services.globals.notification_push_path(), "");
trace!("Push gateway destination: {dest}");
let http_request = request
.try_into_http_request::<BytesMut>(&dest, SendAccessToken::IfRequired(""), &VERSIONS)
.try_into_http_request::<BytesMut>(&dest, SendAccessToken::IfRequired(""), &supported)
.map_err(|e| {
err!(BadServerResponse(warn!(
"Failed to find destination {dest} for push gateway: {e}"
@@ -301,13 +302,11 @@ impl Service {
let mut notify = None;
let mut tweaks = Vec::new();
let power_levels: RoomPowerLevelsEventContent = self
let power_levels: RoomPowerLevels = self
.services
.state_accessor
.room_state_get(event.room_id(), &StateEventType::RoomPowerLevels, "")
.await
.and_then(|event| event.get_content())
.unwrap_or_default();
.get_power_levels(event.room_id())
.await?;
let serialized = event.to_format();
for action in self
@@ -346,7 +345,7 @@ impl Service {
&self,
user: &UserId,
ruleset: &'a Ruleset,
power_levels: &RoomPowerLevelsEventContent,
power_levels: &RoomPowerLevels,
pdu: &Raw<AnySyncTimelineEvent>,
room_id: &RoomId,
) -> &'a [Action] {
@@ -354,6 +353,7 @@ impl Service {
users: power_levels.users.clone(),
users_default: power_levels.users_default,
notifications: power_levels.notifications.clone(),
rules: power_levels.rules.clone(),
};
let room_joined_count = self
@@ -380,7 +380,7 @@ impl Service {
power_levels: Some(power_levels),
};
ruleset.get_actions(pdu, &ctx)
ruleset.get_actions(pdu, &ctx).await
}
#[tracing::instrument(skip(self, unread, pusher, tweaks, event))]

View File

@@ -2,13 +2,10 @@ mod remote;
use std::sync::Arc;
use futures::{Stream, StreamExt, TryFutureExt};
use futures::{Stream, StreamExt};
use ruma::{
OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, UserId,
events::{
StateEventType,
room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
},
events::StateEventType,
};
use tuwunel_core::{
Err, Result, Server, err,
@@ -225,12 +222,7 @@ impl Service {
if let Ok(power_levels) = self
.services
.state_accessor
.room_state_get_content::<RoomPowerLevelsEventContent>(
&room_id,
&StateEventType::RoomPowerLevels,
"",
)
.map_ok(RoomPowerLevels::from)
.get_power_levels(&room_id)
.await
{
return Ok(

View File

@@ -8,9 +8,11 @@ use std::{
};
use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, pin_mut};
use ruma::{EventId, OwnedEventId, RoomId};
use ruma::{EventId, OwnedEventId, RoomId, room_version_rules::RoomVersionRules};
use tuwunel_core::{
Err, Result, at, debug, debug_error, implement, trace,
Err, Result, at, debug, debug_error, implement,
matrix::Event,
trace,
utils::{
IterStream,
stream::{ReadyExt, TryBroadbandExt},
@@ -28,6 +30,7 @@ pub struct Service {
struct Services {
short: Dep<rooms::short::Service>,
state: Dep<rooms::state::Service>,
timeline: Dep<rooms::timeline::Service>,
}
@@ -38,6 +41,7 @@ impl crate::Service for Service {
Ok(Arc::new(Self {
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
state: args.depend::<rooms::state::Service>("rooms::state"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
db: Data::new(&args),
@@ -80,6 +84,12 @@ where
const BUCKET: Bucket<'_> = BTreeSet::new();
let started = Instant::now();
let room_rules = self
.services
.state
.get_room_version_rules(room_id)
.await?;
let starting_ids = self
.services
.short
@@ -103,7 +113,7 @@ where
let full_auth_chain: Vec<ShortEventId> = buckets
.into_iter()
.try_stream()
.broad_and_then(|chunk| self.get_auth_chain_outer(room_id, started, chunk))
.broad_and_then(|chunk| self.get_auth_chain_outer(room_id, started, chunk, &room_rules))
.try_collect()
.map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect())
.map_ok(|mut full_auth_chain: Vec<_>| {
@@ -129,6 +139,7 @@ async fn get_auth_chain_outer(
room_id: &RoomId,
started: Instant,
chunk: Bucket<'_>,
room_rules: &RoomVersionRules,
) -> Result<Vec<ShortEventId>> {
let chunk_key: Vec<ShortEventId> = chunk.iter().map(at!(0)).collect();
@@ -155,7 +166,7 @@ async fn get_auth_chain_outer(
}
let auth_chain = self
.get_auth_chain_inner(room_id, event_id)
.get_auth_chain_inner(room_id, event_id, room_rules)
.await?;
self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice());
@@ -187,14 +198,33 @@ async fn get_auth_chain_outer(
}
#[implement(Service)]
#[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))]
#[tracing::instrument(
name = "inner",
level = "trace",
skip(self, room_id, room_rules)
)]
async fn get_auth_chain_inner(
&self,
room_id: &RoomId,
event_id: &EventId,
room_rules: &RoomVersionRules,
) -> Result<Vec<ShortEventId>> {
let mut todo: VecDeque<_> = [event_id.to_owned()].into();
let mut found = HashSet::new();
let mut todo: VecDeque<_> = [event_id.to_owned()].into();
if room_rules
.authorization
.room_create_event_id_as_room_id
{
let create_id = room_id.as_event_id()?;
let sauthevent = self
.services
.short
.get_or_create_shorteventid(&create_id)
.await;
found.insert(sauthevent);
}
while let Some(event_id) = todo.pop_front() {
trace!(?event_id, "processing auth event");
@@ -213,7 +243,7 @@ async fn get_auth_chain_inner(
))));
}
for auth_event in &pdu.auth_events {
for auth_event in pdu.auth_events() {
let sauthevent = self
.services
.short
@@ -223,7 +253,7 @@ async fn get_auth_chain_inner(
if found.insert(sauthevent) {
trace!(?event_id, ?auth_event, "adding auth event to processing queue");
todo.push_back(auth_event.clone());
todo.push_back(auth_event.to_owned());
}
}
},

View File

@@ -140,10 +140,7 @@ async fn fetch_auth_chain(
let Ok(res) = self
.services
.sending
.send_federation_request(origin, get_event::v1::Request {
event_id: next_id.clone(),
include_unredacted_content: None,
})
.send_federation_request(origin, get_event::v1::Request { event_id: next_id.clone() })
.await
.inspect_err(|e| debug_error!("Failed to fetch event {next_id}: {e}"))
else {

View File

@@ -119,10 +119,10 @@ where
// This return value is the key used for sorting events,
// events are then sorted by power level, time,
// and lexically by event_id.
Ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts)))
Ok((int!(0).into(), MilliSecondsSinceUnixEpoch(origin_server_ts)))
};
let sorted = state_res::lexicographical_topological_sort(&graph, &event_fetch)
let sorted = state_res::topological_sort(&graph, &event_fetch)
.await
.map_err(|e| err!(Database(error!("Error sorting prev events: {e}"))))?;

View File

@@ -4,11 +4,14 @@ use futures::{
};
use ruma::{CanonicalJsonObject, EventId, RoomId, ServerName, UserId, events::StateEventType};
use tuwunel_core::{
Err, Result, debug, debug::INFO_SPAN_LEVEL, err, implement, matrix::Event,
utils::stream::IterStream, warn,
Err, Result, debug,
debug::INFO_SPAN_LEVEL,
err, implement,
matrix::{Event, room_version},
utils::stream::IterStream,
warn,
};
use super::get_room_version_id;
use crate::rooms::timeline::RawPduId;
/// When receiving an event one needs to:
@@ -47,7 +50,7 @@ use crate::rooms::timeline::RawPduId;
ret(Debug),
)]
pub async fn handle_incoming_pdu<'a>(
&self,
&'a self,
origin: &'a ServerName,
room_id: &'a RoomId,
event_id: &'a EventId,
@@ -107,11 +110,10 @@ pub async fn handle_incoming_pdu<'a>(
return Err!(Request(Forbidden("Federation of this room is disabled by this server.")));
}
let room_version = get_room_version_id(create_event)?;
let room_version = room_version::from_create_event(create_event)?;
let (incoming_pdu, val) = self
.handle_outlier_pdu(origin, room_id, event_id, pdu, &room_version, false)
.boxed()
.await?;
// 8. if not timeline event: stop

View File

@@ -1,17 +1,16 @@
use std::collections::{HashMap, hash_map};
use futures::future::ready;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, RoomVersionId, ServerName,
events::StateEventType,
events::{StateEventType, TimelineEventType},
};
use tuwunel_core::{
Err, Result, debug, debug_info, err, implement,
matrix::{Event, PduEvent},
matrix::{Event, PduEvent, event::TypeExt, room_version},
state_res, trace, warn,
};
use super::{check_room_id, to_room_version};
use super::check_room_id;
#[implement(super::Service)]
pub(super) async fn handle_outlier_pdu(
@@ -40,7 +39,13 @@ pub(super) async fn handle_outlier_pdu(
| Ok(ruma::signatures::Verified::Signatures) => {
// Redact
debug_info!("Calculated hash does not match (redaction): {event_id}");
let Ok(obj) = ruma::canonical_json::redact(pdu_json, room_version, None) else {
let Some(rules) = room_version.rules() else {
return Err!(Request(UnsupportedRoomVersion(
"Cannot redact event for unknown room version {room_version:?}."
)));
};
let Ok(obj) = ruma::canonical_json::redact(pdu_json, &rules.redaction, None) else {
return Err!(Request(InvalidParam("Redaction failed")));
};
@@ -62,8 +67,7 @@ pub(super) async fn handle_outlier_pdu(
// Now that we have checked the signature and hashes we can add the eventID and
// convert to our PduEvent type
pdu_json
.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
pdu_json.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.to_string()));
let event = serde_json::from_value::<PduEvent>(serde_json::to_value(&pdu_json)?)
.map_err(|e| err!(Request(BadJson(debug_warn!("Event is not a valid PDU: {e}")))))?;
@@ -83,16 +87,28 @@ pub(super) async fn handle_outlier_pdu(
// 6. Reject "due to auth events" if the event doesn't pass auth based on the
// auth events
debug!("Checking based on auth events");
let room_rules = room_version::rules(room_version)?;
let is_create = *event.kind() == TimelineEventType::RoomCreate;
let is_hydra = room_rules
.authorization
.room_create_event_id_as_room_id;
let hydra_create_id = (is_hydra && !is_create).then_some(event.room_id().as_event_id()?);
let auth_event_ids = event
.auth_events()
.map(ToOwned::to_owned)
.chain(hydra_create_id.into_iter());
// Build map of auth events
let mut auth_events = HashMap::with_capacity(event.auth_events().count());
for id in event.auth_events() {
let Ok(auth_event) = self.services.timeline.get_pdu(id).await else {
let mut auth_events = HashMap::with_capacity(event.auth_events().count().saturating_add(1));
for id in auth_event_ids {
let Ok(auth_event) = self.services.timeline.get_pdu(&id).await else {
warn!("Could not find auth event {id}");
continue;
};
check_room_id(room_id, &auth_event)?;
match auth_events.entry((
auth_event.kind.to_string().into(),
auth_event
@@ -119,24 +135,20 @@ pub(super) async fn handle_outlier_pdu(
return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
}
let state_fetch = |ty: &StateEventType, sk: &str| {
let key = (ty.to_owned(), sk.into());
ready(auth_events.get(&key).map(ToOwned::to_owned))
};
let auth_check = state_res::event_auth::auth_check(
&to_room_version(room_version),
state_res::auth_check(
&room_rules,
&event,
None, // TODO: third party invite
state_fetch,
&async |event_id| self.event_fetch(&event_id).await,
&async |event_type, state_key| {
auth_events
.get(&event_type.with_state_key(state_key.as_str()))
.map(ToOwned::to_owned)
.ok_or_else(|| err!(Request(NotFound("state not found"))))
},
)
.await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
if !auth_check {
return Err!(Request(Forbidden("Auth check failed")));
}
trace!("Validation successful.");
// 7. Persist the event as an outlier.

View File

@@ -19,12 +19,9 @@ use std::{
};
use async_trait::async_trait;
use ruma::{
EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId,
events::room::create::RoomCreateEventContent,
};
use ruma::{EventId, OwnedRoomId, RoomId};
use tuwunel_core::{
Err, Result, RoomVersion, Server, implement,
Err, Result, Server, implement,
matrix::{Event, PduEvent},
utils::{MutexMap, continue_exponential_backoff},
};
@@ -126,17 +123,13 @@ fn is_backed_off(&self, event_id: &EventId, range: Range<Duration>) -> bool {
}
#[implement(Service)]
async fn event_exists(&self, event_id: OwnedEventId) -> bool {
self.services.timeline.pdu_exists(&event_id).await
async fn event_exists(&self, event_id: &EventId) -> bool {
self.services.timeline.pdu_exists(event_id).await
}
#[implement(Service)]
async fn event_fetch(&self, event_id: OwnedEventId) -> Option<PduEvent> {
self.services
.timeline
.get_pdu(&event_id)
.await
.ok()
async fn event_fetch(&self, event_id: &EventId) -> Result<PduEvent> {
self.services.timeline.get_pdu(event_id).await
}
fn check_room_id<Pdu: Event>(room_id: &RoomId, pdu: &Pdu) -> Result {
@@ -151,15 +144,3 @@ fn check_room_id<Pdu: Event>(room_id: &RoomId, pdu: &Pdu) -> Result {
Ok(())
}
fn get_room_version_id<Pdu: Event>(create_event: &Pdu) -> Result<RoomVersionId> {
let content: RoomCreateEventContent = create_event.get_content()?;
let room_version = content.room_version;
Ok(room_version)
}
#[inline]
fn to_room_version(room_version_id: &RoomVersionId) -> RoomVersion {
RoomVersion::new(room_version_id).expect("room version is supported")
}

View File

@@ -1,14 +1,11 @@
use std::{
borrow::Borrow,
collections::{HashMap, HashSet},
sync::Arc,
};
use std::{borrow::Borrow, collections::HashMap, sync::Arc};
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join};
use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
use ruma::{OwnedEventId, RoomId, RoomVersionId};
use tuwunel_core::{
Error, Result, err, implement,
state_res::{self, StateMap},
Result, err, implement,
matrix::room_version,
state_res::{self, AuthSet, StateMap},
trace,
utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt},
};
@@ -47,9 +44,9 @@ pub async fn resolve_state(
self.services
.auth_chain
.event_ids_iter(room_id, state.values().map(Borrow::borrow))
.try_collect()
.try_collect::<AuthSet<OwnedEventId>>()
})
.try_collect::<Vec<HashSet<OwnedEventId>>>();
.ready_filter_map(Result::ok);
let fork_states = fork_states
.iter()
@@ -62,17 +59,12 @@ pub async fn resolve_state(
.multi_get_statekey_from_short(shortstatekeys)
.zip(event_ids)
.ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id)))
.collect()
})
.map(Ok::<_, Error>)
.try_collect::<Vec<StateMap<OwnedEventId>>>();
let (fork_states, auth_chain_sets) = try_join(fork_states, auth_chain_sets).await?;
.collect::<StateMap<OwnedEventId>>()
});
trace!("Resolving state");
let state = self
.state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets)
.boxed()
.state_resolution(room_version_id, fork_states, auth_chain_sets)
.await?;
trace!("State resolution done.");
@@ -104,19 +96,24 @@ pub async fn resolve_state(
}
#[implement(super::Service)]
#[tracing::instrument(name = "ruma", level = "debug", skip_all)]
pub async fn state_resolution<'a, StateSets>(
&'a self,
room_version: &'a RoomVersionId,
pub(super) async fn state_resolution<StateSets, AuthSets>(
&self,
room_version: &RoomVersionId,
state_sets: StateSets,
auth_chain_sets: &'a [HashSet<OwnedEventId>],
auth_chains: AuthSets,
) -> Result<StateMap<OwnedEventId>>
where
StateSets: Iterator<Item = &'a StateMap<OwnedEventId>> + Clone + Send,
StateSets: Stream<Item = StateMap<OwnedEventId>> + Send,
AuthSets: Stream<Item = AuthSet<OwnedEventId>> + Send,
{
let event_fetch = |event_id| self.event_fetch(event_id);
let event_exists = |event_id| self.event_exists(event_id);
state_res::resolve(room_version, state_sets, auth_chain_sets, &event_fetch, &event_exists)
.map_err(|e| err!(error!("State resolution failed: {e:?}")))
.await
state_res::resolve(
&room_version::rules(room_version)?,
state_sets,
auth_chains,
&async |event_id: OwnedEventId| self.event_fetch(&event_id).await,
&async |event_id: OwnedEventId| self.event_exists(&event_id).await,
self.services.server.config.hydra_backports,
)
.map_err(|e| err!(error!("State resolution failed: {e:?}")))
.await
}

View File

@@ -1,15 +1,17 @@
use std::{
borrow::Borrow,
collections::{HashMap, HashSet},
iter::Iterator,
collections::HashMap,
iter::{Iterator, once},
};
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join};
use futures::{
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
future::{OptionFuture, try_join},
};
use ruma::{OwnedEventId, RoomId, RoomVersionId};
use tuwunel_core::{
Result, err, implement,
matrix::{Event, StateMap},
trace,
Result, apply, err, implement,
matrix::{Event, StateMap, state_res::AuthSet},
ref_at, trace,
utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt},
};
@@ -60,14 +62,16 @@ where
let (prev_event, mut state) = try_join(prev_event, state).await?;
if let Some(state_key) = prev_event.state_key {
if let Some(state_key) = prev_event.state_key() {
let prev_event_type = prev_event.event_type().to_cow_str().into();
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&prev_event.kind.into(), &state_key)
.get_or_create_shortstatekey(&prev_event_type, state_key)
.await;
state.insert(shortstatekey, prev_event.event_id);
state.insert(shortstatekey, prev_event.event_id().into());
// Now it's the state after the pdu
}
@@ -113,26 +117,28 @@ where
};
trace!("Calculating fork states...");
let (fork_states, auth_chain_sets): (Vec<StateMap<_>>, Vec<HashSet<_>>) =
extremity_sstatehashes
.into_iter()
.try_stream()
.wide_and_then(|(sstatehash, prev_event)| {
self.state_at_incoming_fork(room_id, sstatehash, prev_event)
})
.try_collect()
.map_ok(Vec::into_iter)
.map_ok(Iterator::unzip)
.await?;
let (fork_states, auth_chain_sets) = extremity_sstatehashes
.into_iter()
.try_stream()
.wide_and_then(|(sstatehash, prev_event)| {
self.state_at_incoming_fork(room_id, sstatehash, prev_event)
})
.try_collect()
.map_ok(Vec::into_iter)
.map_ok(Iterator::unzip)
.map_ok(apply!(2, Vec::into_iter))
.map_ok(apply!(2, IterStream::stream))
.await?;
trace!("Resolving state");
let Ok(new_state) = self
.state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets)
.boxed()
.state_resolution(room_version_id, fork_states, auth_chain_sets)
.await
else {
return Ok(None);
};
trace!("State resolution done.");
new_state
.into_iter()
.stream()
@@ -150,41 +156,55 @@ where
}
#[implement(super::Service)]
#[tracing::instrument(
name = "fork",
level = "debug",
skip_all,
fields(
?sstatehash,
prev_event = ?prev_event.event_id(),
)
)]
async fn state_at_incoming_fork<Pdu>(
&self,
room_id: &RoomId,
sstatehash: ShortStateHash,
prev_event: Pdu,
) -> Result<(StateMap<OwnedEventId>, HashSet<OwnedEventId>)>
) -> Result<(StateMap<OwnedEventId>, AuthSet<OwnedEventId>)>
where
Pdu: Event,
{
let mut leaf_state: HashMap<_, _> = self
let leaf: OptionFuture<_> = prev_event
.state_key()
.map(async |state_key| {
self.services
.short
.get_or_create_shortstatekey(&prev_event.kind().to_cow_str().into(), state_key)
.map(|shortstatekey| once((shortstatekey, prev_event.event_id().to_owned())))
.await
})
.into();
let leaf_state_after_event: Vec<_> = self
.services
.state_accessor
.state_full_ids(sstatehash)
.chain(leaf.await.into_iter().flatten().stream())
.collect()
.await;
if let Some(state_key) = prev_event.state_key() {
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&prev_event.kind().to_string().into(), state_key)
.await;
let event_id = prev_event.event_id();
leaf_state.insert(shortstatekey, event_id.to_owned());
// Now it's the state after the pdu
}
let starting_events = leaf_state_after_event
.iter()
.map(ref_at!(1))
.map(AsRef::as_ref);
let auth_chain = self
.services
.auth_chain
.event_ids_iter(room_id, leaf_state.values().map(Borrow::borrow))
.event_ids_iter(room_id, starting_events)
.try_collect();
let fork_state = leaf_state
let fork_state = leaf_state_after_event
.iter()
.stream()
.broad_then(|(k, id)| {

View File

@@ -1,18 +1,18 @@
use std::{borrow::Borrow, iter::once, sync::Arc, time::Instant};
use futures::{FutureExt, StreamExt, future::ready};
use futures::{FutureExt, StreamExt};
use ruma::{
CanonicalJsonObject, EventId, RoomId, RoomVersionId, ServerName, events::StateEventType,
CanonicalJsonObject, EventId, OwnedEventId, RoomId, RoomVersionId, ServerName,
events::StateEventType,
};
use tuwunel_core::{
Err, Result, debug, debug_info, err, implement, is_equal_to,
matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res},
matrix::{Event, EventTypeExt, PduEvent, StateKey, room_version, state_res},
trace,
utils::stream::{BroadbandExt, ReadyExt},
warn,
};
use super::to_room_version;
use crate::rooms::{
state_compressor::{CompressedState, HashSetCompressStateEvent},
timeline::RawPduId,
@@ -50,6 +50,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
debug!("Upgrading to timeline pdu");
let timer = Instant::now();
let room_rules = room_version::rules(room_version)?;
// 10. Fetch missing state and auth chain events by calling /state_ids at
// backwards extremities doing all the checks in this list starting at 1.
@@ -77,35 +78,26 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
debug!("Performing auth check");
// 11. Check the auth of the event passes based on the state of the event
let state_fetch_state = &state_at_incoming_event;
let state_fetch = |k: StateEventType, s: StateKey| async move {
let state_fetch = async |k: StateEventType, s: StateKey| {
let shortstatekey = self
.services
.short
.get_shortstatekey(&k, &s)
.await
.ok()?;
.get_shortstatekey(&k, s.as_str())
.await?;
let event_id = state_fetch_state.get(&shortstatekey)?;
self.services
.timeline
.get_pdu(event_id)
.await
.ok()
let event_id = state_at_incoming_event
.get(&shortstatekey)
.ok_or_else(|| {
err!(Request(NotFound(
"shortstatekey {shortstatekey:?} not found for ({k:?},{s:?})"
)))
})?;
self.services.timeline.get_pdu(event_id).await
};
let auth_check = state_res::event_auth::auth_check(
&to_room_version(room_version),
&incoming_pdu,
None, // TODO: third party invite
|ty, sk| state_fetch(ty.clone(), sk.into()),
)
.await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
if !auth_check {
return Err!(Request(Forbidden("Event has failed auth check with state at the event.")));
}
let event_fetch = async |event_id: OwnedEventId| self.event_fetch(&event_id).await;
state_res::auth_check(&room_rules, &incoming_pdu, &event_fetch, &state_fetch).await?;
debug!("Gathering auth events");
let auth_events = self
@@ -117,29 +109,25 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
incoming_pdu.sender(),
incoming_pdu.state_key(),
incoming_pdu.content(),
&room_rules.authorization,
true,
)
.await?;
let state_fetch = |k: &StateEventType, s: &str| {
let key = k.with_state_key(s);
ready(auth_events.get(&key).map(ToOwned::to_owned))
let state_fetch = async |k: StateEventType, s: StateKey| {
auth_events
.get(&k.with_state_key(s.as_str()))
.map(ToOwned::to_owned)
.ok_or_else(|| err!(Request(NotFound("state event not found"))))
};
let auth_check = state_res::event_auth::auth_check(
&to_room_version(room_version),
&incoming_pdu,
None, // third-party invite
state_fetch,
)
.await
.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
state_res::auth_check(&room_rules, &incoming_pdu, &event_fetch, &state_fetch).await?;
// Soft fail check before doing state res
debug!("Performing soft-fail check");
let soft_fail = match (auth_check, incoming_pdu.redacts_id(room_version)) {
| (false, _) => true,
| (true, None) => false,
| (true, Some(redact_id)) =>
let soft_fail = match incoming_pdu.redacts_id(room_version) {
| None => false,
| Some(redact_id) =>
!self
.services
.state_accessor
@@ -224,11 +212,13 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
.services
.state_compressor
.save_state(room_id, new_room_state)
.boxed()
.await?;
self.services
.state
.force_state(room_id, shortstatehash, added, removed, &state_lock)
.boxed()
.await?;
}

View File

@@ -11,17 +11,14 @@ use ruma::{
OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId,
api::{
client::space::SpaceHierarchyRoomsChunk,
federation::{
self,
space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary},
},
federation::{self, space::SpaceHierarchyParentSummary},
},
events::{
StateEventType,
space::child::{HierarchySpaceChildEvent, SpaceChildEventContent},
},
room::{JoinRuleSummary, RoomSummary},
serde::Raw,
space::SpaceRoomJoinRule,
};
use tokio::sync::{Mutex, MutexGuard};
use tuwunel_core::{
@@ -130,17 +127,12 @@ pub async fn get_summary_and_children_local(
| None => (), // cache miss
| Some(None) => return Ok(None),
| Some(Some(cached)) => {
let allowed_rooms = cached
.summary
.allowed_room_ids
.iter()
.map(AsRef::as_ref);
let join_rule = &cached.summary.summary.join_rule;
let is_accessible_child = self.is_accessible_child(
current_room,
&cached.summary.join_rule,
join_rule,
identifier,
allowed_rooms,
join_rule.allowed_room_ids(),
);
let accessibility = if is_accessible_child.await {
@@ -236,10 +228,11 @@ async fn get_summary_and_children_federation(
.await;
let identifier = Identifier::UserId(user_id);
let allowed_room_ids = summary.allowed_room_ids.iter().map(AsRef::as_ref);
let join_rule = &summary.summary.join_rule;
let allowed_room_ids = join_rule.allowed_room_ids();
let is_accessible_child = self
.is_accessible_child(current_room, &summary.join_rule, &identifier, allowed_room_ids)
.is_accessible_child(current_room, join_rule, &identifier, allowed_room_ids)
.await;
let accessibility = if is_accessible_child {
@@ -334,7 +327,7 @@ async fn get_room_summary(
room_id,
&join_rule.clone().into(),
identifier,
join_rule.allowed_rooms(),
join_rule.allowed_room_ids(),
)
.await;
@@ -421,23 +414,21 @@ async fn get_room_summary(
);
let summary = SpaceHierarchyParentSummary {
canonical_alias,
name,
topic,
world_readable,
guest_can_join,
avatar_url,
room_type,
children_state,
encryption,
room_version,
room_id: room_id.to_owned(),
num_joined_members: num_joined_members.try_into().unwrap_or_default(),
allowed_room_ids: join_rule
.allowed_rooms()
.map(Into::into)
.collect(),
join_rule: join_rule.clone().into(),
summary: RoomSummary {
canonical_alias,
name,
topic,
world_readable,
guest_can_join,
avatar_url,
room_type,
encryption,
room_version,
room_id: room_id.to_owned(),
num_joined_members: num_joined_members.try_into().unwrap_or_default(),
join_rule: join_rule.clone().into(),
},
};
Ok(summary)
@@ -448,7 +439,7 @@ async fn get_room_summary(
async fn is_accessible_child<'a, I>(
&self,
current_room: &RoomId,
join_rule: &SpaceRoomJoinRule,
join_rule: &JoinRuleSummary,
identifier: &Identifier<'_>,
allowed_rooms: I,
) -> bool
@@ -486,10 +477,10 @@ where
}
match *join_rule {
| SpaceRoomJoinRule::Public
| SpaceRoomJoinRule::Knock
| SpaceRoomJoinRule::KnockRestricted => true,
| SpaceRoomJoinRule::Restricted =>
| JoinRuleSummary::Public
| JoinRuleSummary::Knock
| JoinRuleSummary::KnockRestricted(_) => true,
| JoinRuleSummary::Restricted(_) =>
allowed_rooms
.stream()
.any(async |room| match identifier {
@@ -516,9 +507,9 @@ where
pub fn get_parent_children_via(
parent: &SpaceHierarchyParentSummary,
suggested_only: bool,
) -> impl DoubleEndedIterator<Item = (OwnedRoomId, impl Iterator<Item = OwnedServerName> + use<>)>
+ Send
+ '_ {
) -> impl DoubleEndedIterator<
Item = (OwnedRoomId, impl Iterator<Item = OwnedServerName> + Send + use<>),
> + '_ {
parent
.children_state
.iter()
@@ -535,9 +526,9 @@ async fn cache_insert(
&self,
mut cache: MutexGuard<'_, Cache>,
current_room: &RoomId,
child: SpaceHierarchyChildSummary,
child: RoomSummary,
) {
let SpaceHierarchyChildSummary {
let RoomSummary {
canonical_alias,
name,
num_joined_members,
@@ -548,30 +539,30 @@ async fn cache_insert(
avatar_url,
join_rule,
room_type,
allowed_room_ids,
encryption,
room_version,
} = child;
let summary = SpaceHierarchyParentSummary {
canonical_alias,
name,
num_joined_members,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
allowed_room_ids,
room_id: room_id.clone(),
summary: RoomSummary {
canonical_alias,
name,
num_joined_members,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
room_id: room_id.clone(),
encryption,
room_version,
},
children_state: self
.get_space_child_events(&room_id)
.map(Event::into_format)
.collect()
.await,
encryption,
room_version,
};
cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary }));
@@ -581,39 +572,9 @@ async fn cache_insert(
// ruma-client-api types
impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
fn from(value: CachedSpaceHierarchySummary) -> Self {
let SpaceHierarchyParentSummary {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
allowed_room_ids,
encryption,
room_version,
} = value.summary;
let SpaceHierarchyParentSummary { children_state, summary } = value.summary;
Self {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
encryption,
room_version,
allowed_room_ids,
}
Self { children_state, summary }
}
}
@@ -621,37 +582,7 @@ impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
/// ruma-client-api types
#[must_use]
pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk {
let SpaceHierarchyParentSummary {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
allowed_room_ids,
encryption,
room_version,
} = summary;
let SpaceHierarchyParentSummary { children_state, summary } = summary;
SpaceHierarchyRoomsChunk {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
encryption,
room_version,
allowed_room_ids,
}
SpaceHierarchyRoomsChunk { children_state, summary }
}

View File

@@ -2,21 +2,23 @@ use std::str::FromStr;
use ruma::{
UInt,
api::federation::space::{SpaceHierarchyParentSummary, SpaceHierarchyParentSummaryInit},
api::federation::space::SpaceHierarchyParentSummary,
owned_room_id, owned_server_name,
space::SpaceRoomJoinRule,
room::{JoinRuleSummary, RoomSummary},
};
use crate::rooms::spaces::{PaginationToken, get_parent_children_via};
#[test]
fn get_summary_children() {
let summary: SpaceHierarchyParentSummary = SpaceHierarchyParentSummaryInit {
num_joined_members: UInt::from(1_u32),
room_id: owned_room_id!("!root:example.org"),
world_readable: true,
guest_can_join: true,
join_rule: SpaceRoomJoinRule::Public,
let summary: SpaceHierarchyParentSummary = SpaceHierarchyParentSummary {
summary: RoomSummary::new(
owned_room_id!("!root:example.org"),
JoinRuleSummary::Public,
true,
UInt::from(1_u32),
true,
),
children_state: vec![
serde_json::from_str(
r#"{
@@ -63,9 +65,7 @@ fn get_summary_children() {
)
.unwrap(),
],
allowed_room_ids: vec![],
}
.into();
};
assert_eq!(
get_parent_children_via(&summary, false)

View File

@@ -1,21 +1,21 @@
use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc};
use async_trait::async_trait;
use futures::{
FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future::join_all, pin_mut,
};
use futures::{FutureExt, Stream, StreamExt, TryStreamExt, future::join_all, pin_mut};
use ruma::{
EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId,
events::{
AnyStrippedStateEvent, StateEventType, TimelineEventType,
room::{create::RoomCreateEventContent, member::RoomMemberEventContent},
room::member::RoomMemberEventContent,
},
room_version_rules::AuthorizationRules,
serde::Raw,
};
use tuwunel_core::{
Event, PduEvent, Result, err,
result::FlatOk,
state_res::{self, StateMap},
matrix::{RoomVersionRules, StateKey, TypeStateKey, room_version},
result::{AndThenRef, FlatOk},
state_res::{StateMap, auth_types_for_event},
utils::{
IterStream, MutexMap, MutexMapGuard, ReadyExt, calculate_hash,
stream::{BroadbandExt, TryIgnore},
@@ -27,7 +27,7 @@ use tuwunel_database::{Deserialized, Ignore, Interfix, Map};
use crate::{
Dep, globals, rooms,
rooms::{
short::{ShortEventId, ShortStateHash},
short::{ShortEventId, ShortStateHash, ShortStateKey},
state_compressor::{CompressedState, parse_compressed_state_event},
},
};
@@ -355,11 +355,91 @@ impl Service {
}
}
#[tracing::instrument(skip_all, level = "trace")]
pub async fn summary_stripped<Pdu>(&self, event: &Pdu) -> Vec<Raw<AnyStrippedStateEvent>>
/// Set the state hash to a new version, but does not update state_cache.
#[tracing::instrument(skip(self, _mutex_lock), level = "debug")]
pub fn set_room_state(
&self,
room_id: &RoomId,
shortstatehash: u64,
// Take mutex guard to make sure users get the room state mutex
_mutex_lock: &RoomMutexGuard,
) {
const BUFSIZE: usize = size_of::<u64>();
self.db
.roomid_shortstatehash
.raw_aput::<BUFSIZE, _, _>(room_id, shortstatehash);
}
/// This fetches auth events from the current state.
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip(self, content), level = "debug")]
pub async fn get_auth_events(
&self,
room_id: &RoomId,
kind: &TimelineEventType,
sender: &UserId,
state_key: Option<&str>,
content: &serde_json::value::RawValue,
auth_rules: &AuthorizationRules,
include_create: bool,
) -> Result<StateMap<PduEvent>>
where
Pdu: Event,
StateEventType: Send + Sync,
StateKey: Send + Sync,
{
let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else {
return Ok(StateMap::new());
};
let sauthevents: HashMap<ShortStateKey, TypeStateKey> =
auth_types_for_event(kind, sender, state_key, content, auth_rules, include_create)?
.into_iter()
.stream()
.broad_filter_map(async |(event_type, state_key): TypeStateKey| {
self.services
.short
.get_shortstatekey(&event_type, &state_key)
.await
.map(move |sstatekey| (sstatekey, (event_type, state_key)))
.ok()
})
.collect()
.await;
self.services
.state_accessor
.state_full_shortids(shortstatehash)
.ready_filter_map(Result::ok)
.ready_filter_map(|(shortstatekey, shorteventid)| {
sauthevents
.get(&shortstatekey)
.map(move |(ty, sk)| ((ty, sk), shorteventid))
})
.unzip()
.map(|(state_keys, event_ids): (Vec<_>, Vec<_>)| {
self.services
.short
.multi_get_eventid_from_short(event_ids.into_iter().stream())
.zip(state_keys.into_iter().stream())
})
.flatten_stream()
.ready_filter_map(|(event_id, (ty, sk))| Some(((ty, sk), event_id.ok()?)))
.broad_filter_map(async |((ty, sk), event_id): ((&_, &_), OwnedEventId)| {
let pdu = self.services.timeline.get_pdu(&event_id).await;
Some(((ty.clone(), sk.clone()), pdu.ok()?))
})
.collect()
.map(Ok)
.await
}
#[tracing::instrument(skip_all, level = "debug")]
pub async fn summary_stripped<Pdu: Event>(
&self,
event: &Pdu,
) -> Vec<Raw<AnyStrippedStateEvent>> {
let cells = [
(&StateEventType::RoomCreate, ""),
(&StateEventType::RoomJoinRules, ""),
@@ -386,20 +466,12 @@ impl Service {
.collect()
}
/// Set the state hash to a new version, but does not update state_cache.
#[tracing::instrument(skip(self, _mutex_lock), level = "debug")]
pub fn set_room_state(
&self,
room_id: &RoomId,
shortstatehash: u64,
// Take mutex guard to make sure users get the room state mutex
_mutex_lock: &RoomMutexGuard,
) {
const BUFSIZE: usize = size_of::<u64>();
self.db
.roomid_shortstatehash
.raw_aput::<BUFSIZE, _, _>(room_id, shortstatehash);
/// Returns the room's version rules
#[inline]
pub async fn get_room_version_rules(&self, room_id: &RoomId) -> Result<RoomVersionRules> {
self.get_room_version(room_id)
.await
.and_then_ref(room_version::rules)
}
/// Returns the room's version.
@@ -413,7 +485,9 @@ impl Service {
.state_accessor
.room_state_get_content(room_id, &StateEventType::RoomCreate, "")
.await
.map(|content: RoomCreateEventContent| content.room_version)
.as_ref()
.map(room_version::from_create_content)
.cloned()
.map_err(|e| err!(Request(NotFound("No create event found: {e:?}"))))
}
@@ -469,64 +543,4 @@ impl Service {
self.db.roomid_pduleaves.put_raw(key, event_id);
}
}
/// This fetches auth events from the current state.
#[tracing::instrument(skip(self, content), level = "trace")]
pub async fn get_auth_events(
&self,
room_id: &RoomId,
kind: &TimelineEventType,
sender: &UserId,
state_key: Option<&str>,
content: &serde_json::value::RawValue,
) -> Result<StateMap<PduEvent>> {
let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else {
return Ok(HashMap::new());
};
let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?;
let sauthevents: HashMap<_, _> = auth_types
.iter()
.stream()
.broad_filter_map(|(event_type, state_key)| {
self.services
.short
.get_shortstatekey(event_type, state_key)
.map_ok(move |ssk| (ssk, (event_type, state_key)))
.map(Result::ok)
})
.collect()
.await;
let (state_keys, event_ids): (Vec<_>, Vec<_>) = self
.services
.state_accessor
.state_full_shortids(shortstatehash)
.ready_filter_map(Result::ok)
.ready_filter_map(|(shortstatekey, shorteventid)| {
sauthevents
.get(&shortstatekey)
.map(|(ty, sk)| ((ty, sk), shorteventid))
})
.unzip()
.await;
self.services
.short
.multi_get_eventid_from_short(event_ids.into_iter().stream())
.zip(state_keys.into_iter().stream())
.ready_filter_map(|(event_id, (ty, sk))| Some(((ty, sk), event_id.ok()?)))
.broad_filter_map(|((ty, sk), event_id): (_, OwnedEventId)| async move {
self.services
.timeline
.get_pdu(&event_id)
.await
.map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu))
.ok()
})
.collect()
.map(Ok)
.await
}
}

View File

@@ -6,6 +6,7 @@ mod user_can;
use std::sync::Arc;
use async_trait::async_trait;
use futures::{FutureExt, TryFutureExt, future::try_join};
use ruma::{
EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, RoomId, UserId,
events::{
@@ -20,12 +21,16 @@ use ruma::{
join_rules::{JoinRule, RoomJoinRulesEventContent},
member::RoomMemberEventContent,
name::RoomNameEventContent,
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
topic::RoomTopicEventContent,
},
},
room::RoomType,
};
use tuwunel_core::{Result, err};
use tuwunel_core::{
Result, err,
matrix::{Event, room_version, state_res::events::RoomCreateEvent},
};
use tuwunel_database::Map;
use crate::{Dep, rooms};
@@ -69,6 +74,31 @@ impl crate::Service for Service {
}
impl Service {
/// Gets the effective power levels of a room, regardless of if there is an
/// `m.rooms.power_levels` state.
pub async fn get_power_levels(&self, room_id: &RoomId) -> Result<RoomPowerLevels> {
let create = self.get_create(room_id);
let power_levels = self
.room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
.map_ok(|c: RoomPowerLevelsEventContent| c)
.map(Result::ok)
.map(Ok);
let (create, power_levels) = try_join(create, power_levels).await?;
let room_version = create.room_version()?;
let rules = room_version::rules(&room_version)?;
let creators = create.creators(&rules.authorization)?;
Ok(RoomPowerLevels::new(power_levels.into(), &rules.authorization, creators))
}
pub async fn get_create(&self, room_id: &RoomId) -> Result<RoomCreateEvent<impl Event>> {
self.room_state_get(room_id, &StateEventType::RoomCreate, "")
.await
.map(RoomCreateEvent::new)
}
pub async fn get_name(&self, room_id: &RoomId) -> Result<String> {
self.room_state_get_content(room_id, &StateEventType::RoomName, "")
.await

View File

@@ -321,7 +321,9 @@ pub fn state_full(
shortstatehash: ShortStateHash,
) -> impl Stream<Item = ((StateEventType, StateKey), impl Event)> + Send + '_ {
self.state_full_pdus(shortstatehash)
.ready_filter_map(|pdu| Some(((pdu.kind().clone().into(), pdu.state_key()?.into()), pdu)))
.ready_filter_map(|pdu| {
Some(((pdu.kind().to_cow_str().into(), pdu.state_key()?.into()), pdu))
})
}
#[implement(super::Service)]

View File

@@ -5,7 +5,6 @@ use ruma::{
room::{
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
member::{MembershipState, RoomMemberEventContent},
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
},
},
};
@@ -44,28 +43,18 @@ pub async fn user_can_redact(
)));
}
match self
.room_state_get_content::<RoomPowerLevelsEventContent>(
room_id,
&StateEventType::RoomPowerLevels,
"",
)
.await
{
| Ok(pl_event_content) => {
let pl_event: RoomPowerLevels = pl_event_content.into();
Ok(pl_event.user_can_redact_event_of_other(sender)
|| pl_event.user_can_redact_own_event(sender)
&& match redacting_event {
| Ok(redacting_event) =>
if federation {
redacting_event.sender().server_name() == sender.server_name()
} else {
redacting_event.sender() == sender
},
| _ => false,
})
},
match self.get_power_levels(room_id).await {
| Ok(power_levels) => Ok(power_levels.user_can_redact_event_of_other(sender)
|| power_levels.user_can_redact_own_event(sender)
&& match redacting_event {
| Ok(redacting_event) =>
if federation {
redacting_event.sender().server_name() == sender.server_name()
} else {
redacting_event.sender() == sender
},
| _ => false,
}),
| _ => {
// Falling back on m.room.create to judge power level
match self

View File

@@ -405,7 +405,7 @@ pub fn rooms_invited<'a>(
.stream_prefix(&prefix)
.ignore_err()
.map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state))
.map(|(room_id, state)| Ok((room_id, state.deserialize_as()?)))
.map(|(room_id, state)| Ok((room_id, state.deserialize_as_unchecked()?)))
.ignore_err()
}
@@ -425,7 +425,7 @@ pub fn rooms_knocked<'a>(
.stream_prefix(&prefix)
.ignore_err()
.map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state))
.map(|(room_id, state)| Ok((room_id, state.deserialize_as()?)))
.map(|(room_id, state)| Ok((room_id, state.deserialize_as_unchecked()?)))
.ignore_err()
}
@@ -442,7 +442,9 @@ pub async fn invite_state(
.qry(&key)
.await
.deserialized()
.and_then(|val: Raw<Vec<AnyStrippedStateEvent>>| val.deserialize_as().map_err(Into::into))
.and_then(|val: Raw<Vec<AnyStrippedStateEvent>>| {
val.deserialize_as_unchecked().map_err(Into::into)
})
}
#[implement(Service)]
@@ -458,7 +460,9 @@ pub async fn knock_state(
.qry(&key)
.await
.deserialized()
.and_then(|val: Raw<Vec<AnyStrippedStateEvent>>| val.deserialize_as().map_err(Into::into))
.and_then(|val: Raw<Vec<AnyStrippedStateEvent>>| {
val.deserialize_as_unchecked().map_err(Into::into)
})
}
#[implement(Service)]
@@ -474,7 +478,9 @@ pub async fn left_state(
.qry(&key)
.await
.deserialized()
.and_then(|val: Raw<Vec<AnyStrippedStateEvent>>| val.deserialize_as().map_err(Into::into))
.and_then(|val: Raw<Vec<AnyStrippedStateEvent>>| {
val.deserialize_as_unchecked().map_err(Into::into)
})
}
/// Returns an iterator over all rooms a user left.
@@ -493,7 +499,7 @@ pub fn rooms_left<'a>(
.stream_prefix(&prefix)
.ignore_err()
.map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state))
.map(|(room_id, state)| Ok((room_id, state.deserialize_as()?)))
.map(|(room_id, state)| Ok((room_id, state.deserialize_as_unchecked()?)))
.ignore_err()
}

View File

@@ -7,12 +7,11 @@ use futures::StreamExt;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedUserId, RoomId, RoomVersionId, UserId,
events::{
GlobalAccountDataEventType, StateEventType, TimelineEventType,
GlobalAccountDataEventType, TimelineEventType,
push_rules::PushRulesEvent,
room::{
encrypted::Relation,
member::{MembershipState, RoomMemberEventContent},
power_levels::RoomPowerLevelsEventContent,
redaction::RoomRedactionEventContent,
},
},
@@ -186,14 +185,6 @@ where
drop(insert_lock);
// See if the event matches any known pushers via power level
let power_levels: RoomPowerLevelsEventContent = self
.services
.state_accessor
.room_state_get_content(pdu.room_id(), &StateEventType::RoomPowerLevels, "")
.await
.unwrap_or_default();
// Don't notify the sender of their own events, and dont send from ignored users
let mut push_target: HashSet<_> = self
.services
@@ -245,6 +236,12 @@ where
let mut highlight = false;
let mut notify = false;
let power_levels = self
.services
.state_accessor
.get_power_levels(pdu.room_id())
.await?;
for action in self
.services
.pusher

View File

@@ -35,6 +35,15 @@ pub async fn build_and_append_pdu(
.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)
.await?;
//TODO: Use proper room version here
if *pdu.kind() == TimelineEventType::RoomCreate && pdu.room_id().server_name().is_none() {
let _short_id = self
.services
.short
.get_or_create_shortroomid(pdu.room_id())
.await;
}
if self
.services
.admin

View File

@@ -1,21 +1,26 @@
use std::cmp;
use futures::{StreamExt, TryStreamExt, future, future::ready};
use futures::{StreamExt, TryStreamExt};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, UserId,
canonical_json::to_canonical_value,
CanonicalJsonObject, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId,
OwnedRoomId, RoomId, RoomVersionId, UserId,
events::{StateEventType, TimelineEventType, room::create::RoomCreateEventContent},
room_version_rules::RoomIdFormatVersion,
uint,
};
use serde_json::value::to_raw_value;
use tuwunel_core::{
Err, Error, Result, err, implement,
matrix::{
event::{Event, gen_event_id},
event::{Event, StateKey, TypeExt, gen_event_id},
pdu::{EventHash, PduBuilder, PduEvent},
state_res::{self, RoomVersion},
room_version,
state_res::{self},
},
utils::{
IterStream, ReadyExt, TryReadyExt, millis_since_unix_epoch, stream::TryIgnore,
to_canonical_object,
},
utils::{self, IterStream, ReadyExt, stream::TryIgnore},
};
use super::RoomMutexGuard;
@@ -48,7 +53,7 @@ pub async fn create_hash_and_sign_event(
.await;
// If there was no create event yet, assume we are creating a room
let room_version_id = self
let (room_version, version_rules) = self
.services
.state
.get_room_version(room_id)
@@ -63,14 +68,23 @@ pub async fn create_hash_and_sign_event(
room_id.to_owned(),
))
}
})
.and_then(|room_version| {
Ok((room_version.clone(), room_version::rules(&room_version)?))
})?;
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
let auth_events = self
.services
.state
.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)
.get_auth_events(
room_id,
&event_type,
sender,
state_key.as_deref(),
&content,
&version_rules.authorization,
true,
)
.await?;
// Our depth is the maximum depth of prev_events + 1
@@ -79,7 +93,7 @@ pub async fn create_hash_and_sign_event(
.stream()
.map(Ok)
.and_then(|event_id| self.get_pdu(event_id))
.and_then(|pdu| future::ok(pdu.depth))
.ready_and_then(|pdu| Ok(pdu.depth))
.ignore_err()
.ready_fold(uint!(0), cmp::max)
.await
@@ -100,26 +114,25 @@ pub async fn create_hash_and_sign_event(
}
}
let unsigned = if unsigned.is_empty() {
None
} else {
Some(to_raw_value(&unsigned)?)
};
let unsigned = unsigned
.is_empty()
.eq(&false)
.then_some(to_raw_value(&unsigned)?);
let origin_server_ts = timestamp.map_or_else(
|| {
utils::millis_since_unix_epoch()
let origin_server_ts = timestamp
.as_ref()
.map(MilliSecondsSinceUnixEpoch::get)
.unwrap_or_else(|| {
millis_since_unix_epoch()
.try_into()
.expect("timestamp to UInt")
},
|ts| ts.get(),
);
.expect("u64 to UInt")
});
let mut pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
event_id: ruma::event_id!("$thiswillbereplaced").into(),
room_id: room_id.to_owned(),
sender: sender.to_owned(),
origin: None,
origin: Some(self.services.globals.server_name().to_owned()),
origin_server_ts,
kind: event_type,
content,
@@ -132,55 +145,57 @@ pub async fn create_hash_and_sign_event(
prev_events,
auth_events: auth_events
.values()
.filter(|pdu| {
version_rules
.event_format
.allow_room_create_in_auth_events
|| *pdu.kind() != TimelineEventType::RoomCreate
})
.map(|pdu| pdu.event_id.clone())
.collect(),
};
let auth_fetch = |k: &StateEventType, s: &str| {
let key = (k.clone(), s.into());
ready(auth_events.get(&key).map(ToOwned::to_owned))
let auth_fetch = async |k: StateEventType, s: StateKey| {
auth_events
.get(&k.with_state_key(s.as_str()))
.map(ToOwned::to_owned)
.ok_or_else(|| err!(Request(NotFound("Missing auth events"))))
};
let auth_check = state_res::auth_check(
&room_version,
state_res::auth_check(
&version_rules,
&pdu,
None, // TODO: third_party_invite
auth_fetch,
&async |event_id: OwnedEventId| self.get_pdu(&event_id).await,
&auth_fetch,
)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
if !auth_check {
return Err!(Request(Forbidden("Event is not authorized.")));
}
.await?;
// Hash and sign
let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| {
let mut pdu_json = to_canonical_object(&pdu).map_err(|e| {
err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}"))))
})?;
// room v3 and above removed the "event_id" field from remote PDU format
match room_version_id {
| RoomVersionId::V1 | RoomVersionId::V2 => {},
| _ => {
pdu_json.remove("event_id");
},
if !matches!(room_version, RoomVersionId::V1 | RoomVersionId::V2) {
pdu_json.remove("event_id");
}
// Add origin because synapse likes that (and it's required in the spec)
pdu_json.insert(
"origin".to_owned(),
to_canonical_value(self.services.globals.server_name())
.expect("server name is a valid CanonicalJsonValue"),
);
// room v12 and above removed the placeholder "room_id" field from m.room.create
if matches!(version_rules.room_id_format, RoomIdFormatVersion::V2)
&& pdu.kind == TimelineEventType::RoomCreate
{
pdu_json.remove("room_id");
}
if let Err(e) = self
.services
.server_keys
.hash_and_sign_event(&mut pdu_json, &room_version_id)
.hash_and_sign_event(&mut pdu_json, &room_version)
{
use ruma::signatures::Error::PduSize;
return match e {
| Error::Signatures(ruma::signatures::Error::PduSize) => {
| Error::Signatures(PduSize) => {
Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)")))
},
| _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))),
@@ -188,10 +203,17 @@ pub async fn create_hash_and_sign_event(
}
// Generate event id
pdu.event_id = gen_event_id(&pdu_json, &room_version_id)?;
pdu.event_id = gen_event_id(&pdu_json, &room_version)?;
pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into()));
// Room id is event id for V12+
if matches!(version_rules.room_id_format, RoomIdFormatVersion::V2)
&& pdu.kind == TimelineEventType::RoomCreate
{
pdu.room_id = OwnedRoomId::from_parts('!', pdu.event_id.localpart(), None)?;
pdu_json.insert("room_id".into(), CanonicalJsonValue::String(pdu.room_id.clone().into()));
}
// Generate short event id
let _shorteventid = self
.services

View File

@@ -3,7 +3,8 @@ use std::{fmt::Debug, mem};
use bytes::BytesMut;
use reqwest::Client;
use ruma::api::{
IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration,
IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, SupportedVersions,
appservice::Registration,
};
use tuwunel_core::{Err, Result, debug_error, err, trace, utils, warn};
@@ -20,6 +21,10 @@ where
T: OutgoingRequest + Debug + Send,
{
const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_7];
let supported = SupportedVersions {
versions: VERSIONS.into(),
features: Default::default(),
};
let Some(dest) = registration.url else {
return Ok(None);
@@ -36,7 +41,7 @@ where
.try_into_http_request::<BytesMut>(
&dest,
SendAccessToken::IfRequired(hs_token),
&VERSIONS,
&supported,
)
.map_err(|e| {
err!(BadServerResponse(
@@ -76,6 +81,7 @@ where
let mut http_response_builder = http::Response::builder()
.status(status)
.version(response.version());
mem::swap(
response.headers_mut(),
http_response_builder

View File

@@ -16,8 +16,8 @@ use futures::{
stream::FuturesUnordered,
};
use ruma::{
CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId,
RoomId, RoomVersionId, ServerName, UInt,
CanonicalJsonObject, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedRoomId,
OwnedServerName, OwnedUserId, RoomId, ServerName, UInt,
api::{
appservice::event::push_events::v1::EphemeralData,
federation::transactions::{
@@ -39,7 +39,8 @@ use ruma::{
};
use serde_json::value::{RawValue as RawJsonValue, to_raw_value};
use tuwunel_core::{
Error, Event, Result, debug, err, error,
Error, Event, Result, debug, err, error, implement,
matrix::room_version,
result::LogErr,
trace,
utils::{
@@ -720,7 +721,7 @@ impl Service {
.filter(|event| matches!(event, SendingEvent::Pdu(_)))
.count(),
);
let mut edu_jsons: Vec<EphemeralData> = Vec::with_capacity(
let mut edu_jsons: Vec<Raw<EphemeralData>> = Vec::with_capacity(
events
.iter()
.filter(|event| matches!(event, SendingEvent::Edu(_)))
@@ -740,7 +741,9 @@ impl Service {
},
| SendingEvent::Edu(edu) =>
if appservice.receive_ephemeral {
if let Ok(edu) = serde_json::from_slice(edu) {
if let Ok(edu) =
serde_json::from_slice(edu).and_then(|edu| Raw::new(&edu))
{
edu_jsons.push(edu);
}
},
@@ -749,7 +752,7 @@ impl Service {
}
let txn_hash = calculate_hash(events.iter().filter_map(|e| match e {
| SendingEvent::Edu(b) => Some(&**b),
| SendingEvent::Edu(b) => Some(b.as_ref()),
| SendingEvent::Pdu(b) => Some(b.as_ref()),
| SendingEvent::Flush => None,
}));
@@ -763,8 +766,8 @@ impl Service {
client,
appservice,
ruma::api::appservice::event::push_events::v1::Request {
events: pdu_jsons,
txn_id: txn_id.into(),
events: pdu_jsons,
ephemeral: edu_jsons,
to_device: Vec::new(), // TODO
},
@@ -933,47 +936,59 @@ impl Service {
| Ok(_) => Ok(Destination::Federation(server)),
}
}
}
/// This does not return a full `Pdu` it is only to satisfy ruma's types.
pub async fn convert_to_outgoing_federation_event(
&self,
mut pdu_json: CanonicalJsonObject,
) -> Box<RawJsonValue> {
if let Some(unsigned) = pdu_json
.get_mut("unsigned")
.and_then(|val| val.as_object_mut())
{
unsigned.remove("transaction_id");
}
/// This does not return a full `Pdu` it is only to satisfy ruma's types.
#[implement(Service)]
pub async fn convert_to_outgoing_federation_event(
&self,
mut pdu_json: CanonicalJsonObject,
) -> Box<RawJsonValue> {
self.strip_outgoing_federation_event(&mut pdu_json)
.await;
// room v3 and above removed the "event_id" field from remote PDU format
if let Some(room_id) = pdu_json
.get("room_id")
.and_then(|val| RoomId::parse(val.as_str()?).ok())
{
match self
.services
.state
.get_room_version(room_id)
.await
{
| Ok(room_version_id) => match room_version_id {
| RoomVersionId::V1 | RoomVersionId::V2 => {},
| _ => _ = pdu_json.remove("event_id"),
},
| Err(_) => _ = pdu_json.remove("event_id"),
}
} else {
pdu_json.remove("event_id");
}
to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value")
}
// TODO: another option would be to convert it to a canonical string to validate
// size and return a Result<Raw<...>>
// serde_json::from_str::<Raw<_>>(
// ruma::serde::to_canonical_json_string(pdu_json).expect("CanonicalJson is
// valid serde_json::Value"), )
// .expect("Raw::from_value always works")
#[implement(Service)]
async fn strip_outgoing_federation_event(&self, pdu_json: &mut CanonicalJsonObject) {
if let Some(unsigned) = pdu_json
.get_mut("unsigned")
.and_then(|val| val.as_object_mut())
{
unsigned.remove("transaction_id");
}
to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value")
let Some(room_id) = pdu_json
.get("room_id")
.and_then(CanonicalJsonValue::as_str)
.map(RoomId::parse)
.transpose()
.ok()
.flatten()
else {
return;
};
let Ok(room_rules) = self
.services
.state
.get_room_version(room_id)
.await
.and_then(|ref ver| room_version::rules(ver))
else {
pdu_json.remove("event_id");
return;
};
if !room_rules.event_format.require_event_id {
pdu_json.remove("event_id");
}
if !room_rules
.event_format
.require_room_create_room_id
{
pdu_json.remove("room_id");
}
}

View File

@@ -1,8 +1,8 @@
use std::borrow::Borrow;
use ruma::{
CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId,
api::federation::discovery::VerifyKey,
CanonicalJsonObject, ServerName, ServerSigningKeyId, api::federation::discovery::VerifyKey,
room_version_rules::RoomVersionRules,
};
use tuwunel_core::{Err, Result, implement};
@@ -12,11 +12,11 @@ use super::{PubKeyMap, PubKeys, extract_key};
pub async fn get_event_keys(
&self,
object: &CanonicalJsonObject,
version: &RoomVersionId,
version: &RoomVersionRules,
) -> Result<PubKeyMap> {
use ruma::signatures::required_keys;
let required = match required_keys(object, version) {
let required = match required_keys(object, &version.signatures) {
| Ok(required) => required,
| Err(e) => {
return Err!(BadServerResponse("Failed to determine keys required to verify: {e}"));

View File

@@ -9,9 +9,10 @@ use std::{collections::BTreeMap, sync::Arc, time::Duration};
use futures::StreamExt;
use ruma::{
CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId,
ServerName, ServerSigningKeyId,
CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName,
ServerSigningKeyId,
api::federation::discovery::{ServerSigningKeys, VerifyKey},
room_version_rules::RoomVersionRules,
serde::Raw,
signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet},
};
@@ -109,6 +110,7 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) {
keys.verify_keys.extend(new_keys.verify_keys);
keys.old_verify_keys
.extend(new_keys.old_verify_keys);
self.db
.server_signingkeys
.raw_put(origin, Json(&keys));
@@ -118,11 +120,11 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) {
pub async fn required_keys_exist(
&self,
object: &CanonicalJsonObject,
version: &RoomVersionId,
rules: &RoomVersionRules,
) -> bool {
use ruma::signatures::required_keys;
let Ok(required_keys) = required_keys(object, version) else {
let Ok(required_keys) = required_keys(object, &rules.signatures) else {
return false;
};
@@ -191,6 +193,7 @@ pub async fn signing_keys_for(&self, origin: &ServerName) -> Result<ServerSignin
fn minimum_valid_ts(&self) -> MilliSecondsSinceUnixEpoch {
let timepoint =
timepoint_from_now(self.minimum_valid).expect("SystemTime should not overflow");
MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow")
}

View File

@@ -1,5 +1,5 @@
use ruma::{CanonicalJsonObject, RoomVersionId};
use tuwunel_core::{Result, implement};
use tuwunel_core::{Result, err, implement};
#[implement(super::Service)]
pub fn sign_json(&self, object: &mut CanonicalJsonObject) -> Result {
@@ -17,6 +17,18 @@ pub fn hash_and_sign_event(
) -> Result {
use ruma::signatures::hash_and_sign_event;
let server_name = self.services.globals.server_name().as_str();
hash_and_sign_event(server_name, self.keypair(), object, room_version).map_err(Into::into)
let server_name = &self.services.server.name;
let room_version_rules = room_version.rules().ok_or_else(|| {
err!(Request(UnsupportedRoomVersion(
"Cannot hash and sign event for unknown room version {room_version:?}."
)))
})?;
hash_and_sign_event(
server_name.as_str(),
self.keypair(),
object,
&room_version_rules.redaction,
)
.map_err(Into::into)
}

View File

@@ -2,7 +2,7 @@ use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified,
};
use serde_json::value::RawValue as RawJsonValue;
use tuwunel_core::{Err, Result, implement, matrix::event::gen_event_id_canonical_json};
use tuwunel_core::{Err, Result, err, implement, matrix::event::gen_event_id_canonical_json};
#[implement(super::Service)]
pub async fn validate_and_add_event_id(
@@ -32,8 +32,14 @@ pub async fn validate_and_add_event_id_no_fetch(
room_version: &RoomVersionId,
) -> Result<(OwnedEventId, CanonicalJsonObject)> {
let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?;
let room_version_rules = room_version.rules().ok_or_else(|| {
err!(Request(UnsupportedRoomVersion(
"Cannot verify event for unknown room version {room_version:?}."
)))
})?;
if !self
.required_keys_exist(&value, room_version)
.required_keys_exist(&value, &room_version_rules)
.await
{
return Err!(BadServerResponse(debug_warn!(
@@ -62,8 +68,17 @@ pub async fn verify_event(
room_version: Option<&RoomVersionId>,
) -> Result<Verified> {
let room_version = room_version.unwrap_or(&RoomVersionId::V11);
let keys = self.get_event_keys(event, room_version).await?;
ruma::signatures::verify_event(&keys, event, room_version).map_err(Into::into)
let room_version_rules = room_version.rules().ok_or_else(|| {
err!(Request(UnsupportedRoomVersion(
"Cannot verify event for unknown room version {room_version:?}."
)))
})?;
let event_keys = self
.get_event_keys(event, &room_version_rules)
.await?;
ruma::signatures::verify_event(&event_keys, event, &room_version_rules).map_err(Into::into)
}
#[implement(super::Service)]
@@ -73,6 +88,15 @@ pub async fn verify_json(
room_version: Option<&RoomVersionId>,
) -> Result {
let room_version = room_version.unwrap_or(&RoomVersionId::V11);
let keys = self.get_event_keys(event, room_version).await?;
ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into)
let room_version_rules = room_version.rules().ok_or_else(|| {
err!(Request(UnsupportedRoomVersion(
"Cannot verify json for unknown room version {room_version:?}."
)))
})?;
let event_keys = self
.get_event_keys(event, &room_version_rules)
.await?;
ruma::signatures::verify_json(&event_keys, event).map_err(Into::into)
}

View File

@@ -119,7 +119,8 @@ impl Service {
&mut list.room_details.required_state,
&cached_list.room_details.required_state,
);
some_or_sticky(&mut list.include_heroes, cached_list.include_heroes);
//some_or_sticky(&mut list.include_heroes, cached_list.include_heroes);
match (&mut list.filters, cached_list.filters.clone()) {
| (Some(filters), Some(cached_filters)) => {