2024-06-07 18:35:48 -04:00
|
|
|
use std::{
|
2024-10-11 18:57:59 +00:00
|
|
|
collections::HashMap,
|
2024-06-17 21:46:23 +00:00
|
|
|
fmt::Write,
|
2024-11-22 12:25:46 +00:00
|
|
|
iter::once,
|
2025-06-18 09:29:06 +00:00
|
|
|
str::FromStr,
|
2024-07-24 00:15:03 +00:00
|
|
|
time::{Instant, SystemTime},
|
2024-06-07 18:35:48 -04:00
|
|
|
};
|
2024-04-20 19:13:18 -04:00
|
|
|
|
2025-04-22 01:41:02 +00:00
|
|
|
use futures::{FutureExt, StreamExt, TryStreamExt};
|
|
|
|
|
use ruma::{
|
|
|
|
|
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
|
|
|
|
|
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
|
2025-04-26 08:24:47 +00:00
|
|
|
api::federation::event::get_room_state, events::AnyStateEvent, serde::Raw,
|
2025-04-22 01:41:02 +00:00
|
|
|
};
|
2025-06-18 09:29:06 +00:00
|
|
|
use serde::Serialize;
|
2025-04-22 01:41:02 +00:00
|
|
|
use tracing_subscriber::EnvFilter;
|
|
|
|
|
use tuwunel_core::{
|
2025-06-18 09:29:06 +00:00
|
|
|
Err, Result, debug_error, err, info, jwt,
|
2025-04-26 08:24:47 +00:00
|
|
|
matrix::{
|
|
|
|
|
Event,
|
|
|
|
|
pdu::{PduEvent, PduId, RawPduId},
|
|
|
|
|
},
|
2025-04-04 03:30:13 +00:00
|
|
|
trace, utils,
|
2025-01-29 21:10:33 +00:00
|
|
|
utils::{
|
|
|
|
|
stream::{IterStream, ReadyExt},
|
|
|
|
|
string::EMPTY,
|
2025-06-18 09:29:06 +00:00
|
|
|
time::now_secs,
|
2025-01-29 21:10:33 +00:00
|
|
|
},
|
2025-02-23 01:17:45 -05:00
|
|
|
warn,
|
2024-12-15 00:05:47 -05:00
|
|
|
};
|
2025-09-29 09:44:45 +00:00
|
|
|
use tuwunel_service::rooms::{short::ShortRoomId, state_compressor::HashSetCompressStateEvent};
|
2024-04-20 19:13:18 -04:00
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
use crate::admin_command;
|
|
|
|
|
|
|
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn echo(&self, message: Vec<String>) -> Result {
|
2024-06-16 02:14:19 +00:00
|
|
|
let message = message.join(" ");
|
2025-04-06 23:41:58 +00:00
|
|
|
self.write_str(&message).await
|
2024-06-16 02:14:19 +00:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Count the auth chain of a locally-stored event and report how long the
/// traversal took.
#[admin_command]
pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result {
	// The event must exist locally; we only need its JSON to read `room_id`.
	let Ok(event) = self
		.services
		.timeline
		.get_pdu_json(&event_id)
		.await
	else {
		return Err!("Event not found.");
	};

	// Extract and validate the room ID from the stored event JSON.
	let room_id_str = event
		.get("room_id")
		.and_then(CanonicalJsonValue::as_str)
		.ok_or_else(|| err!(Database("Invalid event in database")))?;

	let room_id = <&RoomId>::try_from(room_id_str)
		.map_err(|_| err!(Database("Invalid room id field in event in database")))?;

	// Walk the auth chain starting from this single event, dropping any
	// entries that error, and just count the results (timed).
	let start = Instant::now();
	let count = self
		.services
		.auth_chain
		.event_ids_iter(room_id, once(event_id.as_ref()))
		.ready_filter_map(Result::ok)
		.count()
		.await;

	let elapsed = start.elapsed();
	let out = format!("Loaded auth chain with length {count} in {elapsed:?}");

	self.write_str(&out).await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Parse a PDU supplied as a fenced JSON code block in the command body,
/// compute its reference hash / event ID (using room version 6 rules), and
/// pretty-print the parsed event.
#[admin_command]
pub(super) async fn parse_pdu(&self) -> Result {
	// The body must be wrapped in ``` fences; the JSON is everything between.
	if self.body.len() < 2
		|| !self.body[0].trim().starts_with("```")
		|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
	{
		return Err!("Expected code block in command body. Add --help for details.");
	}

	let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
	// NOTE(review): V6 rules are hard-coded here regardless of the event's
	// actual room version — reference hashes for other versions may differ.
	let rules = RoomVersionId::V6
		.rules()
		.expect("rules for V6 rooms");
	// The whole match evaluates to a write-future which is awaited below.
	match serde_json::from_str(&string) {
		| Err(e) => return Err!("Invalid json in command body: {e}"),
		| Ok(value) => match ruma::signatures::reference_hash(&value, &rules) {
			| Err(e) => return Err!("Could not parse PDU JSON: {e:?}"),
			| Ok(hash) => {
				// The event ID is simply '$' + reference hash.
				let event_id = OwnedEventId::parse(format!("${hash}"));
				match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
					| Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"),
					| Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"),
				}
			},
		},
	}
	.await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Fetch a PDU from the local database by event ID and pretty-print it,
/// noting whether it was only stored as an outlier (rejected / soft-failed).
#[admin_command]
pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
	let mut outlier = false;
	// Prefer the non-outlier (accepted) copy of the event first.
	let mut pdu_json = self
		.services
		.timeline
		.get_non_outlier_pdu_json(&event_id)
		.await;

	if pdu_json.is_err() {
		// Fall back to any stored copy, flagging it as an outlier.
		outlier = true;
		pdu_json = self
			.services
			.timeline
			.get_pdu_json(&event_id)
			.await;
	}

	match pdu_json {
		| Err(_) => return Err!("PDU not found locally."),
		| Ok(json) => {
			let text = serde_json::to_string_pretty(&json)?;
			let msg = if outlier {
				"Outlier (Rejected / Soft Failed) PDU found in our database"
			} else {
				"PDU found in our database"
			};
			write!(self, "{msg}\n```json\n{text}\n```",)
		},
	}
	.await
}
|
|
|
|
|
|
2025-01-04 16:57:07 +00:00
|
|
|
/// Fetch a PDU by its internal (shortroomid, count) database ID and
/// pretty-print its JSON.
#[admin_command]
pub(super) async fn get_short_pdu(&self, shortroomid: ShortRoomId, count: i64) -> Result {
	// Reassemble the raw internal PDU key from its two components.
	let pdu_id: RawPduId = PduId { shortroomid, count: count.into() }.into();

	let pdu_json = self
		.services
		.timeline
		.get_pdu_json_from_id(&pdu_id)
		.await;

	match pdu_json {
		| Err(_) => return Err!("PDU not found locally."),
		| Ok(json) => {
			let json_text = serde_json::to_string_pretty(&json)?;
			write!(self, "```json\n{json_text}\n```")
		},
	}
	.await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result {
|
2025-01-24 07:02:56 +00:00
|
|
|
if !self.services.server.config.allow_federation {
|
2025-04-06 23:41:58 +00:00
|
|
|
return Err!("Federation is disabled on this homeserver.",);
|
2024-04-21 18:26:32 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
if server == self.services.globals.server_name() {
|
2025-04-06 23:41:58 +00:00
|
|
|
return Err!(
|
2024-12-15 00:05:47 -05:00
|
|
|
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
|
|
|
|
fetching local PDUs from the database.",
|
2025-04-06 23:41:58 +00:00
|
|
|
);
|
2024-04-21 18:26:32 -04:00
|
|
|
}
|
|
|
|
|
|
2024-10-13 00:57:08 +00:00
|
|
|
if self.body.len() < 2
|
|
|
|
|
|| !self.body[0].trim().starts_with("```")
|
|
|
|
|
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
|
2024-07-27 00:11:41 +00:00
|
|
|
{
|
2025-04-06 23:41:58 +00:00
|
|
|
return Err!("Expected code block in command body. Add --help for details.",);
|
2024-06-10 02:57:11 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
let list = self
|
|
|
|
|
.body
|
2024-07-24 01:26:23 +00:00
|
|
|
.iter()
|
|
|
|
|
.collect::<Vec<_>>()
|
2024-07-27 00:11:41 +00:00
|
|
|
.drain(1..self.body.len().saturating_sub(1))
|
2024-06-10 02:57:11 -04:00
|
|
|
.filter_map(|pdu| EventId::parse(pdu).ok())
|
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
2024-07-28 17:11:31 -04:00
|
|
|
let mut failed_count: usize = 0;
|
|
|
|
|
let mut success_count: usize = 0;
|
|
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
for event_id in list {
|
2024-06-10 02:57:11 -04:00
|
|
|
if force {
|
2025-04-06 23:41:58 +00:00
|
|
|
match self
|
|
|
|
|
.get_remote_pdu(event_id.to_owned(), server.clone())
|
|
|
|
|
.await
|
|
|
|
|
{
|
2025-02-23 01:17:45 -05:00
|
|
|
| Err(e) => {
|
|
|
|
|
failed_count = failed_count.saturating_add(1);
|
|
|
|
|
self.services
|
|
|
|
|
.admin
|
2025-04-06 23:41:58 +00:00
|
|
|
.send_text(&format!("Failed to get remote PDU, ignoring error: {e}"))
|
|
|
|
|
.await;
|
|
|
|
|
|
2025-02-23 01:17:45 -05:00
|
|
|
warn!("Failed to get remote PDU, ignoring error: {e}");
|
|
|
|
|
},
|
|
|
|
|
| _ => {
|
|
|
|
|
success_count = success_count.saturating_add(1);
|
|
|
|
|
},
|
2024-04-21 16:30:02 -04:00
|
|
|
}
|
2024-06-10 02:57:11 -04:00
|
|
|
} else {
|
2025-04-06 23:41:58 +00:00
|
|
|
self.get_remote_pdu(event_id.to_owned(), server.clone())
|
|
|
|
|
.await?;
|
2024-07-28 17:11:31 -04:00
|
|
|
success_count = success_count.saturating_add(1);
|
2024-04-21 16:30:02 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
let out =
|
|
|
|
|
format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures");
|
|
|
|
|
|
|
|
|
|
self.write_str(&out).await
|
2024-04-21 16:30:02 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Fetch a single PDU from a remote server over federation, parse it, store
/// it as a backfilled event, and print the raw event body.
#[admin_command]
pub(super) async fn get_remote_pdu(
	&self,
	event_id: OwnedEventId,
	server: OwnedServerName,
) -> Result {
	if !self.services.server.config.allow_federation {
		return Err!("Federation is disabled on this homeserver.");
	}

	if server == self.services.globals.server_name() {
		return Err!(
			"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
			 fetching local PDUs.",
		);
	}

	// The whole match evaluates to a write-future which is awaited at the end.
	match self
		.services
		.federation
		.execute(&server, ruma::api::federation::event::get_event::v1::Request {
			event_id: event_id.clone(),
		})
		.await
	{
		| Err(e) =>
			return Err!(
				"Remote server did not have PDU or failed sending request to remote server: {e}"
			),
		| Ok(response) => {
			// Keep a canonical-JSON copy purely for pretty-printing below;
			// the raw response.pdu is what actually gets parsed/backfilled.
			let json: CanonicalJsonObject =
				serde_json::from_str(response.pdu.get()).map_err(|e| {
					warn!(
						"Requested event ID {event_id} from server but failed to convert from \
						 RawValue to CanonicalJsonObject (malformed event/response?): {e}"
					);
					err!(Request(Unknown(
						"Received response from server but failed to parse PDU"
					)))
				})?;

			trace!("Attempting to parse PDU: {:?}", &response.pdu);
			// Only the room ID is needed from the parse result.
			let (room_id, ..) = {
				let parsed_result = self
					.services
					.event_handler
					.parse_incoming_pdu(&response.pdu)
					.boxed()
					.await;

				match parsed_result {
					| Ok(t) => t,
					| Err(e) => {
						warn!("Failed to parse PDU: {e}");
						info!("Full PDU: {:?}", &response.pdu);
						return Err!("Failed to parse PDU remote server {server} sent us: {e}");
					},
				}
			};

			info!("Attempting to handle event ID {event_id} as backfilled PDU");
			self.services
				.timeline
				.backfill_pdu(&room_id, &server, response.pdu)
				.await?;

			let text = serde_json::to_string_pretty(&json)?;
			let msg = "Got PDU from specified server and handled as backfilled";
			write!(self, "{msg}. Event body:\n```json\n{text}\n```")
		},
	}
	.await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Dump the full current state of a room (given by ID or alias) from the
/// local database as pretty-printed JSON.
#[admin_command]
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
	// Resolve an alias to a room ID if necessary.
	let room_id = self.services.alias.maybe_resolve(&room).await?;
	let room_state: Vec<Raw<AnyStateEvent>> = self
		.services
		.state_accessor
		.room_state_full_pdus(&room_id)
		.map_ok(Event::into_format)
		.try_collect()
		.await?;

	if room_state.is_empty() {
		return Err!("Unable to find room state in our database (vector is empty)",);
	}

	let json = serde_json::to_string_pretty(&room_state).map_err(|e| {
		err!(Database(
			"Failed to convert room state events to pretty JSON, possible invalid room state \
			 events in our database {e}",
		))
	})?;

	let out = format!("```json\n{json}\n```");
	self.write_str(&out).await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Send a federation `get_server_version` request to a remote server and
/// report the round-trip time together with the response.
#[admin_command]
pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
	if server == self.services.globals.server_name() {
		return Err!("Not allowed to send federation requests to ourselves.");
	}

	// Time the full federation round trip.
	let timer = tokio::time::Instant::now();

	match self
		.services
		.federation
		.execute(&server, ruma::api::federation::discovery::get_server_version::v1::Request {})
		.await
	{
		| Err(e) => {
			return Err!("Failed sending federation request to specified server:\n\n{e}");
		},
		| Ok(response) => {
			let ping_time = timer.elapsed();
			let json_text_res = serde_json::to_string_pretty(&response.server);

			// Fall back to Debug output if the body doesn't serialize.
			let out = if let Ok(json) = json_text_res {
				format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```")
			} else {
				format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}")
			};

			write!(self, "{out}")
		},
	}
	.await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn force_device_list_updates(&self) -> Result {
|
2024-04-20 19:13:18 -04:00
|
|
|
// Force E2EE device list updates for all users
|
2024-08-08 17:18:30 +00:00
|
|
|
self.services
|
|
|
|
|
.users
|
|
|
|
|
.stream()
|
2025-04-22 04:42:26 +00:00
|
|
|
.for_each(|user_id| {
|
|
|
|
|
self.services
|
|
|
|
|
.users
|
|
|
|
|
.mark_device_key_update(user_id)
|
|
|
|
|
})
|
2024-08-08 17:18:30 +00:00
|
|
|
.await;
|
|
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
write!(self, "Marked all devices for all users as having new keys to update").await
|
2024-04-20 19:13:18 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Change the global tracing log filter at runtime, either to an explicit
/// `filter` directive or (with `reset`) back to the configured value.
#[admin_command]
pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool) -> Result {
	// Only the "console" subscriber layer is reloaded by this command.
	let handles = &["console"];

	if reset {
		// Re-parse the configured log directive; it may have become invalid
		// if config semantics changed since startup.
		let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) {
			| Ok(s) => s,
			| Err(e) => return Err!("Log level from config appears to be invalid now: {e}"),
		};

		match self
			.services
			.server
			.log
			.reload
			.reload(&old_filter_layer, Some(handles))
		{
			| Err(e) =>
				return Err!("Failed to modify and reload the global tracing log level: {e}"),
			| Ok(()) => {
				let value = &self.services.server.config.log;
				let out = format!("Successfully changed log level back to config value {value}");
				return self.write_str(&out).await;
			},
		}
	}

	if let Some(filter) = filter {
		let new_filter_layer = match EnvFilter::try_new(filter) {
			| Ok(s) => s,
			| Err(e) => return Err!("Invalid log level filter specified: {e}"),
		};

		match self
			.services
			.server
			.log
			.reload
			.reload(&new_filter_layer, Some(handles))
		{
			| Ok(()) =>
				return self
					.write_str("Successfully changed log level")
					.await,
			| Err(e) =>
				return Err!("Failed to modify and reload the global tracing log level: {e}"),
		}
	}

	// Neither --reset nor a filter was given.
	Err!("No log level was specified.")
}
|
2024-04-21 15:12:22 -04:00
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn sign_json(&self) -> Result {
|
2024-12-15 00:05:47 -05:00
|
|
|
if self.body.len() < 2
|
|
|
|
|
|| !self.body[0].trim().starts_with("```")
|
|
|
|
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
2024-07-27 00:11:41 +00:00
|
|
|
{
|
2025-04-06 23:41:58 +00:00
|
|
|
return Err!("Expected code block in command body. Add --help for details.");
|
2024-06-10 02:57:11 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
2024-06-10 02:57:11 -04:00
|
|
|
match serde_json::from_str(&string) {
|
2025-04-06 23:41:58 +00:00
|
|
|
| Err(e) => return Err!("Invalid json: {e}"),
|
2024-12-15 00:05:47 -05:00
|
|
|
| Ok(mut value) => {
|
2025-04-06 23:41:58 +00:00
|
|
|
self.services.server_keys.sign_json(&mut value)?;
|
|
|
|
|
let json_text = serde_json::to_string_pretty(&value)?;
|
|
|
|
|
write!(self, "{json_text}")
|
2024-06-10 02:57:11 -04:00
|
|
|
},
|
2024-04-21 15:12:22 -04:00
|
|
|
}
|
2025-04-06 23:41:58 +00:00
|
|
|
.await
|
2024-04-21 15:12:22 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn verify_json(&self) -> Result {
|
2024-12-15 00:05:47 -05:00
|
|
|
if self.body.len() < 2
|
|
|
|
|
|| !self.body[0].trim().starts_with("```")
|
|
|
|
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
2024-07-27 00:11:41 +00:00
|
|
|
{
|
2025-04-06 23:41:58 +00:00
|
|
|
return Err!("Expected code block in command body. Add --help for details.");
|
2024-06-10 02:57:11 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
2024-10-11 18:57:59 +00:00
|
|
|
match serde_json::from_str::<CanonicalJsonObject>(&string) {
|
2025-04-06 23:41:58 +00:00
|
|
|
| Err(e) => return Err!("Invalid json: {e}"),
|
2025-04-22 04:42:26 +00:00
|
|
|
| Ok(value) => match self
|
|
|
|
|
.services
|
|
|
|
|
.server_keys
|
|
|
|
|
.verify_json(&value, None)
|
|
|
|
|
.await
|
|
|
|
|
{
|
2025-04-06 23:41:58 +00:00
|
|
|
| Err(e) => return Err!("Signature verification failed: {e}"),
|
|
|
|
|
| Ok(()) => write!(self, "Signature correct"),
|
2024-06-10 02:57:11 -04:00
|
|
|
},
|
2024-04-21 15:12:22 -04:00
|
|
|
}
|
2025-04-06 23:41:58 +00:00
|
|
|
.await
|
2024-04-21 15:12:22 -04:00
|
|
|
}
|
2024-04-26 23:36:23 -04:00
|
|
|
|
2024-10-11 18:57:59 +00:00
|
|
|
/// Verify the signatures and content hash of a locally-stored PDU.
#[admin_command]
pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
	use ruma::signatures::Verified;

	let mut event = self
		.services
		.timeline
		.get_pdu_json(&event_id)
		.await?;

	// `event_id` is not part of the signed PDU content; strip it before
	// verification.
	event.remove("event_id");
	let msg = match self
		.services
		.server_keys
		.verify_event(&event, None)
		.await
	{
		| Err(e) => return Err(e),
		// Signatures valid but the content hash mismatched (event redacted).
		| Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
		| Ok(Verified::All) => "signatures and hashes OK.",
	};

	self.write_str(msg).await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Print (Debug-formatted) the first PDU stored for a room we participate in.
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
	// Only rooms this server is actually in can be inspected.
	if !self
		.services
		.state_cache
		.server_in_room(&self.services.server.name, &room_id)
		.await
	{
		return Err!("We are not participating in the room / we don't know about the room ID.",);
	}

	let first_pdu = self
		.services
		.timeline
		.first_pdu_in_room(&room_id)
		.await
		.map_err(|_| err!(Database("Failed to find the first PDU in database")))?;

	let out = format!("{first_pdu:?}");
	self.write_str(&out).await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Print (Debug-formatted) the latest PDU stored for a room we participate in.
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
	// Only rooms this server is actually in can be inspected.
	if !self
		.services
		.state_cache
		.server_in_room(&self.services.server.name, &room_id)
		.await
	{
		return Err!("We are not participating in the room / we don't know about the room ID.");
	}

	let latest_pdu = self
		.services
		.timeline
		.latest_pdu_in_room(&room_id)
		.await
		.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;

	let out = format!("{latest_pdu:?}");
	self.write_str(&out).await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
/// Replace our stored state for a room with the state a remote server
/// reports at the room's latest local event: fetch `/state`, validate and
/// store all state and auth-chain PDUs as outliers, run state resolution,
/// then force the resolved state set.
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn force_set_room_state_from_server(
	&self,
	room_id: OwnedRoomId,
	server_name: OwnedServerName,
) -> Result {
	// Only rooms this server is actually in can be repaired.
	if !self
		.services
		.state_cache
		.server_in_room(&self.services.server.name, &room_id)
		.await
	{
		return Err!("We are not participating in the room / we don't know about the room ID.");
	}

	// NOTE(review): despite the name, this is the *latest* local PDU — the
	// remote state is requested at this event.
	let first_pdu = self
		.services
		.timeline
		.latest_pdu_in_room(&room_id)
		.await
		.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;

	let room_version = self
		.services
		.state
		.get_room_version(&room_id)
		.await?;

	// shortstatekey -> event_id map fed to state resolution below.
	let mut state: HashMap<u64, OwnedEventId> = HashMap::new();

	let remote_state_response = self
		.services
		.federation
		.execute(&server_name, get_room_state::v1::Request {
			room_id: room_id.clone(),
			event_id: first_pdu.event_id().to_owned(),
		})
		.await?;

	// First pass: parse each returned PDU just to surface/skip malformed
	// ones (the parse results themselves are discarded here).
	for pdu in remote_state_response.pdus.clone() {
		match self
			.services
			.event_handler
			.parse_incoming_pdu(&pdu)
			.await
		{
			| Ok(t) => t,
			| Err(e) => {
				warn!("Could not parse PDU, ignoring: {e}");
				continue;
			},
		};
	}

	info!("Going through room_state response PDUs");
	// Second pass: signature-validate each state PDU, store it as an
	// outlier, and record state events in the resolution map.
	for result in remote_state_response.pdus.iter().map(|pdu| {
		self.services
			.server_keys
			.validate_and_add_event_id(pdu, &room_version)
	}) {
		let Ok((event_id, mut value)) = result.await else {
			continue;
		};

		let invalid_pdu_err = |e| {
			debug_error!("Invalid PDU in fetching remote room state PDUs response: {value:#?}");
			err!(BadServerResponse(debug_error!("Invalid PDU in send_join response: {e:?}")))
		};

		// m.room.create events need the room ID supplied separately when
		// constructing the PduEvent.
		let pdu = if value["type"] == "m.room.create" {
			PduEvent::from_rid_val(&room_id, &event_id, value.clone()).map_err(invalid_pdu_err)?
		} else {
			PduEvent::from_id_val(&event_id, value.clone()).map_err(invalid_pdu_err)?
		};

		// Backfill a room_id field if the PDU JSON lacks one before storage.
		if !value.contains_key("room_id") {
			let room_id = CanonicalJsonValue::String(room_id.as_str().into());
			value.insert("room_id".into(), room_id);
		}

		self.services
			.timeline
			.add_pdu_outlier(&event_id, &value);

		// Only state events (those with a state_key) enter the state map.
		if let Some(state_key) = &pdu.state_key {
			let shortstatekey = self
				.services
				.short
				.get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)
				.await;

			state.insert(shortstatekey, pdu.event_id.clone());
		}
	}

	info!("Going through auth_chain response");
	// Validate and store the auth-chain events as outliers as well.
	for result in remote_state_response
		.auth_chain
		.iter()
		.map(|pdu| {
			self.services
				.server_keys
				.validate_and_add_event_id(pdu, &room_version)
		}) {
		let Ok((event_id, value)) = result.await else {
			continue;
		};

		self.services
			.timeline
			.add_pdu_outlier(&event_id, &value);
	}

	// Resolve the collected state set against the room version's rules.
	let new_room_state = self
		.services
		.event_handler
		.resolve_state(&room_id, &room_version, state)
		.await?;

	info!("Forcing new room state");
	let HashSetCompressStateEvent {
		shortstatehash: short_state_hash,
		added,
		removed,
	} = self
		.services
		.state_compressor
		.save_state(room_id.clone().as_ref(), new_room_state)
		.await?;

	// Hold the room state mutex while forcing the new state hash.
	let state_lock = self.services.state.mutex.lock(&*room_id).await;

	self.services
		.state
		.force_state(room_id.clone().as_ref(), short_state_hash, added, removed, &state_lock)
		.await?;

	info!(
		"Updating joined counts for room just in case (e.g. we may have found a difference in \
		 the room's m.room.member state"
	);
	self.services
		.state_cache
		.update_joined_count(&room_id)
		.await;

	self.write_str("Successfully forced the room state from the requested remote server.")
		.await
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2024-06-17 21:46:23 +00:00
|
|
|
pub(super) async fn get_signing_keys(
|
2024-12-15 00:05:47 -05:00
|
|
|
&self,
|
2025-04-08 04:39:01 +00:00
|
|
|
server_name: Option<OwnedServerName>,
|
|
|
|
|
notary: Option<OwnedServerName>,
|
2024-12-15 00:05:47 -05:00
|
|
|
query: bool,
|
2025-04-06 23:41:58 +00:00
|
|
|
) -> Result {
|
|
|
|
|
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());
|
2024-10-11 18:57:59 +00:00
|
|
|
|
|
|
|
|
if let Some(notary) = notary {
|
|
|
|
|
let signing_keys = self
|
|
|
|
|
.services
|
|
|
|
|
.server_keys
|
|
|
|
|
.notary_request(¬ary, &server_name)
|
|
|
|
|
.await?;
|
|
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
let out = format!("```rs\n{signing_keys:#?}\n```");
|
|
|
|
|
return self.write_str(&out).await;
|
2024-10-11 18:57:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let signing_keys = if query {
|
|
|
|
|
self.services
|
|
|
|
|
.server_keys
|
|
|
|
|
.server_request(&server_name)
|
|
|
|
|
.await?
|
|
|
|
|
} else {
|
|
|
|
|
self.services
|
|
|
|
|
.server_keys
|
|
|
|
|
.signing_keys_for(&server_name)
|
|
|
|
|
.await?
|
|
|
|
|
};
|
2024-06-17 21:46:23 +00:00
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
let out = format!("```rs\n{signing_keys:#?}\n```");
|
|
|
|
|
self.write_str(&out).await
|
2024-06-17 21:46:23 +00:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn get_verify_keys(&self, server_name: Option<OwnedServerName>) -> Result {
|
|
|
|
|
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());
|
2024-06-17 21:46:23 +00:00
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
let keys = self
|
|
|
|
|
.services
|
2024-07-31 21:05:15 +00:00
|
|
|
.server_keys
|
2024-10-11 18:57:59 +00:00
|
|
|
.verify_keys_for(&server_name)
|
|
|
|
|
.await;
|
2024-06-17 21:46:23 +00:00
|
|
|
|
2024-10-11 18:57:59 +00:00
|
|
|
let mut out = String::new();
|
2024-06-17 21:46:23 +00:00
|
|
|
writeln!(out, "| Key ID | Public Key |")?;
|
|
|
|
|
writeln!(out, "| --- | --- |")?;
|
|
|
|
|
for (key_id, key) in keys {
|
2024-10-11 18:57:59 +00:00
|
|
|
writeln!(out, "| {key_id} | {key:?} |")?;
|
2024-06-17 21:46:23 +00:00
|
|
|
}
|
|
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
self.write_str(&out).await
|
2024-06-17 21:46:23 +00:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2024-06-16 22:26:52 +00:00
|
|
|
pub(super) async fn resolve_true_destination(
|
2024-12-15 00:05:47 -05:00
|
|
|
&self,
|
2025-04-08 04:39:01 +00:00
|
|
|
server_name: OwnedServerName,
|
2024-12-15 00:05:47 -05:00
|
|
|
no_cache: bool,
|
2025-04-06 23:41:58 +00:00
|
|
|
) -> Result {
|
2025-01-24 07:02:56 +00:00
|
|
|
if !self.services.server.config.allow_federation {
|
2025-04-06 23:41:58 +00:00
|
|
|
return Err!("Federation is disabled on this homeserver.",);
|
2024-04-26 23:36:23 -04:00
|
|
|
}
|
|
|
|
|
|
2025-01-25 23:41:39 +00:00
|
|
|
if server_name == self.services.server.name {
|
2025-04-06 23:41:58 +00:00
|
|
|
return Err!(
|
2024-12-15 00:05:47 -05:00
|
|
|
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
|
|
|
|
fetching local PDUs.",
|
2025-04-06 23:41:58 +00:00
|
|
|
);
|
2024-04-26 23:36:23 -04:00
|
|
|
}
|
|
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
let actual = self
|
|
|
|
|
.services
|
2024-07-16 23:38:48 +00:00
|
|
|
.resolver
|
|
|
|
|
.resolve_actual_dest(&server_name, !no_cache)
|
|
|
|
|
.await?;
|
2024-04-26 23:36:23 -04:00
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host);
|
|
|
|
|
self.write_str(&msg).await
|
2024-04-26 23:36:23 -04:00
|
|
|
}
|
2024-04-27 04:50:20 -07:00
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result {
	// Universe of recognized single-character stat options.
	const OPTS: &str = "abcdefghijklmnopqrstuvwxyz";

	// Build the COMPLEMENT of the user's selection: keep every known
	// option character the user did NOT request ("*" requests all, so
	// the complement is empty). NOTE(review): this inversion presumably
	// matches allocator stats semantics where each character in the
	// option string *omits* a section (as in jemalloc's
	// malloc_stats_print) — confirm against tuwunel_core::alloc.
	let opts: String = OPTS
		.chars()
		.filter(|&c| {
			let allow_any = opts.as_ref().is_some_and(|opts| opts == "*");

			let allow = allow_any || opts.as_ref().is_some_and(|opts| opts.contains(c));

			!allow
		})
		.collect();

	// Empty output (e.g. allocator without stats support) degrades to "".
	let stats = tuwunel_core::alloc::memory_stats(&opts).unwrap_or_default();

	self.write_str("```\n").await?;
	self.write_str(&stats).await?;
	self.write_str("\n```").await?;
	Ok(())
}
|
2024-06-25 05:05:02 +00:00
|
|
|
|
|
|
|
|
#[cfg(tokio_unstable)]
#[admin_command]
pub(super) async fn runtime_metrics(&self) -> Result {
	// Metrics are optional; report their absence rather than failing.
	let out = match self.services.server.metrics.runtime_metrics() {
		| Some(metrics) => format!(
			"```rs\nnum_workers: {}\nnum_alive_tasks: {}\nglobal_queue_depth: {}\n```",
			metrics.num_workers(),
			metrics.num_alive_tasks(),
			metrics.global_queue_depth()
		),
		| None => "Runtime metrics are not available.".to_owned(),
	};

	self.write_str(&out).await
}
|
|
|
|
|
|
|
|
|
|
#[cfg(not(tokio_unstable))]
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn runtime_metrics(&self) -> Result {
|
|
|
|
|
self.write_str("Runtime metrics require building with `tokio_unstable`.")
|
|
|
|
|
.await
|
2024-06-25 05:05:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[cfg(tokio_unstable)]
#[admin_command]
pub(super) async fn runtime_interval(&self) -> Result {
	// Interval metrics are optional; report their absence rather than failing.
	let out = match self.services.server.metrics.runtime_interval() {
		| Some(metrics) => format!("```rs\n{metrics:#?}\n```"),
		| None => "Runtime metrics are not available.".to_owned(),
	};

	self.write_str(&out).await
}
|
|
|
|
|
|
|
|
|
|
#[cfg(not(tokio_unstable))]
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn runtime_interval(&self) -> Result {
|
|
|
|
|
self.write_str("Runtime metrics require building with `tokio_unstable`.")
|
|
|
|
|
.await
|
2024-06-25 05:05:02 +00:00
|
|
|
}
|
2024-07-24 00:15:03 +00:00
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn time(&self) -> Result {
|
2024-07-24 00:15:03 +00:00
|
|
|
let now = SystemTime::now();
|
2025-04-06 23:41:58 +00:00
|
|
|
let now = utils::time::format(now, "%+");
|
|
|
|
|
|
|
|
|
|
self.write_str(&now).await
|
2024-07-24 00:15:03 +00:00
|
|
|
}
|
2024-07-25 21:29:37 +00:00
|
|
|
|
2024-07-27 00:11:41 +00:00
|
|
|
#[admin_command]
|
2025-04-06 23:41:58 +00:00
|
|
|
pub(super) async fn list_dependencies(&self, names: bool) -> Result {
|
2024-07-25 21:29:37 +00:00
|
|
|
if names {
|
|
|
|
|
let out = info::cargo::dependencies_names().join(" ");
|
2025-04-06 23:41:58 +00:00
|
|
|
return self.write_str(&out).await;
|
2024-07-25 21:29:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let mut out = String::new();
|
2025-04-06 23:41:58 +00:00
|
|
|
let deps = info::cargo::dependencies();
|
2024-07-25 21:29:37 +00:00
|
|
|
writeln!(out, "| name | version | features |")?;
|
|
|
|
|
writeln!(out, "| ---- | ------- | -------- |")?;
|
|
|
|
|
for (name, dep) in deps {
|
|
|
|
|
let version = dep.try_req().unwrap_or("*");
|
|
|
|
|
let feats = dep.req_features();
|
|
|
|
|
let feats = if !feats.is_empty() {
|
|
|
|
|
feats.join(" ")
|
|
|
|
|
} else {
|
|
|
|
|
String::new()
|
|
|
|
|
};
|
2025-04-06 23:41:58 +00:00
|
|
|
|
2025-01-04 16:57:07 +00:00
|
|
|
writeln!(out, "| {name} | {version} | {feats} |")?;
|
2024-07-25 21:29:37 +00:00
|
|
|
}
|
|
|
|
|
|
2025-04-06 23:41:58 +00:00
|
|
|
self.write_str(&out).await
|
2024-07-25 21:29:37 +00:00
|
|
|
}
|
2024-08-02 01:40:41 +00:00
|
|
|
|
|
|
|
|
#[admin_command]
pub(super) async fn database_stats(
	&self,
	property: Option<String>,
	map: Option<String>,
) -> Result {
	// Empty map name selects every column family.
	let map_name = map.as_ref().map_or(EMPTY, String::as_str);
	// Default to the aggregate rocksdb stats property.
	let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
	self.services
		.db
		.iter()
		// Restrict to the requested column family, if one was named.
		.filter(|&(&name, _)| map_name.is_empty() || map_name == name)
		.try_stream()
		.try_for_each(|(&name, map)| {
			// NOTE(review): an unrecognized property name panics the
			// admin command here instead of reporting an error to the
			// caller — confirm whether propagating the failure through
			// the stream would be preferable.
			let res = map.property(&property).expect("invalid property");
			writeln!(self, "##### {name}:\n```\n{}\n```", res.trim())
		})
		.await
}
|
2024-08-02 01:40:41 +00:00
|
|
|
|
2025-01-29 21:10:33 +00:00
|
|
|
#[admin_command]
pub(super) async fn database_files(&self, map: Option<String>, level: Option<i32>) -> Result {
	// Snapshot the engine's file listing; collect() surfaces the first
	// error encountered while enumerating.
	let mut files: Vec<_> = self
		.services
		.db
		.engine
		.file_list()
		.collect::<Result<_>>()?;

	// Sort by file name for stable, readable output.
	files.sort_by_key(|f| f.name.clone());

	writeln!(self, "| lev | sst | keys | dels | size | column |").await?;
	writeln!(self, "| ---: | :--- | ---: | ---: | ---: | :--- |").await?;
	files
		.into_iter()
		// Optional filter: only the named column family.
		.filter(|file| {
			map.as_deref()
				.is_none_or(|map| map == file.column_family_name)
		})
		// Optional filter: only the requested level.
		.filter(|file| {
			level
				.as_ref()
				.is_none_or(|&level| level == file.level)
		})
		.try_stream()
		.try_for_each(|file| {
			// One markdown table row per file; widths keep columns aligned.
			writeln!(
				self,
				"| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |",
				file.level,
				file.name,
				file.num_entries,
				file.num_deletions,
				file.size,
				file.column_family_name,
			)
		})
		.await
}
|
2025-01-16 08:58:40 +00:00
|
|
|
|
|
|
|
|
#[admin_command]
pub(super) async fn trim_memory(&self) -> Result {
	// Ask the allocator to return unused memory to the OS; `None`
	// requests the default (unbounded) trim target.
	tuwunel_core::alloc::trim(None)?;

	writeln!(self, "done").await
}
|
2025-06-18 09:29:06 +00:00
|
|
|
|
|
|
|
|
#[admin_command]
|
|
|
|
|
pub(super) async fn create_jwt(
|
|
|
|
|
&self,
|
|
|
|
|
user: String,
|
|
|
|
|
exp_from_now: Option<u64>,
|
|
|
|
|
nbf_from_now: Option<u64>,
|
|
|
|
|
issuer: Option<String>,
|
|
|
|
|
audience: Option<String>,
|
|
|
|
|
) -> Result {
|
|
|
|
|
use jwt::{Algorithm, EncodingKey, Header, encode};
|
|
|
|
|
|
|
|
|
|
#[derive(Serialize)]
|
|
|
|
|
struct Claim {
|
|
|
|
|
sub: String,
|
2025-09-09 03:03:53 +00:00
|
|
|
iss: Option<String>,
|
|
|
|
|
aud: Option<String>,
|
|
|
|
|
exp: Option<usize>,
|
|
|
|
|
nbf: Option<usize>,
|
2025-06-18 09:29:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let config = &self.services.config.jwt;
|
|
|
|
|
if config.format.as_str() != "HMAC" {
|
|
|
|
|
return Err!("This command only supports HMAC key format, not {}.", config.format);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let key = EncodingKey::from_secret(config.key.as_ref());
|
|
|
|
|
let alg = Algorithm::from_str(config.algorithm.as_str()).map_err(|e| {
|
|
|
|
|
err!(Config("jwt.algorithm", "JWT algorithm is not recognized or configured {e}"))
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let header = Header { alg, ..Default::default() };
|
|
|
|
|
let claim = Claim {
|
|
|
|
|
sub: user,
|
|
|
|
|
|
2025-09-09 03:03:53 +00:00
|
|
|
iss: issuer,
|
2025-06-18 09:29:06 +00:00
|
|
|
|
2025-09-09 03:03:53 +00:00
|
|
|
aud: audience,
|
2025-06-18 09:29:06 +00:00
|
|
|
|
|
|
|
|
exp: exp_from_now
|
|
|
|
|
.and_then(|val| now_secs().checked_add(val))
|
|
|
|
|
.map(TryInto::try_into)
|
2025-09-09 03:03:53 +00:00
|
|
|
.and_then(Result::ok),
|
2025-06-18 09:29:06 +00:00
|
|
|
|
|
|
|
|
nbf: nbf_from_now
|
|
|
|
|
.and_then(|val| now_secs().checked_add(val))
|
|
|
|
|
.map(TryInto::try_into)
|
2025-09-09 03:03:53 +00:00
|
|
|
.and_then(Result::ok),
|
2025-06-18 09:29:06 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
encode(&header, &claim, &key)
|
|
|
|
|
.map_err(|e| err!("Failed to encode JWT: {e}"))
|
|
|
|
|
.map(async |token| self.write_str(&token).await)?
|
|
|
|
|
.await
|
|
|
|
|
}
|
2025-07-23 04:09:25 +00:00
|
|
|
|
|
|
|
|
#[admin_command]
|
|
|
|
|
pub(super) async fn resync_database(&self) -> Result {
|
|
|
|
|
if !self.services.db.is_secondary() {
|
|
|
|
|
return Err!("Not a secondary instance.");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
self.services
|
|
|
|
|
.db
|
2025-09-09 18:12:38 +05:00
|
|
|
.engine
|
2025-07-23 04:09:25 +00:00
|
|
|
.update()
|
|
|
|
|
.map_err(|e| err!("Failed to update from primary: {e:?}"))
|
|
|
|
|
}
|