diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs
index 63a9b7a5..6ba1d9fa 100644
--- a/src/admin/room/commands.rs
+++ b/src/admin/room/commands.rs
@@ -72,7 +72,12 @@ pub(super) async fn delete_room(&self, room_id: OwnedRoomId) -> Result {
 		return Err!("Cannot delete admin room");
 	}
 
-	self.services.delete.delete_room(room_id).await?;
+	let state_lock = self.services.state.mutex.lock(&room_id).await;
+
+	self.services
+		.delete
+		.delete_room(&room_id, state_lock)
+		.await?;
 
 	self.write_str("Successfully deleted the room from our database.")
 		.await?;
diff --git a/src/api/client/membership/leave.rs b/src/api/client/membership/leave.rs
index cef09fac..d5e72bea 100644
--- a/src/api/client/membership/leave.rs
+++ b/src/api/client/membership/leave.rs
@@ -14,17 +14,21 @@ pub(crate) async fn leave_room_route(
 	State(services): State<crate::State>,
 	body: Ruma<leave_room::v3::Request>,
 ) -> Result<leave_room::v3::Response> {
-	let room_id = &body.room_id;
-
-	let state_lock = services.state.mutex.lock(room_id).await;
+	let state_lock = services.state.mutex.lock(&body.room_id).await;
 
 	services
 		.membership
-		.leave(body.sender_user(), room_id, body.reason.clone(), &state_lock)
+		.leave(body.sender_user(), &body.room_id, body.reason.clone(), &state_lock)
 		.boxed()
 		.await?;
 
-	drop(state_lock);
+	if services.config.delete_rooms_after_leave {
+		services
+			.delete
+			.delete_if_empty_local(&body.room_id, state_lock)
+			.boxed()
+			.await;
+	}
 
 	Ok(leave_room::v3::Response {})
 }
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 8d8b86ce..564f641f 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1929,6 +1929,19 @@ pub struct Config {
 	#[serde(default)]
 	pub hydra_backports: bool,
 
+	/// Delete rooms when the last user from this server leaves. This feature
+	/// is experimental and, in the interest of least surprise, is not enabled
+	/// by default, but it can be enabled by deployments interested in
+	/// conserving space. It may default to true in a future release.
+	///
+	/// Note that not all pathways which can remove the last local user
+	/// currently invoke this operation, so in some cases you may find that
+	/// the room still exists.
+	///
+	/// default: false
+	#[serde(default)]
+	pub delete_rooms_after_leave: bool,
+
 	// external structure; separate section
 	#[serde(default)]
 	pub blurhashing: BlurhashConfig,
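A note on the new flag: with `#[serde(default)]`, a config file that omits `delete_rooms_after_leave` deserializes it as `false`, so existing deployments are unaffected until they opt in. A minimal standalone sketch of that mechanic, using a toy struct and the `toml` crate rather than tuwunel's real `Config`:

```rust
// Toy illustration of the `#[serde(default)]` mechanic used for the new
// flag: an absent `delete_rooms_after_leave` key deserializes as `false`.
// Illustrative struct only, not tuwunel's real `Config`.
use serde::Deserialize;

#[derive(Deserialize)]
struct Config {
	#[serde(default)]
	delete_rooms_after_leave: bool,
}

fn main() {
	// Key absent: the field falls back to bool::default(), i.e. false.
	let absent: Config = toml::from_str("").unwrap();
	assert!(!absent.delete_rooms_after_leave);

	// Key present: deployments opt in explicitly.
	let enabled: Config = toml::from_str("delete_rooms_after_leave = true").unwrap();
	assert!(enabled.delete_rooms_after_leave);
}
```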
diff --git a/src/service/rooms/delete/mod.rs b/src/service/rooms/delete/mod.rs
index 3a951f71..80067ca8 100644
--- a/src/service/rooms/delete/mod.rs
+++ b/src/service/rooms/delete/mod.rs
@@ -1,8 +1,17 @@
-use std::sync::Arc;
+use std::{sync::Arc, time::Duration};
 
-use futures::StreamExt;
-use ruma::OwnedRoomId;
-use tuwunel_core::{Result, debug, result::LogErr, utils::ReadyExt, warn};
+use futures::{FutureExt, StreamExt, pin_mut};
+use ruma::RoomId;
+use tokio::time::sleep;
+use tuwunel_core::{
+	Result, debug,
+	result::LogErr,
+	trace,
+	utils::{ReadyExt, future::BoolExt},
+	warn,
+};
+
+use crate::rooms::timeline::RoomMutexGuard;
 
 pub struct Service {
 	services: Arc<Services>,
@@ -17,17 +26,63 @@ impl crate::Service for Service {
 }
 
 impl Service {
-	pub async fn delete_room(&self, room_id: OwnedRoomId) -> Result {
+	pub async fn delete_if_empty_local(&self, room_id: &RoomId, state_lock: RoomMutexGuard) {
+		debug_assert!(
+			self.services.config.delete_rooms_after_leave,
+			"Caller must check that delete_rooms_after_leave is configured."
+		);
+
+		let has_local_users = self
+			.services
+			.state_cache
+			.local_users_in_room(room_id)
+			.into_future()
+			.map(|(next, ..)| next.as_ref().is_some());
+
+		let has_local_invites = self
+			.services
+			.state_cache
+			.local_users_invited_to_room(room_id)
+			.into_future()
+			.map(|(next, ..)| next.as_ref().is_some());
+
+		pin_mut!(has_local_users, has_local_invites);
+		if has_local_users.or(has_local_invites).await {
+			trace!(?room_id, "Not deleting with local joined or invited");
+			return;
+		}
+
+		debug!(?room_id, "Preparing to delete room...");
+
+		// An arbitrary delay is needed so the leave event can be synced to the
+		// client, otherwise they will never see their own leave. This can be
+		// removed once a tombstone solution is implemented instead.
+		sleep(Duration::from_millis(2500)).await;
+
+		self.services
+			.delete
+			.delete_room(room_id, state_lock)
+			.boxed()
+			.await
+			.expect("unhandled error during room deletion");
+	}
+
+	pub async fn delete_room(&self, room_id: &RoomId, state_lock: RoomMutexGuard) -> Result {
 		// ban the room locally so new users cannot join while we're in the process of
 		// deleting it
-		debug!("Banning room {}", &room_id);
-		self.services.metadata.ban_room(&room_id);
+		debug!("Banning room {room_id} prior to deletion.");
+		self.services.metadata.ban_room(room_id);
+
+		// The lock must be dropped here to prevent a deadlock: this function
+		// re-acquires the same room mutex for its final stages below. The goal
+		// should be to hold it all the way through; for now the room is at least
+		// banned while under the lock.
+		drop(state_lock);
 
 		debug!("Making all users leave the room {room_id} and forgetting it");
 		let mut users = self
 			.services
 			.state_cache
-			.room_members(&room_id)
+			.room_members(room_id)
 			.ready_filter(|user| self.services.globals.user_is_local(user))
 			.boxed();
@@ -40,24 +95,22 @@
 			if let Err(e) = self
 				.services
 				.membership
-				.remote_leave(user_id, &room_id)
+				.remote_leave(user_id, room_id)
 				.await
 			{
 				warn!("Failed to leave room: {e}");
 			}
 
-			self.services
-				.state_cache
-				.forget(&room_id, user_id);
+			self.services.state_cache.forget(room_id, user_id);
 		}
 
-		debug!("Disabling incoming federation on room {}", &room_id);
-		self.services.metadata.disable_room(&room_id);
+		debug!("Disabling incoming federation on room {room_id}");
+		self.services.metadata.disable_room(room_id);
 
 		debug!("Deleting all our room aliases for the room");
 		self.services
 			.alias
-			.local_aliases_for_room(&room_id)
+			.local_aliases_for_room(room_id)
 			.for_each(async |local_alias| {
 				self.services
 					.alias
@@ -69,12 +122,12 @@
 			.await;
 
 		debug!("Removing/unpublishing room from our room directory");
-		self.services.directory.set_not_public(&room_id);
+		self.services.directory.set_not_public(room_id);
 
 		debug!("Deleting room's threads from database");
 		self.services
 			.threads
-			.delete_all_rooms_threads(&room_id)
+			.delete_all_rooms_threads(room_id)
 			.await
 			.log_err()
 			.ok();
@@ -82,7 +135,7 @@
 		debug!("Deleting all the room's search token IDs from our database");
 		self.services
 			.search
-			.delete_all_search_tokenids_for_room(&room_id)
+			.delete_all_search_tokenids_for_room(room_id)
 			.await
 			.log_err()
 			.ok();
@@ -90,7 +143,7 @@
 		debug!("Deleting all room's forward extremities from our database");
 		self.services
 			.state
-			.delete_all_rooms_forward_extremities(&room_id)
+			.delete_all_rooms_forward_extremities(room_id)
 			.await
 			.log_err()
 			.ok();
@@ -98,7 +151,7 @@
 		debug!("Deleting all the room's event (PDU) references");
 		self.services
 			.pdu_metadata
-			.delete_all_referenced_for_room(&room_id)
+			.delete_all_referenced_for_room(room_id)
 			.await
 			.log_err()
 			.ok();
@@ -106,7 +159,7 @@
 		debug!("Deleting all the room's member counts");
 		self.services
 			.state_cache
-			.delete_room_join_counts(&room_id)
+			.delete_room_join_counts(room_id)
 			.await
 			.log_err()
 			.ok();
@@ -114,7 +167,7 @@
 		debug!("Deleting all the room's private read receipts");
 		self.services
 			.read_receipt
-			.delete_all_read_receipts(&room_id)
+			.delete_all_read_receipts(room_id)
 			.await
 			.log_err()
 			.ok();
@@ -122,12 +175,12 @@
 		debug!("Final stages of deleting the room");
 
 		debug!("Obtaining a mutex state lock for safety and future database operations");
-		let state_lock = self.services.state.mutex.lock(&room_id).await;
+		let state_lock = self.services.state.mutex.lock(room_id).await;
 
 		debug!("Deleting room state hash from our database");
 		self.services
 			.state
-			.delete_room_shortstatehash(&room_id, &state_lock)
+			.delete_room_shortstatehash(room_id, &state_lock)
 			.await
 			.log_err()
 			.ok();
@@ -135,7 +188,7 @@
 		debug!("Deleting PDUs");
 		self.services
 			.timeline
-			.delete_pdus(&room_id)
+			.delete_pdus(room_id)
 			.await
 			.log_err()
 			.ok();
@@ -143,18 +196,18 @@
 		debug!("Deleting internal room ID from our database");
 		self.services
 			.short
-			.delete_shortroomid(&room_id)
+			.delete_shortroomid(room_id)
 			.await
 			.log_err()
 			.ok();
 
 		// TODO: add option to keep a room banned (`--block` or `--ban`)
-		self.services.metadata.enable_room(&room_id);
-		self.services.metadata.unban_room(&room_id);
+		self.services.metadata.enable_room(room_id);
+		self.services.metadata.unban_room(room_id);
 
 		drop(state_lock);
 
-		debug!("Successfully deleted room {} from our database", &room_id);
+		debug!("Successfully deleted room {room_id} from our database");
 		Ok(())
 	}
 }
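The emptiness check in `delete_if_empty_local` leans on a stream idiom worth spelling out: `into_future()` on a stream resolves to `(first_item, rest_of_stream)`, so mapping the tuple to `first_item.is_some()` answers whether the query returns anything at all without draining it. A standalone sketch with toy streams standing in for the `state_cache` queries; `tuwunel_core`'s `BoolExt::or` combinator is approximated here with a plain `||` chain:

```rust
// Toy version of the emptiness check in `delete_if_empty_local`. The toy
// streams stand in for `local_users_in_room` / `local_users_invited_to_room`.
use futures::{FutureExt, StreamExt, pin_mut, stream};

#[tokio::main]
async fn main() {
	let local_joined = stream::iter(Vec::<&str>::new()); // nobody joined
	let local_invited = stream::iter(vec!["@bob:example.org"]); // one invite

	// Polling `into_future()` yields only the first item; `is_some()` on it
	// tests non-emptiness without consuming the rest of the stream.
	let has_joined = local_joined
		.into_future()
		.map(|(next, ..)| next.is_some());
	let has_invited = local_invited
		.into_future()
		.map(|(next, ..)| next.is_some());

	pin_mut!(has_joined, has_invited);
	// Stand-in for tuwunel_core's `BoolExt::or` over the two futures.
	let occupied = has_joined.await || has_invited.await;
	assert!(occupied, "an invited local user still counts as occupancy");
}
```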
diff --git a/tuwunel-example.toml b/tuwunel-example.toml
index b5ecb7bd..639411fc 100644
--- a/tuwunel-example.toml
+++ b/tuwunel-example.toml
@@ -1657,6 +1657,17 @@
 #
 #hydra_backports = false
 
+# Delete rooms when the last user from this server leaves. This feature is
+# experimental and, in the interest of least surprise, is not enabled by
+# default, but it can be enabled by deployments interested in conserving
+# space. It may default to true in a future release.
+#
+# Note that not all pathways which can remove the last local user
+# currently invoke this operation, so in some cases you may find that the
+# room still exists.
+#
+#delete_rooms_after_leave = false
+
 #[global.tls]
 
 # Path to a valid TLS certificate file.
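Throughout this diff, `state.mutex.lock(room_id)` hands out a per-room guard (`RoomMutexGuard`) that serializes state changes for one room without blocking others, which is why `delete_room` must drop the caller's guard before re-acquiring the mutex for its final stages. A simplified standalone sketch of that keyed-mutex shape; the real type lives in `tuwunel_core` and differs in detail (entry cleanup, key and guard types):

```rust
// Simplified sketch of a per-room async mutex map, the shape behind
// `services.state.mutex.lock(room_id)`. Illustrative only; tuwunel's real
// implementation differs in detail.
use std::{collections::HashMap, sync::Arc};

use tokio::sync::{Mutex, OwnedMutexGuard};

#[derive(Default)]
struct RoomMutexMap {
	map: Mutex<HashMap<String, Arc<Mutex<()>>>>,
}

impl RoomMutexMap {
	/// Lock the mutex for one room; other rooms stay unaffected.
	async fn lock(&self, room_id: &str) -> OwnedMutexGuard<()> {
		let mutex = self
			.map
			.lock()
			.await
			.entry(room_id.to_owned())
			.or_default()
			.clone();

		mutex.lock_owned().await
	}
}

#[tokio::main]
async fn main() {
	let mutexes = RoomMutexMap::default();
	let guard = mutexes.lock("!room:example.org").await;
	// State changes for this room are serialized while `guard` is held;
	// a second `lock` on the same key would deadlock, hence the early
	// `drop(state_lock)` in `delete_room`.
	drop(guard);
}
```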