Set rustfmt `chain_width` to 50

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk
2025-04-22 04:42:26 +00:00
parent 9b658d86b2
commit 76509830e6
190 changed files with 3469 additions and 930 deletions

View File

@@ -369,7 +369,11 @@ where
fn key(name: &str) -> Result<Key> {
// tikv asserts the output buffer length is tight to the number of required mibs
// so we slice that down here.
let segs = name.chars().filter(is_equal_to!(&'.')).count().try_add(1)?;
let segs = name
.chars()
.filter(is_equal_to!(&'.'))
.count()
.try_add(1)?;
let name = self::name(name)?;
let mut buf = [0_usize; KEY_SEGS];

View File

@@ -150,15 +150,18 @@ pub fn check(config: &Config) -> Result {
}
// check if we can read the token file path, and check if the file is empty
if config.registration_token_file.as_ref().is_some_and(|path| {
let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| {
error!("Failed to read the registration token file: {e}");
}) else {
return true;
};
if config
.registration_token_file
.as_ref()
.is_some_and(|path| {
let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| {
error!("Failed to read the registration token file: {e}");
}) else {
return true;
};
token == String::new()
}) {
token == String::new()
}) {
return Err!(Config(
"registration_token_file",
"Registration token file was specified but is empty or failed to be read"

View File

@@ -2218,8 +2218,12 @@ fn default_admin_room_tag() -> String { "m.server_notice".to_owned() }
fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
fn parallelism_scaled_u32(val: u32) -> u32 {
let val = val.try_into().expect("failed to cast u32 to usize");
parallelism_scaled(val).try_into().unwrap_or(u32::MAX)
let val = val
.try_into()
.expect("failed to cast u32 to usize");
parallelism_scaled(val)
.try_into()
.unwrap_or(u32::MAX)
}
fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }

View File

@@ -46,7 +46,10 @@ impl ProxyConfig {
| Self::Global { url } => Some(Proxy::all(url)?),
| Self::ByDomain(proxies) => Some(Proxy::custom(move |url| {
// first matching proxy
proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned()
proxies
.iter()
.find_map(|proxy| proxy.for_url(url))
.cloned()
})),
})
}

View File

@@ -100,7 +100,9 @@ pub fn trap() {
#[must_use]
pub fn panic_str(p: &Box<dyn Any + Send>) -> &'static str {
p.downcast_ref::<&str>().copied().unwrap_or_default()
p.downcast_ref::<&str>()
.copied()
.unwrap_or_default()
}
#[inline(always)]

View File

@@ -184,7 +184,9 @@ impl Error {
| Self::Request(kind, _, code) => response::status_code(kind, *code),
| Self::BadRequest(kind, ..) => response::bad_request_code(kind),
| Self::FeatureDisabled(..) => response::bad_request_code(&self.kind()),
| Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
| Self::Reqwest(error) => error
.status()
.unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
| Self::Conflict(_) => StatusCode::CONFLICT,
| Self::Io(error) => response::io_error_code(error.kind()),
| _ => StatusCode::INTERNAL_SERVER_ERROR,

View File

@@ -20,7 +20,11 @@ impl axum::response::IntoResponse for Error {
.inspect_err(|e| error!("error response error: {e}"))
.map_or_else(
|_| StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|r| r.map(BytesMut::freeze).map(Full::new).into_response(),
|r| {
r.map(BytesMut::freeze)
.map(Full::new)
.into_response()
},
)
}
}

View File

@@ -42,7 +42,10 @@ static DEPENDENCIES: OnceLock<DepsSet> = OnceLock::new();
#[must_use]
pub fn dependencies_names() -> Vec<&'static str> {
dependencies().keys().map(String::as_str).collect()
dependencies()
.keys()
.map(String::as_str)
.collect()
}
pub fn dependencies() -> &'static DepsSet {

View File

@@ -25,7 +25,8 @@ type RoomVersion = (RoomVersionId, RoomVersionStability);
impl crate::Server {
#[inline]
pub fn supported_room_version(&self, version: &RoomVersionId) -> bool {
self.supported_room_versions().any(is_equal_to!(*version))
self.supported_room_versions()
.any(is_equal_to!(*version))
}
#[inline]

View File

@@ -20,10 +20,19 @@ impl Data<'_> {
pub fn level(&self) -> Level { *self.event.metadata().level() }
#[must_use]
pub fn mod_name(&self) -> &str { self.event.metadata().module_path().unwrap_or_default() }
pub fn mod_name(&self) -> &str {
self.event
.metadata()
.module_path()
.unwrap_or_default()
}
#[must_use]
pub fn span_name(&self) -> &str { self.current.metadata().map_or(EMPTY, |s| s.name()) }
pub fn span_name(&self) -> &str {
self.current
.metadata()
.map_or(EMPTY, |s| s.name())
}
#[must_use]
pub fn message(&self) -> &str {

View File

@@ -103,8 +103,10 @@ where
writer: Writer<'_>,
event: &Event<'_>,
) -> Result<(), std::fmt::Error> {
let is_debug =
cfg!(debug_assertions) && event.fields().any(|field| field.name() == "_debug");
let is_debug = cfg!(debug_assertions)
&& event
.fields()
.any(|field| field.name() == "_debug");
match *event.metadata().level() {
| Level::ERROR if !is_debug => self.pretty.format_event(ctx, writer, event),

View File

@@ -56,7 +56,9 @@ impl LogLevelReloadHandles {
.iter()
.filter(|(name, _)| names.is_some_and(|names| names.contains(&name.as_str())))
.for_each(|(_, handle)| {
_ = handle.reload(new_value.clone()).or_else(error::else_log);
_ = handle
.reload(new_value.clone())
.or_else(error::else_log);
});
Ok(())

View File

@@ -58,7 +58,11 @@ fn matches_sender(&self, filter: &RoomEventFilter) -> bool {
#[implement(super::Pdu)]
fn matches_type(&self, filter: &RoomEventFilter) -> bool {
let event_type = &self.kind.to_cow_str();
if filter.not_types.iter().any(is_equal_to!(event_type)) {
if filter
.not_types
.iter()
.any(is_equal_to!(event_type))
{
return false;
}

View File

@@ -97,7 +97,11 @@ impl From<Id> for RawId {
match id.shorteventid {
| Count::Normal(shorteventid) => {
vec.extend(shorteventid.to_be_bytes());
Self::Normal(vec.as_ref().try_into().expect("RawVec into RawId::Normal"))
Self::Normal(
vec.as_ref()
.try_into()
.expect("RawVec into RawId::Normal"),
)
},
| Count::Backfilled(shorteventid) => {
vec.extend(0_u64.to_be_bytes());

View File

@@ -102,7 +102,8 @@ where
#[implement(Pdu)]
#[must_use]
pub fn get_unsigned_as_value(&self) -> JsonValue {
self.get_unsigned::<JsonValue>().unwrap_or_default()
self.get_unsigned::<JsonValue>()
.unwrap_or_default()
}
#[implement(Pdu)]

View File

@@ -104,7 +104,10 @@ pub fn auth_types_for_event(
}
if membership == MembershipState::Invite {
if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) {
if let Some(Ok(t_id)) = content
.third_party_invite
.map(|t| t.deserialize())
{
let key =
(StateEventType::RoomThirdPartyInvite, t_id.signed.token.into());
if !auth_types.contains(&key) {
@@ -564,9 +567,12 @@ fn valid_membership_change(
}
let power_levels_event_id = power_levels_event.as_ref().map(Event::event_id);
let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id);
let target_user_membership_event_id =
target_user_membership_event.as_ref().map(Event::event_id);
let sender_membership_event_id = sender_membership_event
.as_ref()
.map(Event::event_id);
let target_user_membership_event_id = target_user_membership_event
.as_ref()
.map(Event::event_id);
let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth {
// Is the authorised user allowed to invite users into this room
@@ -725,7 +731,9 @@ fn valid_membership_change(
allow
} else if !sender_is_joined
|| target_user_current_membership == MembershipState::Ban
&& sender_power.filter(|&p| p < &power_levels.ban).is_some()
&& sender_power
.filter(|&p| p < &power_levels.ban)
.is_some()
{
warn!(
?target_user_membership_event_id,
@@ -734,8 +742,9 @@ fn valid_membership_change(
);
false
} else {
let allow = sender_power.filter(|&p| p >= &power_levels.kick).is_some()
&& target_power < sender_power;
let allow = sender_power
.filter(|&p| p >= &power_levels.kick)
.is_some() && target_power < sender_power;
if !allow {
warn!(
?target_user_membership_event_id,
@@ -750,8 +759,9 @@ fn valid_membership_change(
warn!(?sender_membership_event_id, "Can't ban user if sender is not joined");
false
} else {
let allow = sender_power.filter(|&p| p >= &power_levels.ban).is_some()
&& target_power < sender_power;
let allow = sender_power
.filter(|&p| p >= &power_levels.ban)
.is_some() && target_power < sender_power;
if !allow {
warn!(
?target_user_membership_event_id,
@@ -829,7 +839,9 @@ fn can_send_event(event: impl Event, ple: Option<impl Event>, user_level: Int) -
return false;
}
if event.state_key().is_some_and(|k| k.starts_with('@'))
if event
.state_key()
.is_some_and(|k| k.starts_with('@'))
&& event.state_key() != Some(event.sender().as_str())
{
return false; // permission required to post in this room
@@ -1040,13 +1052,17 @@ fn get_send_level(
.and_then(|ple| {
from_json_str::<RoomPowerLevelsEventContent>(ple.content().get())
.map(|content| {
content.events.get(e_type).copied().unwrap_or_else(|| {
if state_key.is_some() {
content.state_default
} else {
content.events_default
}
})
content
.events
.get(e_type)
.copied()
.unwrap_or_else(|| {
if state_key.is_some() {
content.state_default
} else {
content.events_default
}
})
})
.ok()
})
@@ -1135,13 +1151,21 @@ mod tests {
#[test]
fn test_ban_pass() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = INITIAL_EVENTS();
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.clone(),
)
})
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@@ -1180,13 +1204,21 @@ mod tests {
#[test]
fn test_join_non_creator() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = INITIAL_EVENTS_CREATE_ROOM();
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.clone(),
)
})
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@@ -1225,13 +1257,21 @@ mod tests {
#[test]
fn test_join_creator() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = INITIAL_EVENTS_CREATE_ROOM();
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.clone(),
)
})
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@@ -1270,13 +1310,21 @@ mod tests {
#[test]
fn test_ban_fail() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = INITIAL_EVENTS();
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.clone(),
)
})
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@@ -1315,7 +1363,9 @@ mod tests {
#[test]
fn test_restricted_join_rule() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let mut events = INITIAL_EVENTS();
*events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event(
@@ -1338,7 +1388,13 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.clone(),
)
})
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@@ -1395,7 +1451,9 @@ mod tests {
#[test]
fn test_knock() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let mut events = INITIAL_EVENTS();
*events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event(
@@ -1410,7 +1468,13 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.clone(),
)
})
.collect::<StateMap<_>>();
let requester = to_pdu_event(

View File

@@ -429,7 +429,10 @@ where
reverse_graph.entry(node).or_default();
for edge in edges {
reverse_graph.entry(edge).or_default().insert(node);
reverse_graph
.entry(edge)
.or_default()
.insert(node);
}
}
@@ -710,7 +713,9 @@ where
.iter()
.stream()
.broad_filter_map(async |ev_id| {
fetch_event(ev_id.clone()).await.map(|event| (event, ev_id))
fetch_event(ev_id.clone())
.await
.map(|event| (event, ev_id))
})
.broad_filter_map(|(event, ev_id)| {
get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event)
@@ -782,7 +787,11 @@ async fn add_event_and_auth_chain_to_graph<E, F, Fut>(
while let Some(eid) = state.pop() {
graph.entry(eid.clone()).or_default();
let event = fetch_event(eid.clone()).await;
let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten();
let auth_events = event
.as_ref()
.map(Event::auth_events)
.into_iter()
.flatten();
// Prefer the store to event as the store filters dedups the events
for aid in auth_events {
@@ -792,7 +801,10 @@ async fn add_event_and_auth_chain_to_graph<E, F, Fut>(
}
// We just inserted this at the start of the while loop
graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned());
graph
.get_mut(eid.borrow())
.unwrap()
.insert(aid.to_owned());
}
}
}
@@ -890,13 +902,21 @@ mod tests {
use futures::future::ready;
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = INITIAL_EVENTS();
let event_map = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.clone(),
)
})
.collect::<StateMap<_>>();
let auth_chain: HashSet<OwnedEventId> = HashSet::new();
@@ -965,7 +985,9 @@ mod tests {
#[tokio::test]
async fn ban_vs_power_level() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = &[
@@ -1015,7 +1037,9 @@ mod tests {
#[tokio::test]
async fn topic_basic() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = &[
@@ -1080,7 +1104,9 @@ mod tests {
#[tokio::test]
async fn topic_reset() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = &[
@@ -1130,7 +1156,9 @@ mod tests {
#[tokio::test]
async fn join_rule_evasion() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = &[
@@ -1163,7 +1191,9 @@ mod tests {
#[tokio::test]
async fn offtopic_power_level() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = &[
@@ -1199,7 +1229,10 @@ mod tests {
.map(|list| list.into_iter().map(event_id).collect::<Vec<_>>())
.collect::<Vec<_>>();
let expected_state_ids = vec!["PC"].into_iter().map(event_id).collect::<Vec<_>>();
let expected_state_ids = vec!["PC"]
.into_iter()
.map(event_id)
.collect::<Vec<_>>();
do_check(events, edges, expected_state_ids).await;
}
@@ -1207,7 +1240,9 @@ mod tests {
#[tokio::test]
async fn topic_setting() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let events = &[
@@ -1289,7 +1324,9 @@ mod tests {
use futures::future::ready;
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let mut store = TestStore::<PduEvent>(hashmap! {});
@@ -1332,7 +1369,9 @@ mod tests {
#[tokio::test]
async fn test_lexicographical_sort() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let graph = hashmap! {
@@ -1361,7 +1400,9 @@ mod tests {
#[tokio::test]
async fn ban_with_auth_chains() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let ban = BAN_STATE_SET();
@@ -1383,7 +1424,9 @@ mod tests {
use futures::future::ready;
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),
tracing_subscriber::fmt()
.with_test_writer()
.finish(),
);
let init = INITIAL_EVENTS();
let ban = BAN_STATE_SET();
@@ -1402,7 +1445,13 @@ mod tests {
inner.get(&event_id("PA")).unwrap(),
]
.iter()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.event_id.clone(),
)
})
.collect::<StateMap<_>>();
let state_set_b = [
@@ -1415,7 +1464,13 @@ mod tests {
inner.get(&event_id("PA")).unwrap(),
]
.iter()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone()))
.map(|ev| {
(
ev.event_type()
.with_state_key(ev.state_key().unwrap()),
ev.event_id.clone(),
)
})
.collect::<StateMap<_>>();
let ev_map = &store.0;
@@ -1479,7 +1534,10 @@ mod tests {
.map(|list| list.into_iter().map(event_id).collect::<Vec<_>>())
.collect::<Vec<_>>();
let expected_state_ids = vec!["JR"].into_iter().map(event_id).collect::<Vec<_>>();
let expected_state_ids = vec!["JR"]
.into_iter()
.map(event_id)
.collect::<Vec<_>>();
do_check(&join_rule.values().cloned().collect::<Vec<_>>(), edges, expected_state_ids)
.await;

View File

@@ -206,12 +206,16 @@ pub(crate) async fn do_check(
)
});
let key = ev.event_type().with_state_key(ev.state_key().unwrap());
let key = ev
.event_type()
.with_state_key(ev.state_key().unwrap());
expected_state.insert(key, node);
}
let start_state = state_at_event.get(event_id!("$START:foo")).unwrap();
let start_state = state_at_event
.get(event_id!("$START:foo"))
.unwrap();
let end_state = state_at_event
.get(event_id!("$END:foo"))
@@ -340,21 +344,33 @@ impl TestStore<PduEvent> {
let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem]
.iter()
.map(|e| {
(e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned())
(
e.event_type()
.with_state_key(e.state_key().unwrap()),
e.event_id().to_owned(),
)
})
.collect::<StateMap<_>>();
let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem]
.iter()
.map(|e| {
(e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned())
(
e.event_type()
.with_state_key(e.state_key().unwrap()),
e.event_id().to_owned(),
)
})
.collect::<StateMap<_>>();
let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem]
.iter()
.map(|e| {
(e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned())
(
e.event_type()
.with_state_key(e.state_key().unwrap()),
e.event_id().to_owned(),
)
})
.collect::<StateMap<_>>();

View File

@@ -31,7 +31,9 @@ impl Metrics {
let runtime_monitor = runtime.as_ref().map(RuntimeMonitor::new);
#[cfg(tokio_unstable)]
let runtime_intervals = runtime_monitor.as_ref().map(RuntimeMonitor::intervals);
let runtime_intervals = runtime_monitor
.as_ref()
.map(RuntimeMonitor::intervals);
Self {
_runtime: runtime.clone(),

View File

@@ -27,7 +27,10 @@ pub fn to_name(path: &OsStr) -> Result<String> {
.expect("path file stem")
.to_str()
.expect("name string");
let name = name.strip_prefix("lib").unwrap_or(name).to_owned();
let name = name
.strip_prefix("lib")
.unwrap_or(name)
.to_owned();
Ok(name)
}

View File

@@ -21,7 +21,9 @@ pub fn from_str(str: &str) -> Result<usize> {
#[inline]
#[must_use]
pub fn pretty(bytes: usize) -> String {
let bytes: u64 = bytes.try_into().expect("failed to convert usize to u64");
let bytes: u64 = bytes
.try_into()
.expect("failed to convert usize to u64");
ByteSize::b(bytes).display().iec().to_string()
}

View File

@@ -73,7 +73,9 @@ where
Ok(Guard::<Key, Val> {
map: Arc::clone(&self.map),
val: val.try_lock_owned().map_err(|_| err!("would yield"))?,
val: val
.try_lock_owned()
.map_err(|_| err!("would yield"))?,
})
}
@@ -97,7 +99,9 @@ where
Ok(Guard::<Key, Val> {
map: Arc::clone(&self.map),
val: val.try_lock_owned().map_err(|_| err!("would yield"))?,
val: val
.try_lock_owned()
.map_err(|_| err!("would yield"))?,
})
}

View File

@@ -12,6 +12,9 @@ impl<T> UnwrapInfallible<T> for Result<T, Infallible> {
fn unwrap_infallible(self) -> T {
// SAFETY: Branchless unwrap for errors that can never happen. In debug
// mode this is asserted.
unsafe { self.debug_inspect_err(error::infallible).unwrap_unchecked() }
unsafe {
self.debug_inspect_err(error::infallible)
.unwrap_unchecked()
}
}
}

View File

@@ -68,7 +68,11 @@ where
a.map(move |ai| (ai, b.clone()))
.filter_map(|(ai, b)| async move {
let mut lock = b.lock().await;
while let Some(bi) = Pin::new(&mut *lock).next_if(|bi| *bi <= ai).await.as_ref() {
while let Some(bi) = Pin::new(&mut *lock)
.next_if(|bi| *bi <= ai)
.await
.as_ref()
{
if ai == *bi {
return Some(ai);
}

View File

@@ -65,7 +65,11 @@ where
let h = h.into().unwrap_or_else(runtime::Handle::current);
self.broadn_and_then(n, move |val| {
let (h, f) = (h.clone(), f.clone());
async move { h.spawn_blocking(move || f(val)).map_err(E::from).await? }
async move {
h.spawn_blocking(move || f(val))
.map_err(E::from)
.await?
}
})
}
}

View File

@@ -27,7 +27,8 @@ impl<'a> Unquote<'a> for &'a str {
#[inline]
fn unquote(&self) -> Option<&'a str> {
self.strip_prefix(QUOTE).and_then(|s| s.strip_suffix(QUOTE))
self.strip_prefix(QUOTE)
.and_then(|s| s.strip_suffix(QUOTE))
}
#[inline]

View File

@@ -51,6 +51,8 @@ pub unsafe fn current_exe() -> Result<PathBuf> {
/// accurate on all platforms; defaults to false.
#[must_use]
pub fn current_exe_deleted() -> bool {
std::env::current_exe()
.is_ok_and(|exe| exe.to_str().is_some_and(|exe| exe.ends_with(" (deleted)")))
std::env::current_exe().is_ok_and(|exe| {
exe.to_str()
.is_some_and(|exe| exe.ends_with(" (deleted)"))
})
}

View File

@@ -70,7 +70,9 @@ pub fn get_affinity() -> impl Iterator<Item = Id> { from_mask(CORE_AFFINITY.get(
/// List the cores sharing SMT-tier resources
pub fn smt_siblings() -> impl Iterator<Item = Id> {
from_mask(get_affinity().fold(0_u128, |mask, id| {
mask | SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus")
mask | SMT_TOPOLOGY
.get(id)
.expect("ID must not exceed max cpus")
}))
}
@@ -78,20 +80,30 @@ pub fn smt_siblings() -> impl Iterator<Item = Id> {
/// affinity.
pub fn node_siblings() -> impl Iterator<Item = Id> {
from_mask(get_affinity().fold(0_u128, |mask, id| {
mask | NODE_TOPOLOGY.get(id).expect("Id must not exceed max cpus")
mask | NODE_TOPOLOGY
.get(id)
.expect("Id must not exceed max cpus")
}))
}
/// Get the cores sharing SMT resources relative to id.
#[inline]
pub fn smt_affinity(id: Id) -> impl Iterator<Item = Id> {
from_mask(*SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus"))
from_mask(
*SMT_TOPOLOGY
.get(id)
.expect("ID must not exceed max cpus"),
)
}
/// Get the cores sharing Node resources relative to id.
#[inline]
pub fn node_affinity(id: Id) -> impl Iterator<Item = Id> {
from_mask(*NODE_TOPOLOGY.get(id).expect("ID must not exceed max cpus"))
from_mask(
*NODE_TOPOLOGY
.get(id)
.expect("ID must not exceed max cpus"),
)
}
/// Get the number of threads which could execute in parallel based on hardware

View File

@@ -42,7 +42,9 @@ pub struct Queue {
/// Get device characteristics useful for random access throughput by name.
#[must_use]
pub fn parallelism(path: &Path) -> Parallelism {
let dev_id = dev_from_path(path).log_debug_err().unwrap_or_default();
let dev_id = dev_from_path(path)
.log_debug_err()
.unwrap_or_default();
let mq_path = block_path(dev_id).join("mq/");
@@ -60,7 +62,12 @@ pub fn parallelism(path: &Path) -> Parallelism {
.into_iter()
.flat_map(IntoIterator::into_iter)
.filter_map(Result::ok)
.filter(|entry| entry.file_type().as_ref().is_ok_and(FileType::is_dir))
.filter(|entry| {
entry
.file_type()
.as_ref()
.is_ok_and(FileType::is_dir)
})
.map(|dir| queue_parallelism(&dir.path()))
.collect(),
}