From 9e09162817431be2ca64cdcc56be8b817f234c0f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 26 Feb 2026 00:39:50 +0000 Subject: [PATCH] Mitigate issues from low RLIMIT_NPROC defaults. (fixes #337) Signed-off-by: Jason Volk --- src/core/utils/sys/limits.rs | 22 ++++++++++++++++++++++ src/database/pool/configure.rs | 12 ++++++++++-- src/main/runtime.rs | 14 ++++++++++---- src/main/server.rs | 5 +++-- 4 files changed, 45 insertions(+), 8 deletions(-) diff --git a/src/core/utils/sys/limits.rs b/src/core/utils/sys/limits.rs index 83470133..aa34a143 100644 --- a/src/core/utils/sys/limits.rs +++ b/src/core/utils/sys/limits.rs @@ -28,6 +28,28 @@ pub fn maximize_fd_limit() -> Result { #[cfg(not(unix))] pub fn maximize_fd_limit() -> Result { Ok(()) } +#[cfg(unix)] +/// Some distributions ship with very low defaults for thread counts; similar to +/// low default file descriptor limits. But unlike fd's, the thread limit is +/// rarely reached; still, on large systems (32+ cores) the commonly observed +/// defaults of ~1024 are problematic.
+pub fn maximize_thread_limit() -> Result { + use nix::sys::resource::setrlimit; + + let (soft_limit, hard_limit) = max_threads()?; + if soft_limit < hard_limit { + let new_limit = hard_limit.try_into()?; + setrlimit(Resource::RLIMIT_NPROC, new_limit, new_limit)?; + assert_eq!((hard_limit, hard_limit), max_threads()?, "getrlimit != setrlimit"); + debug!(to = hard_limit, from = soft_limit, "Raised RLIMIT_NPROC"); + } + + Ok(()) +} + +#[cfg(not(unix))] +pub fn maximize_thread_limit() -> Result { Ok(()) } + #[cfg(unix)] pub fn max_file_descriptors() -> Result<(usize, usize)> { getrlimit(Resource::RLIMIT_NOFILE) diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index fb7b0d4e..4bc61910 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,7 +1,7 @@ use std::{path::PathBuf, sync::Arc}; use tuwunel_core::{ - Server, debug, + Server, at, debug, debug::INFO_SPAN_LEVEL, debug_info, debug_warn, expected, info, is_equal_to, utils::{ @@ -12,7 +12,7 @@ use tuwunel_core::{ stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, sys::{ compute::{available_parallelism, cores_available, is_core_available}, - storage, + max_threads, storage, }, }, }; @@ -116,6 +116,13 @@ pub(super) fn configure(server: &Arc) -> (Vec, Vec, Vec) -> (Vec, Vec, Vec) -> Result { .set(args.gc_muzzy) .expect("set GC_MUZZY from program argument"); + let max_blocking_threads = max_threads() + .expect("obtained RLIMIT_NPROC or default") + .0 + .saturating_div(3) + .clamp(WORKER_MIN, MAX_BLOCKING_THREADS); + let mut builder = Builder::new_multi_thread(); builder .enable_io() .enable_time() .thread_name(WORKER_NAME) .worker_threads(args.worker_threads.max(WORKER_MIN)) - .max_blocking_threads(MAX_BLOCKING_THREADS) + .max_blocking_threads(max_blocking_threads) .thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE)) .global_queue_interval(args.global_event_interval) .event_interval(args.kernel_event_interval) @@ -110,7 +116,7 @@ pub fn shutdown(server: &Arc, 
runtime: Runtime) -> Result { tuwunel_core::event!(LEVEL, ?runtime_metrics, "Final runtime metrics."); } - if let Ok(resource_usage) = usage() { + if let Ok(resource_usage) = tuwunel_core::utils::sys::usage() { tuwunel_core::event!(LEVEL, ?resource_usage, "Final resource usage."); } @@ -208,7 +214,7 @@ fn set_worker_mallctl(_: usize) {} )] fn thread_stop() { if cfg!(any(tokio_unstable, not(feature = "release_max_log_level"))) - && let Ok(resource_usage) = thread_usage() + && let Ok(resource_usage) = tuwunel_core::utils::sys::thread_usage() { tuwunel_core::debug!(?resource_usage, "Thread resource usage."); } diff --git a/src/main/server.rs b/src/main/server.rs index e4833664..c707ac04 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -51,8 +51,9 @@ pub fn new(args: Option<&Args>, runtime: Option<&runtime::Handle>) -> Result