Replace hardcoded pool worker limit with configurable default.
Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
@@ -111,6 +111,7 @@ ENTRYPOINT tuwunel -Oserver_name=\""$SERVER_NAME\""
|
||||
|
||||
|
||||
FROM input AS complement-testee-valgrind
|
||||
ARG db_pool_max_workers=128
|
||||
|
||||
EXPOSE 8008 8448
|
||||
RUN mkdir /database
|
||||
@@ -122,7 +123,8 @@ ENTRYPOINT valgrind \
|
||||
--exit-on-first-error=yes \
|
||||
--error-exitcode=1 \
|
||||
tuwunel \
|
||||
-Oserver_name=\""$SERVER_NAME\""
|
||||
-Oserver_name=\""$SERVER_NAME\"" \
|
||||
-Odb_pool_max_workers=${db_pool_max_workers}
|
||||
|
||||
|
||||
FROM input AS complement-base
|
||||
|
||||
@@ -28,6 +28,7 @@ EOF
|
||||
FROM input AS smoke-valgrind
|
||||
ARG leak_check="no"
|
||||
ARG undef_value_errors="no"
|
||||
ARG db_pool_max_workers=128
|
||||
COPY --link --from=input . .
|
||||
ENV TUWUNEL_LOG="info"
|
||||
ENV TUWUNEL_DATABASE_PATH="/tmp/smoketest.db"
|
||||
@@ -42,7 +43,8 @@ RUN <<EOF
|
||||
tuwunel \
|
||||
-Otest='["smoke"]' \
|
||||
-Oserver_name=\"localhost\" \
|
||||
-Odatabase_path=\"${TUWUNEL_DATABASE_PATH}\"
|
||||
-Odatabase_path=\"${TUWUNEL_DATABASE_PATH}\" \
|
||||
-Odb_pool_max_workers=${db_pool_max_workers}
|
||||
|
||||
rm -rf "${TUWUNEL_DATABASE_PATH}"
|
||||
EOF
|
||||
|
||||
@@ -2003,6 +2003,20 @@ pub struct Config {
|
||||
#[serde(default = "default_db_pool_workers_limit")]
|
||||
pub db_pool_workers_limit: usize,
|
||||
|
||||
/// Limits the total number of workers across all worker groups. When the
|
||||
/// sum of all groups exceeds this value the worker counts are reduced until
|
||||
/// this constraint is satisfied.
|
||||
///
|
||||
/// By default this value is only effective on larger systems (e.g. 16+
|
||||
/// cores) where it will temper the overall thread-count. The thread-pool
|
||||
/// model will never achieve hardware capacity but this value can be raised
|
||||
/// on huge systems if the scheduling overhead is determined to not
|
||||
/// bottleneck and the worker groups are divided too small.
|
||||
///
|
||||
/// default: 2048
|
||||
#[serde(default = "default_db_pool_max_workers")]
|
||||
pub db_pool_max_workers: usize,
|
||||
|
||||
/// Determines the size of the queues feeding the database's frontend-pool.
|
||||
/// The size of the queue is determined by multiplying this value with the
|
||||
/// number of pool workers. When this queue is full, tokio tasks conducting
|
||||
@@ -3149,6 +3163,8 @@ fn default_db_pool_workers() -> usize {
|
||||
|
||||
/// Serde default for `Config::db_pool_workers_limit`: per-group worker cap.
fn default_db_pool_workers_limit() -> usize {
    64
}
|
||||
|
||||
/// Serde default for `Config::db_pool_max_workers`: total worker cap across
/// all worker groups.
fn default_db_pool_max_workers() -> usize {
    2048
}
|
||||
|
||||
/// Serde default for `Config::db_pool_queue_mult`: queue size multiplier
/// applied per pool worker.
fn default_db_pool_queue_mult() -> usize {
    4
}
|
||||
|
||||
/// Serde default for the stream width configuration value.
fn default_stream_width_default() -> usize {
    32
}
|
||||
|
||||
@@ -65,8 +65,8 @@ pub(crate) type BatchQuery<'a> = SmallVec<[KeyBuf; BATCH_INLINE]>;
|
||||
pub(crate) type BatchResult<'a> = SmallVec<[ResultHandle<'a>; BATCH_INLINE]>;
|
||||
pub(crate) type ResultHandle<'a> = Result<Handle<'a>>;
|
||||
|
||||
const WORKER_LIMIT: (usize, usize) = (1, 1024);
|
||||
const QUEUE_LIMIT: (usize, usize) = (1, 4096);
|
||||
const WORKER_LIMIT: (usize, usize) = (1, 4096);
|
||||
const QUEUE_LIMIT: (usize, usize) = (1, 1024);
|
||||
const BATCH_INLINE: usize = 1;
|
||||
|
||||
const WORKER_STACK_SIZE: usize = 1_048_576;
|
||||
|
||||
@@ -124,6 +124,7 @@ pub(super) fn configure(server: &Arc<Server>) -> (Vec<usize>, Vec<usize>, Vec<us
|
||||
.filter_map(|mq| mq.nr_tags)
|
||||
.chain(default_worker_count.into_iter())
|
||||
.fold(0_usize, usize::saturating_add)
|
||||
.min(config.db_pool_max_workers)
|
||||
.clamp(WORKER_LIMIT.0, WORKER_LIMIT.1);
|
||||
|
||||
// Temper the total number of workers by reducing the count for each group.
|
||||
|
||||
@@ -1725,6 +1725,18 @@
|
||||
#
|
||||
#db_pool_workers_limit = 64
|
||||
|
||||
# Limits the total number of workers across all worker groups. When the
|
||||
# sum of all groups exceeds this value the worker counts are reduced until
|
||||
# this constraint is satisfied.
|
||||
#
|
||||
# By default this value is only effective on larger systems (e.g. 16+
|
||||
# cores) where it will temper the overall thread-count. The thread-pool
|
||||
# model will never achieve hardware capacity but this value can be raised
|
||||
# on huge systems if the scheduling overhead is determined to not
|
||||
# bottleneck and the worker groups are divided too small.
|
||||
#
|
||||
#db_pool_max_workers = 2048
|
||||
|
||||
# Determines the size of the queues feeding the database's frontend-pool.
|
||||
# The size of the queue is determined by multiplying this value with the
|
||||
# number of pool workers. When this queue is full, tokio tasks conducting
|
||||
|
||||
Reference in New Issue
Block a user