From 885e93936b671ea1da96ec828ae76e81776a8fdf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rapha=C3=ABl=20Vinot?=
Date: Thu, 17 Oct 2024 14:00:26 +0200
Subject: [PATCH] chg: Bump kvrocks config file to 2.10

---
 full_index/kvrocks.conf | 86 +++++++++++++++++++++++++++++++++++------
 1 file changed, 75 insertions(+), 11 deletions(-)

diff --git a/full_index/kvrocks.conf b/full_index/kvrocks.conf
index 207a3312..8407ec23 100644
--- a/full_index/kvrocks.conf
+++ b/full_index/kvrocks.conf
@@ -30,7 +30,7 @@ timeout 0
 workers 8

 # By default, kvrocks does not run as a daemon. Use 'yes' if you need it.
-# Note that kvrocks will write a PID file in /var/run/kvrocks.pid when daemonized
+# It will create a PID file when daemonize is enabled, and its path is specified by pidfile.
 daemonize yes

 # Kvrocks implements the cluster solution that is similar to the Redis cluster solution.
@@ -61,6 +61,12 @@ cluster-enabled no
 # Default: no
 repl-namespace-enabled no

+# By default, the max length of bulk string is limited to 512MB. If you want to
+# change this limit to a different value(must >= 1MiB), you can use the following configuration.
+# It can be just an integer (e.g. 10000000), or an integer followed by a unit (e.g. 12M, 7G, 2T).
+#
+# proto-max-bulk-len 536870912
+
 # Persist the cluster nodes topology in local file($dir/nodes.conf). This configuration
 # takes effect only if the cluster mode was enabled.
 #
@@ -308,7 +314,7 @@ max-bitmap-to-string-mb 16
 # Whether to enable SCAN-like cursor compatible with Redis.
 # If enabled, the cursor will be unsigned 64-bit integers.
 # If disabled, the cursor will be a string.
-# Default: no
+# Default: yes
 redis-cursor-compatible yes

 # Whether to enable the RESP3 protocol.
@@ -329,6 +335,20 @@ json-max-nesting-depth 1024
 # Default: json
 json-storage-format json

+# Whether to enable transactional mode engine::Context.
+#
+# If enabled, is_txn_mode in engine::Context will be set properly,
+# which is expected to improve the consistency of commands.
+# If disabled, is_txn_mode in engine::Context will be set to false,
+# making engine::Context equivalent to engine::Storage.
+#
+# NOTE: This is an experimental feature. If you find errors, performance degradation,
+# excessive memory usage, excessive disk I/O, etc. after enabling it, please try disabling it.
+# At the same time, we welcome feedback on related issues to help iterative improvements.
+#
+# Default: no
+txn-context-enabled no
+
 ################################## TLS ###################################

 # By default, TLS/SSL is disabled, i.e. `tls-port` is set to 0.
@@ -492,15 +512,23 @@ profiling-sample-record-threshold-ms 100
 ################################## CRON ###################################

 # Compact Scheduler, auto compact at schedule time
-# time expression format is the same as crontab(currently only support * and int)
-# e.g. compact-cron 0 3 * * * 0 4 * * *
+# Time expression format is the same as crontab (supported cron syntax: *, n, */n, `1,3-6,9,11`)
+# e.g. compact-cron 0 3,4 * * *
 # would compact the db at 3am and 4am everyday
 # compact-cron 0 3 * * *

 # The hour range that compaction checker would be active
 # e.g. compaction-checker-range 0-7 means compaction checker would be worker between
 # 0-7am every day.
-compaction-checker-range 0-7
+# WARNING: this config option is deprecated and will be removed,
+# please use compaction-checker-cron instead
+# compaction-checker-range 0-7
+
+# The time pattern that compaction checker would be active
+# Time expression format is the same as crontab (supported cron syntax: *, n, */n, `1,3-6,9,11`)
+# e.g. compaction-checker-cron * 0-7 * * * means compaction checker would be worker between
+# 0-7am every day.
+compaction-checker-cron * 0-7 * * *

 # When the compaction checker is triggered, the db will periodically pick the SST file
 # with the highest "deleted percentage" (i.e. the percentage of deleted keys in the SST
@@ -515,10 +543,17 @@ compaction-checker-range 0-7
 # force-compact-file-min-deleted-percentage 10

 # Bgsave scheduler, auto bgsave at scheduled time
-# time expression format is the same as crontab(currently only support * and int)
-# e.g. bgsave-cron 0 3 * * * 0 4 * * *
+# Time expression format is the same as crontab (supported cron syntax: *, n, */n, `1,3-6,9,11`)
+# e.g. bgsave-cron 0 3,4 * * *
 # would bgsave the db at 3am and 4am every day

+# Kvrocks doesn't store the key number directly. It needs to scan the DB and
+# then retrieve the key number by using the dbsize scan command.
+# The Dbsize scan scheduler auto-recalculates the estimated keys at scheduled time.
+# Time expression format is the same as crontab (supported cron syntax: *, n, */n, `1,3-6,9,11`)
+# e.g. dbsize-scan-cron 0 * * * *
+# would recalculate the keyspace infos of the db every hour.
+
 # Command renaming.
 #
 # It is possible to change the name of dangerous commands in a shared
@@ -671,7 +706,7 @@ rocksdb.max_background_flushes -1
 # concurrently perform a compaction job by breaking it into multiple,
 # smaller ones that are run simultaneously.
 # Default: 2
-rocksdb.max_sub_compactions 2
+rocksdb.max_subcompactions 2

 # In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size
 # as the trigger of column family flush. Once WALs exceed this size, RocksDB
@@ -727,6 +762,29 @@ rocksdb.cache_index_and_filter_blocks yes
 # default snappy
 rocksdb.compression snappy

+# Specify the compression level to use. It trades compression speed
+# and ratio, might be useful when tuning for disk space.
+# See details: https://github.com/facebook/rocksdb/wiki/Space-Tuning
+# For zstd: valid range is from 1 (fastest) to 19 (best ratio),
+# For zlib: valid range is from 1 (fastest) to 9 (best ratio),
+# For lz4: adjusting the level influences the 'acceleration'.
+# RocksDB sets a negative level to indicate acceleration directly,
+# with more negative values indicating higher speed and less compression.
+# Note: This setting is ignored for compression algorithms like Snappy that
+# do not support variable compression levels.
+#
+# RocksDB Default:
+# - zstd: 3
+# - zlib: Z_DEFAULT_COMPRESSION (currently -1)
+# - kLZ4: -1 (i.e., `acceleration=1`; see `CompressionOptions::level` doc)
+# For all others, RocksDB does not specify a compression level.
+# If the compression type doesn't support the setting, it will be a no-op.
+#
+# Default: 32767 (RocksDB's generic default compression level. Internally
+# it'll be translated to the default compression level specific to the
+# compression library as mentioned above)
+rocksdb.compression_level 32767
+
 # If non-zero, we perform bigger reads when doing compaction. If you're
 # running RocksDB on spinning disks, you should set this to at least 2MB.
 # That way RocksDB's compaction is doing sequential instead of random reads.
@@ -851,8 +909,8 @@ rocksdb.max_bytes_for_level_multiplier 10

 # In iterators, it will prefetch data asynchronously in the background for each file being iterated on.
 # In MultiGet, it will read the necessary data blocks from those files in parallel as much as possible.
-# Default no
-rocksdb.read_options.async_io no
+# Default yes
+rocksdb.read_options.async_io yes

 # If yes, the write will be flushed from the operating system
 # buffer cache before the write is considered complete.
@@ -909,6 +967,12 @@ rocksdb.rate_limiter_auto_tuned yes
 # Default: yes
 # rocksdb.avoid_unnecessary_blocking_io yes

+# Specifies the maximum size in bytes for a write batch in RocksDB.
+# If set to 0, there is no size limit for write batches.
+# This option can help control memory usage and manage large WriteBatch operations more effectively.
+#
+# Default: 0
+# rocksdb.write_options.write_batch_max_bytes 0
+
 ################################ NAMESPACE #####################################
 # namespace.test change.me
-backup-dir .//backup
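The sketch below is not part of the patch. It is a minimal, hypothetical check, assuming the redis-py client and a kvrocks instance reachable over the Redis protocol on its default port, that reads a few of the options touched by this config bump back via CONFIG GET and compares them with the values set in kvrocks.conf. The host, port, and the particular options checked are assumptions for illustration, not something the patch itself defines.

# Hypothetical verification script (assumption: kvrocks answers CONFIG GET
# over the Redis protocol on 127.0.0.1:6666; adjust to your deployment).
from redis import Redis

EXPECTED = {
    "compaction-checker-cron": "* 0-7 * * *",
    "redis-cursor-compatible": "yes",
    "txn-context-enabled": "no",
    "rocksdb.max_subcompactions": "2",
    "rocksdb.read_options.async_io": "yes",
}


def check(host: str = "127.0.0.1", port: int = 6666) -> None:
    """Compare the live configuration against the values from kvrocks.conf."""
    client = Redis(host=host, port=port, decode_responses=True)
    for option, expected in EXPECTED.items():
        # CONFIG GET returns a dict mapping option names to their current values.
        actual = client.config_get(option).get(option, "<missing>")
        status = "OK" if actual == expected else "MISMATCH"
        print(f"{status:8} {option} = {actual!r} (expected {expected!r})")


if __name__ == "__main__":
    check()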