chg: [etc] new default kvrocks configuration

main
Alexandre Dulaunoy 2023-02-04 10:09:22 +01:00
parent 608610f5ac
commit 1e001cfd55
No known key found for this signature in database
GPG Key ID: 09E2CD4944E6CBCD
1 changed files with 439 additions and 70 deletions

View File

@ -1,15 +1,23 @@
################################ GENERAL ##################################### ################################ GENERAL #####################################
# By default kvrocks listens for connections from all the network interfaces # By default kvrocks listens for connections from localhost interface.
# available on the server. It is possible to listen to just one or multiple # It is possible to listen to just one or multiple interfaces using
# interfaces using the "bind" configuration directive, followed by one or # the "bind" configuration directive, followed by one or more IP addresses.
# more IP addresses.
# #
# Examples: # Examples:
# #
# bind 192.168.1.100 10.0.0.1 # bind 192.168.1.100 10.0.0.1
bind 127.0.0.1 # bind 127.0.0.1 ::1
#bind 0.0.0.0 bind 0.0.0.0
# Unix socket.
#
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so kvrocks will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/kvrocks.sock
# unixsocketperm 777
# Accept connections on the specified port, default is 6666. # Accept connections on the specified port, default is 6666.
port 3033 port 3033
@ -17,19 +25,43 @@ port 3033
# Close the connection after a client is idle for N seconds (0 to disable) # Close the connection after a client is idle for N seconds (0 to disable)
timeout 0 timeout 0
# The number of worker's threads, increase or decrease it would effect the performance. # The number of worker's threads, increase or decrease would affect the performance.
workers 8 workers 8
# The number of replication worker's threads, increase or decrease it would effect the replication performance. # By default, kvrocks does not run as a daemon. Use 'yes' if you need it.
# Default: 1 # Note that kvrocks will write a PID file in /var/run/kvrocks.pid when daemonized
repl-workers 1 daemonize no
# By default kvrocks does not run as a daemon. Use 'yes' if you need it. # Kvrocks implements the cluster solution that is similar to the Redis cluster solution.
# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized. # You can get cluster information by CLUSTER NODES|SLOTS|INFO command, it also is
daemonize yes # adapted to redis-cli, redis-benchmark, Redis cluster SDK, and Redis cluster proxy.
# But kvrocks doesn't support communicating with each other, so you must set
# cluster topology by CLUSTER SETNODES|SETNODEID commands, more details: #219.
#
# PLEASE NOTE:
# If you enable cluster, kvrocks will encode key with its slot id calculated by
# CRC16 and modulo 16384, encoding key with its slot id makes it efficient to
# migrate keys based on the slot. So if you enabled it the first time, cluster mode must
# not be disabled after restarting, and vice versa. That is to say, data is not
# compatible between standalone mode with cluster mode, you must migrate data
# if you want to change mode, otherwise, kvrocks will make data corrupt.
#
# Default: no
cluster-enabled no
# Persist the cluster nodes topology in local file($dir/nodes.conf). This configuration
# takes effect only if the cluster mode was enabled.
#
# If yes, it will try to load the cluster topology from the local file when starting,
# and dump the cluster nodes into the file if it was changed.
#
# Default: yes
# persist-cluster-nodes-enabled yes
# Set the max number of connected clients at the same time. By default # Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the server is not # this limit is set to 10000 clients. However, if the server is not
# able to configure the process file limit to allow for the specified limit # able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit # the max number of allowed clients is set to the current file limit
# #
@ -53,13 +85,13 @@ maxclients 10000
# If the master is password protected (using the "masterauth" configuration # If the master is password protected (using the "masterauth" configuration
# directive below) it is possible to tell the slave to authenticate before # directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will # starting the replication synchronization process. Otherwise, the master will
# refuse the slave request. # refuse the slave request.
# #
# masterauth foobared # masterauth foobared
# Master-Slave replication would check db name is matched. If not, the slave should # Master-Slave replication would check db name is matched. If not, the slave should
# refuse to sync the db from master. Don't use default value, set the db-name to identify # refuse to sync the db from master. Don't use the default value, set the db-name to identify
# the cluster. # the cluster.
db-name change.me.db db-name change.me.db
@ -67,14 +99,30 @@ db-name change.me.db
# #
# The DB will be written inside this directory # The DB will be written inside this directory
# Note that you must specify a directory here, not a file name. # Note that you must specify a directory here, not a file name.
dir ./dbcycat dir /tmp/kvrocks
# The logs of server will be stored in this directory. If you don't specify # You can configure where to store your server logs by the log-dir.
# one directory, by default, we store logs in the working directory that set # If you don't specify one, we will use the above `dir` as our default log directory.
# by 'dir' above. # We can also send logs to stdout/stderr, which is as simple as:
# log-dir /tmp/kvrocks #
log-dir stdout
# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by # Log level
# Possible values: info, warning, error, fatal
# Default: info
log-level info
# You can configure log-retention-days to control whether to enable the log cleaner
# and the maximum retention days that the INFO level logs will be kept.
#
# if set to -1, that means to disable the log cleaner.
# if set to 0, all previous INFO level logs will be immediately removed.
# if set to between 0 and INT_MAX, that means it will retain the latest N (log-retention-days) days of logs.
# By default the log-retention-days is -1.
log-retention-days -1
# When running in daemonize mode, kvrocks writes a PID file in ${CONFIG_DIR}/kvrocks.pid by
# default. You can specify a custom pid file location here. # default. You can specify a custom pid file location here.
# pidfile /var/run/kvrocks.pid # pidfile /var/run/kvrocks.pid
pidfile "" pidfile ""
@ -121,6 +169,20 @@ tcp-backlog 511
# connect 'master's listening port' when synchronization. # connect 'master's listening port' when synchronization.
master-use-repl-port no master-use-repl-port no
# Currently, master only checks sequence number when replica asks for PSYNC,
# that is not enough since they may have different replication histories even
# the replica asking sequence is in the range of the master current WAL.
#
# We design 'Replication Sequence ID' PSYNC, we add unique replication id for
# every write batch (the operation of each command on the storage engine), so
# the combination of replication id and sequence is unique for write batch.
# The master can identify whether the replica has the same replication history
# by checking replication id and sequence.
#
# By default, it is not enabled since this stricter check may easily lead to
# full synchronization.
use-rsid-psync no
# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of # Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
# another kvrocks server. A few things to understand ASAP about kvrocks replication. # another kvrocks server. A few things to understand ASAP about kvrocks replication.
# #
@ -142,16 +204,39 @@ master-use-repl-port no
# is still in progress, the slave can act in two different ways: # is still in progress, the slave can act in two different ways:
# #
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the # still reply to client requests, possibly with out-of-date data, or the
# data set may just be empty if this is the first synchronization. # data set may just be empty if this is the first synchronization.
# #
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands # an error "SYNC with master in progress" to all kinds of commands
# but to INFO and SLAVEOF. # but to INFO and SLAVEOF.
# #
slave-serve-stale-data yes slave-serve-stale-data yes
# The maximum allowed rate (in MB/s) that should be used by Replication. # To guarantee the slave's data is safe and can serve while it is in full synchronization
# state, the slave still keeps its own data. But this needs to occupy much disk
# space, so we provide a way to reduce disk occupation: the slave will delete its
# entire database before fetching files from master during full synchronization.
# If you want to enable this way, you can set 'slave-delete-db-before-fullsync'
# to yes, but you must know that database will be lost if master is down during
# full synchronization, unless you have a backup of database.
#
# This option is similar to the redis replicas RDB diskless load option:
# repl-diskless-load on-empty-db
#
# Default: no
slave-empty-db-before-fullsync no
# If replicas need full synchronization with master, master needs to create
# a checkpoint for feeding replicas, and replicas also stage a checkpoint of
# the master. If we also keep the backup, it may occupy extra disk space.
# You can enable 'purge-backup-on-fullsync' if disk is not sufficient, but
# that may cause remote backup copy failing.
#
# Default: no
purge-backup-on-fullsync no
# The maximum allowed rate (in MB/s) that should be used by replication.
# If the rate exceeds max-replication-mb, replication will slow down. # If the rate exceeds max-replication-mb, replication will slow down.
# Default: 0 (i.e. no limit) # Default: 0 (i.e. no limit)
max-replication-mb 0 max-replication-mb 0
@ -170,39 +255,99 @@ max-db-size 0
# The maximum backup to keep, server cron would run every minute to check the num of current # The maximum backup to keep, server cron would run every minute to check the num of current
# backup, and purge the old backup if exceed the max backup num to keep. If max-backup-to-keep # backup, and purge the old backup if exceed the max backup num to keep. If max-backup-to-keep
# is 0, no backup would be keep. # is 0, no backup would be kept. But now, we only support 0 or 1.
# exception: the backup will not be purged if it's not yet expired (when max-backup-keep-hours is not 0)
max-backup-to-keep 1 max-backup-to-keep 1
# The maximum hours to keep the backup. If max-backup-keep-hours is 0, wouldn't purge any backup. # The maximum hours to keep the backup. If max-backup-keep-hours is 0, wouldn't purge any backup.
# default: 1 Week # default: 1 day
max-backup-keep-hours 168 max-backup-keep-hours 24
# Enable the kvrocks to support the codis protocol, if the db enabled the codis mode at first open, # max-bitmap-to-string-mb is used to limit the max size of bitmap to string transformation (MB).
# this option must not be disabled after restarted, and vice versa
# Default: no
codis-enabled no
# Ratio of the samples would be recorded when the profiling was enabled.
# we simply use the rand to determine whether to record the sample or not.
# #
# Default: 0 # Default: 16
profiling-sample-ratio 0 max-bitmap-to-string-mb 16
# There is no limit to this length. Just be aware that it will consume memory. ################################## TLS ###################################
# You can reclaim memory used by the perf log with PERFLOG RESET.
#
# Default: 256
profiling-sample-record-max-len 256
# profiling-sample-record-threshold-ms is used to tell the kvrocks when to record. # To enable it, `tls-port` can be used to define TLS-listening ports.
# To enable it, `tls-port` can be used to define TLS-listening ports.
# tls-port 0
# Configure a X.509 certificate and private key to use for authenticating the
# server to connected clients, masters or cluster peers.
# These files should be PEM formatted.
# #
# Default: 100 millisecond # tls-cert-file kvrocks.crt
profiling-sample-record-threshold-ms 100 # tls-key-file kvrocks.key
# If the key file is encrypted using a passphrase, it can be included here
# as well.
#
# tls-key-file-pass secret
# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
# clients and peers. Kvrocks requires an explicit configuration of at least one
# of these, and will not implicitly use the system wide configuration.
#
# tls-ca-cert-file ca.crt
# tls-ca-cert-dir /etc/ssl/certs
# By default, clients on a TLS port are required
# to authenticate using valid client side certificates.
#
# If "no" is specified, client certificates are not required and not accepted.
# If "optional" is specified, client certificates are accepted and must be
# valid if provided, but are not required.
#
# tls-auth-clients no
# tls-auth-clients optional
# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
# that older formally deprecated versions are kept disabled to reduce the attack surface.
# You can explicitly specify TLS versions to support.
# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
# To enable only TLSv1.2 and TLSv1.3, use:
#
# tls-protocols "TLSv1.2 TLSv1.3"
# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
# about the syntax of this string.
#
# Note: this configuration applies only to <= TLSv1.2.
#
# tls-ciphers DEFAULT:!MEDIUM
# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
# information about the syntax of this string, and specifically for TLSv1.3
# ciphersuites.
#
# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
# When choosing a cipher, use the server's preference instead of the client
# preference. By default, the server follows the client's preference.
#
# tls-prefer-server-ciphers yes
# By default, TLS session caching is enabled to allow faster and less expensive
# reconnections by clients that support it. Use the following directive to disable
# caching.
#
# tls-session-caching no
# Change the default number of TLS sessions cached. A zero value sets the cache
# to unlimited size. The default size is 20480.
#
# tls-session-cache-size 5000
# Change the default timeout of cached TLS sessions. The default timeout is 300
# seconds.
#
# tls-session-cache-timeout 60
################################## SLOW LOG ################################### ################################## SLOW LOG ###################################
# The Kvrocks Slow Log is a system to log queries that exceeded a specified # The Kvrocks Slow Log is a mechanism to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations # execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth, # like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only # but just the time needed to actually execute the command (this is the only
@ -235,6 +380,46 @@ slowlog-max-len 128
# They do not enable continuous liveness pings back to your supervisor. # They do not enable continuous liveness pings back to your supervisor.
supervised no supervised no
################################## PERF LOG ###################################
# The Kvrocks Perf Log is a mechanism to log queries' performance context that
# exceeded a specified execution time. This mechanism uses rocksdb's
# Perf Context and IO Stats Context, Please see:
# https://github.com/facebook/rocksdb/wiki/Perf-Context-and-IO-Stats-Context
#
# This mechanism is enabled when profiling-sample-commands is not empty and
# profiling-sample-ratio greater than 0.
# It is important to note that this mechanism affects performance, but it is
# useful for troubleshooting performance bottlenecks, so it should only be
# enabled when performance problems occur.
# The name of the commands you want to record. Must be original name of
# commands supported by Kvrocks. Use ',' to separate multiple commands and
# use '*' to record all commands supported by Kvrocks.
# Example:
# - Single command: profiling-sample-commands get
# - Multiple commands: profiling-sample-commands get,mget,hget
#
# Default: empty
# profiling-sample-commands ""
# Ratio of the samples would be recorded. It is a number between 0 and 100.
# We simply use the rand to determine whether to record the sample or not.
#
# Default: 0
profiling-sample-ratio 0
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the perf log with PERFLOG RESET.
#
# Default: 256
profiling-sample-record-max-len 256
# profiling-sample-record-threshold-ms is used to tell the kvrocks when to record.
#
# Default: 100 millisecond
profiling-sample-record-threshold-ms 100
################################## CRON ################################### ################################## CRON ###################################
# Compact Scheduler, auto compact at schedule time # Compact Scheduler, auto compact at schedule time
@ -248,23 +433,80 @@ supervised no
# 0-7am every day. # 0-7am every day.
compaction-checker-range 0-7 compaction-checker-range 0-7
# Bgsave scheduler, auto bgsave at schedule time # Bgsave scheduler, auto bgsave at scheduled time
# time expression format is the same as crontab(currently only support * and int) # time expression format is the same as crontab(currently only support * and int)
# e.g. bgsave-cron 0 3 * * * 0 4 * * * # e.g. bgsave-cron 0 3 * * * 0 4 * * *
# would bgsave the db at 3am and 4am every day # would bgsave the db at 3am and 4am every day
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance, the KEYS command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command KEYS b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command KEYS ""
################################ MIGRATE #####################################
# If the network bandwidth is completely consumed by the migration task,
# it will affect the availability of kvrocks. To avoid this situation,
# migrate-speed is adopted to limit the migrating speed.
# Migrating speed is limited by controlling the duration between sending data,
# the duration is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us).
# Value: [0,INT_MAX], 0 means no limit
#
# Default: 4096
migrate-speed 4096
# In order to reduce data transmission times and improve the efficiency of data migration,
# pipeline is adopted to send multiple data at once. Pipeline size can be set by this option.
# Value: [1, INT_MAX], it can't be 0
#
# Default: 16
migrate-pipeline-size 16
# In order to reduce the write forbidden time during migrating slot, we will migrate the incremental
# data several times to reduce the amount of incremental data. Until the quantity of incremental
# data is reduced to a certain threshold, slot will be forbidden write. The threshold is set by
# this option.
# Value: [1, INT_MAX], it can't be 0
#
# Default: 10000
migrate-sequence-gap 10000
################################ ROCKSDB ##################################### ################################ ROCKSDB #####################################
# Specify the capacity of metadata column family block cache. Larger block cache # Specify the capacity of metadata column family block cache. A larger block cache
# may make request faster while more keys would be cached. Max Size is 200*1024. # may make requests faster while more keys would be cached. Max Size is 200*1024.
# Default: 2048MB # Default: 2048MB
rocksdb.metadata_block_cache_size 2048 rocksdb.metadata_block_cache_size 2048
# Specify the capacity of subkey column family block cache. Larger block cache # Specify the capacity of subkey column family block cache. A larger block cache
# may make request faster while more keys would be cached. Max Size is 200*1024. # may make requests faster while more keys would be cached. Max Size is 200*1024.
# Default: 2048MB # Default: 2048MB
rocksdb.subkey_block_cache_size 2048 rocksdb.subkey_block_cache_size 2048
# Metadata column family and subkey column family will share a single block cache
# if set 'yes'. The capacity of shared block cache is
# metadata_block_cache_size + subkey_block_cache_size
#
# Default: yes
rocksdb.share_metadata_and_subkey_block_cache yes
# A global cache for table-level rows in RocksDB. If almost always point
# lookups, enlarging row cache may improve read performance. Otherwise,
# if we enlarge this value, we can lessen metadata/subkey block cache size.
#
# Default: 0 (disabled)
rocksdb.row_cache_size 0
# Number of open files that can be used by the DB. You may need to # Number of open files that can be used by the DB. You may need to
# increase this if your database has a large working set. Value -1 means # increase this if your database has a large working set. Value -1 means
# files opened are always kept open. You can estimate number of files based # files opened are always kept open. You can estimate number of files based
@ -289,7 +531,7 @@ rocksdb.max_open_files 8096
# default is 64MB # default is 64MB
rocksdb.write_buffer_size 64 rocksdb.write_buffer_size 64
# Target file size for compaction, target file size for Level N can be caculated # Target file size for compaction, target file size for Level N can be calculated
# by target_file_size_base * (target_file_size_multiplier ^ (L-1)) # by target_file_size_base * (target_file_size_multiplier ^ (L-1))
# #
# Default: 128MB # Default: 128MB
@ -320,14 +562,6 @@ rocksdb.max_background_flushes 4
# Default: 2 (i.e. no subcompactions) # Default: 2 (i.e. no subcompactions)
rocksdb.max_sub_compactions 2 rocksdb.max_sub_compactions 2
# We impl the repliction with rocksdb WAL, it would trigger full sync when the seq was out of range.
# wal_ttl_seconds and wal_size_limit_mb would affect how archived logswill be deleted.
# If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that
# are older than WAL_ttl_seconds will be deleted#
#
# Default: 3 Hours
rocksdb.wal_ttl_seconds 10800
# In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size # In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size
# as the trigger of column family flush. Once WALs exceed this size, RocksDB # as the trigger of column family flush. Once WALs exceed this size, RocksDB
# will start forcing the flush of column families to allow deletion of some # will start forcing the flush of column families to allow deletion of some
@ -349,6 +583,14 @@ rocksdb.wal_ttl_seconds 10800
# default is 512MB # default is 512MB
rocksdb.max_total_wal_size 512 rocksdb.max_total_wal_size 512
# We implement the replication with rocksdb WAL, it would trigger full sync when the seq was out of range.
# wal_ttl_seconds and wal_size_limit_mb would affect how archived logs will be deleted.
# If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that
# are older than WAL_ttl_seconds will be deleted.
#
# Default: 3 Hours
rocksdb.wal_ttl_seconds 10800
# If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, # If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
# WAL files will be checked every 10 min and if total size is greater # WAL files will be checked every 10 min and if total size is greater
# then WAL_size_limit_MB, they will be deleted starting with the # then WAL_size_limit_MB, they will be deleted starting with the
@ -369,8 +611,9 @@ rocksdb.block_size 16384
# Default: no # Default: no
rocksdb.cache_index_and_filter_blocks yes rocksdb.cache_index_and_filter_blocks yes
# Specify the compression to use. # Specify the compression to use. Only compress level greater
# Accept value: "no", "snappy" # than 2 to improve performance.
# Accept value: "no", "snappy", "lz4", "zstd", "zlib"
# default snappy # default snappy
rocksdb.compression snappy rocksdb.compression snappy
@ -405,6 +648,16 @@ rocksdb.enable_pipelined_write no
# Default: 20 # Default: 20
rocksdb.level0_slowdown_writes_trigger 20 rocksdb.level0_slowdown_writes_trigger 20
# Maximum number of level-0 files. We stop writes at this point.
#
# Default: 40
rocksdb.level0_stop_writes_trigger 40
# Number of files to trigger level-0 compaction.
#
# Default: 4
rocksdb.level0_file_num_compaction_trigger 4
# if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec # if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
# #
# Default: 0 # Default: 0
@ -414,5 +667,121 @@ rocksdb.stats_dump_period_sec 0
# #
# Default: no # Default: no
rocksdb.disable_auto_compactions no rocksdb.disable_auto_compactions no
# BlobDB(key-value separation) is essentially RocksDB for large-value use cases.
# Since 6.18.0, The new implementation is integrated into the RocksDB core.
# When set, large values (blobs) are written to separate blob files, and only
# pointers to them are stored in SST files. This can reduce write amplification
# for large-value use cases at the cost of introducing a level of indirection
# for reads. Please see: https://github.com/facebook/rocksdb/wiki/BlobDB.
#
# Note that when enable_blob_files is set to yes, BlobDB-related configuration
# items will take effect.
#
# Default: no
rocksdb.enable_blob_files no
# The size of the smallest value to be stored separately in a blob file. Values
# which have an uncompressed size smaller than this threshold are stored alongside
# the keys in SST files in the usual fashion.
#
# Default: 4096 byte, 0 means that all values are stored in blob files
rocksdb.min_blob_size 4096
# The size limit for blob files. When writing blob files, a new file is
# opened once this limit is reached.
#
# Default: 268435456 bytes
rocksdb.blob_file_size 268435456
# Enables garbage collection of blobs. Valid blobs residing in blob files
# older than a cutoff get relocated to new files as they are encountered
# during compaction, which makes it possible to clean up blob files once
# they contain nothing but obsolete/garbage blobs.
# See also rocksdb.blob_garbage_collection_age_cutoff below.
#
# Default: yes
rocksdb.enable_blob_garbage_collection yes
# The percentage cutoff in terms of blob file age for garbage collection.
# Blobs in the oldest N blob files will be relocated when encountered during
# compaction, where N = (garbage_collection_cutoff/100) * number_of_blob_files.
# Note that this value must belong to [0, 100].
#
# Default: 25
rocksdb.blob_garbage_collection_age_cutoff 25
# The purpose of the following three options are to dynamically adjust the upper limit of
# the data that each layer can store according to the size of the different
# layers of the LSM. Enabling this option will bring some improvements in
# deletion efficiency and space amplification, but it will lose a certain
# amount of read performance.
# If you want to know more details about Levels' Target Size, you can read RocksDB wiki:
# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
#
# Default: no
rocksdb.level_compaction_dynamic_level_bytes no
# The total file size of level-1 sst.
#
# Default: 268435456 bytes
rocksdb.max_bytes_for_level_base 268435456
# Multiplication factor for the total file size of L(n+1) layers.
# This option is a double type number in RocksDB, but kvrocks does
# not support the double data type number yet, so we use an integer
# number instead of a double currently.
#
# Default: 10
rocksdb.max_bytes_for_level_multiplier 10
# This feature only takes effect in Iterators and MultiGet.
# If yes, RocksDB will try to read asynchronously and in parallel as much as possible to hide IO latency.
# In iterators, it will prefetch data asynchronously in the background for each file being iterated on.
# In MultiGet, it will read the necessary data blocks from those files in parallel as much as possible.
# Default no
rocksdb.read_options.async_io no
# If yes, the write will be flushed from the operating system
# buffer cache before the write is considered complete.
# If this flag is enabled, writes will be slower.
# If this flag is disabled, and the machine crashes, some recent
# writes may be lost. Note that if it is just the process that
# crashes (i.e., the machine does not reboot), no writes will be
# lost even if sync==false.
#
# Default: no
rocksdb.write_options.sync no
# If yes, writes will not first go to the write ahead log,
# and the write may get lost after a crash.
#
# Default: no
rocksdb.write_options.disable_wal no
# If enabled and we need to wait or sleep for the write request, fails
# immediately.
#
# Default: no
rocksdb.write_options.no_slowdown no
# If enabled, write requests are of lower priority if compaction is
# behind. In this case, no_slowdown = true, the request will be canceled
# immediately. Otherwise, it will be slowed down.
# The slowdown value is determined by RocksDB to guarantee
# it introduces minimum impacts to high priority writes.
#
# Default: no
rocksdb.write_options.low_pri no
# If enabled, this writebatch will maintain the last insert positions of each
# memtable as hints in concurrent write. It can improve write performance
# in concurrent writes if keys in one writebatch are sequential.
#
# Default: no
rocksdb.write_options.memtable_insert_hint_per_batch no
################################ NAMESPACE ##################################### ################################ NAMESPACE #####################################
# namespace.test change.me # namespace.test change.me