chg: update all the things
parent
9bfe809f83
commit
0c9d5e6a91
|
@ -256,7 +256,6 @@ class BGPRanking():
|
|||
if (not response.get('data') or not response['data'].get('countries') or not
|
||||
response['data']['countries'][0].get('routed')):
|
||||
logging.warning(f'Invalid response: {response}')
|
||||
# FIXME: return something
|
||||
return 0, [(0, 0)]
|
||||
routed_asns = re.findall(r"AsnSingle\(([\d]*)\)", response['data']['countries'][0]['routed'])
|
||||
ranks = [self.asn_rank(asn, d, source, ipversion)['response'] for asn in routed_asns]
|
||||
|
|
|
@ -1,52 +1,79 @@
|
|||
################################ GENERAL #####################################
|
||||
|
||||
# By default kvrocks listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
# By default kvrocks listens for connections from localhost interface.
|
||||
# It is possible to listen to just one or multiple interfaces using
|
||||
# the "bind" configuration directive, followed by one or more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
# bind 127.0.0.1 ::1
|
||||
# bind 0.0.0.0
|
||||
# bind 127.0.0.1
|
||||
bind 0.0.0.0
|
||||
|
||||
# Unix socket.
|
||||
#
|
||||
# Specify the path for the unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so kvrocks will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
unixsocket ranking.sock
|
||||
unixsocketperm 777
|
||||
|
||||
# Accept connections on the specified port, default is 6666.
|
||||
# port 6666
|
||||
port 5189
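# Quick connectivity check (illustrative only, not part of the shipped config;
# assumes redis-cli is installed and the server is running with this file):
#   redis-cli -p 5189 ping
#   redis-cli -s ranking.sock ping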
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# The number of worker's threads, increase or decrease it would effect the performance.
|
||||
# The number of worker's threads, increase or decrease would affect the performance.
|
||||
workers 8
|
||||
|
||||
# The number of replication worker's threads, increase or decrease it would affect the replication performance.
|
||||
# Default: 1
|
||||
repl-workers 1
|
||||
# By default, kvrocks does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that kvrocks will write a PID file in /var/run/kvrocks.pid when daemonized
|
||||
daemonize yes
|
||||
|
||||
# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized.
|
||||
daemonize no
|
||||
|
||||
# Kvrocks implements cluster solution that is similar with redis cluster sulution.
|
||||
# Kvrocks implements the cluster solution that is similar to the Redis cluster solution.
|
||||
# You can get cluster information by CLUSTER NODES|SLOTS|INFO command, it also is
|
||||
# adapted to redis-cli, redis-benchmark, redis cluster SDK and redis cluster proxy.
|
||||
# But kvrocks doesn't support to communicate with each others, so you must set
|
||||
# adapted to redis-cli, redis-benchmark, Redis cluster SDK, and Redis cluster proxy.
|
||||
# But kvrocks doesn't support communicating with each other, so you must set
|
||||
# cluster topology by CLUSTER SETNODES|SETNODEID commands, more details: #219.
|
||||
#
|
||||
# PLEASE NOTE:
|
||||
# If you enable cluster, kvrocks will encode key with its slot id calculated by
|
||||
# CRC16 and modulo 16384, endoding key with its slot id makes it efficient to
|
||||
# migrate keys based on slot. So if you enabled at first time, cluster mode must
|
||||
# CRC16 and modulo 16384, encoding key with its slot id makes it efficient to
|
||||
# migrate keys based on the slot. So if you enabled at first time, cluster mode must
|
||||
# not be disabled after restarting, and vice versa. That is to say, data is not
|
||||
# compatible between standalone mode and cluster mode; you must migrate data
# if you want to change the mode, otherwise kvrocks will corrupt the data.
|
||||
#
|
||||
# Default: no
|
||||
|
||||
cluster-enabled no
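# Worked illustration of the slot encoding described above (not part of the
# upstream file): the slot of a key is CRC16(key) % 16384, so a key whose
# CRC16 value is 41042 would be stored under slot 41042 % 16384 = 8274.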
|
||||
|
||||
# By default, namespaces are stored in the configuration file and won't be replicated
|
||||
# to replicas. This option allows you to change this behavior, so that namespaces are also
|
||||
# propagated to slaves. Note that:
|
||||
# 1) it won't replicate the 'masterauth' to prevent breaking master/replica replication
|
||||
# 2) it will overwrite replica's namespace with master's namespace, so be careful of in-using namespaces
|
||||
# 3) cannot switch off the namespace replication once it's enabled
|
||||
#
|
||||
# Default: no
|
||||
repl-namespace-enabled no
|
||||
|
||||
# Persist the cluster nodes topology in local file($dir/nodes.conf). This configuration
|
||||
# takes effect only if the cluster mode was enabled.
|
||||
#
|
||||
# If yes, it will try to load the cluster topology from the local file when starting,
|
||||
# and dump the cluster nodes into the file if it was changed.
|
||||
#
|
||||
# Default: yes
|
||||
persist-cluster-nodes-enabled yes
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the server is not
|
||||
# this limit is set to 10000 clients. However, if the server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
#
|
||||
|
@ -70,15 +97,15 @@ maxclients 10000
|
|||
|
||||
# If the master is password protected (using the "masterauth" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# starting the replication synchronization process. Otherwise, the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth foobared
|
||||
|
||||
# Master-Slave replication would check whether the db name matches. If not, the slave should
|
||||
# refuse to sync the db from master. Don't use default value, set the db-name to identify
|
||||
# refuse to sync the db from master. Don't use the default value, set the db-name to identify
|
||||
# the cluster.
|
||||
db-name storage.db
|
||||
db-name ranking.db
|
||||
|
||||
# The working directory
|
||||
#
|
||||
|
@ -86,12 +113,28 @@ db-name storage.db
|
|||
# Note that you must specify a directory here, not a file name.
|
||||
dir ./
|
||||
|
||||
# The logs of server will be stored in this directory. If you don't specify
|
||||
# one directory, by default, we store logs in the working directory that set
|
||||
# by 'dir' above.
|
||||
# log-dir /tmp/kvrocks
|
||||
# You can configure where to store your server logs by the log-dir.
|
||||
# If you don't specify one, we will use the above `dir` as our default log directory.
|
||||
# Sending logs to stdout/stderr is as simple as:
|
||||
#
|
||||
log-dir stdout
|
||||
|
||||
# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
|
||||
# Log level
|
||||
# Possible values: info, warning, error, fatal
|
||||
# Default: info
|
||||
log-level info
|
||||
|
||||
# You can configure log-retention-days to control whether to enable the log cleaner
|
||||
# and the maximum retention days that the INFO level logs will be kept.
|
||||
#
|
||||
# if set to -1, that means to disable the log cleaner.
|
||||
# if set to 0, all previous INFO level logs will be immediately removed.
|
||||
# if set to a value between 0 and INT_MAX, it will retain the latest N (log-retention-days) days of logs.
|
||||
|
||||
# By default the log-retention-days is -1.
|
||||
log-retention-days -1
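# Illustrative example of the retention values described above: setting
# "log-retention-days 7" would enable the log cleaner and keep roughly the
# last 7 days of INFO level logs.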
|
||||
|
||||
# When running in daemonize mode, kvrocks writes a PID file in ${CONFIG_DIR}/kvrocks.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
# pidfile /var/run/kvrocks.pid
|
||||
pidfile storage.pid
|
||||
|
@ -138,6 +181,20 @@ tcp-backlog 511
|
|||
# connect 'master's listening port' when synchronization.
|
||||
master-use-repl-port no
|
||||
|
||||
# Currently, the master only checks the sequence number when a replica asks for PSYNC.
# That is not enough, since they may have different replication histories even
# if the replica's requested sequence is in the range of the master's current WAL.
|
||||
#
|
||||
# We designed 'Replication Sequence ID' PSYNC: a unique replication id is added to
# every write batch (the operation of each command on the storage engine), so
# the combination of replication id and sequence is unique for a write batch.
# The master can identify whether the replica has the same replication history
# by checking the replication id and sequence.
|
||||
#
|
||||
# By default, it is not enabled since this stricter check may easily lead to
|
||||
# full synchronization.
|
||||
use-rsid-psync no
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
|
||||
# another kvrocks server. A few things to understand ASAP about kvrocks replication.
|
||||
#
|
||||
|
@ -159,11 +216,11 @@ master-use-repl-port no
|
|||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# still reply to client requests, possibly with out-of-date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# an error "SYNC with master in progress" to all kinds of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
@ -182,6 +239,35 @@ slave-serve-stale-data yes
|
|||
# Default: no
|
||||
slave-empty-db-before-fullsync no
|
||||
|
||||
# A Kvrocks master is able to list the address and port of the attached
|
||||
# replicas in different ways. For example the "INFO replication" section
|
||||
# offers this information, which is used, among other tools, by
|
||||
# Redis Sentinel in order to discover replica instances.
|
||||
# Another place where this info is available is in the output of the
|
||||
# "ROLE" command of a master.
|
||||
#
|
||||
# The listed IP address and port normally reported by a replica is
|
||||
# obtained in the following way:
|
||||
#
|
||||
# IP: The address is auto detected by checking the peer address
|
||||
# of the socket used by the replica to connect with the master.
|
||||
#
|
||||
# Port: The port is communicated by the replica during the replication
|
||||
# handshake, and is normally the port that the replica is using to
|
||||
# listen for connections.
|
||||
#
|
||||
# However when port forwarding or Network Address Translation (NAT) is
|
||||
# used, the replica may actually be reachable via different IP and port
|
||||
# pairs. The following two options can be used by a replica in order to
|
||||
# report to its master a specific set of IP and port, so that both INFO
|
||||
# and ROLE will report those values.
|
||||
#
|
||||
# There is no need to use both the options if you need to override just
|
||||
# the port or the IP address.
|
||||
#
|
||||
# replica-announce-ip 5.5.5.5
|
||||
# replica-announce-port 1234
|
||||
|
||||
# If replicas need full synchronization with the master, the master needs to create
# a checkpoint for feeding the replicas, and the replicas also stage a checkpoint of
# the master. If we also keep the backup, it may occupy extra disk space.
|
||||
|
@ -191,7 +277,7 @@ slave-empty-db-before-fullsync no
|
|||
# Default: no
|
||||
purge-backup-on-fullsync no
|
||||
|
||||
# The maximum allowed rate (in MB/s) that should be used by Replication.
|
||||
# The maximum allowed rate (in MB/s) that should be used by replication.
|
||||
# If the rate exceeds max-replication-mb, replication will slow down.
|
||||
# Default: 0 (i.e. no limit)
|
||||
max-replication-mb 0
|
||||
|
@ -199,8 +285,8 @@ max-replication-mb 0
|
|||
# The maximum allowed aggregated write rate of flush and compaction (in MB/s).
|
||||
# If the rate exceeds max-io-mb, io will slow down.
|
||||
# 0 is no limit
|
||||
# Default: 500
|
||||
max-io-mb 500
|
||||
# Default: 0
|
||||
max-io-mb 0
|
||||
|
||||
# The maximum allowed space (in GB) that should be used by RocksDB.
|
||||
# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail.
|
||||
|
@ -210,33 +296,130 @@ max-db-size 0
|
|||
|
||||
# The maximum number of backups to keep. The server cron runs every minute to check the number of
# current backups, and purges old backups if they exceed the max backup number to keep. If max-backup-to-keep
|
||||
# is 0, no backup would be keep. But now, we only support 0 or 1.
|
||||
# is 0, no backup would be kept. But now, we only support 0 or 1.
|
||||
max-backup-to-keep 1
|
||||
|
||||
# The maximum hours to keep the backup. If max-backup-keep-hours is 0, no backup will be purged.
|
||||
# default: 1 day
|
||||
max-backup-keep-hours 24
|
||||
|
||||
# Ratio of the samples would be recorded when the profiling was enabled.
|
||||
# we simply use the rand to determine whether to record the sample or not.
|
||||
# max-bitmap-to-string-mb is used to limit the max size of the bitmap-to-string transformation (MB).
|
||||
#
|
||||
# Default: 0
|
||||
profiling-sample-ratio 0
|
||||
# Default: 16
|
||||
max-bitmap-to-string-mb 16
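# Rough sizing note (illustrative): with the 16 MB default above, a bitmap of
# up to 16 * 1024 * 1024 * 8 = 134,217,728 bits can be converted to a string.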
|
||||
|
||||
# There is no limit to this length. Just be aware that it will consume memory.
|
||||
# You can reclaim memory used by the perf log with PERFLOG RESET.
|
||||
#
|
||||
# Default: 256
|
||||
profiling-sample-record-max-len 256
|
||||
# Whether to enable SCAN-like cursor compatible with Redis.
|
||||
# If enabled, the cursor will be unsigned 64-bit integers.
|
||||
# If disabled, the cursor will be a string.
|
||||
# Default: no
|
||||
redis-cursor-compatible yes
|
||||
|
||||
# profiling-sample-record-threshold-ms use to tell the kvrocks when to record.
|
||||
# Whether to enable the RESP3 protocol.
|
||||
# NOTICE: RESP3 is still under development, don't enable it in a production environment.
|
||||
#
|
||||
# Default: 100 millisecond
|
||||
profiling-sample-record-threshold-ms 100
|
||||
# Default: no
|
||||
# resp3-enabled no
|
||||
|
||||
# Maximum nesting depth allowed when parsing and serializing
|
||||
# JSON documents while using JSON commands like JSON.SET.
|
||||
# Default: 1024
|
||||
json-max-nesting-depth 1024
|
||||
|
||||
# The underlying storage format of JSON data type
|
||||
# NOTE: This option only affects newly written/updated key-values
|
||||
# The CBOR format may reduce the storage size and speed up JSON commands
|
||||
# Available values: json, cbor
|
||||
# Default: json
|
||||
json-storage-format json
|
||||
|
||||
################################## TLS ###################################
|
||||
|
||||
# By default, TLS/SSL is disabled, i.e. `tls-port` is set to 0.
|
||||
# To enable it, `tls-port` can be used to define TLS-listening ports.
|
||||
# tls-port 0
|
||||
|
||||
# Configure an X.509 certificate and private key to use for authenticating the
|
||||
# server to connected clients, masters or cluster peers.
|
||||
# These files should be PEM formatted.
|
||||
#
|
||||
# tls-cert-file kvrocks.crt
|
||||
# tls-key-file kvrocks.key
|
||||
|
||||
# If the key file is encrypted using a passphrase, it can be included here
|
||||
# as well.
|
||||
#
|
||||
# tls-key-file-pass secret
|
||||
|
||||
# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
|
||||
# clients and peers. Kvrocks requires an explicit configuration of at least one
|
||||
# of these, and will not implicitly use the system wide configuration.
|
||||
#
|
||||
# tls-ca-cert-file ca.crt
|
||||
# tls-ca-cert-dir /etc/ssl/certs
|
||||
|
||||
# By default, clients on a TLS port are required
|
||||
# to authenticate using valid client side certificates.
|
||||
#
|
||||
# If "no" is specified, client certificates are not required and not accepted.
|
||||
# If "optional" is specified, client certificates are accepted and must be
|
||||
# valid if provided, but are not required.
|
||||
#
|
||||
# tls-auth-clients no
|
||||
# tls-auth-clients optional
|
||||
|
||||
# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
|
||||
# that older formally deprecated versions are kept disabled to reduce the attack surface.
|
||||
# You can explicitly specify TLS versions to support.
|
||||
# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
|
||||
# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
|
||||
# To enable only TLSv1.2 and TLSv1.3, use:
|
||||
#
|
||||
# tls-protocols "TLSv1.2 TLSv1.3"
|
||||
|
||||
# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
|
||||
# about the syntax of this string.
|
||||
#
|
||||
# Note: this configuration applies only to <= TLSv1.2.
|
||||
#
|
||||
# tls-ciphers DEFAULT:!MEDIUM
|
||||
|
||||
# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
|
||||
# information about the syntax of this string, and specifically for TLSv1.3
|
||||
# ciphersuites.
|
||||
#
|
||||
# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
|
||||
|
||||
# When choosing a cipher, use the server's preference instead of the client
|
||||
# preference. By default, the server follows the client's preference.
|
||||
#
|
||||
# tls-prefer-server-ciphers yes
|
||||
|
||||
# By default, TLS session caching is enabled to allow faster and less expensive
|
||||
# reconnections by clients that support it. Use the following directive to disable
|
||||
# caching.
|
||||
#
|
||||
# tls-session-caching no
|
||||
|
||||
# Change the default number of TLS sessions cached. A zero value sets the cache
|
||||
# to unlimited size. The default size is 20480.
|
||||
#
|
||||
# tls-session-cache-size 5000
|
||||
|
||||
# Change the default timeout of cached TLS sessions. The default timeout is 300
|
||||
# seconds.
|
||||
#
|
||||
# tls-session-cache-timeout 60
|
||||
|
||||
# By default, a replica does not attempt to establish a TLS connection
|
||||
# with its master.
|
||||
#
|
||||
# Use the following directive to enable TLS on replication links.
|
||||
#
|
||||
# tls-replication yes
|
||||
|
||||
################################## SLOW LOG ###################################
|
||||
|
||||
# The Kvrocks Slow Log is a system to log queries that exceeded a specified
|
||||
# The Kvrocks Slow Log is a mechanism to log queries that exceeded a specified
|
||||
# execution time. The execution time does not include the I/O operations
|
||||
# like talking with the client, sending the reply and so forth,
|
||||
# but just the time needed to actually execute the command (this is the only
|
||||
|
@ -269,6 +452,46 @@ slowlog-max-len 128
|
|||
# They do not enable continuous liveness pings back to your supervisor.
|
||||
supervised no
|
||||
|
||||
################################## PERF LOG ###################################
|
||||
|
||||
# The Kvrocks Perf Log is a mechanism to log queries' performance context that
|
||||
# exceeded a specified execution time. This mechanism uses rocksdb's
|
||||
# Perf Context and IO Stats Context, Please see:
|
||||
# https://github.com/facebook/rocksdb/wiki/Perf-Context-and-IO-Stats-Context
|
||||
#
|
||||
# This mechanism is enabled when profiling-sample-commands is not empty and
|
||||
# profiling-sample-ratio is greater than 0.
|
||||
# It is important to note that this mechanism affects performance, but it is
|
||||
# useful for troubleshooting performance bottlenecks, so it should only be
|
||||
# enabled when performance problems occur.
|
||||
|
||||
# The name of the commands you want to record. Must be original name of
|
||||
# commands supported by Kvrocks. Use ',' to separate multiple commands and
|
||||
# use '*' to record all commands supported by Kvrocks.
|
||||
# Example:
|
||||
# - Single command: profiling-sample-commands get
|
||||
# - Multiple commands: profiling-sample-commands get,mget,hget
|
||||
#
|
||||
# Default: empty
|
||||
# profiling-sample-commands ""
|
||||
|
||||
# The ratio of samples that will be recorded. It is a number between 0 and 100.
|
||||
# We simply use the rand to determine whether to record the sample or not.
|
||||
#
|
||||
# Default: 0
|
||||
profiling-sample-ratio 0
|
||||
|
||||
# There is no limit to this length. Just be aware that it will consume memory.
|
||||
# You can reclaim memory used by the perf log with PERFLOG RESET.
|
||||
#
|
||||
# Default: 256
|
||||
profiling-sample-record-max-len 256
|
||||
|
||||
# profiling-sample-record-threshold-ms is used to tell kvrocks when to record.
|
||||
#
|
||||
# Default: 100 millisecond
|
||||
profiling-sample-record-threshold-ms 100
|
||||
|
||||
################################## CRON ###################################
|
||||
|
||||
# Compact Scheduler, auto compact at schedule time
|
||||
|
@ -282,15 +505,27 @@ supervised no
|
|||
# 0-7am every day.
|
||||
compaction-checker-range 0-7
|
||||
|
||||
# Bgsave scheduler, auto bgsave at schedule time
|
||||
# When the compaction checker is triggered, the db will periodically pick the SST file
|
||||
# with the highest "deleted percentage" (i.e. the percentage of deleted keys in the SST
|
||||
# file) to compact, in order to free disk space.
|
||||
# However, if a specific SST file was created more than "force-compact-file-age" seconds
|
||||
# ago, and its percentage of deleted keys is higher than
|
||||
# "force-compact-file-min-deleted-percentage", it will be forcely compacted as well.
|
||||
|
||||
# Default: 172800 seconds; Range: [60, INT64_MAX];
|
||||
# force-compact-file-age 172800
|
||||
# Default: 10 %; Range: [1, 100];
|
||||
# force-compact-file-min-deleted-percentage 10
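# Worked example (illustrative): with the defaults above, an SST file created
# more than 172800 seconds (2 days) ago whose deleted-key percentage is at
# least 10% becomes a candidate for forced compaction.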
|
||||
|
||||
# Bgsave scheduler, auto bgsave at scheduled time
|
||||
# time expression format is the same as crontab(currently only support * and int)
|
||||
# e.g. bgsave-cron 0 3 * * * 0 4 * * *
|
||||
# would bgsave the db at 3am and 4am everyday
|
||||
# would bgsave the db at 3am and 4am every day
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the KEYS command may be renamed into something
|
||||
# environment. For instance, the KEYS command may be renamed into something
|
||||
# hard to guess so that it will still be available for internal-use tools
|
||||
# but not available for general clients.
|
||||
#
|
||||
|
@ -303,44 +538,86 @@ compaction-checker-range 0-7
|
|||
#
|
||||
# rename-command KEYS ""
|
||||
|
||||
# The key-value size may so be quite different in many scenes, and use 256MiB as SST file size
|
||||
# may cause data loading(large index/filter block) ineffective when the key-value was too small.
|
||||
# kvrocks supports user-defined SST file in config(rocksdb.target_file_size_base),
|
||||
# but it still too trivial and inconvenient to adjust the different sizes for different instances.
|
||||
# so we want to periodic auto-adjust the SST size in-flight with user avg key-value size.
|
||||
################################ MIGRATE #####################################
|
||||
# Slot migration supports two ways:
|
||||
# - redis-command: Migrate data by redis serialization protocol(RESP).
|
||||
# - raw-key-value: Migrate the raw key value data of the storage engine directly.
|
||||
# This way eliminates the overhead of converting to the redis
|
||||
# command, reduces resource consumption, improves migration
|
||||
# efficiency, and can implement a finer rate limit.
|
||||
#
|
||||
# If enabled, kvrocks will auto resize rocksdb.target_file_size_base
|
||||
# and rocksdb.write_buffer_size in-flight with user avg key-value size.
|
||||
# Please see #118.
|
||||
# Default: redis-command
|
||||
migrate-type redis-command
|
||||
|
||||
# If the network bandwidth is completely consumed by the migration task,
|
||||
# it will affect the availability of kvrocks. To avoid this situation,
|
||||
# migrate-speed is adopted to limit the migrating speed.
|
||||
# Migrating speed is limited by controlling the duration between sending data,
|
||||
# the duration is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us).
|
||||
# Value: [0,INT_MAX], 0 means no limit
|
||||
#
|
||||
# Default: yes
|
||||
auto-resize-block-and-sst yes
|
||||
# Default: 4096
|
||||
migrate-speed 4096
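# Worked example of the formula above (illustrative): with migrate-pipeline-size 16
# and migrate-speed 4096, the pause between pipeline sends is
# 1000000 * 16 / 4096 = about 3906 microseconds.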
|
||||
|
||||
# In order to reduce data transmission times and improve the efficiency of data migration,
|
||||
# pipeline is adopted to send multiple data at once. Pipeline size can be set by this option.
|
||||
# Value: [1, INT_MAX], it can't be 0
|
||||
#
|
||||
# Default: 16
|
||||
migrate-pipeline-size 16
|
||||
|
||||
# In order to reduce the write-forbidden time while migrating a slot, we migrate the incremental
# data several times to reduce the amount of incremental data. Writes to the slot are only
# forbidden once the quantity of incremental data has been reduced to a certain threshold, which is set by
# this option.
|
||||
# Value: [1, INT_MAX], it can't be 0
|
||||
#
|
||||
# Default: 10000
|
||||
migrate-sequence-gap 10000
|
||||
|
||||
# The raw-key-value migration way uses batch for migration. This option sets the batch size
|
||||
# for each migration.
|
||||
#
|
||||
# Default: 16kb
|
||||
migrate-batch-size-kb 16
|
||||
|
||||
# Rate limit for migration based on raw-key-value, representing the maximum amount of data
|
||||
# that can be migrated per second. 0 means no limit.
|
||||
#
|
||||
# Default: 16M
|
||||
migrate-batch-rate-limit-mb 16
|
||||
|
||||
################################ ROCKSDB #####################################
|
||||
|
||||
# Specify the capacity of metadata column family block cache. Larger block cache
|
||||
# may make request faster while more keys would be cached. Max Size is 200*1024.
|
||||
# Default: 2048MB
|
||||
rocksdb.metadata_block_cache_size 2048
|
||||
# Specify the capacity of column family block cache. A larger block cache
|
||||
# may make requests faster while more keys would be cached. Max Size is 400*1024.
|
||||
# Default: 4096MB
|
||||
rocksdb.block_cache_size 4096
|
||||
|
||||
# Specify the capacity of subkey column family block cache. Larger block cache
|
||||
# may make request faster while more keys would be cached. Max Size is 200*1024.
|
||||
# Default: 2048MB
|
||||
rocksdb.subkey_block_cache_size 2048
|
||||
|
||||
# Metadata column family and subkey column family will share a single block cache
|
||||
# if set 'yes'. The capacity of shared block cache is
|
||||
# metadata_block_cache_size + subkey_block_cache_size
|
||||
# Specify the type of cache used in the block cache.
|
||||
# Accept value: "lru", "hcc"
|
||||
# "lru" stands for the cache with the LRU(Least Recently Used) replacement policy.
|
||||
#
|
||||
# Default: yes
|
||||
rocksdb.share_metadata_and_subkey_block_cache yes
|
||||
# "hcc" stands for the Hyper Clock Cache, a lock-free cache alternative
|
||||
# that offers much improved CPU efficiency vs. LRU cache under high parallel
|
||||
# load or high contention.
|
||||
#
|
||||
# default lru
|
||||
rocksdb.block_cache_type lru
|
||||
|
||||
# A global cache for table-level rows in RocksDB. If the workload is almost always point
# lookups, enlarging the row cache may improve read performance. Otherwise,
|
||||
# if we enlarge this value, we can lessen metadata/subkey block cache size.
|
||||
#
|
||||
# Default: 0 (disabled)
|
||||
rocksdb.row_cache_size 0
|
||||
|
||||
# Number of open files that can be used by the DB. You may need to
|
||||
# increase this if your database has a large working set. Value -1 means
|
||||
# files opened are always kept open. You can estimate number of files based
|
||||
# on target_file_size_base and target_file_size_multiplier for level-based
|
||||
# compaction. For universal-style compaction, you can usually set it to -1.
|
||||
# Default: 4096
|
||||
# Default: 8096
|
||||
rocksdb.max_open_files 8096
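# Rough estimate (illustrative, assuming level-based compaction and the 128MB
# target file size configured below): a 1 TiB data set is on the order of
# 1048576 / 128 = 8192 SST files, comparable to the limit above.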
|
||||
|
||||
# Amount of data to build up in memory (backed by an unsorted log
|
||||
|
@ -357,13 +634,13 @@ rocksdb.max_open_files 8096
|
|||
# See db_write_buffer_size for sharing memory across column families.
|
||||
|
||||
# default is 64MB
|
||||
rocksdb.write_buffer_size 16
|
||||
rocksdb.write_buffer_size 64
|
||||
|
||||
# Target file size for compaction, target file size for Leve N can be caculated
|
||||
# Target file size for compaction, target file size for Level N can be calculated
|
||||
# by target_file_size_base * (target_file_size_multiplier ^ (L-1))
|
||||
#
|
||||
# Default: 128MB
|
||||
rocksdb.target_file_size_base 16
|
||||
rocksdb.target_file_size_base 128
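# Worked example of the formula above (illustrative, assuming
# target_file_size_multiplier is left at its RocksDB default of 1): the target
# file size is 128MB * 1^(L-1) = 128MB on every level.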
|
||||
|
||||
# The maximum number of write buffers that are built up in memory.
|
||||
# The default and the minimum number is 2, so that when 1 write buffer
|
||||
|
@ -374,20 +651,29 @@ rocksdb.target_file_size_base 16
|
|||
# allowed.
|
||||
rocksdb.max_write_buffer_number 4
|
||||
|
||||
# Maximum number of concurrent background jobs (compactions and flushes).
|
||||
# For backwards compatibility we will set `max_background_jobs =
|
||||
# max_background_compactions + max_background_flushes` in the case where user
|
||||
# sets at least one of `max_background_compactions` or `max_background_flushes`
|
||||
# (we replace -1 by 1 in case one option is unset).
|
||||
rocksdb.max_background_jobs 4
|
||||
|
||||
# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs
|
||||
# Maximum number of concurrent background compaction jobs, submitted to
|
||||
# the default LOW priority thread pool.
|
||||
rocksdb.max_background_compactions 4
|
||||
rocksdb.max_background_compactions -1
|
||||
|
||||
# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs
|
||||
# Maximum number of concurrent background memtable flush jobs, submitted by
|
||||
# default to the HIGH priority thread pool. If the HIGH priority thread pool
|
||||
# is configured to have zero threads, flush jobs will share the LOW priority
|
||||
# thread pool with compaction jobs.
|
||||
rocksdb.max_background_flushes 4
|
||||
rocksdb.max_background_flushes -1
|
||||
|
||||
# This value represents the maximum number of threads that will
|
||||
# concurrently perform a compaction job by breaking it into multiple,
|
||||
# smaller ones that are run simultaneously.
|
||||
# Default: 2 (i.e. no subcompactions)
|
||||
# Default: 2
|
||||
rocksdb.max_sub_compactions 2
|
||||
|
||||
# In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size
|
||||
|
@ -411,8 +697,8 @@ rocksdb.max_sub_compactions 2
|
|||
# default is 512MB
|
||||
rocksdb.max_total_wal_size 512
|
||||
|
||||
# We impl the repliction with rocksdb WAL, it would trigger full sync when the seq was out of range.
|
||||
# wal_ttl_seconds and wal_size_limit_mb would affect how archived logswill be deleted.
|
||||
# We implement the replication with rocksdb WAL, it would trigger full sync when the seq was out of range.
|
||||
# wal_ttl_seconds and wal_size_limit_mb would affect how archived logs will be deleted.
|
||||
# If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that
|
||||
# are older than WAL_ttl_seconds will be deleted.
|
||||
#
|
||||
|
@ -422,25 +708,25 @@ rocksdb.wal_ttl_seconds 10800
|
|||
# If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
|
||||
# WAL files will be checked every 10 min and if total size is greater
|
||||
# than WAL_size_limit_MB, they will be deleted starting with the
|
||||
# earliest until size_limit is met. All empty files will be deleted
|
||||
# earliest until size_limit is met. All empty files will be deleted
|
||||
# Default: 16GB
|
||||
rocksdb.wal_size_limit_mb 16384
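# Worked example (illustrative): with rocksdb.wal_ttl_seconds 10800, archived
# WAL files are checked roughly every 10800 / 2 = 5400 seconds, and the
# 16384 MB value above corresponds to the 16GB default.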
|
||||
|
||||
# Approximate size of user data packed per block. Note that the
|
||||
# block size specified here corresponds to uncompressed data. The
|
||||
# block size specified here corresponds to uncompressed data. The
|
||||
# actual size of the unit read from disk may be smaller if
|
||||
# compression is enabled.
|
||||
#
|
||||
# Default: 4KB
|
||||
rocksdb.block_size 2048
|
||||
# Default: 16KB
|
||||
rocksdb.block_size 16384
|
||||
|
||||
# Indicating if we'd put index/filter blocks to the block cache
|
||||
#
|
||||
# Default: no
|
||||
# Default: yes
|
||||
rocksdb.cache_index_and_filter_blocks yes
|
||||
|
||||
# Specify the compression to use.
|
||||
# Accept value: "no", "snappy"
|
||||
# Accept value: "no", "snappy", "lz4", "zstd", "zlib"
|
||||
# default snappy
|
||||
rocksdb.compression snappy
|
||||
|
||||
|
@ -480,6 +766,11 @@ rocksdb.level0_slowdown_writes_trigger 20
|
|||
# Default: 40
|
||||
rocksdb.level0_stop_writes_trigger 40
|
||||
|
||||
# Number of files to trigger level-0 compaction.
|
||||
#
|
||||
# Default: 4
|
||||
rocksdb.level0_file_num_compaction_trigger 4
|
||||
|
||||
# if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
|
||||
#
|
||||
# Default: 0
|
||||
|
@ -489,9 +780,138 @@ rocksdb.stats_dump_period_sec 0
|
|||
#
|
||||
# Default: no
|
||||
rocksdb.disable_auto_compactions no
|
||||
|
||||
# BlobDB(key-value separation) is essentially RocksDB for large-value use cases.
|
||||
# Since 6.18.0, The new implementation is integrated into the RocksDB core.
|
||||
# When set, large values (blobs) are written to separate blob files, and only
|
||||
# pointers to them are stored in SST files. This can reduce write amplification
|
||||
# for large-value use cases at the cost of introducing a level of indirection
|
||||
# for reads. Please see: https://github.com/facebook/rocksdb/wiki/BlobDB.
|
||||
#
|
||||
# Note that when enable_blob_files is set to yes, BlobDB-related configuration
|
||||
# items will take effect.
|
||||
#
|
||||
# Default: no
|
||||
rocksdb.enable_blob_files no
|
||||
|
||||
# The size of the smallest value to be stored separately in a blob file. Values
|
||||
# which have an uncompressed size smaller than this threshold are stored alongside
|
||||
# the keys in SST files in the usual fashion.
|
||||
#
|
||||
# Default: 4096 byte, 0 means that all values are stored in blob files
|
||||
rocksdb.min_blob_size 4096
|
||||
|
||||
# The size limit for blob files. When writing blob files, a new file is
|
||||
# opened once this limit is reached.
|
||||
#
|
||||
# Default: 268435456 bytes
|
||||
rocksdb.blob_file_size 268435456
|
||||
|
||||
# Enables garbage collection of blobs. Valid blobs residing in blob files
|
||||
# older than a cutoff get relocated to new files as they are encountered
|
||||
# during compaction, which makes it possible to clean up blob files once
|
||||
# they contain nothing but obsolete/garbage blobs.
|
||||
# See also rocksdb.blob_garbage_collection_age_cutoff below.
|
||||
#
|
||||
# Default: yes
|
||||
rocksdb.enable_blob_garbage_collection yes
|
||||
|
||||
# The percentage cutoff in terms of blob file age for garbage collection.
|
||||
# Blobs in the oldest N blob files will be relocated when encountered during
|
||||
# compaction, where N = (garbage_collection_cutoff/100) * number_of_blob_files.
|
||||
# Note that this value must belong to [0, 100].
|
||||
#
|
||||
# Default: 25
|
||||
rocksdb.blob_garbage_collection_age_cutoff 25
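# Worked example (illustrative): with a cutoff of 25 and, say, 100 blob files
# on disk, valid blobs found in the oldest (25 / 100) * 100 = 25 files are
# relocated during compaction so those files can eventually be deleted.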
|
||||
|
||||
|
||||
# The purpose of the following three options is to dynamically adjust the upper limit of
|
||||
# the data that each layer can store according to the size of the different
|
||||
# layers of the LSM. Enabling this option will bring some improvements in
|
||||
# deletion efficiency and space amplification, but it will lose a certain
|
||||
# amount of read performance.
|
||||
# If you want to know more details about Levels' Target Size, you can read RocksDB wiki:
|
||||
# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
|
||||
#
|
||||
# Default: yes
|
||||
rocksdb.level_compaction_dynamic_level_bytes yes
|
||||
|
||||
# The total file size of level-1 sst.
|
||||
#
|
||||
# Default: 268435456 bytes
|
||||
rocksdb.max_bytes_for_level_base 268435456
|
||||
|
||||
# Multiplication factor for the total file size of L(n+1) layers.
|
||||
# This option is a double-typed number in RocksDB, but kvrocks does
# not support the double data type yet, so we currently use an integer
# number instead of a double.
|
||||
#
|
||||
# Default: 10
|
||||
rocksdb.max_bytes_for_level_multiplier 10
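# Worked example (illustrative): with max_bytes_for_level_base 268435456
# (256MB) and a multiplier of 10, the target level sizes are roughly
# L1 = 256MB, L2 = 2.5GB, L3 = 25GB, and so on.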
|
||||
|
||||
# This feature only takes effect in Iterators and MultiGet.
|
||||
# If yes, RocksDB will try to read asynchronously and in parallel as much as possible to hide IO latency.
|
||||
# In iterators, it will prefetch data asynchronously in the background for each file being iterated on.
|
||||
# In MultiGet, it will read the necessary data blocks from those files in parallel as much as possible.
|
||||
|
||||
# Default no
|
||||
rocksdb.read_options.async_io no
|
||||
|
||||
# If yes, the write will be flushed from the operating system
|
||||
# buffer cache before the write is considered complete.
|
||||
# If this flag is enabled, writes will be slower.
|
||||
# If this flag is disabled, and the machine crashes, some recent
|
||||
# writes may be lost. Note that if it is just the process that
|
||||
# crashes (i.e., the machine does not reboot), no writes will be
|
||||
# lost even if sync==false.
|
||||
#
|
||||
# Default: no
|
||||
rocksdb.write_options.sync no
|
||||
|
||||
# If yes, writes will not first go to the write ahead log,
|
||||
# and the write may get lost after a crash.
|
||||
# You must keep wal enabled if you use replication.
|
||||
#
|
||||
# Default: no
|
||||
rocksdb.write_options.disable_wal no
|
||||
|
||||
# If enabled and we need to wait or sleep for the write request, fails
|
||||
# immediately.
|
||||
#
|
||||
# Default: no
|
||||
rocksdb.write_options.no_slowdown no
|
||||
|
||||
# If enabled, write requests are of lower priority if compaction is
|
||||
# behind. In this case, no_slowdown = true, the request will be canceled
|
||||
# immediately. Otherwise, it will be slowed down.
|
||||
# The slowdown value is determined by RocksDB to guarantee
|
||||
# it introduces minimum impacts to high priority writes.
|
||||
#
|
||||
# Default: no
|
||||
rocksdb.write_options.low_pri no
|
||||
|
||||
# If enabled, this writebatch will maintain the last insert positions of each
|
||||
# memtable as hints in concurrent write. It can improve write performance
|
||||
# in concurrent writes if keys in one writebatch are sequential.
|
||||
#
|
||||
# Default: no
|
||||
rocksdb.write_options.memtable_insert_hint_per_batch no
|
||||
|
||||
|
||||
# Support RocksDB auto-tune rate limiter for the background IO
|
||||
# if enabled, Rate limiter will limit the compaction write if flush write is high
|
||||
# Please see https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html
|
||||
#
|
||||
# Default: yes
|
||||
rocksdb.rate_limiter_auto_tuned yes
|
||||
|
||||
# Enabling this option will schedule the deletion of obsolete files in a background thread
|
||||
# on iterator destruction. It can reduce the latency if there are many files to be removed.
|
||||
# see https://github.com/facebook/rocksdb/wiki/IO#avoid-blocking-io
|
||||
#
|
||||
# Default: yes
|
||||
# rocksdb.avoid_unnecessary_blocking_io yes
|
||||
|
||||
################################ NAMESPACE #####################################
|
||||
# namespace.test change.me
|
||||
|
||||
|
||||
backup-dir .//backup
|
||||
log-dir ./
|
||||
|
|
|
@ -1,52 +1,79 @@
|
|||
################################ GENERAL #####################################
|
||||
|
||||
# By default kvrocks listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
# By default kvrocks listens for connections from localhost interface.
|
||||
# It is possible to listen to just one or multiple interfaces using
|
||||
# the "bind" configuration directive, followed by one or more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
# bind 127.0.0.1 ::1
|
||||
# bind 0.0.0.0
|
||||
# bind 127.0.0.1
|
||||
bind 0.0.0.0
|
||||
|
||||
# Unix socket.
|
||||
#
|
||||
# Specify the path for the unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so kvrocks will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
unixsocket storage.sock
|
||||
unixsocketperm 777
|
||||
|
||||
# Accept connections on the specified port, default is 6666.
|
||||
# port 6666
|
||||
port 5188
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# The number of worker's threads, increase or decrease it would effect the performance.
|
||||
# The number of worker's threads, increase or decrease would affect the performance.
|
||||
workers 8
|
||||
|
||||
# The number of replication worker's threads, increase or decrease it would affect the replication performance.
|
||||
# Default: 1
|
||||
repl-workers 1
|
||||
# By default, kvrocks does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that kvrocks will write a PID file in /var/run/kvrocks.pid when daemonized
|
||||
daemonize yes
|
||||
|
||||
# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized.
|
||||
daemonize no
|
||||
|
||||
# Kvrocks implements cluster solution that is similar with redis cluster sulution.
|
||||
# Kvrocks implements the cluster solution that is similar to the Redis cluster solution.
|
||||
# You can get cluster information by CLUSTER NODES|SLOTS|INFO command, it also is
|
||||
# adapted to redis-cli, redis-benchmark, redis cluster SDK and redis cluster proxy.
|
||||
# But kvrocks doesn't support to communicate with each others, so you must set
|
||||
# adapted to redis-cli, redis-benchmark, Redis cluster SDK, and Redis cluster proxy.
|
||||
# But kvrocks doesn't support communicating with each other, so you must set
|
||||
# cluster topology by CLUSTER SETNODES|SETNODEID commands, more details: #219.
|
||||
#
|
||||
# PLEASE NOTE:
|
||||
# If you enable cluster, kvrocks will encode key with its slot id calculated by
|
||||
# CRC16 and modulo 16384, endoding key with its slot id makes it efficient to
|
||||
# migrate keys based on slot. So if you enabled at first time, cluster mode must
|
||||
# CRC16 and modulo 16384, encoding key with its slot id makes it efficient to
|
||||
# migrate keys based on the slot. So if you enabled at first time, cluster mode must
|
||||
# not be disabled after restarting, and vice versa. That is to say, data is not
|
||||
# compatible between standalone mode and cluster mode; you must migrate data
# if you want to change the mode, otherwise kvrocks will corrupt the data.
|
||||
#
|
||||
# Default: no
|
||||
|
||||
cluster-enabled no
|
||||
|
||||
# By default, namespaces are stored in the configuration file and won't be replicated
|
||||
# to replicas. This option allows you to change this behavior, so that namespaces are also
|
||||
# propagated to slaves. Note that:
|
||||
# 1) it won't replicate the 'masterauth' to prevent breaking master/replica replication
|
||||
# 2) it will overwrite replica's namespace with master's namespace, so be careful of in-using namespaces
|
||||
# 3) cannot switch off the namespace replication once it's enabled
|
||||
#
|
||||
# Default: no
|
||||
repl-namespace-enabled no
|
||||
|
||||
# Persist the cluster nodes topology in local file($dir/nodes.conf). This configuration
|
||||
# takes effect only if the cluster mode was enabled.
|
||||
#
|
||||
# If yes, it will try to load the cluster topology from the local file when starting,
|
||||
# and dump the cluster nodes into the file if it was changed.
|
||||
#
|
||||
# Default: yes
|
||||
persist-cluster-nodes-enabled yes
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the server is not
|
||||
# this limit is set to 10000 clients. However, if the server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
#
|
||||
|
@ -70,13 +97,13 @@ maxclients 10000
|
|||
|
||||
# If the master is password protected (using the "masterauth" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# starting the replication synchronization process. Otherwise, the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth foobared
|
||||
|
||||
# Master-Slave replication would check whether the db name matches. If not, the slave should
|
||||
# refuse to sync the db from master. Don't use default value, set the db-name to identify
|
||||
# refuse to sync the db from master. Don't use the default value, set the db-name to identify
|
||||
# the cluster.
|
||||
db-name storage.db
|
||||
|
||||
|
@ -86,12 +113,28 @@ db-name storage.db
|
|||
# Note that you must specify a directory here, not a file name.
|
||||
dir ./
|
||||
|
||||
# The logs of server will be stored in this directory. If you don't specify
|
||||
# one directory, by default, we store logs in the working directory that set
|
||||
# by 'dir' above.
|
||||
# log-dir /tmp/kvrocks
|
||||
# You can configure where to store your server logs by the log-dir.
|
||||
# If you don't specify one, we will use the above `dir` as our default log directory.
|
||||
# Sending logs to stdout/stderr is as simple as:
|
||||
#
|
||||
log-dir stdout
|
||||
|
||||
# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
|
||||
# Log level
|
||||
# Possible values: info, warning, error, fatal
|
||||
# Default: info
|
||||
log-level info
|
||||
|
||||
# You can configure log-retention-days to control whether to enable the log cleaner
|
||||
# and the maximum retention days that the INFO level logs will be kept.
|
||||
#
|
||||
# if set to -1, that means to disable the log cleaner.
|
||||
# if set to 0, all previous INFO level logs will be immediately removed.
|
||||
# if set to a value between 0 and INT_MAX, it will retain the latest N (log-retention-days) days of logs.
|
||||
|
||||
# By default the log-retention-days is -1.
|
||||
log-retention-days -1
|
||||
|
||||
# When running in daemonize mode, kvrocks writes a PID file in ${CONFIG_DIR}/kvrocks.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
# pidfile /var/run/kvrocks.pid
|
||||
pidfile storage.pid
|
||||
|
@ -138,6 +181,20 @@ tcp-backlog 511
|
|||
# connect 'master's listening port' when synchronization.
|
||||
master-use-repl-port no
|
||||
|
||||
# Currently, the master only checks the sequence number when a replica asks for PSYNC.
# That is not enough, since they may have different replication histories even
# if the replica's requested sequence is in the range of the master's current WAL.
|
||||
#
|
||||
# We designed 'Replication Sequence ID' PSYNC: a unique replication id is added to
# every write batch (the operation of each command on the storage engine), so
# the combination of replication id and sequence is unique for a write batch.
# The master can identify whether the replica has the same replication history
# by checking the replication id and sequence.
|
||||
#
|
||||
# By default, it is not enabled since this stricter check may easily lead to
|
||||
# full synchronization.
|
||||
use-rsid-psync no
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
|
||||
# another kvrocks server. A few things to understand ASAP about kvrocks replication.
|
||||
#
|
||||
|
@ -159,11 +216,11 @@ master-use-repl-port no
|
|||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# still reply to client requests, possibly with out-of-date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# an error "SYNC with master in progress" to all kinds of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
@ -182,6 +239,35 @@ slave-serve-stale-data yes
|
|||
# Default: no
|
||||
slave-empty-db-before-fullsync no
|
||||
|
||||
# A Kvrocks master is able to list the address and port of the attached
|
||||
# replicas in different ways. For example the "INFO replication" section
|
||||
# offers this information, which is used, among other tools, by
|
||||
# Redis Sentinel in order to discover replica instances.
|
||||
# Another place where this info is available is in the output of the
|
||||
# "ROLE" command of a master.
|
||||
#
|
||||
# The listed IP address and port normally reported by a replica is
|
||||
# obtained in the following way:
|
||||
#
|
||||
# IP: The address is auto detected by checking the peer address
|
||||
# of the socket used by the replica to connect with the master.
|
||||
#
|
||||
# Port: The port is communicated by the replica during the replication
|
||||
# handshake, and is normally the port that the replica is using to
|
||||
# listen for connections.
|
||||
#
|
||||
# However when port forwarding or Network Address Translation (NAT) is
|
||||
# used, the replica may actually be reachable via different IP and port
|
||||
# pairs. The following two options can be used by a replica in order to
|
||||
# report to its master a specific set of IP and port, so that both INFO
|
||||
# and ROLE will report those values.
|
||||
#
|
||||
# There is no need to use both the options if you need to override just
|
||||
# the port or the IP address.
|
||||
#
|
||||
# replica-announce-ip 5.5.5.5
|
||||
# replica-announce-port 1234
|
||||
|
||||
# If replicas need full synchronization with the master, the master needs to create
# a checkpoint for feeding the replicas, and the replicas also stage a checkpoint of
# the master. If we also keep the backup, it may occupy extra disk space.
|
||||
|
@ -191,7 +277,7 @@ slave-empty-db-before-fullsync no
|
|||
# Default: no
|
||||
purge-backup-on-fullsync no
|
||||
|
||||
# The maximum allowed rate (in MB/s) that should be used by Replication.
|
||||
# The maximum allowed rate (in MB/s) that should be used by replication.
|
||||
# If the rate exceeds max-replication-mb, replication will slow down.
|
||||
# Default: 0 (i.e. no limit)
|
||||
max-replication-mb 0
|
||||
|
@ -199,8 +285,8 @@ max-replication-mb 0
|
|||
# The maximum allowed aggregated write rate of flush and compaction (in MB/s).
|
||||
# If the rate exceeds max-io-mb, io will slow down.
|
||||
# 0 is no limit
|
||||
# Default: 500
|
||||
max-io-mb 500
|
||||
# Default: 0
|
||||
max-io-mb 0
|
||||
|
||||
# The maximum allowed space (in GB) that should be used by RocksDB.
|
||||
# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail.
|
||||
|
@ -210,33 +296,130 @@ max-db-size 0
|
|||
|
||||
# The maximum number of backups to keep. The server cron runs every minute to check the number of
# current backups, and purges old backups if they exceed the max backup number to keep. If max-backup-to-keep
|
||||
# is 0, no backup would be keep. But now, we only support 0 or 1.
|
||||
# is 0, no backup would be kept. But now, we only support 0 or 1.
|
||||
max-backup-to-keep 1
|
||||
|
||||
# The maximum hours to keep the backup. If max-backup-keep-hours is 0, no backup will be purged.
|
||||
# default: 1 day
|
||||
max-backup-keep-hours 24
|
||||
|
||||
# Ratio of the samples would be recorded when the profiling was enabled.
|
||||
# we simply use the rand to determine whether to record the sample or not.
|
||||
# max-bitmap-to-string-mb is used to limit the max size of the bitmap-to-string transformation (MB).
|
||||
#
|
||||
# Default: 0
|
||||
profiling-sample-ratio 0
|
||||
# Default: 16
|
||||
max-bitmap-to-string-mb 16
|
||||
|
||||
# There is no limit to this length. Just be aware that it will consume memory.
|
||||
# You can reclaim memory used by the perf log with PERFLOG RESET.
|
||||
#
|
||||
# Default: 256
|
||||
profiling-sample-record-max-len 256
|
||||
# Whether to enable SCAN-like cursor compatible with Redis.
|
||||
# If enabled, the cursor will be unsigned 64-bit integers.
|
||||
# If disabled, the cursor will be a string.
|
||||
# Default: no
|
||||
redis-cursor-compatible yes
|
||||
|
||||
# profiling-sample-record-threshold-ms use to tell the kvrocks when to record.
|
||||
# Whether to enable the RESP3 protocol.
|
||||
# NOTICE: RESP3 is still under development, don't enable it in a production environment.
|
||||
#
|
||||
# Default: 100 millisecond
|
||||
profiling-sample-record-threshold-ms 100
|
||||
# Default: no
|
||||
# resp3-enabled no
|
||||
|
||||
# Maximum nesting depth allowed when parsing and serializing
|
||||
# JSON documents while using JSON commands like JSON.SET.
|
||||
# Default: 1024
|
||||
json-max-nesting-depth 1024
|
||||
|
||||
# The underlying storage format of JSON data type
|
||||
# NOTE: This option only affects newly written/updated key-values
|
||||
# The CBOR format may reduce the storage size and speed up JSON commands
|
||||
# Available values: json, cbor
|
||||
# Default: json
|
||||
json-storage-format json
|
||||
|
||||
################################## TLS ###################################
|
||||
|
||||
# By default, TLS/SSL is disabled, i.e. `tls-port` is set to 0.
|
||||
# To enable it, `tls-port` can be used to define TLS-listening ports.
|
||||
# tls-port 0
|
||||
|
||||
# Configure an X.509 certificate and private key to use for authenticating the
|
||||
# server to connected clients, masters or cluster peers.
|
||||
# These files should be PEM formatted.
|
||||
#
|
||||
# tls-cert-file kvrocks.crt
|
||||
# tls-key-file kvrocks.key
|
||||
|
||||
# If the key file is encrypted using a passphrase, it can be included here
|
||||
# as well.
|
||||
#
|
||||
# tls-key-file-pass secret
|
||||
|
||||
# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
|
||||
# clients and peers. Kvrocks requires an explicit configuration of at least one
|
||||
# of these, and will not implicitly use the system wide configuration.
|
||||
#
|
||||
# tls-ca-cert-file ca.crt
|
||||
# tls-ca-cert-dir /etc/ssl/certs
|
||||
|
||||
# By default, clients on a TLS port are required
|
||||
# to authenticate using valid client side certificates.
|
||||
#
|
||||
# If "no" is specified, client certificates are not required and not accepted.
|
||||
# If "optional" is specified, client certificates are accepted and must be
|
||||
# valid if provided, but are not required.
|
||||
#
|
||||
# tls-auth-clients no
|
||||
# tls-auth-clients optional

# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
# that older formally deprecated versions are kept disabled to reduce the attack surface.
# You can explicitly specify TLS versions to support.
# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
# To enable only TLSv1.2 and TLSv1.3, use:
#
# tls-protocols "TLSv1.2 TLSv1.3"

# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
# about the syntax of this string.
#
# Note: this configuration applies only to <= TLSv1.2.
#
# tls-ciphers DEFAULT:!MEDIUM

# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
# information about the syntax of this string, and specifically for TLSv1.3
# ciphersuites.
#
# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256

# When choosing a cipher, use the server's preference instead of the client
# preference. By default, the server follows the client's preference.
#
# tls-prefer-server-ciphers yes

# By default, TLS session caching is enabled to allow faster and less expensive
# reconnections by clients that support it. Use the following directive to disable
# caching.
#
# tls-session-caching no

# Change the default number of TLS sessions cached. A zero value sets the cache
# to unlimited size. The default size is 20480.
#
# tls-session-cache-size 5000

# Change the default timeout of cached TLS sessions. The default timeout is 300
# seconds.
#
# tls-session-cache-timeout 60

# By default, a replica does not attempt to establish a TLS connection
# with its master.
#
# Use the following directive to enable TLS on replication links.
#
# tls-replication yes

################################## SLOW LOG ###################################

# The Kvrocks Slow Log is a mechanism to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only

@@ -269,6 +452,46 @@ slowlog-max-len 128

# They do not enable continuous liveness pings back to your supervisor.
supervised no

################################## PERF LOG ###################################

# The Kvrocks Perf Log is a mechanism to log queries' performance context that
# exceeded a specified execution time. This mechanism uses rocksdb's
# Perf Context and IO Stats Context. Please see:
# https://github.com/facebook/rocksdb/wiki/Perf-Context-and-IO-Stats-Context
#
# This mechanism is enabled when profiling-sample-commands is not empty and
# profiling-sample-ratio is greater than 0.
# It is important to note that this mechanism affects performance, but it is
# useful for troubleshooting performance bottlenecks, so it should only be
# enabled when performance problems occur.

# The name of the commands you want to record. Must be the original names of
# commands supported by Kvrocks. Use ',' to separate multiple commands and
# use '*' to record all commands supported by Kvrocks.
# Example:
# - Single command: profiling-sample-commands get
# - Multiple commands: profiling-sample-commands get,mget,hget
#
# Default: empty
# profiling-sample-commands ""

# Ratio of the samples that would be recorded. It is a number between 0 and 100.
# We simply use rand to determine whether to record the sample or not.
#
# Default: 0
profiling-sample-ratio 0

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the perf log with PERFLOG RESET.
#
# Default: 256
profiling-sample-record-max-len 256

# profiling-sample-record-threshold-ms is used to tell kvrocks when to record.
#
# Default: 100 milliseconds
profiling-sample-record-threshold-ms 100
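
Illustrative sketch (not part of this config file): the sampling knobs above can also be adjusted at runtime and the collected entries read back. Whether CONFIG SET accepts these keys at runtime and the exact PERFLOG GET subcommand are assumptions here; PERFLOG RESET is the reclaim command mentioned above.

    from redis import Redis

    client = Redis(host='127.0.0.1', port=6666)  # placeholder host/port
    client.config_set('profiling-sample-commands', 'get,hget')
    client.config_set('profiling-sample-ratio', '100')             # sample every matching command
    client.config_set('profiling-sample-record-threshold-ms', '0')
    entries = client.execute_command('PERFLOG', 'GET')
    print(entries)
    client.execute_command('PERFLOG', 'RESET')                     # free the memory held by the perf log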

################################## CRON ###################################

# Compact Scheduler, auto compact at scheduled time

@@ -282,15 +505,27 @@ supervised no

# 0-7am every day.
compaction-checker-range 0-7

# When the compaction checker is triggered, the db will periodically pick the SST file
# with the highest "deleted percentage" (i.e. the percentage of deleted keys in the SST
# file) to compact, in order to free disk space.
# However, if a specific SST file was created more than "force-compact-file-age" seconds
# ago, and its percentage of deleted keys is higher than
# "force-compact-file-min-deleted-percentage", it will be forcibly compacted as well.

# Default: 172800 seconds; Range: [60, INT64_MAX];
# force-compact-file-age 172800
# Default: 10 %; Range: [1, 100];
# force-compact-file-min-deleted-percentage 10

# Bgsave scheduler, auto bgsave at scheduled time
# time expression format is the same as crontab (currently only supports * and int)
# e.g. bgsave-cron 0 3 * * * 0 4 * * *
# would bgsave the db at 3am and 4am every day

# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance, the KEYS command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#

@@ -303,44 +538,86 @@ compaction-checker-range 0-7

#
# rename-command KEYS ""

# The key-value size may be quite different in many scenes, and using 256MiB as the SST file size
# may make data loading (large index/filter block) ineffective when the key-values are too small.
# kvrocks supports a user-defined SST file size in config (rocksdb.target_file_size_base),
# but it is still too trivial and inconvenient to adjust different sizes for different instances,
# so we want to periodically auto-adjust the SST size in-flight with the user's avg key-value size.
# If enabled, kvrocks will auto resize rocksdb.target_file_size_base
# and rocksdb.write_buffer_size in-flight with user avg key-value size.
# Please see #118.
# Default: yes
auto-resize-block-and-sst yes

################################ MIGRATE #####################################
# Slot migration supports two ways:
# - redis-command: Migrate data by redis serialization protocol (RESP).
# - raw-key-value: Migrate the raw key value data of the storage engine directly.
#                  This way eliminates the overhead of converting to the redis
#                  command, reduces resource consumption, improves migration
#                  efficiency, and can implement a finer rate limit.
#
# Default: redis-command
migrate-type redis-command

# If the network bandwidth is completely consumed by the migration task,
# it will affect the availability of kvrocks. To avoid this situation,
# migrate-speed is adopted to limit the migrating speed.
# Migrating speed is limited by controlling the duration between sending data,
# the duration is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us).
# Value: [0,INT_MAX], 0 means no limit
#
# Default: 4096
migrate-speed 4096
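
A quick back-of-the-envelope check of the pacing formula quoted above, using the defaults from this file (illustrative only):

    # pause between pipeline sends, in microseconds
    migrate_pipeline_size = 16
    migrate_speed = 4096
    pause_us = 1_000_000 * migrate_pipeline_size / migrate_speed
    print(pause_us)  # 3906.25 us, i.e. roughly 3.9 ms between sends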

# In order to reduce data transmission times and improve the efficiency of data migration,
# pipelining is adopted to send multiple data items at once. The pipeline size can be set by this option.
# Value: [1, INT_MAX], it can't be 0
#
# Default: 16
migrate-pipeline-size 16

# In order to reduce the write-forbidden time during slot migration, we will migrate the incremental
# data several times to reduce the amount of incremental data. Once the quantity of incremental
# data drops below a certain threshold, writes to the slot will be forbidden. The threshold is set by
# this option.
# Value: [1, INT_MAX], it can't be 0
#
# Default: 10000
migrate-sequence-gap 10000

# The raw-key-value migration way uses batches for migration. This option sets the batch size
# for each migration.
#
# Default: 16kb
migrate-batch-size-kb 16

# Rate limit for migration based on raw-key-value, representing the maximum amount of data
# that can be migrated per second. 0 means no limit.
#
# Default: 16M
migrate-batch-rate-limit-mb 16

################################ ROCKSDB #####################################

# Specify the capacity of metadata column family block cache. Larger block cache
# may make request faster while more keys would be cached. Max Size is 200*1024.
# Default: 2048MB
rocksdb.metadata_block_cache_size 2048

# Specify the capacity of column family block cache. A larger block cache
# may make requests faster while more keys would be cached. Max Size is 400*1024.
# Default: 4096MB
rocksdb.block_cache_size 4096

# Specify the capacity of subkey column family block cache. Larger block cache
# may make request faster while more keys would be cached. Max Size is 200*1024.
# Default: 2048MB
rocksdb.subkey_block_cache_size 2048

# Metadata column family and subkey column family will share a single block cache
# if set 'yes'. The capacity of the shared block cache is
# metadata_block_cache_size + subkey_block_cache_size
#
# Default: yes
rocksdb.share_metadata_and_subkey_block_cache yes

# Specify the type of cache used in the block cache.
# Accept value: "lru", "hcc"
# "lru" stands for the cache with the LRU (Least Recently Used) replacement policy.
# "hcc" stands for the Hyper Clock Cache, a lock-free cache alternative
# that offers much improved CPU efficiency vs. LRU cache under high parallel
# load or high contention.
#
# default lru
rocksdb.block_cache_type lru

# A global cache for table-level rows in RocksDB. If almost always point
# lookups, enlarging the row cache may improve read performance. Otherwise,
# if we enlarge this value, we can lessen metadata/subkey block cache size.
#
# Default: 0 (disabled)
rocksdb.row_cache_size 0

# Number of open files that can be used by the DB. You may need to
# increase this if your database has a large working set. Value -1 means
# files opened are always kept open. You can estimate the number of files based
# on target_file_size_base and target_file_size_multiplier for level-based
# compaction. For universal-style compaction, you can usually set it to -1.
# Default: 4096
# Default: 8096
rocksdb.max_open_files 8096

# Amount of data to build up in memory (backed by an unsorted log

@@ -357,13 +634,13 @@ rocksdb.max_open_files 8096

# See db_write_buffer_size for sharing memory across column families.

# default is 64MB
rocksdb.write_buffer_size 16
rocksdb.write_buffer_size 64

# Target file size for compaction, target file size for Level N can be calculated
# by target_file_size_base * (target_file_size_multiplier ^ (L-1))
#
# Default: 128MB
rocksdb.target_file_size_base 16
rocksdb.target_file_size_base 128
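
A worked example of the formula above (illustrative only): with the 128 MiB base from this file and a hypothetical target_file_size_multiplier of 2 (the multiplier is not set in this file; RocksDB's own default is 1, which keeps every level at the base size):

    base_mib = 128
    multiplier = 2
    for level in range(1, 5):
        print(f"L{level}: {base_mib * multiplier ** (level - 1)} MiB")
    # L1: 128 MiB, L2: 256 MiB, L3: 512 MiB, L4: 1024 MiB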

# The maximum number of write buffers that are built up in memory.
# The default and the minimum number is 2, so that when 1 write buffer

@@ -374,20 +651,29 @@ rocksdb.target_file_size_base 16

# allowed.
rocksdb.max_write_buffer_number 4

# Maximum number of concurrent background jobs (compactions and flushes).
# For backwards compatibility we will set `max_background_jobs =
# max_background_compactions + max_background_flushes` in the case where user
# sets at least one of `max_background_compactions` or `max_background_flushes`
# (we replace -1 by 1 in case one option is unset).
rocksdb.max_background_jobs 4

# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs
# Maximum number of concurrent background compaction jobs, submitted to
# the default LOW priority thread pool.
rocksdb.max_background_compactions 4
rocksdb.max_background_compactions -1

# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs
# Maximum number of concurrent background memtable flush jobs, submitted by
# default to the HIGH priority thread pool. If the HIGH priority thread pool
# is configured to have zero threads, flush jobs will share the LOW priority
# thread pool with compaction jobs.
rocksdb.max_background_flushes 4
rocksdb.max_background_flushes -1

# This value represents the maximum number of threads that will
# concurrently perform a compaction job by breaking it into multiple,
# smaller ones that are run simultaneously.
# Default: 2
rocksdb.max_sub_compactions 2

# In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size

@@ -411,8 +697,8 @@ rocksdb.max_sub_compactions 2

# default is 512MB
rocksdb.max_total_wal_size 512

# We implement the replication with rocksdb WAL, it would trigger full sync when the seq was out of range.
# wal_ttl_seconds and wal_size_limit_mb would affect how archived logs will be deleted.
# If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that
# are older than WAL_ttl_seconds will be deleted.
#

@@ -422,25 +708,25 @@ rocksdb.wal_ttl_seconds 10800

# If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
# WAL files will be checked every 10 min and if total size is greater
# than WAL_size_limit_MB, they will be deleted starting with the
# earliest until size_limit is met. All empty files will be deleted
# Default: 16GB
rocksdb.wal_size_limit_mb 16384

# Approximate size of user data packed per block. Note that the
# block size specified here corresponds to uncompressed data. The
# actual size of the unit read from disk may be smaller if
# compression is enabled.
#
# Default: 4KB
rocksdb.block_size 2048
# Default: 16KB
rocksdb.block_size 16384

# Indicating if we'd put index/filter blocks to the block cache
#
# Default: no
# Default: yes
rocksdb.cache_index_and_filter_blocks yes

# Specify the compression to use.
# Accept value: "no", "snappy"
# Accept value: "no", "snappy", "lz4", "zstd", "zlib"
# default snappy
rocksdb.compression snappy

@@ -480,6 +766,11 @@ rocksdb.level0_slowdown_writes_trigger 20

# Default: 40
rocksdb.level0_stop_writes_trigger 40

# Number of files to trigger level-0 compaction.
#
# Default: 4
rocksdb.level0_file_num_compaction_trigger 4

# if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec
#
# Default: 0

@@ -489,9 +780,138 @@ rocksdb.stats_dump_period_sec 0

#
# Default: no
rocksdb.disable_auto_compactions no

# BlobDB (key-value separation) is essentially RocksDB for large-value use cases.
# Since 6.18.0, the new implementation is integrated into the RocksDB core.
# When set, large values (blobs) are written to separate blob files, and only
# pointers to them are stored in SST files. This can reduce write amplification
# for large-value use cases at the cost of introducing a level of indirection
# for reads. Please see: https://github.com/facebook/rocksdb/wiki/BlobDB.
#
# Note that when enable_blob_files is set to yes, BlobDB-related configuration
# items will take effect.
#
# Default: no
rocksdb.enable_blob_files no

# The size of the smallest value to be stored separately in a blob file. Values
# which have an uncompressed size smaller than this threshold are stored alongside
# the keys in SST files in the usual fashion.
#
# Default: 4096 bytes, 0 means that all values are stored in blob files
rocksdb.min_blob_size 4096

# The size limit for blob files. When writing blob files, a new file is
# opened once this limit is reached.
#
# Default: 268435456 bytes
rocksdb.blob_file_size 268435456

# Enables garbage collection of blobs. Valid blobs residing in blob files
# older than a cutoff get relocated to new files as they are encountered
# during compaction, which makes it possible to clean up blob files once
# they contain nothing but obsolete/garbage blobs.
# See also rocksdb.blob_garbage_collection_age_cutoff below.
#
# Default: yes
rocksdb.enable_blob_garbage_collection yes

# The percentage cutoff in terms of blob file age for garbage collection.
# Blobs in the oldest N blob files will be relocated when encountered during
# compaction, where N = (garbage_collection_cutoff/100) * number_of_blob_files.
# Note that this value must belong to [0, 100].
#
# Default: 25
rocksdb.blob_garbage_collection_age_cutoff 25
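
A worked example of the cutoff rule above (illustrative only): with the default cutoff of 25 and, hypothetically, 40 blob files on disk, the 10 oldest files become garbage-collection candidates.

    cutoff = 25
    number_of_blob_files = 40           # hypothetical figure
    n = (cutoff / 100) * number_of_blob_files
    print(int(n))  # the 10 oldest blob files are eligible for relocation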

# The purpose of the following three options is to dynamically adjust the upper limit of
# the data that each layer can store according to the size of the different
# layers of the LSM. Enabling this option will bring some improvements in
# deletion efficiency and space amplification, but it will cost a certain
# amount of read performance.
# If you want to know more details about Levels' Target Size, you can read the RocksDB wiki:
# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
#
# Default: yes
rocksdb.level_compaction_dynamic_level_bytes yes

# The total file size of level-1 SSTs.
#
# Default: 268435456 bytes
rocksdb.max_bytes_for_level_base 268435456

# Multiplication factor for the total file size of L(n+1) layers.
# This option is a double type number in RocksDB, but kvrocks does
# not support double config values yet, so we use an integer
# number instead of a double currently.
#
# Default: 10
rocksdb.max_bytes_for_level_multiplier 10
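
A worked example of the level-sizing options above (illustrative only, ignoring the dynamic level-bytes adjustment): with a base of 268435456 bytes and a multiplier of 10, the target total size per level grows tenfold at each deeper level.

    base_bytes = 268_435_456
    multiplier = 10
    for level in range(1, 4):
        size_mib = base_bytes * multiplier ** (level - 1) / 2**20
        print(f"L{level}: {size_mib:.0f} MiB")
    # L1: 256 MiB, L2: 2560 MiB, L3: 25600 MiB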

# This feature only takes effect in Iterators and MultiGet.
# If yes, RocksDB will try to read asynchronously and in parallel as much as possible to hide IO latency.
# In iterators, it will prefetch data asynchronously in the background for each file being iterated on.
# In MultiGet, it will read the necessary data blocks from those files in parallel as much as possible.
#
# Default: no
rocksdb.read_options.async_io no

# If yes, the write will be flushed from the operating system
# buffer cache before the write is considered complete.
# If this flag is enabled, writes will be slower.
# If this flag is disabled, and the machine crashes, some recent
# writes may be lost. Note that if it is just the process that
# crashes (i.e., the machine does not reboot), no writes will be
# lost even if sync==false.
#
# Default: no
rocksdb.write_options.sync no

# If yes, writes will not first go to the write ahead log,
# and the write may get lost after a crash.
# You must keep the WAL enabled if you use replication.
#
# Default: no
rocksdb.write_options.disable_wal no

# If enabled and the write request would need to wait or sleep, it fails
# immediately.
#
# Default: no
rocksdb.write_options.no_slowdown no

# If enabled, write requests are of lower priority if compaction is
# behind. In this case, if no_slowdown = true, the request will be canceled
# immediately. Otherwise, it will be slowed down.
# The slowdown value is determined by RocksDB to guarantee
# it introduces minimal impact to high-priority writes.
#
# Default: no
rocksdb.write_options.low_pri no

# If enabled, this writebatch will maintain the last insert positions of each
# memtable as hints in concurrent write. It can improve write performance
# in concurrent writes if keys in one writebatch are sequential.
#
# Default: no
rocksdb.write_options.memtable_insert_hint_per_batch no

# Support RocksDB auto-tune rate limiter for the background IO.
# If enabled, the rate limiter will limit the compaction write rate if the flush write rate is high.
# Please see https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html
#
# Default: yes
rocksdb.rate_limiter_auto_tuned yes

# Enabling this option will schedule the deletion of obsolete files in a background thread
# on iterator destruction. It can reduce the latency if there are many files to be removed.
# See https://github.com/facebook/rocksdb/wiki/IO#avoid-blocking-io
#
# Default: yes
# rocksdb.avoid_unnecessary_blocking_io yes

################################ NAMESPACE #####################################
# namespace.test change.me

backup-dir .//backup
log-dir ./

temp/intake.conf (1751): file diff suppressed because it is too large
temp/prepare.conf (1751): file diff suppressed because it is too large

@@ -3,5 +3,14 @@

set -e
set -x

if [ -f ../../valkey/src/valkey-server ]; then
    ../../valkey/src/valkey-server ./intake.conf
    ../../valkey/src/valkey-server ./prepare.conf
elif [ -f ../../redis/src/redis-server ]; then
    ../../redis/src/redis-server ./intake.conf
    ../../redis/src/redis-server ./prepare.conf
else
    echo "Warning: using system redis-server. Valkey-server or redis-server from source is recommended." >&2
    /usr/bin/redis-server ./intake.conf
    /usr/bin/redis-server ./prepare.conf
fi

@@ -0,0 +1,21 @@

#!/usr/bin/env python3

import uuid

from redis import Redis
from bgpranking.default import get_socket_path

# Remove UUID-named hashes from the 'prepare' database that are not referenced
# in the 'to_insert' set; everything else is left untouched.
redis_sanitized = Redis(unix_socket_path=get_socket_path('prepare'), db=0, decode_responses=True)
to_delete = []
for name in redis_sanitized.scan_iter(_type='HASH', count=100):
    try:
        uuid.UUID(name)
    except ValueError:
        # Not a UUID-named hash, skip it.
        continue
    if not redis_sanitized.sismember('to_insert', name):
        to_delete.append(name)
    if len(to_delete) >= 100000:
        # Delete in large batches to keep the number of round trips low.
        redis_sanitized.delete(*to_delete)
        to_delete = []
if to_delete:
    redis_sanitized.delete(*to_delete)