diff --git a/bin/LAUNCH.sh b/bin/LAUNCH.sh
index 68c20ac7..862a49a0 100755
--- a/bin/LAUNCH.sh
+++ b/bin/LAUNCH.sh
@@ -602,7 +602,7 @@ function launch_all {
 function menu_display {
-  options=("Redis" "Ardb" "Kvrocks" "Logs" "Scripts" "Flask" "Killall" "Update" "Update-config" "Update-thirdparty")
+  options=("Redis" "Kvrocks" "Logs" "Scripts" "Flask" "Killall" "Update" "Update-config" "Update-thirdparty")
 
   menu() {
     echo "What do you want to Launch?:"
@@ -630,9 +630,6 @@ function menu_display {
       Redis)
         launch_redis;
         ;;
-      Ardb)
-        launch_ardb;
-        ;;
       Kvrocks)
         launch_kvrocks;
         ;;
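With Ardb dropped from the launch menu, Kvrocks (configured in configs/6383.conf below) is the only key-value back-end left to start. A quick way to confirm the instance answers after picking "Kvrocks" — a minimal sketch, assuming redis-cli is installed, using the port and requirepass values from the config below:

    redis-cli -p 6383 -a ail PING    # kvrocks is redis-cli compatible; expect PONG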
diff --git a/configs/6383.conf b/configs/6383.conf
index dfc1f205..0b889dbe 100644
--- a/configs/6383.conf
+++ b/configs/6383.conf
@@ -1,14 +1,14 @@
 ################################ GENERAL #####################################
 
-# By default kvrocks listens for connections from all the network interfaces
-# available on the server. It is possible to listen to just one or multiple
-# interfaces using the "bind" configuration directive, followed by one or
-# more IP addresses.
+# By default kvrocks listens for connections from localhost interface.
+# It is possible to listen to just one or multiple interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
 #
 # Examples:
 #
 # bind 192.168.1.100 10.0.0.1
 # bind 127.0.0.1 ::1
+# bind 0.0.0.0
 bind 127.0.0.1
 
 # Unix socket.
@@ -26,32 +26,52 @@ port 6383
 # Close the connection after a client is idle for N seconds (0 to disable)
 timeout 0
 
-# The number of worker's threads, increase or decrease it would effect the performance.
+# The number of worker's threads, increase or decrease would affect the performance.
 workers 8
 
-# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
-# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized.
+# By default, kvrocks does not run as a daemon. Use 'yes' if you need it.
+# Note that kvrocks will write a PID file in /var/run/kvrocks.pid when daemonized
 daemonize no
 
-# Kvrocks implements cluster solution that is similar with redis cluster solution.
+# Kvrocks implements the cluster solution that is similar to the Redis cluster solution.
 # You can get cluster information by CLUSTER NODES|SLOTS|INFO command, it also is
-# adapted to redis-cli, redis-benchmark, redis cluster SDK and redis cluster proxy.
-# But kvrocks doesn't support to communicate with each others, so you must set
+# adapted to redis-cli, redis-benchmark, Redis cluster SDK, and Redis cluster proxy.
+# But kvrocks doesn't support communicating with each other, so you must set
 # cluster topology by CLUSTER SETNODES|SETNODEID commands, more details: #219.
 #
 # PLEASE NOTE:
 # If you enable cluster, kvrocks will encode key with its slot id calculated by
-# CRC16 and modulo 16384, endoding key with its slot id makes it efficient to
-# migrate keys based on slot. So if you enabled at first time, cluster mode must
+# CRC16 and modulo 16384, encoding key with its slot id makes it efficient to
+# migrate keys based on the slot. So if you enabled at first time, cluster mode must
 # not be disabled after restarting, and vice versa. That is to say, data is not
 # compatible between standalone mode with cluster mode, you must migrate data
 # if you want to change mode, otherwise, kvrocks will make data corrupt.
 #
 # Default: no
+
 cluster-enabled no
 
+# By default, namespaces are stored in the configuration file and won't be replicated
+# to replicas. This option allows to change this behavior, so that namespaces are also
+# propagated to slaves. Note that:
+# 1) it won't replicate the 'masterauth' to prevent breaking master/replica replication
+# 2) it will overwrite replica's namespace with master's namespace, so be careful of in-using namespaces
+# 3) cannot switch off the namespace replication once it's enabled
+#
+# Default: no
+repl-namespace-enabled no
+
+# Persist the cluster nodes topology in local file($dir/nodes.conf). This configuration
+# takes effect only if the cluster mode was enabled.
+#
+# If yes, it will try to load the cluster topology from the local file when starting,
+# and dump the cluster nodes into the file if it was changed.
+#
+# Default: yes
+persist-cluster-nodes-enabled yes
+
 # Set the max number of connected clients at the same time. By default
-# this limit is set to 10000 clients, however if the server is not
+# this limit is set to 10000 clients. However, if the server is not
 # able to configure the process file limit to allow for the specified limit
 # the max number of allowed clients is set to the current file limit
 #
@@ -71,18 +91,17 @@ maxclients 10000
 # 150k passwords per second against a good box. This means that you should
 # use a very strong password otherwise it will be very easy to break.
 #
-# requirepass foobared
 requirepass ail
 
 # If the master is password protected (using the "masterauth" configuration
 # directive below) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
+# starting the replication synchronization process. Otherwise, the master will
 # refuse the slave request.
 #
 # masterauth foobared
 
 # Master-Salve replication would check db name is matched. if not, the slave should
-# refuse to sync the db from master. Don't use default value, set the db-name to identify
+# refuse to sync the db from master. Don't use the default value, set the db-name to identify
 # the cluster.
 db-name change.me.db
 
@@ -98,7 +117,22 @@ dir DATA_KVROCKS
 #
 # log-dir stdout
 
-# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
+# Log level
+# Possible values: info, warning, error, fatal
+# Default: info
+log-level info
+
+# You can configure log-retention-days to control whether to enable the log cleaner
+# and the maximum retention days that the INFO level logs will be kept.
+#
+# if set to -1, that means to disable the log cleaner.
+# if set to 0, all previous INFO level logs will be immediately removed.
+# if set to between 0 to INT_MAX, that means it will retent latest N(log-retention-days) day logs.
+
+# By default the log-retention-days is -1.
+log-retention-days -1
+
+# When running in daemonize mode, kvrocks writes a PID file in ${CONFIG_DIR}/kvrocks.pid by
 # default. You can specify a custom pid file location here.
 # pidfile /var/run/kvrocks.pid
 pidfile DATA_KVROCKS/kvrocks.pid
@@ -146,7 +180,7 @@ tcp-backlog 511
 master-use-repl-port no
 
 # Currently, master only checks sequence number when replica asks for PSYNC,
-# that is not enough since they may have different replication history even
+# that is not enough since they may have different replication histories even
 # the replica asking sequence is in the range of the master current WAL.
 #
 # We design 'Replication Sequence ID' PSYNC, we add unique replication id for
@@ -180,11 +214,11 @@ use-rsid-psync no
 # is still in progress, the slave can act in two different ways:
 #
 # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-# still reply to client requests, possibly with out of date data, or the
+# still reply to client requests, possibly with out-of-date data, or the
# data set may just be empty if this is the first synchronization.
 #
 # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
-# an error "SYNC with master in progress" to all the kind of commands
+# an error "SYNC with master in progress" to all kinds of commands
 # but to INFO and SLAVEOF.
 #
 slave-serve-stale-data yes
@@ -203,6 +237,35 @@ slave-serve-stale-data yes
 # Default: no
 slave-empty-db-before-fullsync no
 
+# A Kvrocks master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP address and port normally reported by a replica is
+# obtained in the following way:
+#
+# IP: The address is auto detected by checking the peer address
+# of the socket used by the replica to connect with the master.
+#
+# Port: The port is communicated by the replica during the replication
+# handshake, and is normally the port that the replica is using to
+# listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the replica may actually be reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
 # If replicas need full synchronization with master, master need to create
 # checkpoint for feeding replicas, and replicas also stage a checkpoint of
 # the master. If we also keep the backup, it maybe occupy extra disk space.
@@ -212,7 +275,7 @@ slave-empty-db-before-fullsync no
 # Default: no
 purge-backup-on-fullsync no
 
-# The maximum allowed rate (in MB/s) that should be used by Replication.
+# The maximum allowed rate (in MB/s) that should be used by replication.
 # If the rate exceeds max-replication-mb, replication will slow down.
 # Default: 0 (i.e. no limit)
 max-replication-mb 0
@@ -220,8 +283,8 @@ max-replication-mb 0
 # The maximum allowed aggregated write rate of flush and compaction (in MB/s).
 # If the rate exceeds max-io-mb, io will slow down.
 # 0 is no limit
-# Default: 500
-max-io-mb 500
+# Default: 0
+max-io-mb 0
 
 # The maximum allowed space (in GB) that should be used by RocksDB.
 # If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail.
@@ -231,7 +294,7 @@ max-db-size 0
 
 # The maximum backup to keep, server cron would run every minutes to check the num of current
 # backup, and purge the old backup if exceed the max backup num to keep. If max-backup-to-keep
-# is 0, no backup would be keep. But now, we only support 0 or 1.
+# is 0, no backup would be kept. But now, we only support 0 or 1.
 max-backup-to-keep 1
 
 # The maximum hours to keep the backup. If max-backup-keep-hours is 0, wouldn't purge any backup.
@@ -243,6 +306,115 @@ max-backup-keep-hours 24
 # Default: 16
 max-bitmap-to-string-mb 16
 
+# Whether to enable SCAN-like cursor compatible with Redis.
+# If enabled, the cursor will be unsigned 64-bit integers.
+# If disabled, the cursor will be a string.
+# Default: no
+redis-cursor-compatible yes
+
+# Whether to enable the RESP3 protocol.
+# NOTICE: RESP3 is still under development, don't enable it in production environment.
+#
+# Default: no
+# resp3-enabled no
+
+# Maximum nesting depth allowed when parsing and serializing
+# JSON documents while using JSON commands like JSON.SET.
+# Default: 1024
+json-max-nesting-depth 1024
+
+# The underlying storage format of JSON data type
+# NOTE: This option only affects newly written/updated key-values
+# The CBOR format may reduce the storage size and speed up JSON commands
+# Available values: json, cbor
+# Default: json
+json-storage-format json
+
+################################## TLS ###################################
+
+# By default, TLS/SSL is disabled, i.e. `tls-port` is set to 0.
+# To enable it, `tls-port` can be used to define TLS-listening ports.
+# tls-port 0
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers.
+# These files should be PEM formatted.
+#
+# tls-cert-file kvrocks.crt
+# tls-key-file kvrocks.key
+
+# If the key file is encrypted using a passphrase, it can be included here
+# as well.
+#
+# tls-key-file-pass secret
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers. Kvrocks requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# If "no" is specified, client certificates are not required and not accepted.
+# If "optional" is specified, client certificates are accepted and must be
+# valid if provided, but are not required.
+#
+# tls-auth-clients no
+# tls-auth-clients optional
+
+# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended
+# that older formally deprecated versions are kept disabled to reduce the attack surface.
+# You can explicitly specify TLS versions to support.
+# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2",
+# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination.
+# To enable only TLSv1.2 and TLSv1.3, use:
+#
+# tls-protocols "TLSv1.2 TLSv1.3"
+
+# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
+# information about the syntax of this string, and specifically for TLSv1.3
+# ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+# By default, TLS session caching is enabled to allow faster and less expensive
+# reconnections by clients that support it. Use the following directive to disable
+# caching.
+#
+# tls-session-caching no
+
+# Change the default number of TLS sessions cached. A zero value sets the cache
+# to unlimited size. The default size is 20480.
+#
+# tls-session-cache-size 5000
+
+# Change the default timeout of cached TLS sessions. The default timeout is 300
+# seconds.
+#
+# tls-session-cache-timeout 60
+
+# By default, a replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
 ################################## SLOW LOG ###################################
 
 # The Kvrocks Slow Log is a mechanism to log queries that exceeded a specified
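The whole TLS block above ships commented out and is not touched further by this patch. If it were ever enabled, a throwaway self-signed certificate for local testing could be generated roughly like this (standard openssl usage; the file names simply mirror the commented tls-cert-file/tls-key-file examples):

    openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
        -subj "/CN=localhost" -keyout kvrocks.key -out kvrocks.crt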
@@ -301,8 +473,8 @@ supervised no
 # Default: empty
 # profiling-sample-commands ""
 
-# Ratio of the samples would be recorded. We simply use the rand to determine
-# whether to record the sample or not.
+# Ratio of the samples would be recorded. It is a number between 0 and 100.
+# We simply use the rand to determine whether to record the sample or not.
 #
 # Default: 0
 profiling-sample-ratio 0
@@ -331,15 +503,27 @@ profiling-sample-record-threshold-ms 100
 # 0-7am every day.
 compaction-checker-range 0-7
 
-# Bgsave scheduler, auto bgsave at schedule time
+# When the compaction checker is triggered, the db will periodically pick the SST file
+# with the highest "deleted percentage" (i.e. the percentage of deleted keys in the SST
+# file) to compact, in order to free disk space.
+# However, if a specific SST file was created more than "force-compact-file-age" seconds
+# ago, and its percentage of deleted keys is higher than
+# "force-compact-file-min-deleted-percentage", it will be forcely compacted as well.
+
+# Default: 172800 seconds; Range: [60, INT64_MAX];
+# force-compact-file-age 172800
+# Default: 10 %; Range: [1, 100];
+# force-compact-file-min-deleted-percentage 10
+
+# Bgsave scheduler, auto bgsave at scheduled time
 # time expression format is the same as crontab(currently only support * and int)
 # e.g. bgsave-cron 0 3 * * * 0 4 * * *
-# would bgsave the db at 3am and 4am everyday
+# would bgsave the db at 3am and 4am every day
 
 # Command renaming.
 #
 # It is possible to change the name of dangerous commands in a shared
-# environment. For instance the KEYS command may be renamed into something
+# environment. For instance, the KEYS command may be renamed into something
 # hard to guess so that it will still be available for internal-use tools
 # but not available for general clients.
 #
@@ -352,39 +536,26 @@
 #
 # rename-command KEYS ""
 
-# The key-value size may so be quite different in many scenes, and use 256MiB as SST file size
-# may cause data loading(large index/filter block) ineffective when the key-value was too small.
-# kvrocks supports user-defined SST file in config(rocksdb.target_file_size_base),
-# but it still too trivial and inconvenient to adjust the different sizes for different instances.
-# so we want to periodic auto-adjust the SST size in-flight with user avg key-value size.
-#
-# If enabled, kvrocks will auto resize rocksdb.target_file_size_base
-# and rocksdb.write_buffer_size in-flight with user avg key-value size.
-# Please see #118.
-#
-# Default: yes
-auto-resize-block-and-sst yes
-
 ################################ MIGRATE #####################################
 
 # If the network bandwidth is completely consumed by the migration task,
 # it will affect the availability of kvrocks. To avoid this situation,
-# migrate-speed is adpoted to limit the migrating speed.
-# Migrating speed is limited by controling the duraiton between sending data,
-# the duation is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us).
+# migrate-speed is adopted to limit the migrating speed.
+# Migrating speed is limited by controlling the duration between sending data,
+# the duration is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us).
 # Value: [0,INT_MAX], 0 means no limit
 #
 # Default: 4096
 migrate-speed 4096
 
-# In order to reduce data transimission times and improve the efficiency of data migration,
+# In order to reduce data transmission times and improve the efficiency of data migration,
 # pipeline is adopted to send multiple data at once. Pipeline size can be set by this option.
 # Value: [1, INT_MAX], it can't be 0
 #
 # Default: 16
 migrate-pipeline-size 16
 
-# In order to reduce the write forbidden time during migrating slot, we will migrate the incremetal
-# data sevral times to reduce the amount of incremetal data. Until the quantity of incremetal
+# In order to reduce the write forbidden time during migrating slot, we will migrate the incremental
+# data several times to reduce the amount of incremental data. Until the quantity of incremental
 # data is reduced to a certain threshold, slot will be forbidden write. The threshold is set by
 # this option.
 # Value: [1, INT_MAX], it can't be 0
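A quick sanity check of the sending-interval formula above, with the default migrate-pipeline-size (16) and migrate-speed (4096):

    echo $(( 1000000 * 16 / 4096 ))    # 3906 us between pipelines, i.e. ~256 pipelines/s x 16 keys ~= 4096 keys/s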
@@ -394,22 +565,21 @@ migrate-sequence-gap 10000
 
 ################################ ROCKSDB #####################################
 
-# Specify the capacity of metadata column family block cache. Larger block cache
-# may make request faster while more keys would be cached. Max Size is 200*1024.
-# Default: 2048MB
-rocksdb.metadata_block_cache_size 2048
+# Specify the capacity of column family block cache. A larger block cache
+# may make requests faster while more keys would be cached. Max Size is 400*1024.
+# Default: 4096MB
+rocksdb.block_cache_size 4096
 
-# Specify the capacity of subkey column family block cache. Larger block cache
-# may make request faster while more keys would be cached. Max Size is 200*1024.
-# Default: 2048MB
-rocksdb.subkey_block_cache_size 2048
-
-# Metadata column family and subkey column family will share a single block cache
-# if set 'yes'. The capacity of shared block cache is
-# metadata_block_cache_size + subkey_block_cache_size
+# Specify the type of cache used in the block cache.
+# Accept value: "lru", "hcc"
+# "lru" stands for the cache with the LRU(Least Recently Used) replacement policy.
 #
-# Default: yes
-rocksdb.share_metadata_and_subkey_block_cache yes
+# "hcc" stands for the Hyper Clock Cache, a lock-free cache alternative
+# that offers much improved CPU efficiency vs. LRU cache under high parallel
+# load or high contention.
+#
+# default lru
+rocksdb.block_cache_type lru
 
 # A global cache for table-level rows in RocksDB. If almost always point
 # lookups, enlarging row cache may improve read performance. Otherwise,
@@ -423,7 +593,7 @@ rocksdb.row_cache_size 0
 # files opened are always kept open. You can estimate number of files based
 # on target_file_size_base and target_file_size_multiplier for level-based
 # compaction. For universal-style compaction, you can usually set it to -1.
-# Default: 4096
+# Default: 8096
 rocksdb.max_open_files 8096
 
 # Amount of data to build up in memory (backed by an unsorted log
@@ -442,7 +612,7 @@ rocksdb.max_open_files 8096
 # default is 64MB
 rocksdb.write_buffer_size 64
 
-# Target file size for compaction, target file size for Leve N can be caculated
+# Target file size for compaction, target file size for Level N can be calculated
 # by target_file_size_base * (target_file_size_multiplier ^ (L-1))
 #
 # Default: 128MB
@@ -457,20 +627,29 @@ rocksdb.target_file_size_base 128
 # allowed.
 rocksdb.max_write_buffer_number 4
 
+# Maximum number of concurrent background jobs (compactions and flushes).
+# For backwards compatibility we will set `max_background_jobs =
+# max_background_compactions + max_background_flushes` in the case where user
+# sets at least one of `max_background_compactions` or `max_background_flushes`
+# (we replace -1 by 1 in case one option is unset).
+rocksdb.max_background_jobs 4
+
+# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs
 # Maximum number of concurrent background compaction jobs, submitted to
 # the default LOW priority thread pool.
-rocksdb.max_background_compactions 4
+rocksdb.max_background_compactions -1
+# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs
 # Maximum number of concurrent background memtable flush jobs, submitted by
 # default to the HIGH priority thread pool. If the HIGH priority thread pool
 # is configured to have zero threads, flush jobs will share the LOW priority
 # thread pool with compaction jobs.
-rocksdb.max_background_flushes 4
+rocksdb.max_background_flushes -1
 
 # This value represents the maximum number of threads that will
 # concurrently perform a compaction job by breaking it into multiple,
 # smaller ones that are run simultaneously.
-# Default: 2 (i.e. no subcompactions)
+# Default: 2
 rocksdb.max_sub_compactions 2
 
 # In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size
@@ -494,8 +673,8 @@ rocksdb.max_sub_compactions 2
 # default is 512MB
 rocksdb.max_total_wal_size 512
 
-# We impl the repliction with rocksdb WAL, it would trigger full sync when the seq was out of range.
-# wal_ttl_seconds and wal_size_limit_mb would affect how archived logswill be deleted.
+# We implement the replication with rocksdb WAL, it would trigger full sync when the seq was out of range.
+# wal_ttl_seconds and wal_size_limit_mb would affect how archived logs will be deleted.
 # If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that
 # are older than WAL_ttl_seconds will be deleted#
 #
@@ -505,26 +684,26 @@ rocksdb.wal_ttl_seconds 10800
 # If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
 # WAL files will be checked every 10 min and if total size is greater
 # then WAL_size_limit_MB, they will be deleted starting with the
-# earliest until size_limit is met. All empty files will be deleted
+# earliest until size_limit is met. All empty files will be deleted
 # Default: 16GB
 rocksdb.wal_size_limit_mb 16384
 
 # Approximate size of user data packed per block. Note that the
-# block size specified here corresponds to uncompressed data. The
+# block size specified here corresponds to uncompressed data. The
 # actual size of the unit read from disk may be smaller if
 # compression is enabled.
 #
-# Default: 4KB
+# Default: 16KB
 rocksdb.block_size 16384
 
 # Indicating if we'd put index/filter blocks to the block cache
 #
-# Default: no
+# Default: yes
 rocksdb.cache_index_and_filter_blocks yes
 
 # Specify the compression to use. Only compress level greater
 # than 2 to improve performance.
-# Accept value: "no", "snappy"
+# Accept value: "no", "snappy", "lz4", "zstd", "zlib"
 # default snappy
 rocksdb.compression snappy
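Once the server is running, the effective values of the renamed and retuned RocksDB options can be read back over the wire — a minimal sketch, assuming redis-cli and the requirepass set earlier in this config:

    redis-cli -p 6383 -a ail CONFIG GET rocksdb.block_cache_size
    redis-cli -p 6383 -a ail CONFIG GET rocksdb.max_background_jobs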
@@ -579,7 +758,7 @@ rocksdb.stats_dump_period_sec 0
 # Default: no
 rocksdb.disable_auto_compactions no
 
-# BlobDB(key-value separation) is essentially RocksDB for large-value use cases.
+# BlobDB(key-value separation) is essentially RocksDB for large-value use cases.
 # Since 6.18.0, The new implementation is integrated into the RocksDB core.
 # When set, large values (blobs) are written to separate blob files, and only
 # pointers to them are stored in SST files. This can reduce write amplification
@@ -608,7 +787,7 @@ rocksdb.blob_file_size 268435456
 # Enables garbage collection of blobs. Valid blobs residing in blob files
 # older than a cutoff get relocated to new files as they are encountered
 # during compaction, which makes it possible to clean up blob files once
-# they contain nothing but obsolete/garbage blobs.
+# they contain nothing but obsolete/garbage blobs.
 # See also rocksdb.blob_garbage_collection_age_cutoff below.
 #
 # Default: yes
 rocksdb.enable_blob_garbage_collection yes
@@ -623,16 +802,16 @@ rocksdb.blob_garbage_collection_age_cutoff 25
 
-# The purpose of following three options are to dynamically adjust the upper limit of
-# the data that each layer can store according to the size of the different
+# The purpose of the following three options are to dynamically adjust the upper limit of
+# the data that each layer can store according to the size of the different
 # layers of the LSM. Enabling this option will bring some improvements in
-# deletion efficiency and space amplification, but it will lose a certain
+# deletion efficiency and space amplification, but it will lose a certain
 # amount of read performance.
-# If you want know more details about Levels' Target Size, you can read RocksDB wiki:
+# If you want to know more details about Levels' Target Size, you can read RocksDB wiki:
 # https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
 #
-# Default: no
-rocksdb.level_compaction_dynamic_level_bytes no
+# Default: yes
+rocksdb.level_compaction_dynamic_level_bytes yes
 
 # The total file size of level-1 sst.
 #
@@ -641,39 +820,92 @@ rocksdb.max_bytes_for_level_base 268435456
 
 # Multiplication factor for the total file size of L(n+1) layers.
 # This option is a double type number in RocksDB, but kvrocks is
-# not support double data type number yet, so we use int data
+# not support the double data type number yet, so we use integer
 # number instead of double currently.
 #
 # Default: 10
 rocksdb.max_bytes_for_level_multiplier 10
 
+# This feature only takes effect in Iterators and MultiGet.
+# If yes, RocksDB will try to read asynchronously and in parallel as much as possible to hide IO latency.
+# In iterators, it will prefetch data asynchronously in the background for each file being iterated on.
+# In MultiGet, it will read the necessary data blocks from those files in parallel as much as possible.
+
+# Default no
+rocksdb.read_options.async_io no
+
+# If yes, the write will be flushed from the operating system
+# buffer cache before the write is considered complete.
+# If this flag is enabled, writes will be slower.
+# If this flag is disabled, and the machine crashes, some recent
+# writes may be lost. Note that if it is just the process that
+# crashes (i.e., the machine does not reboot), no writes will be
+# lost even if sync==false.
+#
+# Default: no
+rocksdb.write_options.sync no
+
+# If yes, writes will not first go to the write ahead log,
+# and the write may get lost after a crash.
+# You must keep wal enabled if you use replication.
+#
+# Default: no
+rocksdb.write_options.disable_wal no
+
+# If enabled and we need to wait or sleep for the write request, fails
+# immediately.
+#
+# Default: no
+rocksdb.write_options.no_slowdown no
+
+# If enabled, write requests are of lower priority if compaction is
+# behind. In this case, no_slowdown = true, the request will be canceled
+# immediately. Otherwise, it will be slowed down.
+# The slowdown value is determined by RocksDB to guarantee
+# it introduces minimum impacts to high priority writes.
+#
+# Default: no
+rocksdb.write_options.low_pri no
+
+# If enabled, this writebatch will maintain the last insert positions of each
+# memtable as hints in concurrent write. It can improve write performance
+# in concurrent writes if keys in one writebatch are sequential.
+#
+# Default: no
+rocksdb.write_options.memtable_insert_hint_per_batch no
+
+
+# Support RocksDB auto-tune rate limiter for the background IO
+# if enabled, Rate limiter will limit the compaction write if flush write is high
+# Please see https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html
+#
+# Default: yes
+rocksdb.rate_limiter_auto_tuned yes
+
+# Enable this option will schedule the deletion of obsolete files in a background thread
+# on iterator destruction. It can reduce the latency if there are many files to be removed.
+# see https://github.com/facebook/rocksdb/wiki/IO#avoid-blocking-io
+#
+# Default: yes
+# rocksdb.avoid_unnecessary_blocking_io yes
+
 ################################ NAMESPACE #####################################
 # namespace.test change.me
+
+-# investigation -> db ????
+-# ail2ail -> a2a ????
+
+
 backup-dir DATA_KVROCKS/backup
-fullsync-recv-file-delay 0
 log-dir DATA_KVROCKS
-unixsocketperm 26
-
-
-
-
 namespace.cor ail_correls
 namespace.crawl ail_crawlers
 namespace.db ail_datas
 namespace.dup ail_dups
 namespace.obj ail_objs
-namespace.tl ail_tls
 namespace.rel ail_rels
 namespace.stat ail_stats
 namespace.tag ail_tags
+namespace.tl ail_tls
 namespace.track ail_trackers
-
-# investigation -> db ????
-# ail2ail -> a2a ?????
-
-
-
-
-
-
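If the namespace tokens above behave as per-namespace passwords (as in kvrocks' namespace feature), an AIL component is scoped to one of them simply by authenticating with its token — a hypothetical check against the "db" namespace defined above:

    redis-cli -p 6383 -a ail_datas PING    # authenticates into the namespace whose token is ail_datas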
diff --git a/installing_deps.sh b/installing_deps.sh
index e6f907a1..c681249b 100755
--- a/installing_deps.sh
+++ b/installing_deps.sh
@@ -88,7 +88,7 @@ DEFAULT_HOME=$(pwd)
 #### KVROCKS ####
 test ! -d kvrocks/ && git clone https://github.com/apache/incubator-kvrocks.git kvrocks
 pushd kvrocks
-./x.py build
+./x.py build -j 4
 popd
 
 DEFAULT_KVROCKS_DATA=$DEFAULT_HOME/DATA_KVROCKS
diff --git a/update/v5.3/Update.sh b/update/v5.3/Update.sh
index 1e040200..534cd295 100755
--- a/update/v5.3/Update.sh
+++ b/update/v5.3/Update.sh
@@ -14,7 +14,7 @@ GREEN="\\033[1;32m"
 DEFAULT="\\033[0;39m"
 
 echo -e $GREEN"Shutting down AIL ..."$DEFAULT
-bash ${AIL_BIN}/LAUNCH.sh -ks
+bash ${AIL_BIN}/LAUNCH.sh -k
 wait
 
 # SUBMODULES #
@@ -28,6 +28,17 @@ pip install -U libretranslatepy
 pip install -U xxhash
 pip install -U DomainClassifier
 
+echo ""
+echo -e $GREEN"Updating KVROCKS ..."$DEFAULT
+echo ""
+pushd ${AIL_HOME}/kvrocks
+git pull
+./x.py build -j 4
+popd
+
+bash ${AIL_BIN}/LAUNCH.sh -lrv
+bash ${AIL_BIN}/LAUNCH.sh -lkv
+
 echo ""
 echo -e $GREEN"Updating AIL VERSION ..."$DEFAULT
 echo ""
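After the update script rebuilds kvrocks, something along these lines can confirm the new binary and bring it back up (a sketch, assuming x.py left the binary in kvrocks/build/ and that it reports its version with -v):

    ${AIL_HOME}/kvrocks/build/kvrocks -v    # print the rebuilt kvrocks version
    bash ${AIL_BIN}/LAUNCH.sh -lkv          # relaunch kvrocks, as the update script does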