diff --git a/cache/cache.conf b/cache/cache.conf index fe2702cd..990f539e 100644 --- a/cache/cache.conf +++ b/cache/cache.conf @@ -51,6 +51,7 @@ # # loadmodule /path/to/my_module.so # loadmodule /path/to/other_module.so +# loadmodule /path/to/args_module.so [arg [arg ...]] ################################## NETWORK ##################################### @@ -86,7 +87,7 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ bind 127.0.0.1 -::1 -# By default, outgoing connections (from replica to master, from Sentinel to +# By default, outgoing connections (from replica to primary, from Sentinel to # instances, cluster bus, etc.) are not bound to a specific local address. In # most cases, this means the operating system will handle that based on routing # and the interface through which the connection goes out. @@ -152,6 +153,9 @@ tcp-backlog 511 # incoming connections. There is no default, so the server will not listen # on a unix socket when not specified. # +# unixsocket /run/valkey.sock +# unixsocketgroup wheel +# unixsocketperm 700 unixsocket cache.sock unixsocketperm 700 @@ -192,7 +196,7 @@ tcp-keepalive 300 # tls-port 6379 # Configure a X.509 certificate and private key to use for authenticating the -# server to connected clients, masters or cluster peers. These files should be +# server to connected clients, primaries or cluster peers. These files should be # PEM formatted. # # tls-cert-file valkey.crt @@ -204,7 +208,7 @@ tcp-keepalive 300 # tls-key-file-pass secret # Normally the server uses the same certificate for both server functions (accepting -# connections) and client functions (replicating from a master, establishing +# connections) and client functions (replicating from a primary, establishing # cluster bus connections, etc.). # # Sometimes certificates are issued with attributes that designate them as @@ -244,7 +248,7 @@ tcp-keepalive 300 # tls-auth-clients optional # By default, a replica does not attempt to establish a TLS connection -# with its master. +# with its primary. # # Use the following directive to enable TLS on replication links. # @@ -328,7 +332,7 @@ daemonize yes # # When the server runs non daemonized, no pid file is created if none is # specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". +# is used even if not specified, defaulting to "/var/run/valkey.pid". # # Creating a pid file is best effort: if the server is not able to create it # nothing bad happens, the server will start and run normally. @@ -356,7 +360,7 @@ logfile "" # syslog-enabled no # Specify the syslog identity. -# syslog-ident redis +# syslog-ident valkey # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. # syslog-facility local0 @@ -385,6 +389,14 @@ databases 16 # ASCII art logo in startup logs by setting the following option to yes. always-show-logo no +# User data, including keys, values, client names, and ACL usernames, can be +# logged as part of assertions and other error cases. To prevent sensitive user +# information, such as PII, from being recorded in the server log file, this +# user data is hidden from the log by default. If you need to log user data for +# debugging or troubleshooting purposes, you can disable this feature by +# changing the config value to no. +hide-user-data-from-log yes + # By default, the server modifies the process title (as seen in 'top' and 'ps') to # provide some runtime information. 
It is possible to disable this and leave # the process name as executed by setting the following to no. @@ -412,6 +424,15 @@ proc-title-template "{title} {listen-addr} {server-mode}" # is derived from the environment variables. locale-collate "" +# Valkey is largely compatible with Redis OSS, apart from a few cases where +# Valkey identifies itself as "Valkey" rather than "Redis". Extended +# Redis OSS compatibility mode makes Valkey pretend to be Redis. Enable this +# only if you have problems with tools or clients. This is a temporary +# configuration added in Valkey 8.0 and is scheduled to have no effect in Valkey +# 9.0 and be completely removed in Valkey 10.0. +# +# extended-redis-compatibility no + ################################ SNAPSHOTTING ################################ # Save the DB to disk. @@ -433,6 +454,7 @@ locale-collate "" # # You can set these explicitly by uncommenting the following line. # +# save 3600 1 300 100 60 10000 save 3600 1 # By default the server will stop accepting writes if RDB snapshots are enabled @@ -472,7 +494,7 @@ rdbchecksum yes # no - Never perform full sanitization # yes - Always perform full sanitization # clients - Perform full sanitization only for user connections. -# Excludes: RDB files, RESTORE commands received from the master +# Excludes: RDB files, RESTORE commands received from the primary # connection, and client connections which have the # skip-sanitize-payload ACL flag. # The default should be 'clients' but since it currently affects cluster @@ -486,13 +508,13 @@ dbfilename dump.rdb # Remove RDB files used by replication in instances without persistence # enabled. By default this option is disabled, however there are environments # where for regulations or other security concerns, RDB files persisted on -# disk by masters in order to feed replicas, or stored on disk by replicas +# disk by primaries in order to feed replicas, or stored on disk by replicas # in order to load them for the initial synchronization, should be deleted # ASAP. Note that this option ONLY WORKS in instances that have both AOF # and RDB persistence disabled, otherwise is completely ignored. # # An alternative (and sometimes better) way to obtain the same effect is -# to use diskless replication on both master and replicas instances. However +# to use diskless replication on both primary and replicas instances. However # in the case of replicas, diskless is not always an option. rdb-del-sync-files no @@ -503,6 +525,9 @@ rdb-del-sync-files no # # The Append Only File will also be created inside this directory. # +# The Cluster config file is written relative this directory, if the +# 'cluster-config-file' configuration directive is a relative path. +# # Note that you must specify a directory here, not a file name. dir ./ @@ -516,38 +541,38 @@ dir ./ # | (receive writes) | | (exact copy) | # +------------------+ +---------------+ # -# 1) Replication is asynchronous, but you can configure a master to +# 1) Replication is asynchronous, but you can configure a primary to # stop accepting writes if it appears to be not connected with at least # a given number of replicas. # 2) Replicas are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of +# primary if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a -# network partition replicas automatically try to reconnect to masters +# network partition replicas automatically try to reconnect to primaries # and resynchronize with them. # -# replicaof +# replicaof -# If the master is password protected (using the "requirepass" configuration +# If the primary is password protected (using the "requirepass" configuration # directive below) it is possible to tell the replica to authenticate before -# starting the replication synchronization process, otherwise the master will +# starting the replication synchronization process, otherwise the primary will # refuse the replica request. # -# masterauth +# primaryauth # # However this is not enough if you are using ACLs # and the default user is not capable of running the PSYNC # command and/or other commands needed for replication. In this case it's # better to configure a special user to use with replication, and specify the -# masteruser configuration as such: +# primaryuser configuration as such: # -# masteruser +# primaryuser # -# When masteruser is specified, the replica will authenticate against its -# master using the new AUTH form: AUTH . +# When primaryuser is specified, the replica will authenticate against its +# primary using the new AUTH form: AUTH . -# When a replica loses its connection with the master, or when the replication +# When a replica loses its connection with the primary, or when the replication # is still in progress, the replica can act in two different ways: # # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will @@ -565,7 +590,7 @@ replica-serve-stale-data yes # You can configure a replica instance to accept writes or not. Writing against # a replica instance may be useful to store some ephemeral data (because data -# written on a replica will be easily deleted after resync with the master) but +# written on a replica will be easily deleted after resync with the primary) but # may also cause problems if clients are writing to it because of a # misconfiguration. # @@ -583,15 +608,15 @@ replica-read-only yes # # New replicas and reconnecting replicas that are not able to continue the # replication process just receiving differences, need to do what is called a -# "full synchronization". An RDB file is transmitted from the master to the +# "full synchronization". An RDB file is transmitted from the primary to the # replicas. # # The transmission can happen in two different ways: # -# 1) Disk-backed: The master creates a new process that writes the RDB +# 1) Disk-backed: The primary creates a new process that writes the RDB # file on disk. Later the file is transferred by the parent # process to the replicas incrementally. -# 2) Diskless: The master creates a new process that directly writes the +# 2) Diskless: The primary creates a new process that directly writes the # RDB file to replica sockets, without touching the disk at all. # # With disk-backed replication, while the RDB file is generated, more replicas @@ -600,7 +625,7 @@ replica-read-only yes # once the transfer starts, new replicas arriving will be queued and a new # transfer will start when the current one terminates. # -# When diskless replication is used, the master waits a configurable amount of +# When diskless replication is used, the primary waits a configurable amount of # time (in seconds) before starting the transfer in the hope that multiple # replicas will arrive and the transfer can be parallelized. 
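The delay mentioned above is controlled by the repl-diskless-sync-delay directive. A minimal sketch, assuming the commonly documented defaults (diskless sync enabled, a 5 second delay); check the defaults shipped with your build before relying on them:

    repl-diskless-sync yes
    repl-diskless-sync-delay 5

Setting the delay to 0 starts the transfer as soon as the first replica asks for it; replicas that arrive after a transfer has started are queued for the next one.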
# @@ -630,15 +655,15 @@ repl-diskless-sync-max-replicas 0 # WARNING: Since in this setup the replica does not immediately store an RDB on # disk, it may cause data loss during failovers. RDB diskless load + server # modules not handling I/O reads may cause the server to abort in case of I/O errors -# during the initial synchronization stage with the master. +# during the initial synchronization stage with the primary. # ----------------------------------------------------------------------------- # # Replica can load the RDB it reads from the replication link directly from the # socket, or store the RDB to a file and read that file after it was completely -# received from the master. +# received from the primary. # # In many cases the disk is slower than the network, and storing and loading -# the RDB file may increase replication time (and even increase the master's +# the RDB file may increase replication time (and even increase the primary's # Copy on Write memory and replica buffers). # However, when parsing the RDB file directly from the socket, in order to avoid # data loss it's only safe to flush the current dataset when the new dataset is @@ -649,7 +674,7 @@ repl-diskless-sync-max-replicas 0 # "swapdb" - Keep current db contents in RAM while parsing the data directly # from the socket. Replicas in this mode can keep serving current # dataset while replication is in progress, except for cases where -# they can't recognize master as having a data set from same +# they can't recognize primary as having a data set from same # replication history. # Note that this requires sufficient memory, if you don't have it, # you risk an OOM kill. @@ -658,6 +683,29 @@ repl-diskless-sync-max-replicas 0 # during replication. repl-diskless-load disabled +# This dual channel replication sync feature optimizes the full synchronization process +# between a primary and its replicas. When enabled, it reduces both memory and CPU load +# on the primary server. +# +# How it works: +# 1. During full sync, instead of accumulating replication data on the primary server, +# the data is sent directly to the syncing replica. +# 2. The primary's background save (bgsave) process streams the RDB snapshot directly +# to the replica over a separate connection. +# +# Tradeoff: +# While this approach reduces load on the primary, it shifts the burden of storing +# the replication buffer to the replica. This means the replica must have sufficient +# memory to accommodate the buffer during synchronization. However, this tradeoff is +# generally beneficial as it prevents potential performance degradation on the primary +# server, which is typically handling more critical operations. +# +# When toggling this configuration on or off during an ongoing synchronization process, +# it does not change the already running sync method. The new configuration will take +# effect only for subsequent synchronization processes. + +dual-channel-replication-enabled no + # Master send PINGs to its replicas in a predefined interval. It's possible to # change this interval with the repl_ping_replica_period option. The default # value is 10 seconds. @@ -668,11 +716,11 @@ repl-diskless-load disabled # # 1) Bulk transfer I/O during SYNC, from the point of view of replica. # 2) Master timeout from the point of view of replicas (data, pings). -# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# 3) Replica timeout from the point of view of primaries (REPLCONF ACK pings). 
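When tuning these timers it can help to read the effective values back at runtime. A quick sketch, assuming valkey-cli can reach the instance (here through the unix socket configured earlier in this file; adjust the path or use -h/-p for TCP):

    valkey-cli -s cache.sock CONFIG GET repl-ping-replica-period
    valkey-cli -s cache.sock CONFIG GET repl-timeout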
# # It is important to make sure that this value is greater than the value # specified for repl-ping-replica-period otherwise a timeout will be detected -# every time there is low traffic between the master and the replica. The default +# every time there is low traffic between the primary and the replica. The default # value is 60 seconds. # # repl-timeout 60 @@ -688,7 +736,7 @@ repl-diskless-load disabled # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions -# or when the master and replicas are many hops away, turning this to "yes" may +# or when the primary and replicas are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no @@ -703,15 +751,15 @@ repl-disable-tcp-nodelay no # # The backlog is only allocated if there is at least one replica connected. # -# repl-backlog-size 1mb +# repl-backlog-size 10mb -# After a master has no connected replicas for some time, the backlog will be +# After a primary has no connected replicas for some time, the backlog will be # freed. The following option configures the amount of seconds that need to # elapse, starting from the time the last replica disconnected, for the backlog # buffer to be freed. # # Note that replicas never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially +# promoted to primaries later, and should be able to correctly "partially # resynchronize" with other replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. @@ -720,21 +768,21 @@ repl-disable-tcp-nodelay no # The replica priority is an integer number published by the server in the INFO # output. It is used by Sentinel in order to select a replica to promote -# into a master if the master is no longer working correctly. +# into a primary if the primary is no longer working correctly. # # A replica with a low priority number is considered better for promotion, so # for instance if there are three replicas with priority 10, 100, 25 Sentinel # will pick the one with priority 10, that is the lowest. # # However a special priority of 0 marks the replica as not able to perform the -# role of master, so a replica with priority of 0 will never be selected by +# role of primary, so a replica with priority of 0 will never be selected by # Sentinel for promotion. # # By default the priority is 100. replica-priority 100 # The propagation error behavior controls how the server will behave when it is -# unable to handle a command being processed in the replication stream from a master +# unable to handle a command being processed in the replication stream from a primary # or processed while reading from an AOF file. Errors that occur during propagation # are unexpected, and can cause data inconsistency. # @@ -747,7 +795,7 @@ replica-priority 100 # propagation-error-behavior ignore # Replica ignore disk write errors controls the behavior of a replica when it is -# unable to persist a write command received from its master to disk. By default, +# unable to persist a write command received from its primary to disk. By default, # this configuration is set to 'no' and will crash the replica in this condition. # It is not recommended to change this default. # @@ -756,16 +804,16 @@ replica-priority 100 # ----------------------------------------------------------------------------- # By default, Sentinel includes all replicas in its reports. 
A replica # can be excluded from Sentinel's announcements. An unannounced replica -# will be ignored by the 'sentinel replicas ' command and won't be +# will be ignored by the 'sentinel replicas ' command and won't be # exposed to Sentinel's clients. # # This option does not change the behavior of replica-priority. Even with -# replica-announced set to 'no', the replica can be promoted to master. To +# replica-announced set to 'no', the replica can be promoted to primary. To # prevent this behavior, set replica-priority to 0. # # replica-announced yes -# It is possible for a master to stop accepting writes if there are less than +# It is possible for a primary to stop accepting writes if there are less than # N replicas connected, having a lag less or equal than M seconds. # # The N replicas need to be in "online" state. @@ -787,18 +835,18 @@ replica-priority 100 # By default min-replicas-to-write is set to 0 (feature disabled) and # min-replicas-max-lag is set to 10. -# A master is able to list the address and port of the attached +# A primary is able to list the address and port of the attached # replicas in different ways. For example the "INFO replication" section # offers this information, which is used, among other tools, by # Sentinel in order to discover replica instances. # Another place where this info is available is in the output of the -# "ROLE" command of a master. +# "ROLE" command of a primary. # # The listed IP address and port normally reported by a replica is # obtained in the following way: # # IP: The address is auto detected by checking the peer address -# of the socket used by the replica to connect with the master. +# of the socket used by the replica to connect with the primary. # # Port: The port is communicated by the replica during the replication # handshake, and is normally the port that the replica is using to @@ -807,7 +855,7 @@ replica-priority 100 # However when port forwarding or Network Address Translation (NAT) is # used, the replica may actually be reachable via different IP and port # pairs. The following two options can be used by a replica in order to -# report to its master a specific set of IP and port, so that both INFO +# report to its primary a specific set of IP and port, so that both INFO # and ROLE will report those values. # # There is no need to use both the options if you need to override just @@ -1151,7 +1199,8 @@ acllog-max-len 128 # configuration directive. # # The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. +# true LRU but costs more CPU. 3 is faster but not very accurate. The maximum +# value that can be set is 64. # # maxmemory-samples 5 @@ -1164,11 +1213,11 @@ acllog-max-len 128 # maxmemory-eviction-tenacity 10 # By default a replica will ignore its maxmemory setting -# (unless it is promoted to master after a failover or manually). It means -# that the eviction of keys will be just handled by the master, sending the -# DEL commands to the replica as keys evict in the master side. +# (unless it is promoted to primary after a failover or manually). It means +# that the eviction of keys will be just handled by the primary, sending the +# DEL commands to the replica as keys evict in the primary side. 
# -# This behavior ensures that masters and replicas stay consistent, and is usually +# This behavior ensures that primaries and replicas stay consistent, and is usually # what you want, however if your replica is writable, or you want the replica # to have a different memory setting, and you are sure all the writes performed # to the replica are idempotent, then you may change this default (but be sure @@ -1179,7 +1228,7 @@ acllog-max-len 128 # be larger on the replica, or data structures may sometimes take more memory # and so forth). So make sure you monitor your replicas and make sure they # have enough memory to never hit a real out-of-memory condition before the -# master hits the configured maxmemory setting. +# primary hits the configured maxmemory setting. # # replica-ignore-maxmemory yes @@ -1202,8 +1251,8 @@ acllog-max-len 128 ############################# LAZY FREEING #################################### -# The server has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands +# When keys are deleted, the server has historically freed their memory using +# blocking operations. It means that the server stopped processing new commands # in order to reclaim all the memory associated with an object in a synchronous # way. If the key deleted is associated with a small object, the time needed # in order to execute the DEL command is very small and comparable to most other @@ -1211,15 +1260,16 @@ acllog-max-len 128 # aggregated value containing millions of elements, the server can block for # a long time (even seconds) in order to complete the operation. # -# For the above reasons the server also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. +# For the above reasons, lazy freeing (or asynchronous freeing) has been +# introduced. With lazy freeing, keys are deleted in constant time. Another +# thread will incrementally free the object in the background as fast as +# possible. # -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the server sometimes has to +# Starting from Valkey 8.0, lazy freeing is enabled by default. It is possible +# to retain the synchronous freeing behaviour by setting the lazyfree related +# configuration directives to 'no'. + +# Commands like DEL, FLUSHALL and FLUSHDB delete keys, but the server can also # delete keys or flush the whole database as a side effect of other operations. # Specifically the server deletes objects independently of a user call in the # following scenarios: @@ -1236,32 +1286,34 @@ acllog-max-len 128 # itself removes any old content of the specified key in order to replace # it with the specified string. # 4) During replication, when a replica performs a full resynchronization with -# its master, the content of the whole database is removed in order to +# its primary, the content of the whole database is removed in order to # load the RDB file just transferred. # -# In all the above cases the default is to delete objects in a blocking way, -# like if DEL was called.
However you can configure each case specifically -# in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives. +# In all the above cases, the default is to release memory in a non-blocking +# way. -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -replica-lazy-flush no +lazyfree-lazy-eviction yes +lazyfree-lazy-expire yes +lazyfree-lazy-server-del yes +replica-lazy-flush yes -# It is also possible, for the case when to replace the user code DEL calls -# with UNLINK calls is not easy, to modify the default behavior of the DEL -# command to act exactly like UNLINK, using the following configuration -# directive: +# For keys deleted using the DEL command, lazy freeing is controlled by the +# configuration directive 'lazyfree-lazy-user-del'. The default is 'yes'. The +# UNLINK command is identical to the DEL command, except that UNLINK always +# frees the memory lazily, regardless of this configuration directive: -lazyfree-lazy-user-del no +lazyfree-lazy-user-del yes # FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous # deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the # commands. When neither flag is passed, this directive will be used to determine # if the data should be deleted asynchronously. -lazyfree-lazy-user-flush no +# There are many problems with running flush synchronously. Even in single CPU +# environments, the thread managers should balance between the freeing and +# serving incoming requests. The default value is yes. + +lazyfree-lazy-user-flush yes ################################ THREADED I/O ################################# @@ -1277,9 +1329,8 @@ lazyfree-lazy-user-flush no # to pipelining nor sharding of the instance. # # By default threading is disabled, we suggest enabling it only in machines -# that have at least 4 or more cores, leaving at least one spare core. -# Using more than 8 threads is unlikely to help much. We also recommend using -# threaded I/O only if you actually have performance problems, with +# that have at least 3 or more cores, leaving at least one spare core. +# We also recommend using threaded I/O only if you actually have performance problems, with # instances being able to use a quite big percentage of CPU time, otherwise # there is no point in using this feature. # @@ -1290,21 +1341,23 @@ lazyfree-lazy-user-flush no # io-threads 4 # # Setting io-threads to 1 will just use the main thread as usual. -# When I/O threads are enabled, we only use threads for writes, that is -# to thread the write(2) syscall and transfer the client buffers to the -# socket. However it is also possible to enable threading of reads and -# protocol parsing using the following configuration directive, by setting -# it to yes: +# When I/O threads are enabled, we use threads for reads and writes, that is +# to thread the write and read syscall and transfer the client buffers to the +# socket and to enable threading of reads and protocol parsing. # -# io-threads-do-reads no +# When multiple commands are parsed by the I/O threads and ready for execution, +# we take advantage of knowing the next set of commands and prefetch their +# required dictionary entries in a batch. This reduces memory access costs. # -# Usually threading reads doesn't help much. +# The optimal batch size depends on the specific workflow of the user. 
+# The default batch size is 16, which can be modified using the +# 'prefetch-batch-max-size' config. # -# NOTE 1: This configuration directive cannot be changed at runtime via -# CONFIG SET. Also, this feature currently does not work when SSL is -# enabled. +# When the config is set to 0, prefetching is disabled. # -# NOTE 2: If you want to test the server speedup using valkey-benchmark, make +# prefetch-batch-max-size 16 +# +# NOTE: If you want to test the server speedup using valkey-benchmark, make # sure you also run the benchmark itself in threaded mode, using the # --threads option to match the number of server threads, otherwise you'll not # be able to notice the improvements. @@ -1317,7 +1370,7 @@ lazyfree-lazy-user-flush no # Enabling this feature makes the server actively control the oom_score_adj value # for all its processes, depending on their role. The default scores will # attempt to have background child processes killed before all others, and -# replicas killed before masters. +# replicas killed before primaries. # # The server supports these options: # @@ -1331,7 +1384,7 @@ lazyfree-lazy-user-flush no oom-score-adj no # When oom-score-adj is used, this directive controls the specific values used -# for master, replica and background child processes. Values range -2000 to +# for primary, replica and background child processes. Values range -2000 to # 2000 (higher means more likely to be killed). # # Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) @@ -1371,6 +1424,10 @@ disable-thp yes # If the AOF is enabled on startup the server will load the AOF, that is the file # with the better durability guarantees. # +# Note that changing this value in a config file of an existing database and +# restarting the server can lead to data loss. A conversion needs to be done +# by setting it via CONFIG command on a live server first. +# # Please check https://valkey.io/topics/persistence for more information. appendonly no @@ -1515,7 +1572,7 @@ aof-timestamp-enabled no # Maximum time to wait for replicas when shutting down, in seconds. # # During shut down, a grace period allows any lagging replicas to catch up with -# the latest replication offset before the master exists. This period can +# the latest replication offset before the primary exists. This period can # prevent data loss, especially for deployments without configured disk backups. # # The 'shutdown-timeout' value is the grace period's duration in seconds. It is @@ -1591,7 +1648,7 @@ aof-timestamp-enabled no # you to specify the cluster bus port when executing cluster meet. # cluster-port 0 -# A replica of a failing master will avoid to start a failover if its data +# A replica of a failing primary will avoid to start a failover if its data # looks too old. # # There is no simple way for a replica to actually have an exact measure of @@ -1599,35 +1656,35 @@ aof-timestamp-enabled no # # 1) If there are multiple replicas able to failover, they exchange messages # in order to try to give an advantage to the replica with the best -# replication offset (more data from the master processed). +# replication offset (more data from the primary processed). # Replicas will try to get their rank by offset, and apply to the start # of the failover a delay proportional to their rank. # # 2) Every single replica computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master +# its primary. 
This can be the last ping or command received (if the primary # is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). +# disconnection with the primary (if the replication link is currently down). # If the last interaction is too old, the replica will not try to failover # at all. # # The point "2" can be tuned by user. Specifically a replica will not perform -# the failover if, since the last interaction with the master, the time +# the failover if, since the last interaction with the primary, the time # elapsed is greater than: # # (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period # # So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the -# replica will not try to failover if it was not able to talk with the master +# replica will not try to failover if it was not able to talk with the primary # for longer than 310 seconds. # # A large cluster-replica-validity-factor may allow replicas with too old data to failover -# a master, while a too small value may prevent the cluster from being able to +# a primary, while a too small value may prevent the cluster from being able to # elect a replica at all. # # For maximum availability, it is possible to set the cluster-replica-validity-factor # to a value of 0, which means, that replicas will always try to failover the -# master regardless of the last time they interacted with the master. +# primary regardless of the last time they interacted with the primary. # (However they'll always try to apply a delay proportional to their # offset rank). # @@ -1636,19 +1693,19 @@ aof-timestamp-enabled no # # cluster-replica-validity-factor 10 -# Cluster replicas are able to migrate to orphaned masters, that are masters +# Cluster replicas are able to migrate to orphaned primaries, that are primaries # that are left without working replicas. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over +# to resist to failures as otherwise an orphaned primary can't be failed over # in case of failure if it has no working replicas. # -# Replicas migrate to orphaned masters only if there are still at least a -# given number of other working replicas for their old master. This number +# Replicas migrate to orphaned primaries only if there are still at least a +# given number of other working replicas for their old primary. This number # is the "migration barrier". A migration barrier of 1 means that a replica -# will migrate only if there is at least 1 other working replica for its master +# will migrate only if there is at least 1 other working replica for its primary # and so forth. It usually reflects the number of replicas you want for every -# master in your cluster. +# primary in your cluster. # -# Default is 1 (replicas migrate only if their masters remain with at least +# Default is 1 (replicas migrate only if their primaries remain with at least # one replica). To disable migration just set it to a very large value or # set cluster-allow-replica-migration to 'no'. # A value of 0 can be set but is useful only for debugging and dangerous @@ -1657,8 +1714,10 @@ aof-timestamp-enabled no # cluster-migration-barrier 1 # Turning off this option allows to use less automatic cluster configuration. 
-# It both disables migration to orphaned masters and migration from masters -# that became empty. +# It disables migration of replicas to orphaned primaries. Primaries that become +# empty due to losing their last slots to another primary will not automatically +# replicate from the primary that took over their last slots. Instead, they will +# remain as empty primaries without any slots. # # Default is 'yes' (allow automatic migrations). # @@ -1678,7 +1737,7 @@ aof-timestamp-enabled no # cluster-require-full-coverage yes # This option, when set to yes, prevents replicas from trying to failover its -# master during master failures. However the replica can still perform a +# primary during primary failures. However the replica can still perform a # manual failover, if forced to do so. # # This is useful in different scenarios, especially in the case of multiple @@ -1697,9 +1756,9 @@ aof-timestamp-enabled no # # The second use case is for configurations that don't meet the recommended # three shards but want to enable cluster mode and scale later. A -# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# primary outage in a 1 or 2 shard configuration causes a read/write outage to the # entire cluster without this option set, with it set there is only a write outage. -# Without a quorum of masters, slot ownership will not change automatically. +# Without a quorum of primaries, slot ownership will not change automatically. # # cluster-allow-reads-when-down no @@ -1754,6 +1813,26 @@ aof-timestamp-enabled no # # cluster-preferred-endpoint-type ip +# The cluster blacklist is used when removing a node from the cluster completely. +# When CLUSTER FORGET is called for a node, that node is put into the blacklist for +# some time so that when gossip messages are received from other nodes that still +# remember it, it is not re-added. This gives time for CLUSTER FORGET to be sent to +# every node in the cluster. The blacklist TTL is 60 seconds by default, which should +# be sufficient for most clusters, but you may consider increasing this if you see +# nodes getting re-added while using CLUSTER FORGET. +# +# cluster-blacklist-ttl 60 + +# Clusters can be configured to track per-slot resource statistics, +# which are accessible by the CLUSTER SLOT-STATS command. +# +# By default, the 'cluster-slot-stats-enabled' is disabled, and only 'key-count' is captured. +# By enabling the 'cluster-slot-stats-enabled' config, the cluster will begin to capture advanced statistics. +# These statistics can be leveraged to assess general slot usage trends, identify hot / cold slots, +# migrate slots for a balanced cluster workload, and / or re-write application logic to better utilize slots. +# +# cluster-slot-stats-enabled no + # In order to setup your cluster make sure to read the documentation # available at https://valkey.io web site. @@ -1765,22 +1844,28 @@ aof-timestamp-enabled no # # In order to make a cluster work in such environments, a static # configuration where each node knows its public address is needed. The -# following four options are used for this scope, and are: +# following options are used for this scope, and are: # # * cluster-announce-ip +# * cluster-announce-client-ipv4 +# * cluster-announce-client-ipv6 # * cluster-announce-port # * cluster-announce-tls-port # * cluster-announce-bus-port # -# Each instructs the node about its address, client ports (for connections -# without and with TLS) and cluster message bus port.
The information is then -# published in the header of the bus packets so that other nodes will be able to -# correctly map the address of the node publishing the information. +# Each instructs the node about its address, possibly other addresses to expose +# to clients, client ports (for connections without and with TLS) and cluster +# message bus port. The information is then published in the bus packets so that +# other nodes will be able to correctly map the address of the node publishing +# the information. # # If tls-cluster is set to yes and cluster-announce-tls-port is omitted or set # to zero, then cluster-announce-port refers to the TLS port. Note also that # cluster-announce-tls-port has no effect if tls-cluster is set to no. # +# If cluster-announce-client-ipv4 and cluster-announce-client-ipv6 are omitted, +# then cluster-announce-ip is exposed to clients. +# # If the above options are not used, the normal cluster auto-detection # will be used instead. # @@ -1792,6 +1877,8 @@ aof-timestamp-enabled no # Example: # # cluster-announce-ip 10.1.1.5 +# cluster-announce-client-ipv4 123.123.123.5 +# cluster-announce-client-ipv6 2001:db8::8a2e:370:7334 # cluster-announce-tls-port 6379 # cluster-announce-port 0 # cluster-announce-bus-port 6380 @@ -2059,7 +2146,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # amount by default in order to avoid that a protocol desynchronization (for # instance due to a bug in the client) will lead to unbound memory usage in # the query buffer. However you can configure it here if you have very special -# needs, such us huge multi/exec requests or alike. +# needs, such as a command with huge argument, or huge multi/exec requests or alike. # # client-query-buffer-limit 1gb @@ -2184,6 +2271,26 @@ rdb-save-incremental-fsync yes # lfu-log-factor 10 # lfu-decay-time 1 + +# The maximum number of new client connections accepted per event-loop cycle. This configuration +# is set independently for TLS connections. +# +# By default, up to 10 new connection will be accepted per event-loop cycle for normal connections +# and up to 1 new connection per event-loop cycle for TLS connections. +# +# Adjusting this to a larger number can slightly improve efficiency for new connections +# at the risk of causing timeouts for regular commands on established connections. It is +# not advised to change this without ensuring that all clients have limited connection +# pools and exponential backoff in the case of command/connection timeouts. +# +# If your application is establishing a large number of new connections per second you should +# also consider tuning the value of tcp-backlog, which allows the kernel to buffer more +# pending connections before dropping or rejecting connections. +# +# max-new-connections-per-cycle 10 +# max-new-tls-connections-per-cycle 1 + + ########################### ACTIVE DEFRAGMENTATION ####################### # # What is active defragmentation? @@ -2264,17 +2371,17 @@ jemalloc-bg-thread yes # the bgsave child process. 
The syntax to specify the cpu list is the same as # the taskset command: # -# Set redis server/io threads to cpu affinity 0,2,4,6: -# server_cpulist 0-7:2 +# Set server/io threads to cpu affinity 0,2,4,6: +# server-cpulist 0-7:2 # # Set bio threads to cpu affinity 1,3: -# bio_cpulist 1,3 +# bio-cpulist 1,3 # # Set aof rewrite child process to cpu affinity 8,9,10,11: -# aof_rewrite_cpulist 8-11 +# aof-rewrite-cpulist 8-11 # # Set bgsave child process to cpu affinity 1,10,11 -# bgsave_cpulist 1,10-11 +# bgsave-cpulist 1,10-11 # In some cases the server will emit warnings and even refuse to start if it detects # that the system is in bad state, it is possible to suppress these warnings @@ -2282,3 +2389,9 @@ jemalloc-bg-thread yes # to suppress # # ignore-warnings ARM64-COW-BUG + +# Inform Valkey of the availability zone if running in a cloud environment. Currently +# this is only exposed via the info command for clients to use, but in the future we +# may also use this when making decisions for replication. +# +# availability-zone "zone-name" diff --git a/indexing/indexing.conf b/indexing/indexing.conf index 8460520b..f10a35d4 100644 --- a/indexing/indexing.conf +++ b/indexing/indexing.conf @@ -51,6 +51,7 @@ # # loadmodule /path/to/my_module.so # loadmodule /path/to/other_module.so +# loadmodule /path/to/args_module.so [arg [arg ...]] ################################## NETWORK ##################################### @@ -86,7 +87,7 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ bind 127.0.0.1 -::1 -# By default, outgoing connections (from replica to master, from Sentinel to +# By default, outgoing connections (from replica to primary, from Sentinel to # instances, cluster bus, etc.) are not bound to a specific local address. In # most cases, this means the operating system will handle that based on routing # and the interface through which the connection goes out. @@ -152,6 +153,9 @@ tcp-backlog 511 # incoming connections. There is no default, so the server will not listen # on a unix socket when not specified. # +# unixsocket /run/valkey.sock +# unixsocketgroup wheel +# unixsocketperm 700 unixsocket indexing.sock unixsocketperm 700 @@ -192,7 +196,7 @@ tcp-keepalive 300 # tls-port 6379 # Configure a X.509 certificate and private key to use for authenticating the -# server to connected clients, masters or cluster peers. These files should be +# server to connected clients, primaries or cluster peers. These files should be # PEM formatted. # # tls-cert-file valkey.crt @@ -204,7 +208,7 @@ tcp-keepalive 300 # tls-key-file-pass secret # Normally the server uses the same certificate for both server functions (accepting -# connections) and client functions (replicating from a master, establishing +# connections) and client functions (replicating from a primary, establishing # cluster bus connections, etc.). # # Sometimes certificates are issued with attributes that designate them as @@ -244,7 +248,7 @@ tcp-keepalive 300 # tls-auth-clients optional # By default, a replica does not attempt to establish a TLS connection -# with its master. +# with its primary. # # Use the following directive to enable TLS on replication links. # @@ -328,7 +332,7 @@ daemonize yes # # When the server runs non daemonized, no pid file is created if none is # specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". +# is used even if not specified, defaulting to "/var/run/valkey.pid".
# # Creating a pid file is best effort: if the server is not able to create it # nothing bad happens, the server will start and run normally. @@ -356,7 +360,7 @@ logfile "" # syslog-enabled no # Specify the syslog identity. -# syslog-ident redis +# syslog-ident valkey # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. # syslog-facility local0 @@ -385,6 +389,14 @@ databases 16 # ASCII art logo in startup logs by setting the following option to yes. always-show-logo no +# User data, including keys, values, client names, and ACL usernames, can be +# logged as part of assertions and other error cases. To prevent sensitive user +# information, such as PII, from being recorded in the server log file, this +# user data is hidden from the log by default. If you need to log user data for +# debugging or troubleshooting purposes, you can disable this feature by +# changing the config value to no. +hide-user-data-from-log yes + # By default, the server modifies the process title (as seen in 'top' and 'ps') to # provide some runtime information. It is possible to disable this and leave # the process name as executed by setting the following to no. @@ -412,6 +424,15 @@ proc-title-template "{title} {listen-addr} {server-mode}" # is derived from the environment variables. locale-collate "" +# Valkey is largely compatible with Redis OSS, apart from a few cases where +# Valkey identifies itself as "Valkey" rather than "Redis". Extended +# Redis OSS compatibility mode makes Valkey pretend to be Redis. Enable this +# only if you have problems with tools or clients. This is a temporary +# configuration added in Valkey 8.0 and is scheduled to have no effect in Valkey +# 9.0 and be completely removed in Valkey 10.0. +# +# extended-redis-compatibility no + ################################ SNAPSHOTTING ################################ # Save the DB to disk. @@ -433,6 +454,7 @@ locale-collate "" # # You can set these explicitly by uncommenting the following line. # +# save 3600 1 300 100 60 10000 save 3600 1 # By default the server will stop accepting writes if RDB snapshots are enabled @@ -472,7 +494,7 @@ rdbchecksum yes # no - Never perform full sanitization # yes - Always perform full sanitization # clients - Perform full sanitization only for user connections. -# Excludes: RDB files, RESTORE commands received from the master +# Excludes: RDB files, RESTORE commands received from the primary # connection, and client connections which have the # skip-sanitize-payload ACL flag. # The default should be 'clients' but since it currently affects cluster @@ -486,13 +508,13 @@ dbfilename dump.rdb # Remove RDB files used by replication in instances without persistence # enabled. By default this option is disabled, however there are environments # where for regulations or other security concerns, RDB files persisted on -# disk by masters in order to feed replicas, or stored on disk by replicas +# disk by primaries in order to feed replicas, or stored on disk by replicas # in order to load them for the initial synchronization, should be deleted # ASAP. Note that this option ONLY WORKS in instances that have both AOF # and RDB persistence disabled, otherwise is completely ignored. # # An alternative (and sometimes better) way to obtain the same effect is -# to use diskless replication on both master and replicas instances. However +# to use diskless replication on both primary and replicas instances. However # in the case of replicas, diskless is not always an option.
rdb-del-sync-files no @@ -503,6 +525,9 @@ rdb-del-sync-files no # # The Append Only File will also be created inside this directory. # +# The Cluster config file is written relative this directory, if the +# 'cluster-config-file' configuration directive is a relative path. +# # Note that you must specify a directory here, not a file name. dir ./ @@ -516,38 +541,38 @@ dir ./ # | (receive writes) | | (exact copy) | # +------------------+ +---------------+ # -# 1) Replication is asynchronous, but you can configure a master to +# 1) Replication is asynchronous, but you can configure a primary to # stop accepting writes if it appears to be not connected with at least # a given number of replicas. # 2) Replicas are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of +# primary if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. # 3) Replication is automatic and does not need user intervention. After a -# network partition replicas automatically try to reconnect to masters +# network partition replicas automatically try to reconnect to primaries # and resynchronize with them. # -# replicaof +# replicaof -# If the master is password protected (using the "requirepass" configuration +# If the primary is password protected (using the "requirepass" configuration # directive below) it is possible to tell the replica to authenticate before -# starting the replication synchronization process, otherwise the master will +# starting the replication synchronization process, otherwise the primary will # refuse the replica request. # -# masterauth +# primaryauth # # However this is not enough if you are using ACLs # and the default user is not capable of running the PSYNC # command and/or other commands needed for replication. In this case it's # better to configure a special user to use with replication, and specify the -# masteruser configuration as such: +# primaryuser configuration as such: # -# masteruser +# primaryuser # -# When masteruser is specified, the replica will authenticate against its -# master using the new AUTH form: AUTH . +# When primaryuser is specified, the replica will authenticate against its +# primary using the new AUTH form: AUTH . -# When a replica loses its connection with the master, or when the replication +# When a replica loses its connection with the primary, or when the replication # is still in progress, the replica can act in two different ways: # # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will @@ -565,7 +590,7 @@ replica-serve-stale-data yes # You can configure a replica instance to accept writes or not. Writing against # a replica instance may be useful to store some ephemeral data (because data -# written on a replica will be easily deleted after resync with the master) but +# written on a replica will be easily deleted after resync with the primary) but # may also cause problems if clients are writing to it because of a # misconfiguration. # @@ -583,15 +608,15 @@ replica-read-only yes # # New replicas and reconnecting replicas that are not able to continue the # replication process just receiving differences, need to do what is called a -# "full synchronization". An RDB file is transmitted from the master to the +# "full synchronization". An RDB file is transmitted from the primary to the # replicas. 
# # The transmission can happen in two different ways: # -# 1) Disk-backed: The master creates a new process that writes the RDB +# 1) Disk-backed: The primary creates a new process that writes the RDB # file on disk. Later the file is transferred by the parent # process to the replicas incrementally. -# 2) Diskless: The master creates a new process that directly writes the +# 2) Diskless: The primary creates a new process that directly writes the # RDB file to replica sockets, without touching the disk at all. # # With disk-backed replication, while the RDB file is generated, more replicas @@ -600,7 +625,7 @@ replica-read-only yes # once the transfer starts, new replicas arriving will be queued and a new # transfer will start when the current one terminates. # -# When diskless replication is used, the master waits a configurable amount of +# When diskless replication is used, the primary waits a configurable amount of # time (in seconds) before starting the transfer in the hope that multiple # replicas will arrive and the transfer can be parallelized. # @@ -630,15 +655,15 @@ repl-diskless-sync-max-replicas 0 # WARNING: Since in this setup the replica does not immediately store an RDB on # disk, it may cause data loss during failovers. RDB diskless load + server # modules not handling I/O reads may cause the server to abort in case of I/O errors -# during the initial synchronization stage with the master. +# during the initial synchronization stage with the primary. # ----------------------------------------------------------------------------- # # Replica can load the RDB it reads from the replication link directly from the # socket, or store the RDB to a file and read that file after it was completely -# received from the master. +# received from the primary. # # In many cases the disk is slower than the network, and storing and loading -# the RDB file may increase replication time (and even increase the master's +# the RDB file may increase replication time (and even increase the primary's # Copy on Write memory and replica buffers). # However, when parsing the RDB file directly from the socket, in order to avoid # data loss it's only safe to flush the current dataset when the new dataset is @@ -649,7 +674,7 @@ repl-diskless-sync-max-replicas 0 # "swapdb" - Keep current db contents in RAM while parsing the data directly # from the socket. Replicas in this mode can keep serving current # dataset while replication is in progress, except for cases where -# they can't recognize master as having a data set from same +# they can't recognize primary as having a data set from same # replication history. # Note that this requires sufficient memory, if you don't have it, # you risk an OOM kill. @@ -658,6 +683,29 @@ repl-diskless-sync-max-replicas 0 # during replication. repl-diskless-load disabled +# This dual channel replication sync feature optimizes the full synchronization process +# between a primary and its replicas. When enabled, it reduces both memory and CPU load +# on the primary server. +# +# How it works: +# 1. During full sync, instead of accumulating replication data on the primary server, +# the data is sent directly to the syncing replica. +# 2. The primary's background save (bgsave) process streams the RDB snapshot directly +# to the replica over a separate connection. +# +# Tradeoff: +# While this approach reduces load on the primary, it shifts the burden of storing +# the replication buffer to the replica. 
This means the replica must have sufficient +# memory to accommodate the buffer during synchronization. However, this tradeoff is +# generally beneficial as it prevents potential performance degradation on the primary +# server, which is typically handling more critical operations. +# +# When toggling this configuration on or off during an ongoing synchronization process, +# it does not change the already running sync method. The new configuration will take +# effect only for subsequent synchronization processes. + +dual-channel-replication-enabled no + # Master send PINGs to its replicas in a predefined interval. It's possible to # change this interval with the repl_ping_replica_period option. The default # value is 10 seconds. @@ -668,11 +716,11 @@ repl-diskless-load disabled # # 1) Bulk transfer I/O during SYNC, from the point of view of replica. # 2) Master timeout from the point of view of replicas (data, pings). -# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# 3) Replica timeout from the point of view of primaries (REPLCONF ACK pings). # # It is important to make sure that this value is greater than the value # specified for repl-ping-replica-period otherwise a timeout will be detected -# every time there is low traffic between the master and the replica. The default +# every time there is low traffic between the primary and the replica. The default # value is 60 seconds. # # repl-timeout 60 @@ -688,7 +736,7 @@ repl-diskless-load disabled # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions -# or when the master and replicas are many hops away, turning this to "yes" may +# or when the primary and replicas are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no @@ -703,15 +751,15 @@ repl-disable-tcp-nodelay no # # The backlog is only allocated if there is at least one replica connected. # -# repl-backlog-size 1mb +# repl-backlog-size 10mb -# After a master has no connected replicas for some time, the backlog will be +# After a primary has no connected replicas for some time, the backlog will be # freed. The following option configures the amount of seconds that need to # elapse, starting from the time the last replica disconnected, for the backlog # buffer to be freed. # # Note that replicas never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially +# promoted to primaries later, and should be able to correctly "partially # resynchronize" with other replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. @@ -720,21 +768,21 @@ repl-disable-tcp-nodelay no # The replica priority is an integer number published by the server in the INFO # output. It is used by Sentinel in order to select a replica to promote -# into a master if the master is no longer working correctly. +# into a primary if the primary is no longer working correctly. # # A replica with a low priority number is considered better for promotion, so # for instance if there are three replicas with priority 10, 100, 25 Sentinel # will pick the one with priority 10, that is the lowest. # # However a special priority of 0 marks the replica as not able to perform the -# role of master, so a replica with priority of 0 will never be selected by +# role of primary, so a replica with priority of 0 will never be selected by # Sentinel for promotion. 
# # By default the priority is 100. replica-priority 100 # The propagation error behavior controls how the server will behave when it is -# unable to handle a command being processed in the replication stream from a master +# unable to handle a command being processed in the replication stream from a primary # or processed while reading from an AOF file. Errors that occur during propagation # are unexpected, and can cause data inconsistency. # @@ -747,7 +795,7 @@ replica-priority 100 # propagation-error-behavior ignore # Replica ignore disk write errors controls the behavior of a replica when it is -# unable to persist a write command received from its master to disk. By default, +# unable to persist a write command received from its primary to disk. By default, # this configuration is set to 'no' and will crash the replica in this condition. # It is not recommended to change this default. # @@ -756,16 +804,16 @@ replica-priority 100 # ----------------------------------------------------------------------------- # By default, Sentinel includes all replicas in its reports. A replica # can be excluded from Sentinel's announcements. An unannounced replica -# will be ignored by the 'sentinel replicas ' command and won't be +# will be ignored by the 'sentinel replicas ' command and won't be # exposed to Sentinel's clients. # # This option does not change the behavior of replica-priority. Even with -# replica-announced set to 'no', the replica can be promoted to master. To +# replica-announced set to 'no', the replica can be promoted to primary. To # prevent this behavior, set replica-priority to 0. # # replica-announced yes -# It is possible for a master to stop accepting writes if there are less than +# It is possible for a primary to stop accepting writes if there are less than # N replicas connected, having a lag less or equal than M seconds. # # The N replicas need to be in "online" state. @@ -787,18 +835,18 @@ replica-priority 100 # By default min-replicas-to-write is set to 0 (feature disabled) and # min-replicas-max-lag is set to 10. -# A master is able to list the address and port of the attached +# A primary is able to list the address and port of the attached # replicas in different ways. For example the "INFO replication" section # offers this information, which is used, among other tools, by # Sentinel in order to discover replica instances. # Another place where this info is available is in the output of the -# "ROLE" command of a master. +# "ROLE" command of a primary. # # The listed IP address and port normally reported by a replica is # obtained in the following way: # # IP: The address is auto detected by checking the peer address -# of the socket used by the replica to connect with the master. +# of the socket used by the replica to connect with the primary. # # Port: The port is communicated by the replica during the replication # handshake, and is normally the port that the replica is using to @@ -807,7 +855,7 @@ replica-priority 100 # However when port forwarding or Network Address Translation (NAT) is # used, the replica may actually be reachable via different IP and port # pairs. The following two options can be used by a replica in order to -# report to its master a specific set of IP and port, so that both INFO +# report to its primary a specific set of IP and port, so that both INFO # and ROLE will report those values. # # There is no need to use both the options if you need to override just @@ -1151,7 +1199,8 @@ acllog-max-len 128 # configuration directive. 
#
# The default of 5 produces good enough results. 10 Approximates very closely
-# true LRU but costs more CPU. 3 is faster but not very accurate.
+# true LRU but costs more CPU. 3 is faster but not very accurate. The maximum
+# value that can be set is 64.
#
# maxmemory-samples 5
@@ -1164,11 +1213,11 @@ acllog-max-len 128
# maxmemory-eviction-tenacity 10
# By default a replica will ignore its maxmemory setting
-# (unless it is promoted to master after a failover or manually). It means
-# that the eviction of keys will be just handled by the master, sending the
-# DEL commands to the replica as keys evict in the master side.
+# (unless it is promoted to primary after a failover or manually). It means
+# that the eviction of keys will be just handled by the primary, sending the
+# DEL commands to the replica as keys evict in the primary side.
#
-# This behavior ensures that masters and replicas stay consistent, and is usually
+# This behavior ensures that primaries and replicas stay consistent, and is usually
# what you want, however if your replica is writable, or you want the replica
# to have a different memory setting, and you are sure all the writes performed
# to the replica are idempotent, then you may change this default (but be sure
@@ -1179,7 +1228,7 @@ acllog-max-len 128
# be larger on the replica, or data structures may sometimes take more memory
# and so forth). So make sure you monitor your replicas and make sure they
# have enough memory to never hit a real out-of-memory condition before the
-# master hits the configured maxmemory setting.
+# primary hits the configured maxmemory setting.
#
# replica-ignore-maxmemory yes
@@ -1202,8 +1251,8 @@ acllog-max-len 128
############################# LAZY FREEING ####################################
-# The server has two primitives to delete keys. One is called DEL and is a blocking
-# deletion of the object. It means that the server stops processing new commands
+# When keys are deleted, the server has historically freed their memory using
+# blocking operations. It means that the server stopped processing new commands
# in order to reclaim all the memory associated with an object in a synchronous
# way. If the key deleted is associated with a small object, the time needed
# in order to execute the DEL command is very small and comparable to most other
@@ -1211,15 +1260,16 @@ acllog-max-len 128
# aggregated value containing millions of elements, the server can block for
# a long time (even seconds) in order to complete the operation.
#
-# For the above reasons the server also offers non blocking deletion primitives
-# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
-# FLUSHDB commands, in order to reclaim memory in background. Those commands
-# are executed in constant time. Another thread will incrementally free the
-# object in the background as fast as possible.
+# For the above reasons, lazy freeing (or asynchronous freeing) has been
+# introduced. With lazy freeing, keys are deleted in constant time. Another
+# thread will incrementally free the object in the background as fast as
+# possible.
#
-# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
-# It's up to the design of the application to understand when it is a good
-# idea to use one or the other. However the server sometimes has to
+# Starting from Valkey 8.0, lazy freeing is enabled by default. It is possible
+# to retain the synchronous freeing behaviour by setting the lazyfree-related
+# configuration directives to 'no'.
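+#
+# As an illustrative sketch (not an upstream default), a deployment that
+# prefers the pre-8.0 synchronous behaviour could explicitly turn the
+# lazyfree-related directives set below back to 'no':
+#
+# lazyfree-lazy-eviction no
+# lazyfree-lazy-expire no
+# lazyfree-lazy-server-del no
+# lazyfree-lazy-user-del no
+# lazyfree-lazy-user-flush no
+# replica-lazy-flush no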
+
+# Commands like DEL, FLUSHALL and FLUSHDB delete keys, but the server can also
# delete keys or flush the whole database as a side effect of other operations.
# Specifically the server deletes objects independently of a user call in the
# following scenarios:
@@ -1236,32 +1286,34 @@ acllog-max-len 128
# itself removes any old content of the specified key in order to replace
# it with the specified string.
# 4) During replication, when a replica performs a full resynchronization with
-# its master, the content of the whole database is removed in order to
+# its primary, the content of the whole database is removed in order to
# load the RDB file just transferred.
#
-# In all the above cases the default is to delete objects in a blocking way,
-# like if DEL was called. However you can configure each case specifically
-# in order to instead release memory in a non-blocking way like if UNLINK
-# was called, using the following configuration directives.
+# In all the above cases, the default is to release memory in a non-blocking
+# way.
-lazyfree-lazy-eviction no
-lazyfree-lazy-expire no
-lazyfree-lazy-server-del no
-replica-lazy-flush no
+lazyfree-lazy-eviction yes
+lazyfree-lazy-expire yes
+lazyfree-lazy-server-del yes
+replica-lazy-flush yes
-# It is also possible, for the case when to replace the user code DEL calls
-# with UNLINK calls is not easy, to modify the default behavior of the DEL
-# command to act exactly like UNLINK, using the following configuration
-# directive:
+# For keys deleted using the DEL command, lazy freeing is controlled by the
+# configuration directive 'lazyfree-lazy-user-del'. The default is 'yes'. The
+# UNLINK command is identical to the DEL command, except that UNLINK always
+# frees the memory lazily, regardless of this configuration directive:
-lazyfree-lazy-user-del no
+lazyfree-lazy-user-del yes
# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
# commands. When neither flag is passed, this directive will be used to determine
# if the data should be deleted asynchronously.
-lazyfree-lazy-user-flush no
+# Running a flush synchronously has several drawbacks. Even in single CPU
+# environments, the server has to balance freeing the memory against serving
+# incoming requests. The default value is yes.
+
+lazyfree-lazy-user-flush yes
################################ THREADED I/O #################################
@@ -1277,9 +1329,8 @@ lazyfree-lazy-user-flush no
# to pipelining nor sharding of the instance.
#
# By default threading is disabled, we suggest enabling it only in machines
-# that have at least 4 or more cores, leaving at least one spare core.
-# Using more than 8 threads is unlikely to help much. We also recommend using
-# threaded I/O only if you actually have performance problems, with
+# that have at least 3 cores, leaving at least one spare core.
+# We also recommend using threaded I/O only if you actually have performance problems, with
# instances being able to use a quite big percentage of CPU time, otherwise
# there is no point in using this feature.
#
@@ -1290,21 +1341,23 @@ lazyfree-lazy-user-flush no
# io-threads 4
#
# Setting io-threads to 1 will just use the main thread as usual.
-# When I/O threads are enabled, we only use threads for writes, that is
-# to thread the write(2) syscall and transfer the client buffers to the
-# socket. However it is also possible to enable threading of reads and
-# protocol parsing using the following configuration directive, by setting
-# it to yes:
+# When I/O threads are enabled, we use threads for both reads and writes: the
+# read(2) and write(2) syscalls and protocol parsing are handled by the I/O
+# threads, which also transfer the client buffers to and from the socket.
#
-# io-threads-do-reads no
+# When multiple commands are parsed by the I/O threads and ready for execution,
+# we take advantage of knowing the next set of commands and prefetch their
+# required dictionary entries in a batch. This reduces memory access costs.
#
-# Usually threading reads doesn't help much.
+# The optimal batch size depends on the specific workflow of the user.
+# The default batch size is 16, which can be modified using the
+# 'prefetch-batch-max-size' config.
#
-# NOTE 1: This configuration directive cannot be changed at runtime via
-# CONFIG SET. Also, this feature currently does not work when SSL is
-# enabled.
+# When the config is set to 0, prefetching is disabled.
#
-# NOTE 2: If you want to test the server speedup using valkey-benchmark, make
+# prefetch-batch-max-size 16
+#
+# NOTE: If you want to test the server speedup using valkey-benchmark, make
# sure you also run the benchmark itself in threaded mode, using the
# --threads option to match the number of server threads, otherwise you'll not
# be able to notice the improvements.
@@ -1317,7 +1370,7 @@ lazyfree-lazy-user-flush no
# Enabling this feature makes the server actively control the oom_score_adj value
# for all its processes, depending on their role. The default scores will
# attempt to have background child processes killed before all others, and
-# replicas killed before masters.
+# replicas killed before primaries.
#
# The server supports these options:
#
@@ -1331,7 +1384,7 @@ lazyfree-lazy-user-flush no
oom-score-adj no
# When oom-score-adj is used, this directive controls the specific values used
-# for master, replica and background child processes. Values range -2000 to
+# for primary, replica and background child processes. Values range -2000 to
# 2000 (higher means more likely to be killed).
#
# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
@@ -1371,6 +1424,10 @@ disable-thp yes
# If the AOF is enabled on startup the server will load the AOF, that is the file
# with the better durability guarantees.
#
+# Note that changing this value in a config file of an existing database and
+# restarting the server can lead to data loss. A conversion needs to be done
+# by setting it via the CONFIG command on a live server first.
+#
# Please check https://valkey.io/topics/persistence for more information.
appendonly no
@@ -1515,7 +1572,7 @@ aof-timestamp-enabled no
# Maximum time to wait for replicas when shutting down, in seconds.
#
# During shut down, a grace period allows any lagging replicas to catch up with
-# the latest replication offset before the master exists. This period can
+# the latest replication offset before the primary exits. This period can
# prevent data loss, especially for deployments without configured disk backups.
#
# The 'shutdown-timeout' value is the grace period's duration in seconds. It is
@@ -1591,7 +1648,7 @@ aof-timestamp-enabled no
# you to specify the cluster bus port when executing cluster meet.
# cluster-port 0
-# A replica of a failing master will avoid to start a failover if its data
+# A replica of a failing primary will avoid starting a failover if its data
# looks too old.
# # There is no simple way for a replica to actually have an exact measure of @@ -1599,35 +1656,35 @@ aof-timestamp-enabled no # # 1) If there are multiple replicas able to failover, they exchange messages # in order to try to give an advantage to the replica with the best -# replication offset (more data from the master processed). +# replication offset (more data from the primary processed). # Replicas will try to get their rank by offset, and apply to the start # of the failover a delay proportional to their rank. # # 2) Every single replica computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master +# its primary. This can be the last ping or command received (if the primary # is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). +# disconnection with the primary (if the replication link is currently down). # If the last interaction is too old, the replica will not try to failover # at all. # # The point "2" can be tuned by user. Specifically a replica will not perform -# the failover if, since the last interaction with the master, the time +# the failover if, since the last interaction with the primary, the time # elapsed is greater than: # # (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period # # So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the -# replica will not try to failover if it was not able to talk with the master +# replica will not try to failover if it was not able to talk with the primary # for longer than 310 seconds. # # A large cluster-replica-validity-factor may allow replicas with too old data to failover -# a master, while a too small value may prevent the cluster from being able to +# a primary, while a too small value may prevent the cluster from being able to # elect a replica at all. # # For maximum availability, it is possible to set the cluster-replica-validity-factor # to a value of 0, which means, that replicas will always try to failover the -# master regardless of the last time they interacted with the master. +# primary regardless of the last time they interacted with the primary. # (However they'll always try to apply a delay proportional to their # offset rank). # @@ -1636,19 +1693,19 @@ aof-timestamp-enabled no # # cluster-replica-validity-factor 10 -# Cluster replicas are able to migrate to orphaned masters, that are masters +# Cluster replicas are able to migrate to orphaned primaries, that are primaries # that are left without working replicas. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over +# to resist to failures as otherwise an orphaned primary can't be failed over # in case of failure if it has no working replicas. # -# Replicas migrate to orphaned masters only if there are still at least a -# given number of other working replicas for their old master. This number +# Replicas migrate to orphaned primaries only if there are still at least a +# given number of other working replicas for their old primary. This number # is the "migration barrier". A migration barrier of 1 means that a replica -# will migrate only if there is at least 1 other working replica for its master +# will migrate only if there is at least 1 other working replica for its primary # and so forth. 
It usually reflects the number of replicas you want for every
-# master in your cluster.
+# primary in your cluster.
#
-# Default is 1 (replicas migrate only if their masters remain with at least
+# Default is 1 (replicas migrate only if their primaries remain with at least
# one replica). To disable migration just set it to a very large value or
# set cluster-allow-replica-migration to 'no'.
# A value of 0 can be set but is useful only for debugging and dangerous
@@ -1657,8 +1714,10 @@ aof-timestamp-enabled no
# cluster-migration-barrier 1
# Turning off this option allows to use less automatic cluster configuration.
-# It both disables migration to orphaned masters and migration from masters
-# that became empty.
+# It disables migration of replicas to orphaned primaries. Primaries that become
+# empty due to losing their last slots to another primary will not automatically
+# replicate from the primary that took over their last slots. Instead, they will
+# remain as empty primaries without any slots.
#
# Default is 'yes' (allow automatic migrations).
#
@@ -1678,7 +1737,7 @@ aof-timestamp-enabled no
# cluster-require-full-coverage yes
# This option, when set to yes, prevents replicas from trying to failover its
-# master during master failures. However the replica can still perform a
+# primary during primary failures. However the replica can still perform a
# manual failover, if forced to do so.
#
# This is useful in different scenarios, especially in the case of multiple
@@ -1697,9 +1756,9 @@ aof-timestamp-enabled no
#
# The second use case is for configurations that don't meet the recommended
# three shards but want to enable cluster mode and scale later. A
-# master outage in a 1 or 2 shard configuration causes a read/write outage to the
+# primary outage in a 1 or 2 shard configuration causes a read/write outage to the
# entire cluster without this option set, with it set there is only a write outage.
-# Without a quorum of masters, slot ownership will not change automatically.
+# Without a quorum of primaries, slot ownership will not change automatically.
#
# cluster-allow-reads-when-down no
@@ -1754,6 +1813,26 @@ aof-timestamp-enabled no
#
# cluster-preferred-endpoint-type ip
+# The cluster blacklist is used when removing a node from the cluster completely.
+# When CLUSTER FORGET is called for a node, that node is put into the blacklist for
+# some time so that when gossip messages are received from other nodes that still
+# remember it, it is not re-added. This gives time for CLUSTER FORGET to be sent to
+# every node in the cluster. The blacklist TTL is 60 seconds by default, which should
+# be sufficient for most clusters, but you may consider increasing this if you see
+# nodes getting re-added while using CLUSTER FORGET.
+#
+# cluster-blacklist-ttl 60
+
+# Clusters can be configured to track per-slot resource statistics,
+# which are accessible by the CLUSTER SLOT-STATS command.
+#
+# By default, the 'cluster-slot-stats-enabled' config is disabled, and only 'key-count' is captured.
+# By enabling the 'cluster-slot-stats-enabled' config, the cluster will begin to capture advanced statistics.
+# These statistics can be leveraged to assess general slot usage trends, identify hot / cold slots,
+# migrate slots for a balanced cluster workload, and / or re-write application logic to better utilize slots.
+#
+# cluster-slot-stats-enabled no
+
# In order to setup your cluster make sure to read the documentation
# available at https://valkey.io web site.
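+#
+# Building on the two options documented above, an illustrative example (the
+# values are assumptions, not recommendations) for a node that should capture
+# advanced per-slot statistics for CLUSTER SLOT-STATS and keep forgotten nodes
+# blacklisted for two minutes could look like this:
+#
+# cluster-slot-stats-enabled yes
+# cluster-blacklist-ttl 120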
@@ -1765,22 +1844,28 @@ aof-timestamp-enabled no
#
# In order to make a cluster work in such environments, a static
# configuration where each node knows its public address is needed. The
-# following four options are used for this scope, and are:
+# following options are used for this scope, and are:
#
# * cluster-announce-ip
+# * cluster-announce-client-ipv4
+# * cluster-announce-client-ipv6
# * cluster-announce-port
# * cluster-announce-tls-port
# * cluster-announce-bus-port
#
-# Each instructs the node about its address, client ports (for connections
-# without and with TLS) and cluster message bus port. The information is then
-# published in the header of the bus packets so that other nodes will be able to
-# correctly map the address of the node publishing the information.
+# Each instructs the node about its address, possibly other addresses to expose
+# to clients, client ports (for connections without and with TLS) and cluster
+# message bus port. The information is then published in the bus packets so that
+# other nodes will be able to correctly map the address of the node publishing
+# the information.
#
# If tls-cluster is set to yes and cluster-announce-tls-port is omitted or set
# to zero, then cluster-announce-port refers to the TLS port. Note also that
# cluster-announce-tls-port has no effect if tls-cluster is set to no.
#
+# If cluster-announce-client-ipv4 and cluster-announce-client-ipv6 are omitted,
+# then cluster-announce-ip is exposed to clients.
+#
# If the above options are not used, the normal cluster auto-detection
# will be used instead.
#
@@ -1792,6 +1877,8 @@ aof-timestamp-enabled no
# Example:
#
# cluster-announce-ip 10.1.1.5
+# cluster-announce-client-ipv4 123.123.123.5
+# cluster-announce-client-ipv6 2001:db8::8a2e:370:7334
# cluster-announce-tls-port 6379
# cluster-announce-port 0
# cluster-announce-bus-port 6380
@@ -2059,7 +2146,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60
# amount by default in order to avoid that a protocol desynchronization (for
# instance due to a bug in the client) will lead to unbound memory usage in
# the query buffer. However you can configure it here if you have very special
-# needs, such us huge multi/exec requests or alike.
+# needs, such as a command with a huge argument, huge multi/exec requests, or the like.
#
# client-query-buffer-limit 1gb
@@ -2184,6 +2271,26 @@ rdb-save-incremental-fsync yes
# lfu-log-factor 10
# lfu-decay-time 1
+
+# The maximum number of new client connections accepted per event-loop cycle. This configuration
+# is set independently for TLS connections.
+#
+# By default, up to 10 new connections will be accepted per event-loop cycle for normal connections
+# and up to 1 new connection per event-loop cycle for TLS connections.
+#
+# Adjusting this to a larger number can slightly improve efficiency for new connections
+# at the risk of causing timeouts for regular commands on established connections. It is
+# not advised to change this without ensuring that all clients have limited connection
+# pools and exponential backoff in the case of command/connection timeouts.
+#
+# If your application is establishing a large number of new connections per second you should
+# also consider tuning the value of tcp-backlog, which allows the kernel to buffer more
+# pending connections before dropping or rejecting connections.
+# +# max-new-connections-per-cycle 10 +# max-new-tls-connections-per-cycle 1 + + ########################### ACTIVE DEFRAGMENTATION ####################### # # What is active defragmentation? @@ -2264,17 +2371,17 @@ jemalloc-bg-thread yes # the bgsave child process. The syntax to specify the cpu list is the same as # the taskset command: # -# Set redis server/io threads to cpu affinity 0,2,4,6: -# server_cpulist 0-7:2 +# Set server/io threads to cpu affinity 0,2,4,6: +# server-cpulist 0-7:2 # # Set bio threads to cpu affinity 1,3: -# bio_cpulist 1,3 +# bio-cpulist 1,3 # # Set aof rewrite child process to cpu affinity 8,9,10,11: -# aof_rewrite_cpulist 8-11 +# aof-rewrite-cpulist 8-11 # # Set bgsave child process to cpu affinity 1,10,11 -# bgsave_cpulist 1,10-11 +# bgsave-cpulist 1,10-11 # In some cases the server will emit warnings and even refuse to start if it detects # that the system is in bad state, it is possible to suppress these warnings @@ -2282,3 +2389,9 @@ jemalloc-bg-thread yes # to suppress # # ignore-warnings ARM64-COW-BUG + +# Inform Valkey of the availability zone if running in a cloud environment. Currently +# this is only exposed via the info command for clients to use, but in the future we +# we may also use this when making decisions for replication. +# +# availability-zone "zone-name" diff --git a/poetry.lock b/poetry.lock index 9245d4e7..1b364b62 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2695,6 +2695,98 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa typing = ["typing-extensions"] xmp = ["defusedxml"] +[[package]] +name = "pillow" +version = "11.0.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, + {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, + {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, + {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, + {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, + {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, + {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, + {file = 
"pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, + {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, + {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, + {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, + {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, + {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, + {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, + {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, + {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, + {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, + {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, + {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, + {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, + {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, + {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, + {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, + {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, + {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + [[package]] name = "pkgutil-resolve-name" version = "1.3.10" @@ -2952,13 +3044,13 @@ files = [ [[package]] name = "publicsuffixlist" -version = "1.0.2.20241015" +version = "1.0.2.20241017" description = "publicsuffixlist implement" optional = false python-versions = ">=3.5" files = [ - {file = "publicsuffixlist-1.0.2.20241015-py2.py3-none-any.whl", hash = "sha256:ca405fb3e40ef11cedbd38192447d28fd994eb0e93c00bbea5b585c0bc98a1b4"}, - {file = "publicsuffixlist-1.0.2.20241015.tar.gz", hash = "sha256:aba38be392cbbcc97286a8250c0405fcf81c7a9f3346fa0ae7c48e55512682f3"}, + {file = "publicsuffixlist-1.0.2.20241017-py2.py3-none-any.whl", hash = "sha256:7420cc5a8fc10418043d2f5bcd8bb3fa3800a83d33136130c71607c366bb7e4c"}, + {file = "publicsuffixlist-1.0.2.20241017.tar.gz", hash = "sha256:387a7b318bbd7a8de159014a0a1b81d58c3c2ea6a5f0d5c9a0444056fd694bbf"}, ] 
[package.extras] @@ -3374,19 +3466,19 @@ docs = ["Sphinx (<7.2)", "Sphinx (>=7.2,<8.0)", "Sphinx (>=8,<9)"] [[package]] name = "pymisp" -version = "2.5.0" +version = "2.5.1" description = "Python API for MISP." optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "pymisp-2.5.0-py3-none-any.whl", hash = "sha256:b15915aa88376b94cb608ce38469e3bbdc7ca5507709e64dfeb46b874a17969b"}, - {file = "pymisp-2.5.0.tar.gz", hash = "sha256:d3bfc7a337e4382026343f05aa442bfdecc3e11a4f3a5479ad0e6087dcde7e78"}, + {file = "pymisp-2.5.1-py3-none-any.whl", hash = "sha256:29afd529b1e18ebf7a242c68ab240561dbf5b706d11612d75fc692413fd92990"}, + {file = "pymisp-2.5.1.tar.gz", hash = "sha256:8aa3aa9e8ad9d9eb7a31e8ad5c07741855d4bac6efa65d83ea6a62689efae2c2"}, ] [package.dependencies] deprecated = ">=1.2.14,<2.0.0" lief = {version = ">=0.15.0,<0.16.0", optional = true, markers = "extra == \"fileobjects\""} -publicsuffixlist = ">=1.0.2.20241003,<2.0.0.0" +publicsuffixlist = ">=1.0.2.20241017,<2.0.0.0" pydeep2 = {version = ">=0.5.1,<0.6.0", optional = true, markers = "extra == \"fileobjects\""} pyfaup = {version = ">=1.2,<2.0", optional = true, markers = "extra == \"url\""} python-dateutil = ">=2.9.0.post0,<3.0.0" @@ -3395,8 +3487,8 @@ requests = ">=2.32.3,<3.0.0" [package.extras] brotli = ["urllib3[brotli]"] -docs = ["Sphinx (>=8,<9)", "docutils (>=0.21.1,<0.22.0)", "recommonmark (>=0.7.1,<0.8.0)", "sphinx-autodoc-typehints (>=2.4.4,<3.0.0)"] -email = ["RTFDE (>=0.1.1,<0.2.0)", "extract_msg (>=0.49,<0.50)", "oletools (>=0.60.1,<0.61.0)"] +docs = ["Sphinx (>=8,<9)", "docutils (>=0.21.1,<0.22.0)", "recommonmark (>=0.7.1,<0.8.0)", "sphinx-autodoc-typehints (>=2.5.0,<3.0.0)"] +email = ["RTFDE (>=0.1.1,<0.2.0)", "extract_msg (>=0.51,<0.52)", "oletools (>=0.60.1,<0.61.0)"] fileobjects = ["lief (>=0.15.0,<0.16.0)", "pydeep2 (>=0.5.1,<0.6.0)", "python-magic (>=0.4.27,<0.5.0)"] openioc = ["beautifulsoup4 (>=4.12.3,<5.0.0)"] pdfexport = ["reportlab (>=4.2.5,<5.0.0)"] @@ -4013,13 +4105,13 @@ test = ["pytest"] [[package]] name = "setuptools" -version = "75.1.0" +version = "75.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, - {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, + {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"}, + {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"}, ] [package.extras] @@ -4279,13 +4371,13 @@ types-urllib3 = "*" [[package]] name = "types-requests" -version = "2.32.0.20240914" +version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, ] 
[package.dependencies] @@ -4697,4 +4789,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<3.13" -content-hash = "3289656ed6b4790b8f1c1f4a5d9e0a029a45c570397ca433ebfcd82e6a2209ca" +content-hash = "ebeed49d8baecb2e60479e29c556f632af9d36112847cf5f28a60ee5e6438c2c" diff --git a/pyproject.toml b/pyproject.toml index 381fbcbe..6ae4926b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,8 +56,11 @@ dnspython = [ {version = "^2.7.0", python = ">=3.9"} ] pytaxonomies = "^1.5.0" -pymisp = {version = "^2.5.0", extras = ["url", "fileobjects"]} -Pillow = "^10.4.0" +pymisp = {version = "^2.5.1", extras = ["url", "fileobjects"]} +Pillow = [ + {version = "<11", python = "<3.9"}, + {version = "^11", python = ">=3.9"} +] flask-restx = "^1.3.0" rich = "^13.9.2" pyphishtanklookup = "^1.4.0" @@ -74,7 +77,7 @@ pypandora = "^1.9.0" lacuscore = "^1.11.1" pylacus = "^1.11.1" pyipasnhistory = "^2.1.2" -publicsuffixlist = "^1.0.2.20241015" +publicsuffixlist = "^1.0.2.20241017" pyfaup = "^1.2" chardet = "^5.2.0" pysecuritytxt = "^1.3.2"