change leveldb to ardb

pull/201/head
Terrtia 2018-05-07 14:50:40 +02:00
parent c17b9402f7
commit 3841c159a0
No known key found for this signature in database
GPG Key ID: 1E1B1F50D84613D0
19 changed files with 386 additions and 621 deletions

View File

@@ -53,9 +53,9 @@ if __name__ == "__main__":
faup = Faup()
server_cred = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_TermCred", "host"),
port=p.config.get("Redis_Level_DB_TermCred", "port"),
db=p.config.get("Redis_Level_DB_TermCred", "db"),
host=p.config.get("ARDB_TermCred", "host"),
port=p.config.get("ARDB_TermCred", "port"),
db=p.config.get("ARDB_TermCred", "db"),
decode_responses=True)
criticalNumberToAlert = p.config.getint("Credential", "criticalNumberToAlert")
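
This pattern repeats across most modules in the commit: every Redis_Level_DB_* config section is renamed to ARDB_*, and each client now points at the single ardb instance on port 6382 with a per-module logical db. A minimal sketch of the new lookup, assuming a configparser-style config file containing the [ARDB_TermCred] section added later in this diff (the path is hypothetical):

import configparser
import redis

cfg = configparser.ConfigParser()
cfg.read('configs/config.cfg')  # hypothetical path to the AIL config

server_cred = redis.StrictRedis(
    host=cfg.get("ARDB_TermCred", "host"),     # localhost
    port=cfg.getint("ARDB_TermCred", "port"),  # 6382, the shared ardb instance
    db=cfg.getint("ARDB_TermCred", "db"),      # logical db index (5 in this diff)
    decode_responses=True)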

View File

@@ -82,15 +82,15 @@ if __name__ == "__main__":
# REDIS #
r_serv1 = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_Curve", "host"),
port=p.config.get("Redis_Level_DB_Curve", "port"),
db=p.config.get("Redis_Level_DB_Curve", "db"),
host=p.config.get("ARDB_Curve", "host"),
port=p.config.get("ARDB_Curve", "port"),
db=p.config.get("ARDB_Curve", "db"),
decode_responses=True)
server_term = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_TermFreq", "host"),
port=p.config.get("Redis_Level_DB_TermFreq", "port"),
db=p.config.get("Redis_Level_DB_TermFreq", "db"),
host=p.config.get("ARDB_TermFreq", "host"),
port=p.config.get("ARDB_TermFreq", "port"),
db=p.config.get("ARDB_TermFreq", "db"),
decode_responses=True)
# FUNCTIONS #

View File

@@ -148,9 +148,9 @@ if __name__ == '__main__':
r_temp.sadd("MODULE_TYPE_"+ "CurveManageTopSets" , str(os.getpid()))
server_term = redis.StrictRedis(
host=cfg.get("Redis_Level_DB_TermFreq", "host"),
port=cfg.getint("Redis_Level_DB_TermFreq", "port"),
db=cfg.getint("Redis_Level_DB_TermFreq", "db"),
host=cfg.get("ARDB_TermFreq", "host"),
port=cfg.getint("ARDB_TermFreq", "port"),
db=cfg.getint("ARDB_TermFreq", "db"),
decode_responses=True)
publisher.info("Script Curve_manage_top_set started")

View File

@@ -52,8 +52,9 @@ if __name__ == "__main__":
for year in range(2013, date_today.year+1):
for month in range(0, 13):
dico_redis[str(year)+str(month).zfill(2)] = redis.StrictRedis(
host=p.config.get("Redis_Level_DB", "host"), port=year,
db=month,
host=p.config.get("ARDB_DB", "host"),
port=p.config.get("ARDB_DB", "port"),
db=str(year) + str(month).zfill(2),
decode_responses=True)
# FUNCTIONS #
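
With redis-leveldb each year was its own server (port = year) and each month a db; on the single ardb instance both collapse into the db index. A self-contained sketch of the resulting mapping, assuming ardb accepts year+month db indexes such as 201305 (and a string db value, as this codebase already passes elsewhere):

from datetime import date
import redis

dico_redis = {}
for year in range(2013, date.today().year + 1):
    for month in range(0, 13):
        key = str(year) + str(month).zfill(2)  # e.g. '201305'
        dico_redis[key] = redis.StrictRedis(
            host='localhost', port=6382,
            db=key,  # assumption: ardb allows large db indexes
            decode_responses=True)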

View File

@@ -11,11 +11,11 @@ CYAN="\\033[1;36m"
[ -z "$AIL_HOME" ] && echo "Needs the env var AIL_HOME. Run the script from the virtual environment." && exit 1;
[ -z "$AIL_REDIS" ] && echo "Needs the env var AIL_REDIS. Run the script from the virtual environment." && exit 1;
[ -z "$AIL_LEVELDB" ] && echo "Needs the env var AIL_LEVELDB. Run the script from the virtual environment." && exit 1;
[ -z "$AIL_ARDB" ] && echo "Needs the env var AIL_ARDB. Run the script from the virtual environment." && exit 1;
export PATH=$AIL_HOME:$PATH
export PATH=$AIL_REDIS:$PATH
export PATH=$AIL_LEVELDB:$PATH
export PATH=$AIL_ARDB:$PATH
function helptext {
echo -e $YELLOW"
@@ -40,7 +40,7 @@ function helptext {
(Inside screen Daemons)
"$RED"
But first of all you'll need to edit a few paths where you installed
your redis & leveldb servers.
your redis & ardb servers.
"$DEFAULT"
Usage:
-----
@@ -58,33 +58,17 @@ function launching_redis {
screen -S "Redis_AIL" -X screen -t "6380" bash -c 'redis-server '$conf_dir'6380.conf ; read x'
sleep 0.1
screen -S "Redis_AIL" -X screen -t "6381" bash -c 'redis-server '$conf_dir'6381.conf ; read x'
# For Words and curves
sleep 0.1
screen -S "Redis_AIL" -X screen -t "6382" bash -c 'redis-server '$conf_dir'6382.conf ; read x'
}
function launching_lvldb {
lvdbhost='127.0.0.1'
lvdbdir="${AIL_HOME}/LEVEL_DB_DATA/"
nb_db=13
function launching_ardb {
conf_dir="${AIL_HOME}/configs/"
db_y=`date +%Y`
#Verify that a dir with the correct year exists, create it otherwise
if [ ! -d "$lvdbdir$db_y" ]; then
mkdir -p "$db_y"
fi
screen -dmS "LevelDB_AIL"
screen -dmS "ARDB_AIL"
sleep 0.1
echo -e $GREEN"\t* Launching Levels DB servers"$DEFAULT
echo -e $GREEN"\t* Launching ARDB servers"$DEFAULT
#Launch a DB for each dir
for pathDir in $lvdbdir*/ ; do
yDir=$(basename "$pathDir")
sleep 0.1
screen -S "LevelDB_AIL" -X screen -t "$yDir" bash -c 'redis-leveldb -h; redis-leveldb -H '$lvdbhost' -D '$pathDir' -P '$yDir' -M '$nb_db'; read x'
done
sleep 0.1
screen -S "ARDB_AIL" -X screen -t "6382" bash -c 'ardb-server '$conf_dir'6382.conf ; read x'
}
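
The per-year redis-leveldb loop is gone: a single ardb-server now starts inside a screen session from a regular config file. A hedged standalone equivalent of the new launch step plus the liveness probe used further down (ardb speaks the Redis protocol, so redis-cli works, assuming ardb-server is on PATH via $AIL_ARDB):

screen -dmS "ARDB_AIL"
screen -S "ARDB_AIL" -X screen -t "6382" \
    bash -c "ardb-server ${AIL_HOME}/configs/6382.conf; read x"
redis-cli -p 6382 PING    # expect PONG once the instance is up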
function launching_logs {
@@ -118,25 +102,25 @@ function launching_scripts {
sleep 0.1
echo -e $GREEN"\t* Launching ZMQ scripts"$DEFAULT
screen -S "Script_AIL" -X screen -t "ModuleInformation" bash -c 'python3 ModulesInformationV2.py -k 0 -c 1; read x'
screen -S "Script_AIL" -X screen -t "ModuleInformation" bash -c './ModulesInformationV2.py -k 0 -c 1; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Mixer" bash -c 'python3 Mixer.py; read x'
screen -S "Script_AIL" -X screen -t "Mixer" bash -c './Mixer.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Global" bash -c 'python3 Global.py; read x'
screen -S "Script_AIL" -X screen -t "Global" bash -c './Global.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Duplicates" bash -c 'python3 Duplicates.py; read x'
screen -S "Script_AIL" -X screen -t "Duplicates" bash -c './Duplicates.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Attributes" bash -c 'python3 Attributes.py; read x'
screen -S "Script_AIL" -X screen -t "Attributes" bash -c './Attributes.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Lines" bash -c 'python3 Lines.py; read x'
screen -S "Script_AIL" -X screen -t "Lines" bash -c './Lines.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "DomClassifier" bash -c './DomClassifier.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Categ" bash -c 'python3 Categ.py; read x'
screen -S "Script_AIL" -X screen -t "Categ" bash -c './Categ.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Tokenize" bash -c 'python3 Tokenize.py; read x'
screen -S "Script_AIL" -X screen -t "Tokenize" bash -c './Tokenize.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "CreditCards" bash -c 'python3 CreditCards.py; read x'
screen -S "Script_AIL" -X screen -t "CreditCards" bash -c './CreditCards.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Onion" bash -c './Onion.py; read x'
sleep 0.1
@@ -162,6 +146,8 @@ function launching_scripts {
sleep 0.1
screen -S "Script_AIL" -X screen -t "Base64" bash -c './Base64.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "DbDump" bash -c './DbDump.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Bitcoin" bash -c './Bitcoin.py; read x'
sleep 0.1
screen -S "Script_AIL" -X screen -t "Phone" bash -c './Phone.py; read x'
@@ -189,7 +175,10 @@ function shutting_down_redis {
bash -c $redis_dir'redis-cli -p 6380 SHUTDOWN'
sleep 0.1
bash -c $redis_dir'redis-cli -p 6381 SHUTDOWN'
sleep 0.1
}
function shutting_down_ardb {
redis_dir=${AIL_HOME}/redis/src/
bash -c $redis_dir'redis-cli -p 6382 SHUTDOWN'
}
@@ -214,12 +203,21 @@ function checking_redis {
flag_redis=1
fi
sleep 0.1
return $flag_redis;
}
function checking_ardb {
flag_ardb=0
redis_dir=${AIL_HOME}/redis/src/
sleep 0.2
bash -c $redis_dir'redis-cli -p 6382 PING | grep "PONG" &> /dev/null'
if [ ! $? == 0 ]; then
echo -e $RED"\t6382 not ready"$DEFAULT
flag_redis=1
flag_ardb=1
fi
return $flag_redis;
return $flag_ardb;
}
#If no params, display the help
@@ -229,12 +227,12 @@ helptext;
############### TESTS ###################
isredis=`screen -ls | egrep '[0-9]+.Redis_AIL' | cut -d. -f1`
islvldb=`screen -ls | egrep '[0-9]+.LevelDB_AIL' | cut -d. -f1`
isardb=`screen -ls | egrep '[0-9]+.ARDB_AIL' | cut -d. -f1`
islogged=`screen -ls | egrep '[0-9]+.Logging_AIL' | cut -d. -f1`
isqueued=`screen -ls | egrep '[0-9]+.Queue_AIL' | cut -d. -f1`
isscripted=`screen -ls | egrep '[0-9]+.Script_AIL' | cut -d. -f1`
options=("Redis" "LevelDB" "Logs" "Queues" "Scripts" "Killall" "Shutdown" "Update-config")
options=("Redis" "Ardb" "Logs" "Queues" "Scripts" "Killall" "Shutdown" "Update-config")
menu() {
echo "What do you want to Launch?:"
@@ -265,9 +263,9 @@ for i in ${!options[@]}; do
echo -e $RED"\t* A screen is already launched"$DEFAULT
fi
;;
LevelDB)
if [[ ! $islvldb ]]; then
launching_lvldb;
Ardb)
if [[ ! $isardb ]]; then
launching_ardb;
else
echo -e $RED"\t* A screen is already launched"$DEFAULT
fi
@@ -288,12 +286,13 @@ for i in ${!options[@]}; do
;;
Scripts)
if [[ ! $isscripted ]]; then
if checking_redis; then
sleep 1
if checking_redis && checking_ardb; then
launching_scripts;
else
echo -e $YELLOW"\tScript not started, waiting 10 secondes"$DEFAULT
sleep 10
if checking_redis; then
echo -e $YELLOW"\tScript not started, waiting 5 secondes"$DEFAULT
sleep 5
if checking_redis && checking_ardb; then
launching_scripts;
else
echo -e $RED"\tScript not started"$DEFAULT
@@ -304,14 +303,17 @@ for i in ${!options[@]}; do
fi
;;
Killall)
if [[ $isredis || $islvldb || $islogged || $isqueued || $isscripted ]]; then
if [[ $isredis || $isardb || $islogged || $isqueued || $isscripted ]]; then
echo -e $GREEN"Gracefully closing redis servers"$DEFAULT
shutting_down_redis;
sleep 0.2
echo -e $GREEN"Gracefully closing ardb servers"$DEFAULT
shutting_down_ardb;
echo -e $GREEN"Killing all"$DEFAULT
kill $isredis $islvldb $islogged $isqueued $isscripted
kill $isredis $isardb $islogged $isqueued $isscripted
sleep 0.2
echo -e $ROSE`screen -ls`$DEFAULT
echo -e $GREEN"\t* $isredis $islvldb $islogged $isqueued $isscripted killed."$DEFAULT
echo -e $GREEN"\t* $isredis $isardb $islogged $isqueued $isscripted killed."$DEFAULT
else
echo -e $RED"\t* No screen to kill"$DEFAULT
fi

View File

@@ -133,9 +133,9 @@ if __name__ == '__main__':
# REDIS #
r_serv_trend = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_Trending", "host"),
port=p.config.get("Redis_Level_DB_Trending", "port"),
db=p.config.get("Redis_Level_DB_Trending", "db"),
host=p.config.get("ARDB_Trending", "host"),
port=p.config.get("ARDB_Trending", "port"),
db=p.config.get("ARDB_Trending", "db"),
decode_responses=True)
r_serv_pasteName = redis.StrictRedis(

View File

@@ -54,9 +54,9 @@ if __name__ == "__main__":
# REDIS #
server_term = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_TermFreq", "host"),
port=p.config.get("Redis_Level_DB_TermFreq", "port"),
db=p.config.get("Redis_Level_DB_TermFreq", "db"),
host=p.config.get("ARDB_TermFreq", "host"),
port=p.config.get("ARDB_TermFreq", "port"),
db=p.config.get("ARDB_TermFreq", "db"),
decode_responses=True)
# FUNCTIONS #

View File

@@ -33,9 +33,9 @@ def main():
# db index derived automatically from the date
curYear = datetime.now().year if args.year is None else args.year
r_serv = redis.StrictRedis(
host=cfg.get("Redis_Level_DB_Hashs", "host"),
port=curYear,
db=cfg.getint("Redis_Level_DB_Hashs", "db"),
host=cfg.get("ARDB_Hashs", "host"),
port=cfg.getint("ARDB_Hashs", "port"),
db=curYear,
decode_responses=True)
# LOGGING #
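
Here the year moves from the TCP port to the db index, so hash data for 2018 lives in db 2018 of the shared instance. A sketch of the effect; note that stock Redis would reject SELECT 2018 under its default 'databases 16', so this relies on ardb's more permissive db namespace (an assumption worth verifying on your build):

import redis
from datetime import datetime

cur_year = datetime.now().year          # e.g. 2018
r_serv = redis.StrictRedis(host='localhost', port=6382, db=cur_year,
                           decode_responses=True)
r_serv.ping()                           # fails if the backend caps db indexes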

View File

@@ -152,9 +152,9 @@ if __name__ == '__main__':
# REDIS_LEVEL_DB #
server = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_Sentiment", "host"),
port=p.config.get("Redis_Level_DB_Sentiment", "port"),
db=p.config.get("Redis_Level_DB_Sentiment", "db"),
host=p.config.get("ARDB_Sentiment", "host"),
port=p.config.get("ARDB_Sentiment", "port"),
db=p.config.get("ARDB_Sentiment", "db"),
decode_responses=True)
while True:

View File

@@ -52,9 +52,9 @@ if __name__ == "__main__":
# REDIS #
server_term = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_TermFreq", "host"),
port=p.config.get("Redis_Level_DB_TermFreq", "port"),
db=p.config.get("Redis_Level_DB_TermFreq", "db"),
host=p.config.get("ARDB_TermFreq", "host"),
port=p.config.get("ARDB_TermFreq", "port"),
db=p.config.get("ARDB_TermFreq", "db"),
decode_responses=True)
# FUNCTIONS #

View File

@@ -114,9 +114,9 @@ if __name__ == '__main__':
# REDIS #
r_serv_trend = redis.StrictRedis(
host=p.config.get("Redis_Level_DB_Trending", "host"),
port=p.config.get("Redis_Level_DB_Trending", "port"),
db=p.config.get("Redis_Level_DB_Trending", "db"),
host=p.config.get("ARDB_Trending", "host"),
port=p.config.get("ARDB_Trending", "port"),
db=p.config.get("ARDB_Trending", "db"),
decode_responses=True)
# FILE CURVE SECTION #

View File

@@ -52,9 +52,9 @@ if __name__ == "__main__":
# db index derived automatically from the date
curYear = datetime.now().year
server = redis.StrictRedis(
host=p.config.get("Redis_Level_DB", "host"),
port=curYear,
db=p.config.get("Redis_Level_DB", "db"),
host=p.config.get("ARDB_DB", "host"),
port=p.config.get("ARDB_DB", "port"),
db=curYear,
decode_responses=True)
# FUNCTIONS #

View File

@@ -112,37 +112,38 @@ host = localhost
port = 6381
db = 1
##### LevelDB #####
[Redis_Level_DB_Curve]
##### ARDB #####
[ARDB_Curve]
host = localhost
port = 6382
db = 1
[Redis_Level_DB_Sentiment]
[ARDB_Sentiment]
host = localhost
port = 6382
db = 4
[Redis_Level_DB_TermFreq]
[ARDB_TermFreq]
host = localhost
port = 6382
db = 2
[Redis_Level_DB_TermCred]
[ARDB_TermCred]
host = localhost
port = 6382
db = 5
[Redis_Level_DB]
[ARDB_DB]
host = localhost
port = 6382
db = 0
[Redis_Level_DB_Trending]
[ARDB_Trending]
host = localhost
port = 6382
db = 3
[Redis_Level_DB_Hashs]
[ARDB_Hashs]
host = localhost
port = 6382
db = 1
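
Since every renamed section shares host localhost and port 6382 and differs only in its db index, a small hedged sanity check can ping them all once the daemon is up (config path hypothetical):

import configparser
import redis

cfg = configparser.ConfigParser()
cfg.read('configs/config.cfg')  # hypothetical path

for section in cfg.sections():
    if section.startswith('ARDB_'):
        r = redis.StrictRedis(host=cfg.get(section, 'host'),
                              port=cfg.getint(section, 'port', fallback=6382),
                              db=cfg.getint(section, 'db', fallback=0),
                              decode_responses=True)
        print(section, r.ping())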

View File

@@ -114,6 +114,10 @@ publish = Redis_Duplicate,Redis_alertHandler
subscribe = Redis_Global
publish = Redis_Duplicate,Redis_alertHandler
[DbDump]
subscribe = Redis_Global
publish = Redis_Duplicate,Redis_alertHandler
[Bitcoin]
subscribe = Redis_Global
publish = Redis_Duplicate,Redis_alertHandler
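
The new [DbDump] entry follows the standard AIL wiring: consume pastes from Redis_Global, publish matches to Redis_Duplicate and the alert handler. The following is not AIL's actual Helper API, just a plain-redis stand-in for what such an entry connects (the queue server on port 6381 is an assumption from the config above):

import redis

r = redis.StrictRedis(host='localhost', port=6381, decode_responses=True)
sub = r.pubsub()
sub.subscribe('Redis_Global')           # queue names from the entry above
for msg in sub.listen():
    if msg['type'] == 'message':
        for out in ('Redis_Duplicate', 'Redis_alertHandler'):
            r.publish(out, msg['data'])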

configs/6382.conf (728 lines) Normal file → Executable file
View File

@@ -1,4 +1,7 @@
# Redis configuration file example
# Ardb configuration file example, modified from redis's conf file.
# Home dir for ardb instance, it can be referenced by ${ARDB_HOME} in this config file
home ../DATA_ARDB/
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
@@ -12,63 +15,71 @@
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf
################################ GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
# By default Ardb does not run as a daemon. Use 'yes' if you need it.
daemonize no
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# When running daemonized, Ardb writes a pid file in ${ARDB_HOME}/ardb.pid by
# default. You can specify a custom pid file location here.
#pidfile /var/run/redis.pid
pidfile ${ARDB_HOME}/ardb.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6382
# The thread pool size for all listening servers; -1 means use the machine's CPU count
thread-pool-size 4
# TCP listen() backlog.
#
# In high requests-per-second environments you need an high backlog in order
# to avoid slow clients connections issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
#Accept connections on the specified host&port/unix socket, default is 0.0.0.0:16379.
server[0].listen 127.0.0.1:6382
# If the current qps exceeds the limit, Ardb would return an error.
#server[0].qps-limit 1000
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
#listen on unix socket
#server[1].listen /tmp/ardb.sock
#server[1].unixsocketperm 755
#server[1].qps-limit 1000
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
# 'qps-limit-per-host' used to limit the request per second from same host
# 'qps-limit-per-connection' used to limit the request per second from same connection
qps-limit-per-host 0
qps-limit-per-connection 0
# Specify the optimized RocksDB compaction strategies.
# If anything other than none is set then the rocksdb.options will not be used.
# The property can be one of:
# OptimizeLevelStyleCompaction
# OptimizeUniversalStyleCompaction
# none
#
#unixsocket /tmp/redis.sock
#unixsocketperm 755
rocksdb.compaction OptimizeLevelStyleCompaction
# Enable this to indicate that the hscan/sscan/zscan commands use total order mode for the rocksdb engine
rocksdb.scan-total-order false
# Disabling the RocksDB WAL may improve write performance, but
# data in the un-flushed memtables might be lost in case of a RocksDB shutdown.
# Disabling WAL provides similar guarantees as Redis.
rocksdb.disableWAL false
#rocksdb's options
rocksdb.options write_buffer_size=512M;max_write_buffer_number=5;min_write_buffer_number_to_merge=3;compression=kSnappyCompression;\
bloom_locality=1;memtable_prefix_bloom_size_ratio=0.1;\
block_based_table_factory={block_cache=512M;filter_policy=bloomfilter:10:true};\
create_if_missing=true;max_open_files=10000;rate_limiter_bytes_per_sec=50M;\
use_direct_io_for_flush_and_compaction=true;use_adaptive_mutex=true
#leveldb's options
leveldb.options block_cache_size=512M,write_buffer_size=128M,max_open_files=5000,block_size=4k,block_restart_interval=16,\
bloom_bits=10,compression=snappy,logenable=yes,max_file_size=2M
#lmdb's options
lmdb.options database_maxsize=10G,database_maxdbs=4096,readahead=no,batch_commit_watermark=1024
#perconaft's options
perconaft.options cache_size=128M,compression=snappy
#wiredtiger's options
wiredtiger.options cache_size=512M,session_max=8k,chunk_size=100M,block_size=4k,bloom_bits=10,\
mmap=false,compressor=snappy
#forestdb's options
forestdb.options chunksize=8,blocksize=4K
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
@@ -91,92 +102,21 @@ tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# error
# warn
# info
# debug
# trace
loglevel info
# Specify the log file name. Also the empty string can be used to force
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile ""
#logfile ${ARDB_HOME}/log/ardb-server.log
logfile stdout
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
#save 900 1
#save 300 10
save 300 100000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump6382.rdb
# The working directory.
# The working data directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
@@ -184,22 +124,29 @@ dbfilename dump6382.rdb
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ../dumps/
data-dir ${ARDB_HOME}/data
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# Master-Slave replication. Use slaveof to make a Ardb instance a copy of
# another Ardb server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# slaveof <masterip>:<masterport>
#slaveof 127.0.0.1:6379
# By default, ardb use 2 threads to execute commands synced from master.
# -1 means use current CPU number threads instead.
slave-workers 2
# Max synced command queue size in memory.
max-slave-worker-queue 1024
# The directory for replication.
repl-dir ${ARDB_HOME}/repl
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
@@ -214,33 +161,55 @@ dir ../dumps/
#
slave-serve-stale-data yes
# The slave priority is an integer number published by Ardb/Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
#
# Note: any requests processed by non read-only slaves would not be written to the
# replication log or synced to connected slaves.
slave-read-only yes
# The directory for backup.
backup-dir ${ARDB_HOME}/backup
#
# You can configure the backup file format as 'redis' or 'ardb'. The 'ardb' format
# can only be used by an ardb instance, while a 'redis' format file can be used by
# both redis and ardb instances.
backup-file-format ardb
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
@@ -250,7 +219,7 @@ slave-read-only yes
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# If you select "yes" Ardb will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
@@ -272,9 +241,46 @@ repl-disable-tcp-nodelay no
# The biggest the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
# If the size is configured as 0, then the Ardb instance can NOT serve as a master.
#
# repl-backlog-size 1mb
# repl-backlog-size 500m
repl-backlog-size 1G
repl-backlog-cache-size 100M
snapshot-max-lag-offset 500M
# Set the max number of snapshots. By default this limit is set to 10 snapshots.
# Once the limit is reached Ardb would try to remove the oldest snapshots
maxsnapshots 10
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
@@ -285,42 +291,32 @@ repl-disable-tcp-nodelay no
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# Slave clears its current data store before a full resync to the master.
# This keeps the slave consistent with the master's data, but deleting the
# data may take a long time, depending on its size.
# If set to 'no', the slave may end up with different data than the master.
slave-cleardb-before-fullresync yes
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
# Master/Slave instance would persist sync state every 'repl-backlog-sync-period' secs.
repl-backlog-sync-period 5
# Slave would ignore any 'expire' setting from replicated commands if set to 'yes'.
# This can be used when the master is a redis instance serving hot data with expire
# settings while the slave is an ardb instance persisting all data.
# Since the master redis instance would generate a 'del' for each expired key, the slave
# should ignore all 'del' commands too by setting 'slave-ignore-del' to 'yes' in this scenario.
slave-ignore-expire no
slave-ignore-del no
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# The N slaves need to be in "online" state.
# A value of 0 means to never release the backlog.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEES that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
# repl-backlog-ttl 3600
################################## SECURITY ###################################
@@ -356,6 +352,15 @@ slave-priority 100
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################ CLUSTER ###############################
# Max execution time of a Lua script in milliseconds.
#zookeeper-servers 127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183
#zk-recv-timeout 10000
#zk-clientid-file ${ARDB_HOME}/ardb.zkclientid
cluster-name ardb-cluster
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
@@ -369,173 +374,13 @@ slave-priority 100
#
# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# At the date of writing this commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance for default Redis will check three keys and
# pick the one that was used less recently, you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# More details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceed the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write commands was
# already issue by the script but the user don't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub/Slave client can't consume messages as fast as the
# publisher can produce them).
slave-client-output-buffer-limit 256mb
pubsub-client-output-buffer-limit 32mb
################################## SLOW LOG ###################################
@@ -561,156 +406,63 @@ slowlog-log-slower-than 10000
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
############################# Event notification ##############################
################################ LUA SCRIPTING ###############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/keyspace-events
# Max execution time of a Lua script in milliseconds.
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
# When a long running script exceed the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write commands was
# already issue by the script but the user don't want to wait for the natural
# termination of the script.
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
# by zero or multiple characters. The empty string means that notifications
# are disabled at all.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
############################### ADVANCED CONFIG ###############################
## Since some redis clients would check info command's output, this configuration
## would be set in 'misc' section of 'info's output
#additional-misc-info redis_version:2.8.9\nredis_trick:yes
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
# this limit, it is convereted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. Thev value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happens to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
#trusted-ip 10.10.10.10
#trusted-ip 10.10.10.*
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# By default Ardb would not compact the whole db after loading a snapshot, which may happen
# when a slave syncs from its master or an 'import' command from a client is processed.
# This configuration only works with the rocksdb engine.
# If ardb does not compact data after loading a snapshot file, read performance will be poor
# until rocksdb completes its next internal compaction task, which can take a very long
# time for a huge data set.
compact-after-snapshot-load false
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# active rehashing the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply form time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# Ardb stores scan cursors in memory
scan-redis-compatible yes
scan-cursor-expire-after 60
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
redis-compatible-mode yes
redis-compatible-version 2.8.0
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform accordingly to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
statistics-log-period 600
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
# Range deletion min size trigger
range-delete-min-size 100
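
The redis-compatible-mode / redis-compatible-version pair above is what lets every existing redis-py client and the redis-cli calls in LAUNCH.sh keep working unchanged; a quick hedged smoke test once the server is running:

redis-cli -p 6382 PING                 # PONG: ardb answers the Redis protocol
redis-cli -p 6382 -n 5 SET test ok     # -n 5: the ARDB_TermCred logical db
redis-cli -p 6382 -n 5 GET test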

View File

@@ -60,11 +60,9 @@ sudo ldconfig
popd
popd
# REDIS LEVEL DB #
test ! -d redis-leveldb/ && git clone https://github.com/KDr2/redis-leveldb.git
pushd redis-leveldb/
git submodule init
git submodule update
# ARDB #
test ! -d ardb/ && git clone https://github.com/yinqiwen/ardb.git
pushd ardb/
make
popd
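
Building ardb pulls in and compiles its storage engine, so the plain 'make' above can take a while; if you want to pin the engine explicitly, ardb's build reportedly honors a storage_engine variable (an assumption to verify against the ardb README):

test ! -d ardb/ && git clone https://github.com/yinqiwen/ardb.git
pushd ardb/
storage_engine=rocksdb make    # rocksdb matches the tuning in configs/6382.conf
popd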
@@ -84,7 +82,7 @@ if [ -z "$VIRTUAL_ENV" ]; then
echo export AIL_BIN=$(pwd)/bin/ >> ./AILENV/bin/activate
echo export AIL_FLASK=$(pwd)/var/www/ >> ./AILENV/bin/activate
echo export AIL_REDIS=$(pwd)/redis/src/ >> ./AILENV/bin/activate
echo export AIL_LEVELDB=$(pwd)/redis-leveldb/ >> ./AILENV/bin/activate
echo export AIL_ARDB=$(pwd)/ardb/src/ >> ./AILENV/bin/activate
. ./AILENV/bin/activate
@@ -93,7 +91,6 @@ fi
year1=20`date +%y`
year2=20`date --date='-1 year' +%y`
mkdir -p $AIL_HOME/{PASTES,Blooms,dumps}
mkdir -p $AIL_HOME/LEVEL_DB_DATA/{$year1,$year2}
pip3 install -U pip
pip3 install -U -r pip3_packages_requirement.txt

View File

@@ -1,6 +1,8 @@
#!/bin/bash
sudo rm -rf AILENV
sudo mv indexdir old_indexdir_python2
mkdir old
sudo mv indexdir old/old_indexdir_python2
sudo mv LEVEL_DB_DATA old/old_LEVEL_DB_DATA
./installing_deps.sh

View File

@@ -36,27 +36,27 @@ r_serv_log = redis.StrictRedis(
decode_responses=True)
r_serv_charts = redis.StrictRedis(
host=cfg.get("Redis_Level_DB_Trending", "host"),
port=cfg.getint("Redis_Level_DB_Trending", "port"),
db=cfg.getint("Redis_Level_DB_Trending", "db"),
host=cfg.get("ARDB_Trending", "host"),
port=cfg.getint("ARDB_Trending", "port"),
db=cfg.getint("ARDB_Trending", "db"),
decode_responses=True)
r_serv_sentiment = redis.StrictRedis(
host=cfg.get("Redis_Level_DB_Sentiment", "host"),
port=cfg.getint("Redis_Level_DB_Sentiment", "port"),
db=cfg.getint("Redis_Level_DB_Sentiment", "db"),
host=cfg.get("ARDB_Sentiment", "host"),
port=cfg.getint("ARDB_Sentiment", "port"),
db=cfg.getint("ARDB_Sentiment", "db"),
decode_responses=True)
r_serv_term = redis.StrictRedis(
host=cfg.get("Redis_Level_DB_TermFreq", "host"),
port=cfg.getint("Redis_Level_DB_TermFreq", "port"),
db=cfg.getint("Redis_Level_DB_TermFreq", "db"),
host=cfg.get("ARDB_TermFreq", "host"),
port=cfg.getint("ARDB_TermFreq", "port"),
db=cfg.getint("ARDB_TermFreq", "db"),
decode_responses=True)
r_serv_cred = redis.StrictRedis(
host=cfg.get("Redis_Level_DB_TermCred", "host"),
port=cfg.getint("Redis_Level_DB_TermCred", "port"),
db=cfg.getint("Redis_Level_DB_TermCred", "db"),
host=cfg.get("ARDB_TermCred", "host"),
port=cfg.getint("ARDB_TermCred", "port"),
db=cfg.getint("ARDB_TermCred", "db"),
decode_responses=True)
r_serv_pasteName = redis.StrictRedis(

View File

@@ -23,22 +23,22 @@ max_preview_modal = Flask_config.max_preview_modal
#init all ardb servers
curYear = datetime.now().year
int_year = int(curYear)
r_serv_db = {}
# port generated automatically depending on available levelDB date
yearList = []
lvdbdir= os.path.join(os.environ['AIL_HOME'], "LEVEL_DB_DATA/")
for year in os.listdir(lvdbdir):
try:
intYear = int(year)
except:
continue
yearList.append([year, intYear, int(curYear) == intYear])
for x in range(0, (int_year - 2018) + 1):
intYear = int_year - x
yearList.append([str(intYear), intYear, int(curYear) == intYear])
r_serv_db[intYear] = redis.StrictRedis(
host=cfg.get("Redis_Level_DB", "host"),
port=intYear,
db=cfg.getint("Redis_Level_DB", "db"),
host=cfg.get("ARDB_DB", "host"),
port=cfg.getint("ARDB_DB", "port"),
db=intYear,
decode_responses=True)
yearList.sort(reverse=True)
browsepastes = Blueprint('browsepastes', __name__, template_folder='templates')
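
The Flask view no longer scans LEVEL_DB_DATA directories; it simply enumerates years from the current one back down to 2018. A pure illustration of the loop above, for curYear = 2020:

int_year = 2020
yearList = []
for x in range(0, (int_year - 2018) + 1):
    intYear = int_year - x
    yearList.append([str(intYear), intYear, int_year == intYear])
print(yearList)  # [['2020', 2020, True], ['2019', 2019, False], ['2018', 2018, False]]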
@@ -86,7 +86,13 @@ def browseImportantPaste():
@browsepastes.route("/importantPasteByModule/", methods=['GET'])
def importantPasteByModule():
module_name = request.args.get('moduleName')
currentSelectYear = int(request.args.get('year'))
# # TODO: VERIFY YEAR VALIDITY
try:
currentSelectYear = int(request.args.get('year'))
except:
print('Invalid year input')
currentSelectYear = int(datetime.now().year)
all_content = []
paste_date = []