new: Add startup scripts for redis & ardb

pull/12/head
Raphaël Vinot 2018-03-21 17:58:36 +01:00
parent b3c68db8ec
commit 9979c9abe1
24 changed files with 5954 additions and 107 deletions

11
.gitignore vendored

@@ -99,3 +99,14 @@ ENV/
# mypy
.mypy_cache/
# Redis
*.rdb
# Storage
rawdata
# ardb
storage/ardb.pid
storage/data
storage/repl


@@ -29,7 +29,7 @@ New version of BGP Ranking, complete rewrite in python3.6+ and an ARDB backend
# Databases
## Intake (redis, port 6379)
## Intake (redis, port 6579)
*Usage*: All the modules push their entries in this database.
@@ -42,7 +42,7 @@ UUID = {'ip': <ip>, 'source': <source>, 'datetime': <datetime>}
Creates a set `intake` for further processing containing all the UUIDs.
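Only the port changes here; the documented intake layout stays the same. A minimal sketch of a module pushing one entry against the new port (the `push_entry` helper is illustrative, not part of this commit):

```python
import uuid
from datetime import datetime

from redis import Redis

# Intake database, now on port 6579 (was 6379).
intake = Redis(host='localhost', port=6579, db=0)


def push_entry(ip: str, source: str) -> str:
    '''Store one entry under a fresh UUID and register the UUID in the `intake` set.'''
    entry_uuid = str(uuid.uuid4())
    # Hash layout documented above: UUID = {'ip': <ip>, 'source': <source>, 'datetime': <datetime>}
    intake.hmset(entry_uuid, {'ip': ip, 'source': source,
                              'datetime': datetime.now().isoformat()})
    intake.sadd('intake', entry_uuid)
    return entry_uuid
```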
## Pre-Insert (redis, port 6380)
## Pre-Insert (redis, port 6580)
*Usage*: Make sure the IPs are global, validate input from the intake module.
@@ -59,7 +59,7 @@ Creates a set `to_insert` for further processing containing all the UUIDs.
Creates a set `for_ris_lookup` to lookup on the RIS database. Contains all the IPs.
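A rough sketch of that validation step against the renumbered ports (the real logic lives in the sanitizer module shown later in this diff; which database ends up holding `for_ris_lookup` is simplified here):

```python
from ipaddress import ip_address

from redis import Redis

intake = Redis(host='localhost', port=6579, db=0, decode_responses=True)
pre_insert = Redis(host='localhost', port=6580, db=0, decode_responses=True)


def sanitize_one() -> None:
    '''Pop one UUID from the intake set and keep it only if the IP is global.'''
    entry_uuid = intake.spop('intake')
    if not entry_uuid:
        return
    data = intake.hgetall(entry_uuid)
    if not ip_address(data['ip']).is_global:
        return
    pre_insert.hmset(entry_uuid, data)
    pre_insert.sadd('to_insert', entry_uuid)       # queued for the long term storage insert
    pre_insert.sadd('for_ris_lookup', data['ip'])  # queued for the RIS lookup
```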
## Routing Information Service cache (redis, port 6381)
## Routing Information Service cache (redis, port 6581)
*Usage*: Look up IPs against RIPE's RIS database
@@ -71,7 +71,7 @@ Creates the following hashes:
IP = {'asn': <asn>, 'prefix': <prefix>, 'description': <description>}
```
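A small sketch of writing and reading that hash layout on the new port (the helper names are illustrative):

```python
from redis import Redis

ris_cache = Redis(host='localhost', port=6581, db=0, decode_responses=True)


def cache_ris_answer(ip: str, asn: str, prefix: str, description: str) -> None:
    '''Store one RIS lookup result using the hash layout documented above.'''
    ris_cache.hmset(ip, {'asn': asn, 'prefix': prefix, 'description': description})


def cached_ris_answer(ip: str) -> dict:
    '''Return {'asn': ..., 'prefix': ..., 'description': ...}, or {} if the IP is unknown.'''
    return ris_cache.hgetall(ip)
```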
## Ranking Information cache (redis, port 6382)
## Ranking Information cache (redis, port 6582)
*Usage*: Store the current list of known ASNs at RIPE, and the prefixes originating from them.
@@ -90,7 +90,7 @@ And the following keys:
<asn>|v6|ipcount = <Total amount of IP v6 addresses originating this AS>
```
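A read-side sketch of those keys against port 6582; the key names mirror the README and the new `PrefixDatabase` code further down in this diff, while the function itself is illustrative:

```python
from redis import Redis

ranking_cache = Redis(host='localhost', port=6582, db=0, decode_responses=True)


def asn_summary(asn: str, address_family: str='v4') -> dict:
    '''Fetch the originating prefixes and the pre-computed address count for one ASN.'''
    return {
        'prefixes': ranking_cache.smembers('{}|{}'.format(asn, address_family)),
        'ipcount': int(ranking_cache.get('{}|{}|ipcount'.format(asn, address_family)) or 0),
    }
```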
## Long term storage (ardb, port 16379)
## Long term storage (ardb, port 16579)
*Usage*: Stores the IPs with the meta information required for ranking.
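ardb speaks the redis protocol (`redis-compatible-mode yes` in storage/ardb.conf below), so the storage layer is reached with a plain redis client on the new port, as `DatabaseInsert` does further down in this diff. A minimal connectivity check:

```python
from redis import StrictRedis

# Long term storage served by ardb; start it first with storage/run_ardb.sh
ardb_storage = StrictRedis(host='localhost', port=16579, decode_responses=True)
print(ardb_storage.ping())  # True once the ardb server is up
```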

1317
cache/6581.conf vendored Normal file

File diff suppressed because it is too large

1317
cache/6582.conf vendored Normal file

File diff suppressed because it is too large

7
cache/run_redis.sh vendored Executable file

@@ -0,0 +1,7 @@
#!/bin/bash
set -e
set -x
../../redis/src/redis-server ./6581.conf
../../redis/src/redis-server ./6582.conf

7
cache/shutdown_redis.sh vendored Executable file

@@ -0,0 +1,7 @@
#!/bin/bash
# set -e
set -x
../../redis/src/redis-cli -p 6581 shutdown
../../redis/src/redis-cli -p 6582 shutdown


@@ -10,9 +10,9 @@ class DatabaseInsert():
def __init__(self, loglevel: int=logging.DEBUG):
self.__init_logger(loglevel)
self.ardb_storage = StrictRedis(host='localhost', port=16379, decode_responses=True)
self.redis_sanitized = Redis(host='localhost', port=6380, db=0, decode_responses=True)
self.ris_cache = Redis(host='localhost', port=6381, db=0, decode_responses=True)
self.ardb_storage = StrictRedis(host='localhost', port=16579, decode_responses=True)
self.redis_sanitized = Redis(host='localhost', port=6580, db=0, decode_responses=True)
self.ris_cache = Redis(host='localhost', port=6581, db=0, decode_responses=True)
self.logger.debug('Starting import')
def __init_logger(self, loglevel):


@@ -2,75 +2,65 @@
# -*- coding: utf-8 -*-
import logging
import json
from redis import Redis
import asyncio
from telnetlib import Telnet
from .libs.StatsRipeText import RIPECaching
from ipaddress import ip_network
import requests
import gzip
from io import BytesIO
from collections import defaultdict
import re
# Dataset source: Routeviews Prefix to AS mappings Dataset for IPv4 and IPv6
# http://www.caida.org/data/routing/routeviews-prefix2as.xml
class ASNLookup(RIPECaching):
class PrefixDatabase():
def __init__(self, sourceapp: str='bgpranking-ng', loglevel: int=logging.DEBUG):
super().__init__(sourceapp, loglevel)
self.redis_cache = Redis(host='localhost', port=6382, db=0, decode_responses=True)
self.logger.debug('Starting ASN lookup cache')
def __init__(self, loglevel: int=logging.DEBUG):
self.__init_logger(loglevel)
self.redis_cache = Redis(host='localhost', port=6582, db=0, decode_responses=True)
self.ipv6_url = 'http://data.caida.org/datasets/routing/routeviews6-prefix2as/{}'
self.ipv4_url = 'http://data.caida.org/datasets/routing/routeviews-prefix2as/{}'
def get_all_asns(self):
with Telnet(self.hostname, self.port) as tn:
tn.write(b'-k\n')
to_send = '-d ris-asns list_asns=true asn_types=o sourceapp={}\n'.format(self.sourceapp)
tn.write(to_send.encode())
ris_asns = json.loads(tn.read_until(b'\n}\n'))
all_asns = ris_asns['asns']['originating']
if not all_asns:
self.logger.warning('No ASNs in ris-asns, something went wrong.')
else:
self.redis_cache.sadd('asns', *all_asns)
self.redis_cache.sadd('asns_to_lookup', *all_asns)
tn.write(b'-k\n')
def __init_logger(self, loglevel):
self.logger = logging.getLogger('{}'.format(self.__class__.__name__))
self.logger.setLevel(loglevel)
def fix_ipv4_networks(self, networks):
'''Because we can't have nice things.
Some networks come without the last byte(s) (i.e. 170.254.25/24)'''
to_return = []
for net in networks:
try:
to_return.append(ip_network(net))
except ValueError:
ip, mask = net.split('/')
iplist = ip.split('.')
iplist = iplist + ['0'] * (4 - len(iplist))
to_return.append(ip_network('{}/{}'.format('.'.join(iplist), mask)))
return to_return
def _has_new(self, address_family, root_url):
r = requests.get(root_url.format('pfx2as-creation.log'))
last_entry = r.text.split('\n')[-2]
path = last_entry.split('\t')[-1]
if path == self.redis_cache.get('current|{}'.format(address_family)):
self.logger.debug('Same file already loaded: {}'.format(path))
return False, path
return True, path
async def get_originating_prefixes(self):
reader, writer = await asyncio.open_connection(self.hostname, self.port)
writer.write(b'-k\n')
while True:
asn = self.redis_cache.spop('asns_to_lookup')
if not asn:
break
self.logger.debug('ASN lookup: {}'.format(asn))
to_send = '-d ris-prefixes {} list_prefixes=true types=o af=v4,v6 noise=filter sourceapp={}\n'.format(asn, self.sourceapp)
writer.write(to_send.encode())
data = await reader.readuntil(b'\n}\n')
ris_prefixes = json.loads(data)
p = self.redis_cache.pipeline()
if ris_prefixes['prefixes']['v4']['originating']:
self.logger.debug('{} has ipv4'.format(asn))
fixed_networks = self.fix_ipv4_networks(ris_prefixes['prefixes']['v4']['originating'])
p.sadd('{}|v4'.format(asn), *[str(net) for net in fixed_networks])
total_ipv4 = sum([net.num_addresses for net in fixed_networks])
p.set('{}|v4|ipcount'.format(asn), total_ipv4)
if ris_prefixes['prefixes']['v6']['originating']:
self.logger.debug('{} has ipv6'.format(asn))
p.sadd('{}|v6'.format(asn), *ris_prefixes['prefixes']['v6']['originating'])
total_ipv6 = sum([ip_network(prefix).num_addresses for prefix in ris_prefixes['prefixes']['v6']['originating']])
p.set('{}|v4|ipcount'.format(asn), total_ipv6)
p.execute()
writer.write(b'-k\n')
writer.close()
def _init_routes(self, address_family, root_url, path):
self.logger.debug('Loading {}'.format(path))
r = requests.get(root_url.format(path))
to_import = defaultdict(lambda: {address_family: set(), 'ipcount': 0})
with gzip.open(BytesIO(r.content), 'r') as f:
for line in f:
prefix, length, asns = line.decode().strip().split('\t')
for asn in re.split('[,_]', asns):
network = ip_network('{}/{}'.format(prefix, length))
to_import[asn][address_family].add(str(network))
to_import[asn]['ipcount'] += network.num_addresses
p = self.redis_cache.pipeline()
p.sadd('asns', *to_import.keys())
for asn, data in to_import.items():
p.sadd('{}|{}'.format(asn, address_family), *data[address_family])
p.set('{}|{}|ipcount'.format(asn, address_family), data['ipcount'])
p.set('current|{}'.format(address_family), path)
p.execute()
return True
def load_prefixes(self):
v4_is_new, v4_path = self._has_new('v4', self.ipv4_url)
v6_is_new, v6_path = self._has_new('v6', self.ipv6_url)
if v4_is_new or v6_is_new:
self.redis_cache.flushdb()
self._init_routes('v6', self.ipv6_url, v6_path)
self._init_routes('v4', self.ipv4_url, v4_path)
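The routeviews pfx2as files loaded by `_init_routes` above are gzipped TSV lines of `prefix<TAB>length<TAB>origin-ASN(s)`, with multi-origin entries separated by `,` or `_`. A standalone sketch of the same per-line handling on a made-up entry:

```python
import re
from collections import defaultdict
from ipaddress import ip_network

line = b'1.0.128.0\t17\t23969_38040\n'  # made-up pfx2as line with two origin ASNs
to_import = defaultdict(lambda: {'v4': set(), 'ipcount': 0})

prefix, length, asns = line.decode().strip().split('\t')
for asn in re.split('[,_]', asns):
    network = ip_network('{}/{}'.format(prefix, length))
    to_import[asn]['v4'].add(str(network))
    to_import[asn]['ipcount'] += network.num_addresses

# Both ASNs get the /17 and its 32768 addresses counted against them.
print(dict(to_import))
```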


@@ -2,26 +2,92 @@
# -*- coding: utf-8 -*-
import requests
from enum import Enum
from datetime import datetime
from ipaddress import IPv4Address, IPv6Address, IPv4Network, IPv6Network
from typing import TypeVar
IPTypes = TypeVar('IPTypes', IPv4Address, IPv6Address, 'str')
PrefixTypes = TypeVar('PrefixTypes', IPv4Network, IPv6Network, 'str')
TimeTypes = TypeVar('TimeTypes', datetime, 'str')
class ASNsTypes(Enum):
transiting = 't'
originating = 'o'
all_types = 't,o'
undefined = ''
class AddressFamilies(Enum):
ipv4 = 'v4'
ipv6 = 'v6'
all_families = 'v4,v6'
undefined = ''
class Noise(Enum):
keep = 'keep'
remove = 'filter'
class StatsRIPE():
def __init__(self, sourceapp='bgpranking-ng - CIRCL'):
self.url = "https://stat.ripe.net/data/{method}/data.json?{parameters}"
self.url_parameters = {'sourceapp': sourceapp}
self.sourceapp = sourceapp
async def network_info(self, ip: str) -> dict:
method = 'network-info'
self.url_parameters['resource'] = ip
parameters = '&'.join(['='.join(item) for item in self.url_parameters.items()])
url = self.url.format(method=method, parameters=parameters)
def __time_to_text(self, query_time: TimeTypes) -> str:
if isinstance(query_time, datetime):
return query_time.isoformat()
return query_time
def _get(self, method: str, parameters: dict) -> dict:
parameters['sourceapp'] = self.sourceapp
url = self.url.format(method=method, parameters='&'.join(['{}={}'.format(k, str(v).lower()) for k, v in parameters.items()]))
response = requests.get(url)
return response.json()
async def prefix_overview(self, prefix: str) -> dict:
method = 'prefix-overview'
self.url_parameters['resource'] = prefix
parameters = '&'.join(['='.join(item) for item in self.url_parameters.items()])
url = self.url.format(method=method, parameters=parameters)
response = requests.get(url)
return response.json()
async def network_info(self, ip: IPTypes) -> dict:
parameters = {'resource': ip}
return self._get('network-info', parameters)
async def prefix_overview(self, prefix: PrefixTypes, min_peers_seeing: int= 0,
max_related: int=0, query_time: TimeTypes=None) -> dict:
parameters = {'resource': prefix}
if min_peers_seeing:
parameters['min_peers_seeing'] = min_peers_seeing
if max_related:
parameters['max_related'] = max_related
if query_time:
parameters['query_time'] = self.__time_to_text(query_time)
return self._get('prefix-overview', parameters)
async def ris_asns(self, query_time: TimeTypes=None, list_asns: bool=False, asn_types: ASNsTypes=ASNsTypes.undefined):
parameters = {}
if list_asns:
parameters['list_asns'] = list_asns
if asn_types:
parameters['asn_types'] = asn_types.value
if query_time:
if isinstance(query_time, datetime):
parameters['query_time'] = query_time.isoformat()
else:
parameters['query_time'] = query_time
return self._get('ris-asns', parameters)
async def ris_prefixes(self, asn: int, query_time: TimeTypes=None,
list_prefixes: bool=False, types: ASNsTypes=ASNsTypes.undefined,
af: AddressFamilies=AddressFamilies.undefined, noise: Noise=Noise.keep):
parameters = {'resource': str(asn)}
if query_time:
parameters['query_time'] = self.__time_to_text(query_time)
if list_prefixes:
parameters['list_prefixes'] = list_prefixes
if types:
parameters['types'] = types.value
if af:
parameters['af'] = af.value
if noise:
parameters['noise'] = noise.value
return self._get('ris-prefixes', parameters)


@@ -3,5 +3,5 @@
"vendor": "dshield",
"name": "daily",
"impact": 0.1,
"parser": "parsers.dshield"
"parser": ".parsers.dshield"
}


@@ -31,7 +31,7 @@ class RawFilesParser():
self.directory = storage_directory / self.vendor / self.listname
safe_create_dir(self.directory)
self.__init_logger(loglevel)
self.redis_intake = Redis(host='localhost', port=6379, db=0)
self.redis_intake = Redis(host='localhost', port=6579, db=0)
self.logger.debug('Starting intake on {}'.format(self.source))
def __init_logger(self, loglevel):
@@ -46,6 +46,11 @@ class RawFilesParser():
def extract_ipv4(self, bytestream: bytes) -> List[bytes]:
return re.findall(rb'[0-9]+(?:\.[0-9]+){3}', bytestream)
def strip_leading_zeros(self, ips: List[bytes]) -> List[bytes]:
'''Helper to get rid of leading 0s in an IP list.
Only run it when needed, it is nasty and slow'''
return ['.'.join(str(int(part)) for part in ip.split(b'.')).encode() for ip in ips]
def parse_raw_file(self, f: BytesIO):
self.datetime = datetime.now()
return self.extract_ipv4(f.getvalue())


@@ -8,4 +8,6 @@ from io import BytesIO
def parse_raw_file(self, f: BytesIO):
self.datetime = parse(re.findall(b'# updated (.*)\n', f.getvalue())[0])
return self.extract_ipv4(f.getvalue())
iplist = self.extract_ipv4(f.getvalue())
# The IPs have leading 0s. Getting rid of them directly here.
return self.strip_leading_zeros(iplist)
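For reference, the new `strip_leading_zeros` helper applied to a dshield-style snippet (the input bytes below are made up):

```python
import re
from typing import List


def strip_leading_zeros(ips: List[bytes]) -> List[bytes]:
    '''Same comprehension as the helper added to RawFilesParser above.'''
    return ['.'.join(str(int(part)) for part in ip.split(b'.')).encode() for ip in ips]


raw = b'# updated 2018-03-21\n010.000.000.005\t...\n192.168.001.010\t...\n'
ips = re.findall(rb'[0-9]+(?:\.[0-9]+){3}', raw)
print(strip_leading_zeros(ips))  # [b'10.0.0.5', b'192.168.1.10']
```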


@@ -28,7 +28,7 @@ class RISPrefixLookup(RIPECaching):
p.execute()
async def run(self):
redis_cache = Redis(host='localhost', port=6381, db=0, decode_responses=True)
redis_cache = Redis(host='localhost', port=6581, db=0, decode_responses=True)
reader, writer = await asyncio.open_connection(self.hostname, self.port)
writer.write(b'-k\n')


@@ -12,9 +12,9 @@ class Sanitizer():
def __init__(self, loglevel: int=logging.DEBUG):
self.__init_logger(loglevel)
self.redis_intake = Redis(host='localhost', port=6379, db=0, decode_responses=True)
self.redis_sanitized = Redis(host='localhost', port=6380, db=0, decode_responses=True)
self.ris_cache = Redis(host='localhost', port=6381, db=0, decode_responses=True)
self.redis_intake = Redis(host='localhost', port=6579, db=0, decode_responses=True)
self.redis_sanitized = Redis(host='localhost', port=6580, db=0, decode_responses=True)
self.ris_cache = Redis(host='localhost', port=6581, db=0, decode_responses=True)
self.logger.debug('Starting import')
def __init_logger(self, loglevel):


@@ -2,8 +2,7 @@
# -*- coding: utf-8 -*-
import logging
import asyncio
from listimport.initranking import ASNLookup
from listimport.initranking import PrefixDatabase
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s:%(message)s',
@@ -13,18 +12,12 @@ logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s:%(message)s',
class RankingManager():
def __init__(self, loglevel: int=logging.DEBUG):
self.asn_fetcher = ASNLookup(loglevel=loglevel)
self.prefix_db = PrefixDatabase(loglevel=loglevel)
async def run_fetcher(self):
# self.asn_fetcher.get_all_asns()
await asyncio.gather(
self.asn_fetcher.get_originating_prefixes(),
self.asn_fetcher.get_originating_prefixes(),
self.asn_fetcher.get_originating_prefixes()
)
def load_prefixes(self):
self.prefix_db.load_prefixes()
if __name__ == '__main__':
modules_manager = RankingManager()
loop = asyncio.get_event_loop()
loop.run_until_complete(modules_manager.run_fetcher())
rm = RankingManager()
rm.load_prefixes()

4
ris.py

@@ -16,6 +16,10 @@ class RISManager():
async def run_fetcher(self):
await asyncio.gather(
self.ris_fetcher.run(),
self.ris_fetcher.run(),
self.ris_fetcher.run(),
self.ris_fetcher.run(),
self.ris_fetcher.run(),
self.ris_fetcher.run(),
# self.ris_fetcher.run(2)


@@ -6,12 +6,12 @@ import asyncio
from listimport.sanitizer import Sanitizer
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s:%(message)s',
level=logging.INFO, datefmt='%I:%M:%S')
level=logging.WARNING, datefmt='%I:%M:%S')
class SanitizerManager():
def __init__(self, loglevel: int=logging.INFO):
def __init__(self, loglevel: int=logging.WARNING):
self.loglevel = loglevel
self.sanitizer = Sanitizer(loglevel)

468
storage/ardb.conf Executable file

@@ -0,0 +1,468 @@
# Ardb configuration file example, modified from redis's conf file.
# Home dir for ardb instance, it can be referenced by ${ARDB_HOME} in this config file
home .
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Ardb does not run as a daemon. Use 'yes' if you need it.
daemonize yes
# When running daemonized, Ardb writes a pid file in ${ARDB_HOME}/ardb.pid by
# default. You can specify a custom pid file location here.
pidfile ${ARDB_HOME}/ardb.pid
# The thread pool size shared by all listen servers, -1 means the current machine's CPU count
thread-pool-size 4
#Accept connections on the specified host&port/unix socket, default is 0.0.0.0:16379.
server[0].listen 0.0.0.0:16579
# If current qps exceed the limit, Ardb would return an error.
#server[0].qps-limit 1000
#listen on unix socket
#server[1].listen /tmp/ardb.sock
#server[1].unixsocketperm 755
#server[1].qps-limit 1000
# 'qps-limit-per-host' used to limit the request per second from same host
# 'qps-limit-per-connection' used to limit the request per second from same connection
qps-limit-per-host 0
qps-limit-per-connection 0
# Specify the optimized RocksDB compaction strategies.
# If anything other than none is set then the rocksdb.options will not be used.
# The property can be one of:
# OptimizeLevelStyleCompaction
# OptimizeUniversalStyleCompaction
# none
#
rocksdb.compaction OptimizeLevelStyleCompaction
# Enable this to indicate that the hscan/sscan/zscan commands use total order mode for the rocksdb engine
rocksdb.scan-total-order false
# Disable RocksDB WAL may improve the write performance but
# data in the un-flushed memtables might be lost in case of a RocksDB shutdown.
# Disabling WAL provides similar guarantees as Redis.
rocksdb.disableWAL false
#rocksdb's options
rocksdb.options write_buffer_size=512M;max_write_buffer_number=5;min_write_buffer_number_to_merge=3;compression=kSnappyCompression;\
bloom_locality=1;memtable_prefix_bloom_size_ratio=0.1;\
block_based_table_factory={block_cache=512M;filter_policy=bloomfilter:10:true};\
create_if_missing=true;max_open_files=10000;rate_limiter_bytes_per_sec=50M;\
use_direct_io_for_flush_and_compaction=true;use_adaptive_mutex=true
#leveldb's options
leveldb.options block_cache_size=512M,write_buffer_size=128M,max_open_files=5000,block_size=4k,block_restart_interval=16,\
bloom_bits=10,compression=snappy,logenable=yes,max_file_size=2M
#lmdb's options
lmdb.options database_maxsize=10G,database_maxdbs=4096,readahead=no,batch_commit_watermark=1024
#perconaft's options
perconaft.options cache_size=128M,compression=snappy
#wiredtiger's options
wiredtiger.options cache_size=512M,session_max=8k,chunk_size=100M,block_size=4k,bloom_bits=10,\
mmap=false,compressor=snappy
#forestdb's options
forestdb.options chunksize=8,blocksize=4K
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# error
# warn
# info
# debug
# trace
loglevel info
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
#logfile ${ARDB_HOME}/log/ardb-server.log
logfile stdout
# The working data directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
data-dir ${ARDB_HOME}/data
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Ardb instance a copy of
# another Ardb server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip>:<masterport>
#slaveof 127.0.0.1:6379
# By default, ardb use 2 threads to execute commands synced from master.
# -1 means use current CPU number threads instead.
slave-workers 2
# Max synced command queue size in memory.
max-slave-worker-queue 1024
# The directory for replication.
repl-dir ${ARDB_HOME}/repl
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# The slave priority is an integer number published by Ardb/Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
#
# Note: any requests processed by non read only slaves would not write to replication
# log and sync to connected slaves.
slave-read-only yes
# The directory for backup.
backup-dir ${ARDB_HOME}/backup
#
# You can configure the backup file format as 'redis' or 'ardb'. The 'ardb' format
# can only be used by an ardb instance, while the 'redis' format file can be used by redis
# and ardb instance.
backup-file-format ardb
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Ardb will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# If the size is configured by 0, then Ardb instance can NOT serve as a master.
#
# repl-backlog-size 500m
repl-backlog-size 1G
repl-backlog-cache-size 100M
snapshot-max-lag-offset 500M
# Set the max number of snapshots. By default this limit is set to 10 snapshots.
# Once the limit is reached Ardb would try to remove the oldest snapshots
maxsnapshots 10
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave clears its current data store before a full resync to the master.
# This makes sure the slave stays consistent with the master's data, but deleting the
# data may take a long time, depending on the data size.
# If set to no, the slave may end up with data that differs from the master's.
slave-cleardb-before-fullresync yes
# Master/Slave instance would persist sync state every 'repl-backlog-sync-period' secs.
repl-backlog-sync-period 5
# The slave ignores any 'expire' setting from replicated commands if this is set to 'yes'.
# It can be used when the master is a redis instance serving hot data with expire settings and
# the slave is an ardb instance persisting all data.
# Since a master redis instance generates a 'del' for each expired key, the slave should ignore
# all 'del' commands too by setting 'slave-ignore-del' to 'yes' in this scenario.
slave-ignore-expire no
slave-ignore-del no
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################ CLUSTER ###############################
# Max execution time of a Lua script in milliseconds.
#zookeeper-servers 127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183
#zk-recv-timeout 10000
#zk-clientid-file ${ARDB_HOME}/ardb.zkclientid
cluster-name ardb-cluster
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub/Slave client can't consume messages as fast as the
# publisher can produce them).
slave-client-output-buffer-limit 256mb
pubsub-client-output-buffer-limit 32mb
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceed the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write commands was
# already issue by the script but the user don't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
############################### ADVANCED CONFIG ###############################
## Since some redis clients would check info command's output, this configuration
## would be set in 'misc' section of 'info's output
#additional-misc-info redis_version:2.8.9\nredis_trick:yes
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
#trusted-ip 10.10.10.10
#trusted-ip 10.10.10.*
# By default Ardb would not compact the whole db after loading a snapshot, which may happen
# when a slave syncs from the master or an 'import' command from a client is processed.
# This configuration only works with the rocksdb engine.
# If ardb does not compact data after loading a snapshot file, read performance will be poor until rocksdb
# completes the next compaction task internally, and that compaction can take a very long time for a huge data set.
compact-after-snapshot-load false
# Ardb would store cursor in memory
scan-redis-compatible yes
scan-cursor-expire-after 60
redis-compatible-mode yes
redis-compatible-version 2.8.0
statistics-log-period 600
# Range deletion min size trigger
range-delete-min-size 100

6
storage/run_ardb.sh Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
set -e
set -x
../../ardb/src/ardb-server ardb.conf

6
storage/shutdown_ardb.sh Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
set -e
set -x
../../redis/src/redis-cli -p 16579 shutdown save

1317
temp/6579.conf Normal file

File diff suppressed because it is too large

1317
temp/6580.conf Normal file

File diff suppressed because it is too large

7
temp/run_redis.sh Executable file

@@ -0,0 +1,7 @@
#!/bin/bash
set -e
set -x
../../redis/src/redis-server ./6579.conf
../../redis/src/redis-server ./6580.conf

7
temp/shutdown_redis.sh Executable file

@@ -0,0 +1,7 @@
#!/bin/bash
# set -e
set -x
../../redis/src/redis-cli -p 6579 shutdown
../../redis/src/redis-cli -p 6580 shutdown