#!/usr/bin/env python3
# -*-coding:UTF-8 -*
"""
The WebStats Module
======================

This module computes statistics on the URLs collected by the Web module.

It considers the TLD, the domain and the protocol (scheme) of each URL.

"""
import time
import datetime
import redis
import os

from packages import lib_words
from packages.Date import Date
from pubsublogger import publisher
from Helper import Process
from pyfaup.faup import Faup


# Config Var
threshold_total_sum = 200   # Above this value, a keyword is eligible for progression tracking
threshold_increase = 1.0    # Minimum sum of day-over-day occurrence ratios required over num_day_to_look days
max_set_cardinality = 10    # Cardinality of the progression sorted set
num_day_to_look = 5         # The progression detection starts num_day_to_look days in the past
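
# Data layout in the trending Redis/ARDB instance, as used by the functions below:
#   <field value>              -> hash {YYYYMMDD: occurrence count}
#   domain_set_<YYYYMM>        -> set of domains seen that month
#   z_top_progression_<field>  -> sorted set {keyword: progression score}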


def analyse(server, field_name, date, url_parsed):
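    """Increment the daily counter of the given URL field in Redis.

    The counter is a hash keyed by the field value (scheme, tld or domain),
    whose hash fields are YYYYMMDD dates. Domains are also added to the
    monthly domain_set_<YYYYMM> set used for the monthly plot.
    """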
    field = url_parsed[field_name]

    if field is not None:
        field = field.decode('utf8')
        server.hincrby(field, date, 1)

        if field_name == "domain":  # save domain in a set for the monthly plot
            domain_set_name = "domain_set_" + date[0:6]
            server.sadd(domain_set_name, field)
            print("added in " + domain_set_name + ": " + field)


def get_date_range(num_day):
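    """Return the last num_day + 1 dates, most recent first.

    Example (assuming Date.substract_day returns YYYYMMDD strings, which is
    how the dates are used as hash fields below): on 2016-07-05,
    get_date_range(2) -> ['20160705', '20160704', '20160703'].
    """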
    curr_date = datetime.date.today()
    date = Date(str(curr_date.year)+str(curr_date.month).zfill(2)+str(curr_date.day).zfill(2))
    date_list = []

    for i in range(0, num_day+1):
        date_list.append(date.substract_day(i))
    return date_list


# Compute the progression for one keyword
def compute_progression_word(server, num_day, keyword):
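    """Return (keyword_increase, keyword_total_sum) over the last num_day days.

    keyword_increase is the sum of the day-over-day ratios value[i] / value[i-1]
    (oldest to newest) and keyword_total_sum is the total occurrence count over
    the period. For example, daily counts of 2, 4 and 8 (oldest to newest) give
    keyword_increase = 4/2 + 8/4 = 4.0.
    """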
    date_range = get_date_range(num_day)

    # check if this keyword is eligible for progression
    keyword_total_sum = 0
    value_list = []
    for date in date_range:  # get value up to date_range
        curr_value = server.hget(keyword, date)
        value_list.append(int(curr_value if curr_value is not None else 0))
        keyword_total_sum += int(curr_value) if curr_value is not None else 0
    oldest_value = value_list[-1] if value_list[-1] != 0 else 1  # Avoid zero division

    # The progression is based on the ratio: value[i] / value[i-1]
    keyword_increase = 0
    value_list_reversed = value_list[:]
    value_list_reversed.reverse()
    for i in range(1, len(value_list_reversed)):
        divisor = value_list_reversed[i-1] if value_list_reversed[i-1] != 0 else 1
        keyword_increase += value_list_reversed[i] / divisor

    return (keyword_increase, keyword_total_sum)


'''
    Recompute the top_progression sorted set:
        - compute the progression of the current field value
        - re-compute the progression of the first 2*max_set_cardinality entries of the top_progression sorted set
'''
def compute_progression(server, field_name, num_day, url_parsed):
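    """Update the z_top_progression_<field_name> sorted set for the given URL field."""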
    redis_progression_name_set = "z_top_progression_" + field_name

    keyword = url_parsed[field_name]
    if keyword is not None:
        # compute the progression of the current word
        keyword_increase, keyword_total_sum = compute_progression_word(server, num_day, keyword)

        # re-compute the progression of the first 2*max_set_cardinality words of the sorted set
        current_top = server.zrevrangebyscore(redis_progression_name_set, '+inf', '-inf', withscores=True, start=0, num=2*max_set_cardinality)
        for word, value in current_top:
            word_inc, word_tot_sum = compute_progression_word(server, num_day, word)
            server.zrem(redis_progression_name_set, word)
            if (word_tot_sum > threshold_total_sum) and (word_inc > threshold_increase):
                server.zadd(redis_progression_name_set, float(word_inc), word)

        # filter before adding
        if (keyword_total_sum > threshold_total_sum) and (keyword_increase > threshold_increase):
            server.zadd(redis_progression_name_set, float(keyword_increase), keyword)


if __name__ == '__main__':
    # If you wish to use another port or channel, do not forget to run a subscriber accordingly (see launch_logs.sh)
    # Port of the redis instance used by pubsublogger
    publisher.port = 6380
    # Script is the default channel used for the modules.
    publisher.channel = 'Script'

    # Section name in bin/packages/modules.cfg
    config_section = 'WebStats'

    # Setup the I/O queues
    p = Process(config_section)

    # Send a description of the module to the logging system
    publisher.info("Makes statistics about valid URL")

    # REDIS #
    r_serv_trend = redis.StrictRedis(
        host=p.config.get("ARDB_Trending", "host"),
        port=p.config.get("ARDB_Trending", "port"),
        db=p.config.get("ARDB_Trending", "db"),
        decode_responses=True)

    # FILE CURVE SECTION #
    csv_path_proto = os.path.join(
        os.environ['AIL_HOME'], p.config.get("Directories", "protocolstrending_csv"))
    protocolsfile_path = os.path.join(
        os.environ['AIL_HOME'], p.config.get("Directories", "protocolsfile"))

    csv_path_tld = os.path.join(
        os.environ['AIL_HOME'], p.config.get("Directories", "tldstrending_csv"))
    tldsfile_path = os.path.join(
        os.environ['AIL_HOME'], p.config.get("Directories", "tldsfile"))

    csv_path_domain = os.path.join(
        os.environ['AIL_HOME'], p.config.get("Directories", "domainstrending_csv"))

    faup = Faup()
    generate_new_graph = False

    # Endless loop getting messages from the input queue
    while True:
        # Get one message from the input queue
        message = p.get_from_set()
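        # p.get_from_set() returns None when the input queue is empty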

        if message is None:
            if generate_new_graph:
                generate_new_graph = False
                today = datetime.date.today()
                year = today.year
                month = today.month

                print('Building protocol graph')
                lib_words.create_curve_with_word_file(
                    r_serv_trend, csv_path_proto, protocolsfile_path, year, month)

                print('Building tld graph')
                lib_words.create_curve_with_word_file(
                    r_serv_trend, csv_path_tld, tldsfile_path, year, month)

                print('Building domain graph')
                lib_words.create_curve_from_redis_set(
                    r_serv_trend, csv_path_domain, "domain", year, month)
                print('end building')

            publisher.debug("{} queue is empty, waiting".format(config_section))
            print('sleeping')
            time.sleep(5*60)
            continue

        else:
            generate_new_graph = True
            # Do something with the message from the queue
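            # each message carries a URL, its date (YYYYMMDD) and the path of the source item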
            url, date, path = message.split()
            faup.decode(url)
            url_parsed = faup.get()

            # Scheme analysis
            analyse(r_serv_trend, 'scheme', date, url_parsed)
            # Tld analysis
            analyse(r_serv_trend, 'tld', date, url_parsed)
            # Domain analysis
            analyse(r_serv_trend, 'domain', date, url_parsed)

            compute_progression(r_serv_trend, 'scheme', num_day_to_look, url_parsed)
            compute_progression(r_serv_trend, 'tld', num_day_to_look, url_parsed)
            compute_progression(r_serv_trend, 'domain', num_day_to_look, url_parsed)