#!/usr/bin/env python3
# -*-coding:UTF-8 -*
"""
The ZMQ_Sub_Onion Module
============================

This module consumes the Redis list created by the ZMQ_Sub_Onion_Q Module.

It tries to extract URLs from each paste and keeps only the Tor-related
ones (.onion).

.. seealso:: Paste method (get_regex)

.. note:: Module ZMQ_Something_Q and ZMQ_Something are closely bound, always put
    the same Subscriber name in both of them.

Requirements
------------

* A running Redis instance (Redis)
* The ZMQ_Sub_Onion_Q Module running, to be able to work properly.

"""
import time
import datetime
import os
import base64
import subprocess
import signal
import re

import redis

from pyfaup.faup import Faup

from packages import Paste
from pubsublogger import publisher
from Helper import Process

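# SIGALRM-based guard: the regex extraction in the main loop is aborted once
# max_execution_time seconds (config section "Onion") have elapsed.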
class TimeoutException(Exception):
    pass


def timeout_handler(signum, frame):
    raise TimeoutException


signal.signal(signal.SIGALRM, timeout_handler)

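
# fetch() shells out to tor_fetcher.py for each URL, records the outcome in
# r_cache (bit 0 set with a long expiry on success, cleared with a 1h expiry on
# failure) and yields the URLs that were downloaded successfully.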
def fetch(p, r_cache, urls, domains, path):
    failed = []
    downloaded = []
    print('{} Urls to fetch'.format(len(urls)))
    for url, domain in zip(urls, domains):
        if r_cache.exists(url) or url in failed:
            continue
        to_fetch = base64.standard_b64encode(url.encode('utf8'))
        print('fetching url: {}'.format(to_fetch))
        process = subprocess.Popen(["python", './tor_fetcher.py', to_fetch],
                                   stdout=subprocess.PIPE)
        while process.poll() is None:
            time.sleep(1)

        if process.returncode == 0:
            r_cache.setbit(url, 0, 1)
            r_cache.expire(url, 360000)
            downloaded.append(url)
            print('downloaded : {}'.format(downloaded))
            '''tempfile = process.stdout.read().strip()
            tempfile = tempfile.decode('utf8')
            #with open(tempfile, 'r') as f:
            filename = path + domain + '.gz'
            fetched = f.read()
            content = base64.standard_b64decode(fetched)
            save_path = os.path.join(os.environ['AIL_HOME'],
                                     p.config.get("Directories", "pastes"),
                                     filename)
            dirname = os.path.dirname(save_path)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            with open(save_path, 'w') as ff:
                ff.write(content)
            p.populate_set_out(save_path, 'Global')
            p.populate_set_out(url, 'ValidOnion')
            p.populate_set_out(fetched, 'FetchedOnion')'''
            yield url
            #os.unlink(tempfile)
        else:
            r_cache.setbit(url, 0, 0)
            r_cache.expire(url, 3600)
            failed.append(url)
            print('Failed at downloading', url)
            print(process.stdout.read())
    print('Failed:', len(failed), 'Downloaded:', len(downloaded))


if __name__ == "__main__":
    publisher.port = 6380
    publisher.channel = "Script"

    torclient_host = '127.0.0.1'
    torclient_port = 9050

    config_section = 'Onion'

    p = Process(config_section)
    r_cache = redis.StrictRedis(
        host=p.config.get("Redis_Cache", "host"),
        port=p.config.getint("Redis_Cache", "port"),
        db=p.config.getint("Redis_Cache", "db"),
        decode_responses=True)

    r_onion = redis.StrictRedis(
        host=p.config.get("ARDB_Onion", "host"),
        port=p.config.getint("ARDB_Onion", "port"),
        db=p.config.getint("ARDB_Onion", "db"),
        decode_responses=True)

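    # r_cache keeps short-lived fetch results; r_onion (ARDB) keeps the crawler
    # queues, onion metadata and blacklists used below.
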
    # FUNCTIONS #
    publisher.info("Script subscribed to channel onion_categ")

    # FIXME For retro compatibility
    channel = 'onion_categ'

    # Getting the first message from redis.
    message = p.get_from_set()
    prec_filename = None

    max_execution_time = p.config.getint("Onion", "max_execution_time")

    # send to crawler:
    activate_crawler = p.config.get("Crawler", "activate_crawler")
    if activate_crawler == 'True':
        activate_crawler = True
        print('Crawler enabled')
    else:
        activate_crawler = False
        print('Crawler disabled')

    faup = Faup()

    # Thanks to Faup project for this regex
    # https://github.com/stricaud/faup
    url_regex = "((http|https|ftp)?(?:\://)?([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.onion)(\:[0-9]+)*(/($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+))*)"
    i2p_regex = "((http|https|ftp)?(?:\://)?([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.i2p)(\:[0-9]+)*(/($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+))*)"
    re.compile(url_regex)

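    # Main loop: each message from the onion_categ queue is expected to look
    # like "<item path> <score>", as split below.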
    while True:
        message = p.get_from_set()
        if message is not None:
            print(message)
            filename, score = message.split()

            # "For each new paste"
            if prec_filename is None or filename != prec_filename:
                domains_list = []
                urls = []
                PST = Paste.Paste(filename)

                # max execution time on regex
                signal.alarm(max_execution_time)
                try:
                    for x in PST.get_regex(url_regex):
                        print(x)
                        # Extracting url with regex
                        url, s, credential, subdomain, domain, host, port, \
                            resource_path, query_string, f1, f2, f3, f4 = x

                        if '.onion' in url:
                            print(url)
                            domains_list.append(domain)
                            urls.append(url)
                except TimeoutException:
                    encoded_list = []
                    p.incr_module_timeout_statistic()
                    print("{0} processing timeout".format(PST.p_rel_path))
                    continue

                signal.alarm(0)

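                # The disabled block below would queue .i2p domains and links for a
                # dedicated i2p crawler, mirroring the onion queue handling.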
                '''
                for x in PST.get_regex(i2p_regex):
                    # Extracting url with regex
                    url, s, credential, subdomain, domain, host, port, \
                        resource_path, query_string, f1, f2, f3, f4 = x

                    if '.i2p' in url:
                        print('add i2p')
                        print(domain)
                        if not r_onion.sismember('i2p_domain', domain) and not r_onion.sismember('i2p_domain_crawler_queue', domain):
                            r_onion.sadd('i2p_domain', domain)
                            r_onion.sadd('i2p_link', url)
                            r_onion.sadd('i2p_domain_crawler_queue', domain)
                            msg = '{};{}'.format(url, PST.p_rel_path)
                            r_onion.sadd('i2p_crawler_queue', msg)
                '''

                # Saving the list of extracted onion domains.
                PST.__setattr__(channel, domains_list)
                PST.save_attribute_redis(channel, domains_list)
                to_print = 'Onion;{};{};{};'.format(PST.p_source, PST.p_date,
                                                    PST.p_name)

                print(len(domains_list))
                if len(domains_list) > 0:

                    if not activate_crawler:
                        publisher.warning('{}Detected {} .onion(s);{}'.format(
                            to_print, len(domains_list), PST.p_rel_path))
                    else:
                        publisher.info('{}Detected {} .onion(s);{}'.format(
                            to_print, len(domains_list), PST.p_rel_path))
                    now = datetime.datetime.now()
                    path = os.path.join('onions', str(now.year).zfill(4),
                                        str(now.month).zfill(2),
                                        str(now.day).zfill(2),
                                        str(int(time.mktime(now.utctimetuple()))))
                    to_print = 'Onion;{};{};{};'.format(PST.p_source,
                                                        PST.p_date,
                                                        PST.p_name)

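                    # Crawler enabled: queue every new .onion subdomain for crawling;
                    # domains without a 'first_seen' entry go to the priority queue.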
                    if activate_crawler:
                        date_month = datetime.datetime.now().strftime("%Y%m")
                        date = datetime.datetime.now().strftime("%Y%m%d")
                        for url in urls:

                            faup.decode(url)
                            url_unpack = faup.get()
                            ## TODO: # FIXME: remove me
                            try:
                                domain = url_unpack['domain'].decode().lower()
                            except Exception as e:
                                domain = url_unpack['domain'].lower()

                            ## TODO: blacklist by port ?
                            # check blacklist
                            if r_onion.sismember('blacklist_onion', domain):
                                continue

                            subdomain = re.findall(url_regex, url)
                            if len(subdomain) > 0:
                                subdomain = subdomain[0][4].lower()
                            else:
                                continue

                            # too many subdomains: keep only the last two labels
                            # (assumes the original intent was to index the split parts)
                            if len(subdomain.split('.')) > 3:
                                labels = subdomain.split('.')
                                subdomain = '{}.{}.onion'.format(labels[-3], labels[-2])

                            if not r_onion.sismember('month_onion_up:{}'.format(date_month), subdomain) and not r_onion.sismember('onion_down:'+date, subdomain):
                                if not r_onion.sismember('onion_domain_crawler_queue', subdomain):
                                    print('send to onion crawler')
                                    r_onion.sadd('onion_domain_crawler_queue', subdomain)
                                    msg = '{};{}'.format(url, PST.p_rel_path)
                                    if not r_onion.hexists('onion_metadata:{}'.format(subdomain), 'first_seen'):
                                        r_onion.sadd('onion_crawler_priority_queue', msg)
                                        print('send to priority queue')
                                    else:
                                        r_onion.sadd('onion_crawler_queue', msg)
                                    # tag if domain was up
                                    if r_onion.sismember('full_onion_up', subdomain):
                                        # TAG Item
                                        msg = 'infoleak:automatic-detection="onion";{}'.format(PST.p_rel_path)
                                        p.populate_set_out(msg, 'Tags')

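                    # Crawler disabled: fetch the URLs directly via tor_fetcher.py and
                    # tag the item once the batch has been processed.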
                    else:
                        for url in fetch(p, r_cache, urls, domains_list, path):
                            publisher.info('{}Checked {};{}'.format(to_print, url, PST.p_rel_path))

                        # TAG Item
                        msg = 'infoleak:automatic-detection="onion";{}'.format(PST.p_rel_path)
                        p.populate_set_out(msg, 'Tags')
                else:
                    publisher.info('{}Onion related;{}'.format(to_print, PST.p_rel_path))

            prec_filename = filename
        else:
            publisher.debug("Script url is Idling 10s")
            #print('Sleeping')
            time.sleep(10)