#!/usr/bin/env python3

import csv
import gzip
import logging
import shutil

from collections import defaultdict
from collections.abc import Mapping
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List

from redis import Redis

from lookyloo.default import AbstractManager, get_config, get_homedir, get_socket_path
from lookyloo.helpers import get_captures_dir

logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s:%(message)s',
                    level=logging.INFO)


class Archiver(AbstractManager):
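    '''Background worker that moves old captures out of the captures directory,
    keeps the per-directory index files up to date, and compresses the
    archived HAR files.'''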

    def __init__(self, loglevel: int=logging.INFO):
        super().__init__(loglevel)
        self.script_name = 'archiver'
        self.redis = Redis(unix_socket_path=get_socket_path('cache'), decode_responses=True)

        # make sure archived captures dir exists
        self.archived_captures_dir = get_homedir() / 'archived_captures'
        self.archived_captures_dir.mkdir(parents=True, exist_ok=True)

        self._load_indexes()

    def _to_run_forever(self):
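        # One pass of the worker: archive old captures, refresh the index files,
        # reload them into Redis, then compress the archived HAR files.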
        self._archive()
        self._update_all_capture_indexes()
        self._load_indexes()
        self._compress_hars()

    def _update_index(self, root_dir: Path) -> None:
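        '''Rebuild the index file of root_dir from the uuid files it contains.
        If the directory has no captures left, move it to discarded_captures.'''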
        current_index: Dict[str, str] = {}

        index_file = root_dir / 'index'
        if index_file.exists():
            # Skip index entries pointing to directories that have been archived.
            # Use a set so the membership checks below don't exhaust an iterator.
            existing_captures = set(index_file.parent.iterdir())
            try:
                with index_file.open('r') as _f:
                    current_index = {uuid: dirname for uuid, dirname in csv.reader(_f) if (index_file.parent / dirname) in existing_captures}
            except Exception:
                # the index file is broken, it will be recreated.
                pass
            if not current_index:
                index_file.unlink()

        for uuid_file in root_dir.glob('*/uuid'):
            if uuid_file.parent.name in current_index.values():
                # The path is already in the index file, no need to read the uuid file
                continue
            with uuid_file.open() as _f:
                current_index[_f.read().strip()] = uuid_file.parent.name

        if not current_index:
            # The directory has been archived. It is probably safe to unlink, but
            # if it's not, we would lose a whole bunch of captures. Moving instead for safety.
            root_dir.rename(get_homedir() / 'discarded_captures' / root_dir.name)
            return

        with index_file.open('w') as _f:
            index_writer = csv.writer(_f)
            for uuid, dirname in current_index.items():
                index_writer.writerow([uuid, dirname])

    def _update_all_capture_indexes(self):
        '''Run this after the captures are in the proper directories.'''
        # Recent captures
        directories_to_index = {capture_dir.parent.parent for capture_dir in get_captures_dir().rglob('uuid')}
        for directory_to_index in directories_to_index:
            self.logger.info(f'Updating index for {directory_to_index}')
            self._update_index(directory_to_index)
        self.logger.info('Recent indexes updated')

        # Archived captures
        directories_to_index = {capture_dir.parent.parent for capture_dir in self.archived_captures_dir.rglob('uuid')}
        for directory_to_index in directories_to_index:
            self.logger.info(f'Updating index for {directory_to_index}')
            self._update_index(directory_to_index)
        self.logger.info('Archived indexes updated')

    def _archive(self):
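        '''Move captures older than the configured archive interval into
        archived_captures/<year>/<month>.'''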
        archive_interval = timedelta(days=get_config('generic', 'archive'))
        cut_time = (datetime.now() - archive_interval).date()
        cut_time = cut_time.replace(day=1)
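        # cut_time now points to the first day of that month, so captures are
        # always archived by whole months.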

        # Format:
        # { 2020: { 12: [directory] } }
        to_archive: Dict[int, Dict[int, List[Path]]] = defaultdict(lambda: defaultdict(list))
        for capture_uuid in get_captures_dir().rglob('uuid'):
            timestamp = datetime.strptime(capture_uuid.parent.name, '%Y-%m-%dT%H:%M:%S.%f')
            if timestamp.date() >= cut_time:
                continue
            to_archive[timestamp.year][timestamp.month].append(capture_uuid.parent)
            self.logger.info(f'Archiving {capture_uuid.parent}.')

        if not to_archive:
            self.logger.info('Nothing to archive.')
            return

        p = self.redis.pipeline()
        for year, month_captures in to_archive.items():
            for month, captures in month_captures.items():
                dest_dir = self.archived_captures_dir / str(year) / f'{month:02}'
                dest_dir.mkdir(parents=True, exist_ok=True)
                for capture_path in captures:
                    p.delete(str(capture_path))
                    (capture_path / 'tree.pickle').unlink(missing_ok=True)
                    capture_path.rename(dest_dir / capture_path.name)
        p.execute()

        self.logger.info('Archiving done.')

    def _compress_hars(self):
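        '''Gzip every .har file of the archived captures and remove the
        uncompressed original.'''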
        self.logger.info('Compressing archived captures')
        for index in self.archived_captures_dir.rglob('index'):
            with index.open('r') as _f:
                for uuid, dirname in csv.reader(_f):
                    for har in (index.parent / dirname).glob('*.har'):
                        if not har.exists():
                            continue
                        with har.open('rb') as f_in:
                            with gzip.open(f'{har}.gz', 'wb') as f_out:
                                shutil.copyfileobj(f_in, f_out)
                        har.unlink()
        self.logger.info('Archived captures compressed')

    def _load_indexes(self):
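        '''Load the index files into Redis: recent captures go to lookup_dirs,
        archived captures to lookup_dirs_archived.'''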
        # Initialize recent index
        for index in get_captures_dir().rglob('index'):
            with index.open('r') as _f:
                recent_uuids: Mapping = {uuid: str(index.parent / dirname) for uuid, dirname in csv.reader(_f) if (index.parent / dirname).exists()}
            if recent_uuids:
                self.redis.hset('lookup_dirs', mapping=recent_uuids)
            else:
                index.unlink()
        self.logger.info('Recent indexes loaded')

        # Initialize archives
        for index in self.archived_captures_dir.rglob('index'):
            with index.open('r') as _f:
                archived_uuids: Mapping = {uuid: str(index.parent / dirname) for uuid, dirname in csv.reader(_f) if (index.parent / dirname).exists()}
            if archived_uuids:
                self.redis.hset('lookup_dirs_archived', mapping=archived_uuids)
                self.redis.hdel('lookup_dirs', *archived_uuids.keys())
            else:
                index.unlink()
        self.logger.info('Archived indexes loaded')


def main():
    a = Archiver()
    a.run(sleep_in_sec=3600)


if __name__ == '__main__':
    main()