Mirror of https://github.com/ep1cman/unifi-protect-backup.git (synced 2025-12-10 13:23:29 +00:00)
Added event ID to uploader/downloader logging
Also fixed an issue where logging from outside unifi_protect_backup was not adding colors
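
In outline, each downloader/uploader now wraps its module logger in a logging.LoggerAdapter whose extra dict carries the current event ID, and setup_event_logger installs a handler whose format string contains an {event} field, so the ID is appended to every message produced while that event is handled. A minimal standalone sketch of the pattern (the event ID value below is hypothetical, and the format string is simplified from the one in the diff):

    import logging

    logger = logging.getLogger("unifi_protect_backup.downloader")
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    # '{event}' is filled from the LoggerAdapter's extra dict on each record
    handler.setFormatter(logging.Formatter("{asctime} [{levelname:^11s}] {name} :{event} {message}", style="{"))
    logger.addHandler(handler)
    logger.propagate = False

    # Outside of event handling the suffix is empty
    adapter = logging.LoggerAdapter(logger, {"event": ""})
    adapter.info("Starting Downloader")

    # While an event is being processed, re-bind the adapter so every line is tagged
    event_id = "6423a1b2c3d4e5f6a7b8c9d0"  # hypothetical ID
    adapter = logging.LoggerAdapter(logger, {"event": f" [{event_id}]"})
    adapter.info(f"Downloading event: {event_id}")
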
@@ -16,9 +16,11 @@ from unifi_protect_backup.utils import (
     get_camera_name,
     human_readable_size,
     run_command,
+    setup_event_logger,
 )
 
 logger = logging.getLogger(__name__)
+setup_event_logger(logger)
 
 
 async def get_video_length(video: bytes) -> float:
@@ -41,41 +43,43 @@ class VideoDownloader:
         self._protect: ProtectApiClient = protect
         self._download_queue: asyncio.Queue = download_queue
         self.video_queue = VideoQueue(buffer_size)
+        self.logger = logging.LoggerAdapter(logger, {'event': ''})
 
         # Check if `ffprobe` is available
         ffprobe = shutil.which('ffprobe')
         if ffprobe is not None:
-            logger.debug(f"ffprobe found: {ffprobe}")
+            self.logger.debug(f"ffprobe found: {ffprobe}")
             self._has_ffprobe = True
         else:
             self._has_ffprobe = False
 
     async def start(self):
         """Main loop"""
-        logger.info("Starting Downloader")
+        self.logger.info("Starting Downloader")
         while True:
             try:
                 event = await self._download_queue.get()
+                self.logger = logging.LoggerAdapter(logger, {'event': f' [{event.id}]'})
 
                 # Fix timezones since pyunifiprotect sets all timestamps to UTC. Instead localize them to
                 # the timezone of the unifi protect NVR.
                 event.start = event.start.replace(tzinfo=pytz.utc).astimezone(self._protect.bootstrap.nvr.timezone)
                 event.end = event.end.replace(tzinfo=pytz.utc).astimezone(self._protect.bootstrap.nvr.timezone)
 
-                logger.info(f"Downloading event: {event.id}")
-                logger.debug(f"Remaining Download Queue: {self._download_queue.qsize()}")
+                self.logger.info(f"Downloading event: {event.id}")
+                self.logger.debug(f"Remaining Download Queue: {self._download_queue.qsize()}")
                 output_queue_current_size = human_readable_size(self.video_queue.qsize())
                 output_queue_max_size = human_readable_size(self.video_queue.maxsize)
-                logger.debug(f"Video Download Buffer: {output_queue_current_size}/{output_queue_max_size}")
-                logger.debug(f" Camera: {await get_camera_name(self._protect, event.camera_id)}")
+                self.logger.debug(f"Video Download Buffer: {output_queue_current_size}/{output_queue_max_size}")
+                self.logger.debug(f" Camera: {await get_camera_name(self._protect, event.camera_id)}")
                 if event.type == EventType.SMART_DETECT:
-                    logger.debug(f" Type: {event.type} ({', '.join(event.smart_detect_types)})")
+                    self.logger.debug(f" Type: {event.type} ({', '.join(event.smart_detect_types)})")
                 else:
-                    logger.debug(f" Type: {event.type}")
-                logger.debug(f" Start: {event.start.strftime('%Y-%m-%dT%H-%M-%S')} ({event.start.timestamp()})")
-                logger.debug(f" End: {event.end.strftime('%Y-%m-%dT%H-%M-%S')} ({event.end.timestamp()})")
+                    self.logger.debug(f" Type: {event.type}")
+                self.logger.debug(f" Start: {event.start.strftime('%Y-%m-%dT%H-%M-%S')} ({event.start.timestamp()})")
+                self.logger.debug(f" End: {event.end.strftime('%Y-%m-%dT%H-%M-%S')} ({event.end.timestamp()})")
                 duration = (event.end - event.start).total_seconds()
-                logger.debug(f" Duration: {duration}s")
+                self.logger.debug(f" Duration: {duration}s")
 
                 # Unifi protect does not return full video clips if the clip is requested too soon.
                 # There are two issues at play here:
@@ -87,7 +91,7 @@ class VideoDownloader:
                 time_since_event_ended = datetime.utcnow().replace(tzinfo=timezone.utc) - event.end
                 sleep_time = (timedelta(seconds=5 * 1.5) - time_since_event_ended).total_seconds()
                 if sleep_time > 0:
-                    logger.debug(f" Sleeping ({sleep_time}s) to ensure clip is ready to download...")
+                    self.logger.debug(f" Sleeping ({sleep_time}s) to ensure clip is ready to download...")
                     await asyncio.sleep(sleep_time)
 
                 video = await self._download(event)
@@ -99,29 +103,29 @@ class VideoDownloader:
                 await self._check_video_length(video, duration)
 
                 await self.video_queue.put((event, video))
-                logger.debug("Added to upload queue")
+                self.logger.debug("Added to upload queue")
 
             except Exception as e:
-                logger.warn(f"Unexpected exception occurred, abandoning event {event.id}:")
-                logger.exception(e)
+                self.logger.warn(f"Unexpected exception occurred, abandoning event {event.id}:")
+                self.logger.exception(e)
 
     async def _download(self, event: Event) -> bytes:
         """Downloads the video clip for the given event"""
-        logger.debug(" Downloading video...")
+        self.logger.debug(" Downloading video...")
         for x in range(5):
             try:
                 video = await self._protect.get_camera_video(event.camera_id, event.start, event.end)
                 assert isinstance(video, bytes)
                 break
            except (AssertionError, ClientPayloadError, TimeoutError) as e:
-                logger.warn(f" Failed download attempt {x+1}, retying in 1s")
-                logger.exception(e)
+                self.logger.warn(f" Failed download attempt {x+1}, retying in 1s")
+                self.logger.exception(e)
                 await asyncio.sleep(1)
         else:
-            logger.warn(f"Download failed after 5 attempts, abandoning event {event.id}:")
+            self.logger.warn(f"Download failed after 5 attempts, abandoning event {event.id}:")
             return
 
-        logger.debug(f" Downloaded video size: {human_readable_size(len(video))}s")
+        self.logger.debug(f" Downloaded video size: {human_readable_size(len(video))}s")
         return video
 
     async def _check_video_length(self, video, duration):
@@ -132,8 +136,8 @@ class VideoDownloader:
             downloaded_duration = await get_video_length(video)
             msg = f" Downloaded video length: {downloaded_duration:.3f}s" f"({downloaded_duration - duration:+.3f}s)"
             if downloaded_duration < duration:
-                logger.warning(msg)
+                self.logger.warning(msg)
             else:
-                logger.debug(msg)
+                self.logger.debug(msg)
         except SubprocessException as e:
-            logger.warn(" `ffprobe` failed")
+            self.logger.warn(" `ffprobe` failed")

@@ -7,166 +7,20 @@ import shutil
 from cmath import log
 from pprint import pprint
 from time import sleep
-from typing import Callable, List, Optional
+from typing import Callable, List
 
 import aiosqlite
 from pyunifiprotect import ProtectApiClient
 from pyunifiprotect.data.types import ModelType
 
 from unifi_protect_backup import EventListener, MissingEventChecker, Purge, VideoDownloader, VideoUploader
-from unifi_protect_backup.utils import SubprocessException, parse_rclone_retention, run_command
+from unifi_protect_backup.utils import SubprocessException, parse_rclone_retention, run_command, setup_logging
 
 logger = logging.getLogger(__name__)
 
 # TODO: https://github.com/cjrh/aiorun#id6 (smart shield)
 
 
-def add_logging_level(levelName: str, levelNum: int, methodName: Optional[str] = None) -> None:
-    """Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.
-
-    `levelName` becomes an attribute of the `logging` module with the value
-    `levelNum`. `methodName` becomes a convenience method for both `logging`
-    itself and the class returned by `logging.getLoggerClass()` (usually just
-    `logging.Logger`).
-
-    To avoid accidental clobbering of existing attributes, this method will
-    raise an `AttributeError` if the level name is already an attribute of the
-    `logging` module or if the method name is already present
-
-    Credit: https://stackoverflow.com/a/35804945
-
-    Args:
-        levelName (str): The name of the new logging level (in all caps).
-        levelNum (int): The priority value of the logging level, lower=more verbose.
-        methodName (str): The name of the method used to log using this.
-                          If `methodName` is not specified, `levelName.lower()` is used.
-
-    Example:
-    ::
-        >>> add_logging_level('TRACE', logging.DEBUG - 5)
-        >>> logging.getLogger(__name__).setLevel("TRACE")
-        >>> logging.getLogger(__name__).trace('that worked')
-        >>> logging.trace('so did this')
-        >>> logging.TRACE
-        5
-
-    """
-    if not methodName:
-        methodName = levelName.lower()
-
-    if hasattr(logging, levelName):
-        raise AttributeError('{} already defined in logging module'.format(levelName))
-    if hasattr(logging, methodName):
-        raise AttributeError('{} already defined in logging module'.format(methodName))
-    if hasattr(logging.getLoggerClass(), methodName):
-        raise AttributeError('{} already defined in logger class'.format(methodName))
-
-    # This method was inspired by the answers to Stack Overflow post
-    # http://stackoverflow.com/q/2183233/2988730, especially
-    # http://stackoverflow.com/a/13638084/2988730
-    def logForLevel(self, message, *args, **kwargs):
-        if self.isEnabledFor(levelNum):
-            self._log(levelNum, message, args, **kwargs)
-
-    def logToRoot(message, *args, **kwargs):
-        logging.log(levelNum, message, *args, **kwargs)
-
-    logging.addLevelName(levelNum, levelName)
-    setattr(logging, levelName, levelNum)
-    setattr(logging.getLoggerClass(), methodName, logForLevel)
-    setattr(logging, methodName, logToRoot)
-
-
-def setup_logging(verbosity: int, color_logging: bool = False) -> None:
-    """Configures loggers to provided the desired level of verbosity.
-
-    Verbosity 0: Only log info messages created by `unifi-protect-backup`, and all warnings
-    verbosity 1: Only log info & debug messages created by `unifi-protect-backup`, and all warnings
-    verbosity 2: Log info & debug messages created by `unifi-protect-backup`, command output, and
-                 all warnings
-    Verbosity 3: Log debug messages created by `unifi-protect-backup`, command output, all info
-                 messages, and all warnings
-    Verbosity 4: Log debug messages created by `unifi-protect-backup` command output, all info
-                 messages, all warnings, and websocket data
-    Verbosity 5: Log websocket data, command output, all debug messages, all info messages and all
-                 warnings
-
-    Args:
-        verbosity (int): The desired level of verbosity
-        color_logging (bool): If colors should be used in the log (default=False)
-
-    """
-    add_logging_level(
-        'EXTRA_DEBUG',
-        logging.DEBUG - 1,
-    )
-    add_logging_level(
-        'WEBSOCKET_DATA',
-        logging.DEBUG - 2,
-    )
-
-    format = "{asctime} [{levelname:^11s}] {name:<42} :\t{message}"
-    date_format = "%Y-%m-%d %H:%M:%S"
-    style = '{'
-
-    logger = logging.getLogger("unifi_protect_backup")
-
-    sh = logging.StreamHandler()
-    formatter = logging.Formatter(format, date_format, style)
-    sh.setFormatter(formatter)
-
-    def decorate_emit(fn):
-        # add methods we need to the class
-        def new(*args):
-            levelno = args[0].levelno
-            if levelno >= logging.CRITICAL:
-                color = '\x1b[31;1m' # RED
-            elif levelno >= logging.ERROR:
-                color = '\x1b[31;1m' # RED
-            elif levelno >= logging.WARNING:
-                color = '\x1b[33;1m' # YELLOW
-            elif levelno >= logging.INFO:
-                color = '\x1b[32;1m' # GREEN
-            elif levelno >= logging.DEBUG:
-                color = '\x1b[36;1m' # CYAN
-            elif levelno >= logging.EXTRA_DEBUG:
-                color = '\x1b[35;1m' # MAGENTA
-            else:
-                color = '\x1b[0m'
-
-            if color_logging:
-                args[0].levelname = f"{color}{args[0].levelname:^11s}\x1b[0m"
-            else:
-                args[0].levelname = f"{args[0].levelname:^11s}"
-
-            return fn(*args)
-
-        return new
-
-    sh.emit = decorate_emit(sh.emit)
-    logger.addHandler(sh)
-    logger.propagate = False
-
-    if verbosity == 0:
-        logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
-        logger.setLevel(logging.INFO)
-    elif verbosity == 1:
-        logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
-        logger.setLevel(logging.DEBUG)
-    elif verbosity == 2:
-        logging.basicConfig(level=logging.WARN, format=format, style=style, datefmt=date_format)
-        logger.setLevel(logging.EXTRA_DEBUG) # type: ignore
-    elif verbosity == 3:
-        logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
-        logger.setLevel(logging.EXTRA_DEBUG) # type: ignore
-    elif verbosity == 4:
-        logging.basicConfig(level=logging.INFO, format=format, style=style, datefmt=date_format)
-        logger.setLevel(logging.WEBSOCKET_DATA) # type: ignore
-    elif verbosity >= 5:
-        logging.basicConfig(level=logging.DEBUG, format=format, style=style, datefmt=date_format)
-        logger.setLevel(logging.WEBSOCKET_DATA) # type: ignore
-
-
 async def create_database(path: str):
     """Creates sqlite database and creates the events abd backups tables"""
     db = await aiosqlite.connect(path)

@@ -8,9 +8,10 @@ import aiosqlite
 from pyunifiprotect.data.nvr import Event
 from pyunifiprotect import ProtectApiClient
 
-from unifi_protect_backup.utils import get_camera_name, SubprocessException, VideoQueue, run_command
+from unifi_protect_backup.utils import get_camera_name, VideoQueue, run_command, setup_event_logger
 
 logger = logging.getLogger(__name__)
+setup_event_logger(logger)
 
 
 class VideoUploader:
@@ -34,6 +35,7 @@ class VideoUploader:
         self._rclone_args: str = rclone_args
         self._file_structure_format: str = file_structure_format
         self._db: aiosqlite.Connection = db
+        self.logger = logging.LoggerAdapter(logger, {'event': ''})
 
     async def start(self):
         """Main loop
@@ -41,24 +43,26 @@ class VideoUploader:
        Runs forever looking for video data in the video queue and then uploads it using rclone, finally it updates the database
        """
 
-        logger.info("Starting Uploader")
+        self.logger.info("Starting Uploader")
         while True:
             try:
                 event, video = await self._video_queue.get()
-                logger.info(f"Uploading event: {event.id}")
-                logger.debug(f" Remaining Upload Queue: {self._video_queue.qsize_files()}")
+                self.logger = logging.LoggerAdapter(logger, {'event': f' [{event.id}]'})
+
+                self.logger.info(f"Uploading event: {event.id}")
+                self.logger.debug(f" Remaining Upload Queue: {self._video_queue.qsize_files()}")
 
                 destination = await self._generate_file_path(event)
-                logger.debug(f" Destination: {destination}")
+                self.logger.debug(f" Destination: {destination}")
 
                 await self._upload_video(video, destination, self._rclone_args)
                 await self._update_database(event, destination)
 
-                logger.debug(f"Uploaded")
+                self.logger.debug(f"Uploaded")
 
             except Exception as e:
-                logger.warn(f"Unexpected exception occurred, abandoning event {event.id}:")
-                logger.exception(e)
+                self.logger.warn(f"Unexpected exception occurred, abandoning event {event.id}:")
+                self.logger.exception(e)
 
     async def _upload_video(self, video: bytes, destination: pathlib.Path, rclone_args: str):
         """Upload video using rclone.
@@ -76,7 +80,7 @@ class VideoUploader:
         """
         returncode, stdout, stderr = await run_command(f'rclone rcat -vv {rclone_args} "{destination}"', video)
         if returncode != 0:
-            logger.warn(f" Failed to upload file: '{destination}'")
+            self.logger.warn(f" Failed to upload file: '{destination}'")
 
     async def _update_database(self, event: Event, destination: str):
         """

@@ -1,6 +1,7 @@
 import logging
 import re
 import asyncio
+from typing import Optional
 
 from dateutil.relativedelta import relativedelta
 
@@ -8,6 +9,178 @@ from pyunifiprotect import ProtectApiClient
 
 logger = logging.getLogger(__name__)
 
 
+def add_logging_level(levelName: str, levelNum: int, methodName: Optional[str] = None) -> None:
+    """Comprehensively adds a new logging level to the `logging` module and the currently configured logging class.
+
+    `levelName` becomes an attribute of the `logging` module with the value
+    `levelNum`. `methodName` becomes a convenience method for both `logging`
+    itself and the class returned by `logging.getLoggerClass()` (usually just
+    `logging.Logger`).
+
+    To avoid accidental clobbering of existing attributes, this method will
+    raise an `AttributeError` if the level name is already an attribute of the
+    `logging` module or if the method name is already present
+
+    Credit: https://stackoverflow.com/a/35804945
+
+    Args:
+        levelName (str): The name of the new logging level (in all caps).
+        levelNum (int): The priority value of the logging level, lower=more verbose.
+        methodName (str): The name of the method used to log using this.
+                          If `methodName` is not specified, `levelName.lower()` is used.
+
+    Example:
+    ::
+        >>> add_logging_level('TRACE', logging.DEBUG - 5)
+        >>> logging.getLogger(__name__).setLevel("TRACE")
+        >>> logging.getLogger(__name__).trace('that worked')
+        >>> logging.trace('so did this')
+        >>> logging.TRACE
+        5
+
+    """
+    if not methodName:
+        methodName = levelName.lower()
+
+    if hasattr(logging, levelName):
+        raise AttributeError('{} already defined in logging module'.format(levelName))
+    if hasattr(logging, methodName):
+        raise AttributeError('{} already defined in logging module'.format(methodName))
+    if hasattr(logging.getLoggerClass(), methodName):
+        raise AttributeError('{} already defined in logger class'.format(methodName))
+
+    # This method was inspired by the answers to Stack Overflow post
+    # http://stackoverflow.com/q/2183233/2988730, especially
+    # http://stackoverflow.com/a/13638084/2988730
+    def logForLevel(self, message, *args, **kwargs):
+        if self.isEnabledFor(levelNum):
+            self._log(levelNum, message, args, **kwargs)
+
+    def logToRoot(message, *args, **kwargs):
+        logging.log(levelNum, message, *args, **kwargs)
+
+    def adapterLog(self, msg, *args, **kwargs):
+        """
+        Delegate an error call to the underlying logger.
+        """
+        self.log(levelNum, msg, *args, **kwargs)
+
+    logging.addLevelName(levelNum, levelName)
+    setattr(logging, levelName, levelNum)
+    setattr(logging.getLoggerClass(), methodName, logForLevel)
+    setattr(logging, methodName, logToRoot)
+    setattr(logging.LoggerAdapter, methodName, adapterLog)
+
+
+color_logging = False
+
+
+def create_logging_handler(format):
+    date_format = "%Y-%m-%d %H:%M:%S"
+    style = '{'
+
+    sh = logging.StreamHandler()
+    formatter = logging.Formatter(format, date_format, style)
+    sh.setFormatter(formatter)
+
+    def decorate_emit(fn):
+        # add methods we need to the class
+        def new(*args):
+            levelno = args[0].levelno
+            if levelno >= logging.CRITICAL:
+                color = '\x1b[31;1m' # RED
+            elif levelno >= logging.ERROR:
+                color = '\x1b[31;1m' # RED
+            elif levelno >= logging.WARNING:
+                color = '\x1b[33;1m' # YELLOW
+            elif levelno >= logging.INFO:
+                color = '\x1b[32;1m' # GREEN
+            elif levelno >= logging.DEBUG:
+                color = '\x1b[36;1m' # CYAN
+            elif levelno >= logging.EXTRA_DEBUG:
+                color = '\x1b[35;1m' # MAGENTA
+            else:
+                color = '\x1b[0m'
+
+            global color_logging
+            if color_logging:
+                args[0].levelname = f"{color}{args[0].levelname:^11s}\x1b[0m"
+            else:
+                args[0].levelname = f"{args[0].levelname:^11s}"
+
+            return fn(*args)
+
+        return new
+
+    sh.emit = decorate_emit(sh.emit)
+    return sh
+
+
+def setup_logging(verbosity: int, color_logging: bool = False) -> None:
+    """Configures loggers to provided the desired level of verbosity.
+
+    Verbosity 0: Only log info messages created by `unifi-protect-backup`, and all warnings
+    verbosity 1: Only log info & debug messages created by `unifi-protect-backup`, and all warnings
+    verbosity 2: Log info & debug messages created by `unifi-protect-backup`, command output, and
+                 all warnings
+    Verbosity 3: Log debug messages created by `unifi-protect-backup`, command output, all info
+                 messages, and all warnings
+    Verbosity 4: Log debug messages created by `unifi-protect-backup` command output, all info
+                 messages, all warnings, and websocket data
+    Verbosity 5: Log websocket data, command output, all debug messages, all info messages and all
+                 warnings
+
+    Args:
+        verbosity (int): The desired level of verbosity
+        color_logging (bool): If colors should be used in the log (default=False)
+
+    """
+    globals()['color_logging'] = color_logging
+
+    add_logging_level(
+        'EXTRA_DEBUG',
+        logging.DEBUG - 1,
+    )
+    add_logging_level(
+        'WEBSOCKET_DATA',
+        logging.DEBUG - 2,
+    )
+
+    format = "{asctime} [{levelname:^11s}] {name:<42} : {message}"
+    sh = create_logging_handler(format)
+
+    logger = logging.getLogger("unifi_protect_backup")
+    logger.addHandler(sh)
+    logger.propagate = False
+
+    if verbosity == 0:
+        logging.basicConfig(level=logging.WARN, handlers=[sh])
+        logger.setLevel(logging.INFO)
+    elif verbosity == 1:
+        logging.basicConfig(level=logging.WARN, handlers=[sh])
+        logger.setLevel(logging.DEBUG)
+    elif verbosity == 2:
+        logging.basicConfig(level=logging.WARN, handlers=[sh])
+        logger.setLevel(logging.EXTRA_DEBUG) # type: ignore
+    elif verbosity == 3:
+        logging.basicConfig(level=logging.INFO, handlers=[sh])
+        logger.setLevel(logging.EXTRA_DEBUG) # type: ignore
+    elif verbosity == 4:
+        logging.basicConfig(level=logging.INFO, handlers=[sh])
+        logger.setLevel(logging.WEBSOCKET_DATA) # type: ignore
+    elif verbosity >= 5:
+        logging.basicConfig(level=logging.DEBUG, handlers=[sh])
+        logger.setLevel(logging.WEBSOCKET_DATA) # type: ignore
+
+
+def setup_event_logger(logger):
+    format = "{asctime} [{levelname:^11s}] {name:<42} :{event} {message}"
+    sh = create_logging_handler(format)
+    logger.addHandler(sh)
+    logger.propagate = False
+
+
 _suffixes = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"]
 
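
The color fix for messages logged outside the unifi_protect_backup namespace comes from the last hunk above: setup_logging now builds one colorizing handler via create_logging_handler and hands it to logging.basicConfig(handlers=[sh]), so records from third-party loggers that propagate to the root logger are rendered (and colored) by the same handler rather than a plain default one. A rough standalone sketch of that idea, not the project's exact code:

    import logging

    sh = logging.StreamHandler()
    sh.setFormatter(logging.Formatter("{asctime} [{levelname:^11s}] {name:<42} : {message}", "%Y-%m-%d %H:%M:%S", "{"))

    # Attaching the handler to the root logger means any logger that still
    # propagates (e.g. a third-party library's) is formatted by it as well.
    logging.basicConfig(level=logging.WARNING, handlers=[sh])

    logging.getLogger("pyunifiprotect").warning("rendered by the shared handler")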