From 9c5c4e8c680ec939171a7c4b77a984ab09448d24 Mon Sep 17 00:00:00 2001
From: viklofg
Date: Fri, 14 Jun 2024 15:59:31 +0200
Subject: [PATCH] Delete src/htrflow_core/logging directory

---
 src/htrflow_core/logging/gpu_profiler.py  | 46 -----------------------
 src/htrflow_core/logging/line_profiler.py | 31 ---------------
 src/htrflow_core/logging/logger.py        | 25 ------------
 3 files changed, 102 deletions(-)
 delete mode 100644 src/htrflow_core/logging/gpu_profiler.py
 delete mode 100644 src/htrflow_core/logging/line_profiler.py
 delete mode 100644 src/htrflow_core/logging/logger.py

diff --git a/src/htrflow_core/logging/gpu_profiler.py b/src/htrflow_core/logging/gpu_profiler.py
deleted file mode 100644
index 3883915..0000000
--- a/src/htrflow_core/logging/gpu_profiler.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import functools
-import time
-
-import torch
-
-from htrflow_core.logging.logger import CustomLogger
-
-
-def profile_gpu_usage(verbose=0):
-    logger = CustomLogger("GPUProfiler", verbose=verbose).logger
-
-    def decorator(func):
-        @functools.wraps(func)
-        def wrapper(*args, **kwargs):
-            if torch.cuda.is_available():
-                torch.cuda.synchronize()
-                start_time = time.time()
-                start_mem = torch.cuda.memory_allocated()
-                result = func(*args, **kwargs)
-                torch.cuda.synchronize()
-                end_time = time.time()
-                end_mem = torch.cuda.memory_allocated()
-                logger.info(f"Memory Usage for {func.__name__}: {end_mem - start_mem} bytes")
-                logger.info(f"Execution Time for {func.__name__}: {end_time - start_time} seconds")
-            else:
-                logger.warning("CUDA is not available. Running the function without profiling GPU usage.")
-                result = func(*args, **kwargs)
-            return result
-
-        return wrapper
-
-    return decorator
-
-
-if __name__ == "__main__":
-
-    @profile_gpu_usage(verbose=2)
-    def my_gpu_intensive_function(tensor_size, n_operations):
-        a = torch.rand(tensor_size, tensor_size, device="cuda")
-        b = torch.rand(tensor_size, tensor_size, device="cuda")
-        for _ in range(n_operations):
-            a = a * b + torch.sin(a)
-        return a
-
-    if torch.cuda.is_available():
-        result = my_gpu_intensive_function(10000, 1000)
diff --git a/src/htrflow_core/logging/line_profiler.py b/src/htrflow_core/logging/line_profiler.py
deleted file mode 100644
index ecc33d6..0000000
--- a/src/htrflow_core/logging/line_profiler.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import time
-
-from htrflow_core.logging.logger import CustomLogger
-
-
-logger = CustomLogger("ProfileLogger", verbose=3)
-
-
-def profile_performance(func):
-    def wrapper(*args, **kwargs):
-        start_time = time.time()
-        result = func(*args, **kwargs)
-        end_time = time.time()
-        execution_time = end_time - start_time
-        # Use the custom logger for logging
-        logger.logger.debug(f"Function: {func.__name__}, Execution Time: {execution_time} seconds")
-        return result
-
-    return wrapper
-
-
-if __name__ == "__main__":
-
-    @profile_performance
-    def example_function(x):
-        result = 0
-        for i in range(x):
-            result += i
-        return result
-
-    example_function(100)
diff --git a/src/htrflow_core/logging/logger.py b/src/htrflow_core/logging/logger.py
deleted file mode 100644
index 1b2246a..0000000
--- a/src/htrflow_core/logging/logger.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import logging
-
-
-class CustomLogger:
-    def __init__(self, name, verbose=0):
-        self.logger = logging.getLogger(name)
-        self.set_verbose(verbose)
-
-        self.logger.propagate = False
-
-        if not self.logger.handlers:
-            console_handler = logging.StreamHandler()
-            formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
-            console_handler.setFormatter(formatter)
-            self.logger.addHandler(console_handler)
-
-    def set_verbose(self, verbose):
-        if verbose == 1:
-            self.logger.setLevel(logging.WARNING)
-        elif verbose == 2:
-            self.logger.setLevel(logging.INFO)
-        elif verbose >= 3:
-            self.logger.setLevel(logging.DEBUG)
-        else:
-            self.logger.setLevel(logging.ERROR)