From 9ee2e7ff25d449247d62d8d5a0b1b6fc1faff193 Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Mon, 11 Mar 2024 10:58:53 -0400 Subject: [PATCH] Do not override log_memory_usage when debug logs are enabled. The speed cost of log_memory_usage=True is large. It is common to want debug logs without enabling log_memory_usage. --- .../model_manager/load/model_cache/model_cache_default.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py index 2933b169f6f..3bcd1840353 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py @@ -19,7 +19,6 @@ """ import gc -import logging import math import sys import time @@ -92,8 +91,7 @@ def __init__( self._execution_device: torch.device = execution_device self._storage_device: torch.device = storage_device self._logger = logger or InvokeAILogger.get_logger(self.__class__.__name__) - self._log_memory_usage = log_memory_usage or self._logger.level == logging.DEBUG - # used for stats collection + self._log_memory_usage = log_memory_usage self._stats: Optional[CacheStats] = None self._cached_models: Dict[str, CacheRecord[AnyModel]] = {}