From d7969cb070f9a59663d0b4ce7aabe7d49e236fc3 Mon Sep 17 00:00:00 2001
From: Chenlei Hu
Date: Fri, 20 Dec 2024 13:24:55 -0800
Subject: [PATCH] Replace print with logging (#6138)

* Replace print with logging

* nit

* nit

* nit

* nit

* nit

* nit
---
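Note: a minimal sketch of the convention this patch adopts across the tree
(illustrative only, not part of the diff; the basicConfig call is an
assumption here, ComfyUI installs its own log handlers at startup):

    import logging

    # One-time setup at the application entry point.
    logging.basicConfig(level=logging.INFO)

    logging.info("Prestartup took %.1f seconds", 1.5)  # routine progress -> INFO
    logging.warning("load_unet is deprecated")         # actionable notice -> WARNING
    logging.error("Failed to execute startup-script")  # failure -> ERROR
    print("Done!")  # noqa: T201 - deliberate console output, exempted per line

With "T201" added to lint.select in ruff.toml, `ruff check .` flags every bare
print(), so module code must log instead; standalone scripts and tests that
really do write to the console keep print() and opt out per line with the
trailing "# noqa: T201" comment.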
 .ci/update_windows/update.py                      | 14 +++++++-------
 app/user_manager.py                               |  4 ++--
 comfy/cldm/cldm.py                                |  1 -
 comfy/extra_samplers/uni_pc.py                    |  4 ++--
 comfy/hooks.py                                    |  3 ++-
 comfy/ldm/aura/mmdit.py                           |  1 -
 comfy/ldm/modules/diffusionmodules/util.py        |  7 ++++---
 comfy/ldm/util.py                                 |  5 +++--
 comfy/model_base.py                               |  1 -
 comfy/model_management.py                         |  2 +-
 comfy/model_patcher.py                            |  6 +++---
 comfy/sd.py                                       |  4 ++--
 comfy/sd1_clip.py                                 |  3 +--
 comfy_extras/chainner_models/model_loading.py     |  3 ++-
 comfy_extras/nodes_hooks.py                       |  5 +++--
 main.py                                           |  8 ++++----
 new_updater.py                                    |  2 +-
 ruff.toml                                         |  5 ++++-
 tests-unit/server/routes/internal_routes_test.py  |  4 ++--
 tests/conftest.py                                 |  2 +-
 tests/inference/test_execution.py                 |  6 +++---
 tests/inference/test_inference.py                 |  4 ++--
 22 files changed, 49 insertions(+), 45 deletions(-)

diff --git a/.ci/update_windows/update.py b/.ci/update_windows/update.py
index 59bee980448..731b6bc5382 100755
--- a/.ci/update_windows/update.py
+++ b/.ci/update_windows/update.py
@@ -28,7 +28,7 @@ def pull(repo, remote_name='origin', branch='master'):
 
         if repo.index.conflicts is not None:
             for conflict in repo.index.conflicts:
-                print('Conflicts found in:', conflict[0].path)
+                print('Conflicts found in:', conflict[0].path) # noqa: T201
             raise AssertionError('Conflicts, ahhhhh!!')
 
         user = repo.default_signature
@@ -49,18 +49,18 @@ def pull(repo, remote_name='origin', branch='master'):
 repo = pygit2.Repository(repo_path)
 ident = pygit2.Signature('comfyui', 'comfy@ui')
 try:
-    print("stashing current changes")
+    print("stashing current changes") # noqa: T201
     repo.stash(ident)
 except KeyError:
-    print("nothing to stash")
+    print("nothing to stash") # noqa: T201
 backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
-print("creating backup branch: {}".format(backup_branch_name))
+print("creating backup branch: {}".format(backup_branch_name)) # noqa: T201
 try:
     repo.branches.local.create(backup_branch_name, repo.head.peel())
 except:
     pass
 
-print("checking out master branch")
+print("checking out master branch") # noqa: T201
 branch = repo.lookup_branch('master')
 if branch is None:
     ref = repo.lookup_reference('refs/remotes/origin/master')
@@ -72,7 +72,7 @@ def pull(repo, remote_name='origin', branch='master'):
     ref = repo.lookup_reference(branch.name)
     repo.checkout(ref)
 
-print("pulling latest changes")
+print("pulling latest changes") # noqa: T201
 pull(repo)
 
 if "--stable" in sys.argv:
@@ -94,7 +94,7 @@ def latest_tag(repo):
 if latest_tag is not None:
     repo.checkout(latest_tag)
 
-print("Done!")
+print("Done!") # noqa: T201
 
 self_update = True
 if len(sys.argv) > 2:
diff --git a/app/user_manager.py b/app/user_manager.py
index e863b93dd29..e7381e621d9 100644
--- a/app/user_manager.py
+++ b/app/user_manager.py
@@ -38,8 +38,8 @@ def __init__(self):
         if not os.path.exists(user_directory):
             os.makedirs(user_directory, exist_ok=True)
             if not args.multi_user:
-                print("****** User settings have been changed to be stored on the server instead of browser storage. ******")
-                print("****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. ******")
+                logging.warning("****** User settings have been changed to be stored on the server instead of browser storage. ******")
+                logging.warning("****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. ******")
 
         if args.multi_user:
             if os.path.isfile(self.get_users_file()):
diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py
index f12cd6eeba2..ec01665e218 100644
--- a/comfy/cldm/cldm.py
+++ b/comfy/cldm/cldm.py
@@ -160,7 +160,6 @@ def __init__(
             if isinstance(self.num_classes, int):
                 self.label_emb = nn.Embedding(num_classes, time_embed_dim)
             elif self.num_classes == "continuous":
-                print("setting up linear c_adm embedding layer")
                 self.label_emb = nn.Linear(1, time_embed_dim)
             elif self.num_classes == "sequential":
                 assert adm_in_channels is not None
diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py
index 18ff926635b..b61baaa8e5e 100644
--- a/comfy/extra_samplers/uni_pc.py
+++ b/comfy/extra_samplers/uni_pc.py
@@ -2,6 +2,7 @@
 
 import torch
 import math
+import logging
 
 from tqdm.auto import trange
 
@@ -474,7 +475,7 @@ def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **k
             return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
 
     def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
-        print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
+        logging.info(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
         ns = self.noise_schedule
         assert order <= len(model_prev_list)
 
@@ -518,7 +519,6 @@ def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order
             A_p = C_inv_p
 
         if use_corrector:
-            print('using corrector')
             C_inv = torch.linalg.inv(C)
             A_c = C_inv
 
diff --git a/comfy/hooks.py b/comfy/hooks.py
index 356b7d65b4d..b6f0ac213bd 100644
--- a/comfy/hooks.py
+++ b/comfy/hooks.py
@@ -5,6 +5,7 @@
 import torch
 import numpy as np
 import itertools
+import logging
 
 if TYPE_CHECKING:
     from comfy.model_patcher import ModelPatcher, PatcherInjection
@@ -575,7 +576,7 @@ def load_hook_lora_for_models(model: 'ModelPatcher', clip: 'CLIP', lora: dict[st
     k1 = set(k1)
     for x in loaded:
         if (x not in k) and (x not in k1):
-            print(f"NOT LOADED {x}")
+            logging.warning(f"NOT LOADED {x}")
     return (new_modelpatcher, new_clip, hook_group)
 
 def _combine_hooks_from_values(c_dict: dict[str, HookGroup], values: dict[str, HookGroup], cache: dict[tuple[HookGroup, HookGroup], HookGroup]):
diff --git a/comfy/ldm/aura/mmdit.py b/comfy/ldm/aura/mmdit.py
index 7792151aa61..1258ae11fd0 100644
--- a/comfy/ldm/aura/mmdit.py
+++ b/comfy/ldm/aura/mmdit.py
@@ -381,7 +381,6 @@ def extend_pe(self, init_dim=(16, 16), target_dim=(64, 64)):
         pe_new = pe_as_2d.squeeze(0).permute(1, 2, 0).flatten(0, 1)
         self.positional_encoding.data = pe_new.unsqueeze(0).contiguous()
         self.h_max, self.w_max = target_dim
-        print("PE extended to", target_dim)
 
     def pe_selection_index_based_on_dim(self, h, w):
         h_p, w_p = h // self.patch_size, w // self.patch_size
diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py
index 9377b0737fb..233011dc952 100644
--- a/comfy/ldm/modules/diffusionmodules/util.py
+++ b/comfy/ldm/modules/diffusionmodules/util.py
@@ -9,6 +9,7 @@
 
 
 import math
+import logging
 import torch
 import torch.nn as nn
 import numpy as np
@@ -130,7 +131,7 @@ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timestep
     # add one to get the final alpha values right (the ones from first scale to data during sampling)
     steps_out = ddim_timesteps + 1
     if verbose:
-        print(f'Selected timesteps for ddim sampler: {steps_out}')
+        logging.info(f'Selected timesteps for ddim sampler: {steps_out}')
     return steps_out
 
 
@@ -142,8 +143,8 @@ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
     # according the the formula provided in https://arxiv.org/abs/2010.02502
     sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
     if verbose:
-        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
-        print(f'For the chosen value of eta, which is {eta}, '
+        logging.info(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+        logging.info(f'For the chosen value of eta, which is {eta}, '
               f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
     return sigmas, alphas, alphas_prev
 
diff --git a/comfy/ldm/util.py b/comfy/ldm/util.py
index fdd8b84a258..2ed4aa2aba5 100644
--- a/comfy/ldm/util.py
+++ b/comfy/ldm/util.py
@@ -1,4 +1,5 @@
 import importlib
+import logging
 
 import torch
 from torch import optim
@@ -23,7 +24,7 @@ def log_txt_as_img(wh, xc, size=10):
         try:
             draw.text((0, 0), lines, fill="black", font=font)
         except UnicodeEncodeError:
-            print("Cant encode string for logging. Skipping.")
+            logging.warning("Cant encode string for logging. Skipping.")
 
         txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
         txts.append(txt)
@@ -65,7 +66,7 @@ def mean_flat(tensor):
 def count_params(model, verbose=False):
     total_params = sum(p.numel() for p in model.parameters())
     if verbose:
-        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
+        logging.info(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
     return total_params
 
 
diff --git a/comfy/model_base.py b/comfy/model_base.py
index 99c53e57df6..af3f0f1473c 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -770,7 +770,6 @@ def concat_cond(self, **kwargs):
             mask = torch.ones_like(noise)[:, :1]
 
         mask = torch.mean(mask, dim=1, keepdim=True)
-        print(mask.shape)
         mask = utils.common_upscale(mask.to(device), noise.shape[-1] * 8, noise.shape[-2] * 8, "bilinear", "center")
         mask = mask.view(mask.shape[0], mask.shape[2] // 8, 8, mask.shape[3] // 8, 8).permute(0, 2, 4, 1, 3).reshape(mask.shape[0], -1, mask.shape[2] // 8, mask.shape[3] // 8)
         mask = utils.resize_to_batch_size(mask, noise.shape[0])
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 6f667dfc57e..2cbdc73924b 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1084,7 +1084,7 @@ def unload_all_models():
 
 
 def resolve_lowvram_weight(weight, model, key): #TODO: remove
-    print("WARNING: The comfy.model_management.resolve_lowvram_weight function will be removed soon, please stop using it.")
+    logging.warning("The comfy.model_management.resolve_lowvram_weight function will be removed soon, please stop using it.")
     return weight
 
 #TODO: might be cleaner to put this somewhere else
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index fb651242b9c..13684da7efb 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -773,7 +773,7 @@ def current_loaded_device(self):
         return self.model.device
 
     def calculate_weight(self, patches, weight, key, intermediate_dtype=torch.float32):
-        print("WARNING the ModelPatcher.calculate_weight function is deprecated, please use: comfy.lora.calculate_weight instead")
+        logging.warning("The ModelPatcher.calculate_weight function is deprecated, please use: comfy.lora.calculate_weight instead")
         return comfy.lora.calculate_weight(patches, weight, key, intermediate_dtype=intermediate_dtype)
 
     def cleanup(self):
@@ -1029,7 +1029,7 @@ def patch_hooks(self, hooks: comfy.hooks.HookGroup):
         if cached_weights is not None:
             for key in cached_weights:
                 if key not in model_sd_keys:
-                    print(f"WARNING cached hook could not patch. key does not exist in model: {key}")
+                    logging.warning(f"Cached hook could not patch. Key does not exist in model: {key}")
                     continue
                 self.patch_cached_hook_weights(cached_weights=cached_weights, key=key, memory_counter=memory_counter)
         else:
@@ -1039,7 +1039,7 @@ def patch_hooks(self, hooks: comfy.hooks.HookGroup):
             original_weights = self.get_key_patches()
             for key in relevant_patches:
                 if key not in model_sd_keys:
-                    print(f"WARNING cached hook would not patch. key does not exist in model: {key}")
+                    logging.warning(f"Cached hook would not patch. Key does not exist in model: {key}")
                     continue
                 self.patch_hook_weight_to_device(hooks=hooks, combined_patches=relevant_patches, key=key, original_weights=original_weights, memory_counter=memory_counter)
 
diff --git a/comfy/sd.py b/comfy/sd.py
index fef38294687..f79eacc2454 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -940,11 +940,11 @@ def load_diffusion_model(unet_path, model_options={}):
     return model
 
 def load_unet(unet_path, dtype=None):
-    print("WARNING: the load_unet function has been deprecated and will be removed please switch to: load_diffusion_model")
+    logging.warning("The load_unet function has been deprecated and will be removed please switch to: load_diffusion_model")
     return load_diffusion_model(unet_path, model_options={"dtype": dtype})
 
 def load_unet_state_dict(sd, dtype=None):
-    print("WARNING: the load_unet_state_dict function has been deprecated and will be removed please switch to: load_diffusion_model_state_dict")
+    logging.warning("The load_unet_state_dict function has been deprecated and will be removed please switch to: load_diffusion_model_state_dict")
     return load_diffusion_model_state_dict(sd, model_options={"dtype": dtype})
 
 def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None, extra_keys={}):
diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 4845406de6a..95d41c30fcc 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -41,8 +41,7 @@ def encode_token_weights(self, token_weight_pairs):
                 to_encode.append(self.gen_empty_tokens(self.special_tokens, max_token_len))
             else:
                 to_encode.append(gen_empty_tokens(self.special_tokens, max_token_len))
-        print(to_encode)
-
+
         o = self.encode(to_encode)
         out, pooled = o[:2]
 
diff --git a/comfy_extras/chainner_models/model_loading.py b/comfy_extras/chainner_models/model_loading.py
index d48bc238ccc..1bec4476f61 100644
--- a/comfy_extras/chainner_models/model_loading.py
+++ b/comfy_extras/chainner_models/model_loading.py
@@ -1,5 +1,6 @@
+import logging
 from spandrel import ModelLoader
 
 def load_state_dict(state_dict):
-    print("WARNING: comfy_extras.chainner_models is deprecated and has been replaced by the spandrel library.")
+    logging.warning("comfy_extras.chainner_models is deprecated and has been replaced by the spandrel library.")
     return ModelLoader().load_from_state_dict(state_dict).eval()
diff --git a/comfy_extras/nodes_hooks.py b/comfy_extras/nodes_hooks.py
index d0cb6990206..27fe3c423c1 100644
--- a/comfy_extras/nodes_hooks.py
+++ b/comfy_extras/nodes_hooks.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 from typing import TYPE_CHECKING, Union
+import logging
 import torch
 from collections.abc import Iterable
 
@@ -539,7 +540,7 @@ def create_hook_keyframes(self, strength_start: float, strength_end: float, inte
             is_first = False
             prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
         if print_keyframes:
-            print(f"Hook Keyframe - start_percent:{percent} = {strength}")
+            logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
         return (prev_hook_kf,)
 
 class CreateHookKeyframesFromFloats:
@@ -588,7 +589,7 @@ def create_hook_keyframes(self, floats_strength: Union[float, list[float]],
             is_first = False
             prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
         if print_keyframes:
-            print(f"Hook Keyframe - start_percent:{percent} = {strength}")
+            logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
         return (prev_hook_kf,)
 #------------------------------------------
 ###########################################
diff --git a/main.py b/main.py
index 9d163263324..b65046535c8 100644
--- a/main.py
+++ b/main.py
@@ -63,7 +63,7 @@ def execute_script(script_path):
             spec.loader.exec_module(module)
             return True
         except Exception as e:
-            print(f"Failed to execute startup-script: {script_path} / {e}")
+            logging.error(f"Failed to execute startup-script: {script_path} / {e}")
         return False
 
     if args.disable_all_custom_nodes:
@@ -85,14 +85,14 @@ def execute_script(script_path):
                 success = execute_script(script_path)
                 node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
     if len(node_prestartup_times) > 0:
-        print("\nPrestartup times for custom nodes:")
+        logging.info("\nPrestartup times for custom nodes:")
         for n in sorted(node_prestartup_times):
             if n[2]:
                 import_message = ""
             else:
                 import_message = " (PRESTARTUP FAILED)"
-            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
-        print()
+            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
+        logging.info("")
 
 apply_custom_paths()
 execute_prestartup_script()
diff --git a/new_updater.py b/new_updater.py
index a49e0877cb1..9a203acdd71 100644
--- a/new_updater.py
+++ b/new_updater.py
@@ -32,4 +32,4 @@ def update_windows_updater():
     except:
         pass
     shutil.copy(bat_path, dest_bat_path)
-    print("Updated the windows standalone package updater.")
+    print("Updated the windows standalone package updater.") # noqa: T201
diff --git a/ruff.toml b/ruff.toml
index a83d450b12a..c354505f8cf 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -4,7 +4,10 @@ lint.ignore = ["ALL"]
 # Enable specific rules
 lint.select = [
     "S307", # suspicious-eval-usage
+    "T201", # print-usage
     # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
     # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
     "F",
-]
\ No newline at end of file
+]
+
+exclude = ["*.ipynb"]
diff --git a/tests-unit/server/routes/internal_routes_test.py b/tests-unit/server/routes/internal_routes_test.py
index 4fe544249aa..68c8466521a 100644
--- a/tests-unit/server/routes/internal_routes_test.py
+++ b/tests-unit/server/routes/internal_routes_test.py
@@ -89,9 +89,9 @@ async def test_routes_added_to_app(aiohttp_client_factory, internal_routes):
     client = await aiohttp_client_factory()
     try:
         resp = await client.get('/files')
-        print(f"Response received: status {resp.status}")
+        print(f"Response received: status {resp.status}") # noqa: T201
     except Exception as e:
-        print(f"Exception occurred during GET request: {e}")
+        print(f"Exception occurred during GET request: {e}") # noqa: T201
         raise
 
     assert resp.status != 404, "Route /files does not exist"
diff --git a/tests/conftest.py b/tests/conftest.py
index 1a35880af5b..bddfb6e1543 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -28,7 +28,7 @@ def pytest_collection_modifyitems(items):
     last_items = []
     for test_name in LAST_TESTS:
         for item in items.copy():
-            print(item.module.__name__, item)
+            print(item.module.__name__, item) # noqa: T201
             if item.module.__name__ == test_name:
                 last_items.append(item)
                 items.remove(item)
diff --git a/tests/inference/test_execution.py b/tests/inference/test_execution.py
index 3909ca68de9..ca880abd23b 100644
--- a/tests/inference/test_execution.py
+++ b/tests/inference/test_execution.py
@@ -134,7 +134,7 @@ def _server(self, args_pytest, request):
         use_lru, lru_size = request.param
         if use_lru:
             pargs += ['--cache-lru', str(lru_size)]
-        print("Running server with args:", pargs)
+        print("Running server with args:", pargs) # noqa: T201
         p = subprocess.Popen(pargs)
         yield
         p.kill()
@@ -150,8 +150,8 @@ def start_client(self, listen:str, port:int):
             try:
                 comfy_client.connect(listen=listen, port=port)
             except ConnectionRefusedError as e:
-                print(e)
-                print(f"({i+1}/{n_tries}) Retrying...")
+                print(e) # noqa: T201
+                print(f"({i+1}/{n_tries}) Retrying...") # noqa: T201
             else:
                 break
         return comfy_client
diff --git a/tests/inference/test_inference.py b/tests/inference/test_inference.py
index 1db3c06fb0c..d9a20c475b1 100644
--- a/tests/inference/test_inference.py
+++ b/tests/inference/test_inference.py
@@ -171,8 +171,8 @@ def start_client(self, listen:str, port:int):
             try:
                 comfy_client.connect(listen=listen, port=port)
             except ConnectionRefusedError as e:
-                print(e)
-                print(f"({i+1}/{n_tries}) Retrying...")
+                print(e) # noqa: T201
+                print(f"({i+1}/{n_tries}) Retrying...") # noqa: T201
             else:
                 break
         return comfy_client