From 7268c43d651fec8116be96b5840bde0417f75cae Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:05:27 +1000 Subject: [PATCH 01/10] feat(app): add generalized progress event Eliminate coupling on stable diffusion for progress events. Can be used for any node. --- invokeai/app/api/sockets.py | 2 + invokeai/app/services/events/events_base.py | 12 ++++++ invokeai/app/services/events/events_common.py | 38 +++++++++++++++++++ 3 files changed, 52 insertions(+) diff --git a/invokeai/app/api/sockets.py b/invokeai/app/api/sockets.py index b39922c69bf..4c6b8308497 100644 --- a/invokeai/app/api/sockets.py +++ b/invokeai/app/api/sockets.py @@ -22,6 +22,7 @@ InvocationCompleteEvent, InvocationDenoiseProgressEvent, InvocationErrorEvent, + InvocationProgressEvent, InvocationStartedEvent, ModelEventBase, ModelInstallCancelledEvent, @@ -56,6 +57,7 @@ class BulkDownloadSubscriptionEvent(BaseModel): QUEUE_EVENTS = { InvocationStartedEvent, InvocationDenoiseProgressEvent, + InvocationProgressEvent, InvocationCompleteEvent, InvocationErrorEvent, QueueItemStatusChangedEvent, diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py index bb578c23e8c..fa1edaae5a6 100644 --- a/invokeai/app/services/events/events_base.py +++ b/invokeai/app/services/events/events_base.py @@ -17,6 +17,7 @@ InvocationCompleteEvent, InvocationDenoiseProgressEvent, InvocationErrorEvent, + InvocationProgressEvent, InvocationStartedEvent, ModelInstallCancelledEvent, ModelInstallCompleteEvent, @@ -68,6 +69,17 @@ def emit_invocation_denoise_progress( """Emitted at each step during denoising of an invocation.""" self.dispatch(InvocationDenoiseProgressEvent.build(queue_item, invocation, intermediate_state, progress_image)) + def emit_invocation_progress( + self, + queue_item: "SessionQueueItem", + invocation: "BaseInvocation", + message: str, + percentage: float | None = None, + image: ProgressImage | None = None, + ) -> None: + """Emitted at periodically during an invocation""" + self.dispatch(InvocationProgressEvent.build(queue_item, invocation, message, percentage, image)) + def emit_invocation_complete( self, queue_item: "SessionQueueItem", invocation: "BaseInvocation", output: "BaseInvocationOutput" ) -> None: diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py index adcb2267995..8940bd4fe35 100644 --- a/invokeai/app/services/events/events_common.py +++ b/invokeai/app/services/events/events_common.py @@ -172,6 +172,44 @@ def calc_percentage(step: int, total_steps: int, scheduler_order: float) -> floa return (step + 1 + 1) / (total_steps + 1) +@payload_schema.register +class InvocationProgressEvent(InvocationEventBase): + """Event model for invocation_progress""" + + __event_name__ = "invocation_progress" + + message: str = Field(description="A message to display") + percentage: float | None = Field( + default=None, ge=0, le=1, description="The percentage of the progress (omit to indicate indeterminate progress)" + ) + image: ProgressImage | None = Field( + default=None, description="An image representing the current state of the progress" + ) + + @classmethod + def build( + cls, + queue_item: SessionQueueItem, + invocation: AnyInvocation, + message: str, + percentage: float | None = None, + image: ProgressImage | None = None, + ) -> "InvocationProgressEvent": + return cls( + queue_id=queue_item.queue_id, + item_id=queue_item.item_id, + batch_id=queue_item.batch_id, + 
origin=queue_item.origin, + destination=queue_item.destination, + session_id=queue_item.session_id, + invocation=invocation, + invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id], + percentage=percentage, + image=image, + message=message, + ) + + @payload_schema.register class InvocationCompleteEvent(InvocationEventBase): """Event model for invocation_complete""" From 286cb29a811c660fc1c8742a89c3d47276b592fc Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:06:23 +1000 Subject: [PATCH 02/10] feat(app): add builder method on ProgressImage --- .../session_processor_common.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/invokeai/app/services/session_processor/session_processor_common.py b/invokeai/app/services/session_processor/session_processor_common.py index 0ca51de517c..346f12d8bbc 100644 --- a/invokeai/app/services/session_processor/session_processor_common.py +++ b/invokeai/app/services/session_processor/session_processor_common.py @@ -1,5 +1,8 @@ +from PIL.Image import Image as PILImageType from pydantic import BaseModel, Field +from invokeai.backend.util.util import image_to_dataURL + class SessionProcessorStatus(BaseModel): is_started: bool = Field(description="Whether the session processor is started") @@ -15,6 +18,16 @@ class CanceledException(Exception): class ProgressImage(BaseModel): """The progress image sent intermittently during processing""" - width: int = Field(description="The effective width of the image in pixels") - height: int = Field(description="The effective height of the image in pixels") + width: int = Field(ge=1, description="The effective width of the image in pixels") + height: int = Field(ge=1, description="The effective height of the image in pixels") dataURL: str = Field(description="The image data as a b64 data URL") + + @classmethod + def build(cls, image: PILImageType, size: tuple[int, int] | None = None) -> "ProgressImage": + """Build a ProgressImage from a PIL image""" + + return cls( + width=size[0] if size else image.width, + height=size[1] if size else image.height, + dataURL=image_to_dataURL(image, image_format="JPEG"), + ) From fc1795a667f62a39f0c66676b296a64c434c3a87 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:07:31 +1000 Subject: [PATCH 03/10] feat(app): add signal_progress method to invocation API Any node can use this at any time to signal its progress to the client. The docstrings are detailed. 
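As a sketch of the intended usage (the node below is hypothetical, not part of this change), any invocation can report both determinate and indeterminate progress through the context API:

```py
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import InputField
from invokeai.app.invocations.primitives import IntegerOutput
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation("progress_demo", title="Progress Demo", version="1.0.0")
class ProgressDemoInvocation(BaseInvocation):
    """Hypothetical node demonstrating the new signal_progress API."""

    chunks: int = InputField(default=4, description="How many chunks of work to do")

    def invoke(self, context: InvocationContext) -> IntegerOutput:
        # Indeterminate progress: a message with no percentage.
        context.util.signal_progress("Preparing")
        for i in range(self.chunks):
            # Determinate progress: percentage is a float in [0, 1].
            context.util.signal_progress(f"Processing chunk {i + 1}/{self.chunks}", i / self.chunks)
            # ... one chunk of the node's real work goes here ...
        context.util.signal_progress("Done", 1.0)
        return IntegerOutput(value=self.chunks)
```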
---
 .../app/services/shared/invocation_context.py | 63 +++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py
index d8ffc1b8226..efa68ce28d0 100644
--- a/invokeai/app/services/shared/invocation_context.py
+++ b/invokeai/app/services/shared/invocation_context.py
@@ -14,6 +14,7 @@
 from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.invocation_services import InvocationServices
 from invokeai.app.services.model_records.model_records_base import UnknownModelException
+from invokeai.app.services.session_processor.session_processor_common import ProgressImage
 from invokeai.app.util.step_callback import flux_step_callback, stable_diffusion_step_callback
 from invokeai.backend.model_manager.config import (
     AnyModel,
@@ -575,6 +576,68 @@ def flux_step_callback(self, intermediate_state: PipelineIntermediateState) -> N
             is_canceled=self.is_canceled,
         )
 
+    def signal_progress(
+        self,
+        message: str,
+        percentage: float | None = None,
+        image: Image | None = None,
+        image_size: tuple[int, int] | None = None,
+    ) -> None:
+        """Signals the progress of some long-running invocation. The progress is displayed in the UI.
+
+        If a percentage is provided, the UI will display a progress bar and automatically append the percentage to the
+        message. You should not include the percentage in the message.
+
+        Example:
+        ```py
+        total_steps = 10
+        for i in range(total_steps):
+            percentage = i / (total_steps - 1)
+            context.util.signal_progress("Doing something cool", percentage)
+        ```
+
+        If an image is provided, the UI will display it. If your image should be displayed at a different size, provide
+        a tuple of `(width, height)` for the `image_size` parameter. The image will be displayed at the specified size
+        in the UI.
+
+        For example, SD denoising progress images are 1/8 the size of the original image, so you'd do this to ensure the
+        image is displayed at the correct size:
+        ```py
+        # Calculate the output size of the image (8x the progress image's size)
+        width = progress_image.width * 8
+        height = progress_image.height * 8
+        # Signal the progress with the image and output size
+        signal_progress("Denoising", percentage, progress_image, (width, height))
+        ```
+
+        If your progress image is very large, consider downscaling it to reduce the payload size and provide the original
+        size to the `image_size` parameter. The PIL `thumbnail` method is useful for this, as it maintains the aspect
+        ratio of the image:
+        ```py
+        # `thumbnail` modifies the image in-place, so we need to first make a copy
+        thumbnail_image = progress_image.copy()
+        # Resize the image to a maximum of 256x256 pixels, maintaining the aspect ratio
+        thumbnail_image.thumbnail((256, 256))
+        # Signal the progress with the thumbnail, passing the original size
+        signal_progress("Denoising", percentage, thumbnail_image, progress_image.size)
+        ```
+
+        Args:
+            message: A message describing the current status. Do not include the percentage in this message.
+            percentage: The current percentage completion for the process. Omit for indeterminate progress.
+            image: An optional image to display.
+            image_size: The optional size of the image to display. If omitted, the image will be displayed at its
+                original size.
+ """ + + self._services.events.emit_invocation_progress( + queue_item=self._data.queue_item, + invocation=self._data.invocation, + message=message, + percentage=percentage, + image=ProgressImage.build(image, image_size) if image else None, + ) + class InvocationContext: """Provides access to various services and data for the current invocation. From 33310241b9e79ca40b9408e96a3ad9beec9dd732 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:12:54 +1000 Subject: [PATCH 04/10] feat(app): use new signal_progress for denoising - Update the step callback methods in the invocation API to use the new signal_progress API - Copy and update the `calc_percentage`, reducing special handling for step and total_steps - a followup commit will fix callers of the step callbacks --- .../app/services/shared/invocation_context.py | 6 +- invokeai/app/util/step_callback.py | 66 +++++++++---------- 2 files changed, 35 insertions(+), 37 deletions(-) diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py index efa68ce28d0..60ae978c5ee 100644 --- a/invokeai/app/services/shared/invocation_context.py +++ b/invokeai/app/services/shared/invocation_context.py @@ -551,10 +551,9 @@ def sd_step_callback(self, intermediate_state: PipelineIntermediateState, base_m """ stable_diffusion_step_callback( - context_data=self._data, + signal_progress=self.signal_progress, intermediate_state=intermediate_state, base_model=base_model, - events=self._services.events, is_canceled=self.is_canceled, ) @@ -570,9 +569,8 @@ def flux_step_callback(self, intermediate_state: PipelineIntermediateState) -> N """ flux_step_callback( - context_data=self._data, + signal_progress=self.signal_progress, intermediate_state=intermediate_state, - events=self._services.events, is_canceled=self.is_canceled, ) diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index c500a872060..55e2ff667dd 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -1,16 +1,12 @@ -from typing import TYPE_CHECKING, Callable, Optional +from math import floor +from typing import Callable, Optional, TypeAlias import torch from PIL import Image -from invokeai.app.services.session_processor.session_processor_common import CanceledException, ProgressImage +from invokeai.app.services.session_processor.session_processor_common import CanceledException from invokeai.backend.model_manager.config import BaseModelType from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState -from invokeai.backend.util.util import image_to_dataURL - -if TYPE_CHECKING: - from invokeai.app.services.events.events_base import EventServiceBase - from invokeai.app.services.shared.invocation_context import InvocationContextData # fast latents preview matrix for sdxl # generated by @StAlKeR7779 @@ -75,11 +71,28 @@ def sample_to_lowres_estimated_image( return Image.fromarray(latents_ubyte.numpy()) +def calc_percentage(intermediate_state: PipelineIntermediateState) -> float: + """Calculate the percentage of completion of denoising.""" + + step = intermediate_state.step + total_steps = intermediate_state.total_steps + order = intermediate_state.order + + if total_steps == 0: + return 0.0 + if order == 2: + return floor(step / 2) / floor(total_steps / 2) + # order == 1 + return step / total_steps + + +SignalProgressFunc: TypeAlias = Callable[[str, float | None, Image.Image | None, 
tuple[int, int] | None], None] + + def stable_diffusion_step_callback( - context_data: "InvocationContextData", + signal_progress: SignalProgressFunc, intermediate_state: PipelineIntermediateState, base_model: BaseModelType, - events: "EventServiceBase", is_canceled: Callable[[], bool], ) -> None: if is_canceled(): @@ -101,24 +114,16 @@ def stable_diffusion_step_callback( v1_5_latent_rgb_factors = torch.tensor(SD1_5_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device) image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors) - (width, height) = image.size - width *= 8 - height *= 8 - - dataURL = image_to_dataURL(image, image_format="JPEG") + width = image.width * 8 + height = image.height * 8 + percentage = calc_percentage(intermediate_state) - events.emit_invocation_denoise_progress( - context_data.queue_item, - context_data.invocation, - intermediate_state, - ProgressImage(dataURL=dataURL, width=width, height=height), - ) + signal_progress("Denoising", percentage, image, (width, height)) def flux_step_callback( - context_data: "InvocationContextData", + signal_progress: SignalProgressFunc, intermediate_state: PipelineIntermediateState, - events: "EventServiceBase", is_canceled: Callable[[], bool], ) -> None: if is_canceled(): @@ -131,14 +136,9 @@ def flux_step_callback( ((latent_image + 1) / 2).clamp(0, 1).mul(0xFF) # change scale from -1..1 to 0..1 # to 0..255 ).to(device="cpu", dtype=torch.uint8) image = Image.fromarray(latents_ubyte.cpu().numpy()) - (width, height) = image.size - width *= 8 - height *= 8 - dataURL = image_to_dataURL(image, image_format="JPEG") - - events.emit_invocation_denoise_progress( - context_data.queue_item, - context_data.invocation, - intermediate_state, - ProgressImage(dataURL=dataURL, width=width, height=height), - ) + + width = image.width * 8 + height = image.height * 8 + percentage = calc_percentage(intermediate_state) + + signal_progress("Denoising", percentage, image, (width, height)) From 2af8b79598351586532c5a18c4f2dab0a334f9ae Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:17:09 +1000 Subject: [PATCH 05/10] fix(app): step callbacks for SD, FLUX, MultiDiffusion Each of these was a bit off: - The SD callback started at `-1` and ended at `i`. Combined w/ the weird math on the previous `calc_percentage` util, this caused the progress bar to never finish. - The MultiDiffusion callback had the same problems as SD. - The FLUX callback didn't emit a pre-denoising step 0 image. It also reported total_steps as 1 higher than the actual step count. 
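With these fixes, the percentage math from the previous commit works out cleanly end to end. A quick sanity check (a sketch that unpacks the new `calc_percentage` util to take plain ints; the real function takes a `PipelineIntermediateState`):

```py
from math import floor


def calc_percentage(step: int, total_steps: int, order: int) -> float:
    # Mirrors the logic of the new util in invokeai/app/util/step_callback.py.
    if total_steps == 0:
        return 0.0
    if order == 2:
        return floor(step / 2) / floor(total_steps / 2)
    return step / total_steps


# An order-1 scheduler with 10 steps now fires callbacks at step 0
# (initial latents) through step 10 (final latents):
assert calc_percentage(0, 10, 1) == 0.0   # bar starts at 0%
assert calc_percentage(5, 10, 1) == 0.5
assert calc_percentage(10, 10, 1) == 1.0  # and now actually reaches 100%
```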
Each of these now emit the expected events to the frontend: - The initial latents at 0% - Progress at each step, ending at 100% --- invokeai/backend/flux/denoise.py | 15 +++++++++++++-- .../stable_diffusion/diffusers_pipeline.py | 4 ++-- .../stable_diffusion/multi_diffusion_pipeline.py | 4 ++-- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/invokeai/backend/flux/denoise.py b/invokeai/backend/flux/denoise.py index 72f87e2aefc..ebad4cf61c0 100644 --- a/invokeai/backend/flux/denoise.py +++ b/invokeai/backend/flux/denoise.py @@ -22,7 +22,18 @@ def denoise( guidance: float, traj_guidance_extension: TrajectoryGuidanceExtension | None, # noqa: F821 ): - step = 0 + # step 0 is the initial state + total_steps = len(timesteps) - 1 + step_callback( + PipelineIntermediateState( + step=0, + order=1, + total_steps=total_steps, + timestep=int(timesteps[0]), + latents=img, + ), + ) + step = 1 # guidance_vec is ignored for schnell. guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype) for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))): @@ -49,7 +60,7 @@ def denoise( PipelineIntermediateState( step=step, order=1, - total_steps=len(timesteps), + total_steps=total_steps, timestep=int(t_curr), latents=preview_img, ), diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index b3a668518b0..646e1a92d38 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -366,7 +366,7 @@ def latents_from_embeddings( with attn_ctx: callback( PipelineIntermediateState( - step=-1, + step=0, # initial latents order=self.scheduler.order, total_steps=len(timesteps), timestep=self.scheduler.config.num_train_timesteps, @@ -395,7 +395,7 @@ def latents_from_embeddings( callback( PipelineIntermediateState( - step=i, + step=i + 1, # final latents order=self.scheduler.order, total_steps=len(timesteps), timestep=int(t), diff --git a/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py b/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py index 6c07fc1c2c8..63e74de5044 100644 --- a/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py +++ b/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py @@ -81,7 +81,7 @@ def multi_diffusion_denoise( callback( PipelineIntermediateState( - step=-1, + step=0, order=self.scheduler.order, total_steps=len(timesteps), timestep=self.scheduler.config.num_train_timesteps, @@ -182,7 +182,7 @@ def multi_diffusion_denoise( callback( PipelineIntermediateState( - step=i, + step=i + 1, order=self.scheduler.order, total_steps=len(timesteps), timestep=int(t), From 65d1ba48182d9e3d19172f08b60f805f92199c36 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:18:28 +1000 Subject: [PATCH 06/10] tidy(app): remove unused invocation_denoise_progress event This is now superseded by the invocation_progress event. 
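For any remaining callers, the migration is mechanical. A sketch, where `events`, `queue_item`, `invocation`, `intermediate_state` and `preview_pil_image` stand in for whatever the caller already has in scope:

```py
# Before: denoise-specific, coupled to PipelineIntermediateState.
events.emit_invocation_denoise_progress(queue_item, invocation, intermediate_state, progress_image)

# After: any invocation reports progress via the invocation API, which
# dispatches the generic invocation_progress event.
context.util.signal_progress("Denoising", percentage=0.5, image=preview_pil_image)
```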
--- invokeai/app/api/sockets.py | 2 - invokeai/app/services/events/events_base.py | 12 ----- invokeai/app/services/events/events_common.py | 52 ------------------- 3 files changed, 66 deletions(-) diff --git a/invokeai/app/api/sockets.py b/invokeai/app/api/sockets.py index 4c6b8308497..188f958c887 100644 --- a/invokeai/app/api/sockets.py +++ b/invokeai/app/api/sockets.py @@ -20,7 +20,6 @@ DownloadStartedEvent, FastAPIEvent, InvocationCompleteEvent, - InvocationDenoiseProgressEvent, InvocationErrorEvent, InvocationProgressEvent, InvocationStartedEvent, @@ -56,7 +55,6 @@ class BulkDownloadSubscriptionEvent(BaseModel): QUEUE_EVENTS = { InvocationStartedEvent, - InvocationDenoiseProgressEvent, InvocationProgressEvent, InvocationCompleteEvent, InvocationErrorEvent, diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py index fa1edaae5a6..3fe7c1ae945 100644 --- a/invokeai/app/services/events/events_base.py +++ b/invokeai/app/services/events/events_base.py @@ -15,7 +15,6 @@ DownloadStartedEvent, EventBase, InvocationCompleteEvent, - InvocationDenoiseProgressEvent, InvocationErrorEvent, InvocationProgressEvent, InvocationStartedEvent, @@ -31,7 +30,6 @@ QueueClearedEvent, QueueItemStatusChangedEvent, ) -from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState if TYPE_CHECKING: from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput @@ -59,16 +57,6 @@ def emit_invocation_started(self, queue_item: "SessionQueueItem", invocation: "B """Emitted when an invocation is started""" self.dispatch(InvocationStartedEvent.build(queue_item, invocation)) - def emit_invocation_denoise_progress( - self, - queue_item: "SessionQueueItem", - invocation: "BaseInvocation", - intermediate_state: PipelineIntermediateState, - progress_image: "ProgressImage", - ) -> None: - """Emitted at each step during denoising of an invocation.""" - self.dispatch(InvocationDenoiseProgressEvent.build(queue_item, invocation, intermediate_state, progress_image)) - def emit_invocation_progress( self, queue_item: "SessionQueueItem", diff --git a/invokeai/app/services/events/events_common.py b/invokeai/app/services/events/events_common.py index 8940bd4fe35..98b1ee77241 100644 --- a/invokeai/app/services/events/events_common.py +++ b/invokeai/app/services/events/events_common.py @@ -1,4 +1,3 @@ -from math import floor from typing import TYPE_CHECKING, Any, ClassVar, Coroutine, Generic, Optional, Protocol, TypeAlias, TypeVar from fastapi_events.handlers.local import local_handler @@ -16,7 +15,6 @@ from invokeai.app.services.shared.graph import AnyInvocation, AnyInvocationOutput from invokeai.app.util.misc import get_timestamp from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType -from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState if TYPE_CHECKING: from invokeai.app.services.download.download_base import DownloadJob @@ -122,56 +120,6 @@ def build(cls, queue_item: SessionQueueItem, invocation: AnyInvocation) -> "Invo ) -@payload_schema.register -class InvocationDenoiseProgressEvent(InvocationEventBase): - """Event model for invocation_denoise_progress""" - - __event_name__ = "invocation_denoise_progress" - - progress_image: ProgressImage = Field(description="The progress image sent at each step during processing") - step: int = Field(description="The current step of the invocation") - total_steps: int = Field(description="The total number of steps in the invocation") - 
order: int = Field(description="The order of the invocation in the session") - percentage: float = Field(description="The percentage of completion of the invocation") - - @classmethod - def build( - cls, - queue_item: SessionQueueItem, - invocation: AnyInvocation, - intermediate_state: PipelineIntermediateState, - progress_image: ProgressImage, - ) -> "InvocationDenoiseProgressEvent": - step = intermediate_state.step - total_steps = intermediate_state.total_steps - order = intermediate_state.order - return cls( - queue_id=queue_item.queue_id, - item_id=queue_item.item_id, - batch_id=queue_item.batch_id, - origin=queue_item.origin, - destination=queue_item.destination, - session_id=queue_item.session_id, - invocation=invocation, - invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id], - progress_image=progress_image, - step=step, - total_steps=total_steps, - order=order, - percentage=cls.calc_percentage(step, total_steps, order), - ) - - @staticmethod - def calc_percentage(step: int, total_steps: int, scheduler_order: float) -> float: - """Calculate the percentage of completion of denoising.""" - if total_steps == 0: - return 0.0 - if scheduler_order == 2: - return floor((step + 1 + 1) / 2) / floor((total_steps + 1) / 2) - # order == 1 - return (step + 1 + 1) / (total_steps + 1) - - @payload_schema.register class InvocationProgressEvent(InvocationEventBase): """Event model for invocation_progress""" From e0f3864ea362a1f8eceb3eaa9ca43bc5b45ae43d Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:21:01 +1000 Subject: [PATCH 07/10] feat(app): use new signal_progress API for spandrel nodes Both the vanilla and autoscale invocations report progress while processing each tile. The autoscale version, which may run the spandrel model multiple times, also includes the current iteration. --- .../invocations/spandrel_image_to_image.py | 59 ++++++++++++++++--- 1 file changed, 52 insertions(+), 7 deletions(-) diff --git a/invokeai/app/invocations/spandrel_image_to_image.py b/invokeai/app/invocations/spandrel_image_to_image.py index ae4f48ef77c..0aa6dd33466 100644 --- a/invokeai/app/invocations/spandrel_image_to_image.py +++ b/invokeai/app/invocations/spandrel_image_to_image.py @@ -1,3 +1,4 @@ +import functools from typing import Callable import numpy as np @@ -61,6 +62,7 @@ def upscale_image( tile_size: int, spandrel_model: SpandrelImageToImageModel, is_canceled: Callable[[], bool], + step_callback: Callable[[int, int], None], ) -> Image.Image: # Compute the image tiles. if tile_size > 0: @@ -103,7 +105,12 @@ def upscale_image( image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype) # Run the model on each tile. - for tile, scaled_tile in tqdm(list(zip(tiles, scaled_tiles, strict=True)), desc="Upscaling Tiles"): + pbar = tqdm(list(zip(tiles, scaled_tiles, strict=True)), desc="Upscaling Tiles") + + # Update progress, starting with 0. + step_callback(0, pbar.total) + + for tile, scaled_tile in pbar: # Exit early if the invocation has been canceled. if is_canceled(): raise CanceledException @@ -136,6 +143,8 @@ def upscale_image( :, ] = output_tile[top_overlap:, left_overlap:, :] + step_callback(pbar.n + 1, pbar.total) + # Convert the output tensor to a PIL image. np_image = output_tensor.detach().numpy().astype(np.uint8) pil_image = Image.fromarray(np_image) @@ -151,12 +160,20 @@ def invoke(self, context: InvocationContext) -> ImageOutput: # Load the model. 
spandrel_model_info = context.models.load(self.image_to_image_model) + def step_callback(step: int, total_steps: int) -> None: + context.util.signal_progress( + message=f"Processing tile {step}/{total_steps}", + percentage=step / total_steps, + ) + # Do the upscaling. with spandrel_model_info as spandrel_model: assert isinstance(spandrel_model, SpandrelImageToImageModel) # Upscale the image - pil_image = self.upscale_image(image, self.tile_size, spandrel_model, context.util.is_canceled) + pil_image = self.upscale_image( + image, self.tile_size, spandrel_model, context.util.is_canceled, step_callback + ) image_dto = context.images.save(image=pil_image) return ImageOutput.build(image_dto) @@ -197,12 +214,27 @@ def invoke(self, context: InvocationContext) -> ImageOutput: target_width = int(image.width * self.scale) target_height = int(image.height * self.scale) + def step_callback(iteration: int, step: int, total_steps: int) -> None: + context.util.signal_progress( + message=self._get_progress_message(iteration, step, total_steps), + percentage=step / total_steps, + ) + # Do the upscaling. with spandrel_model_info as spandrel_model: assert isinstance(spandrel_model, SpandrelImageToImageModel) + iteration = 1 + context.util.signal_progress(self._get_progress_message(iteration)) + # First pass of upscaling. Note: `pil_image` will be mutated. - pil_image = self.upscale_image(image, self.tile_size, spandrel_model, context.util.is_canceled) + pil_image = self.upscale_image( + image, + self.tile_size, + spandrel_model, + context.util.is_canceled, + functools.partial(step_callback, iteration), + ) # Some models don't upscale the image, but we have no way to know this in advance. We'll check if the model # upscaled the image and run the loop below if it did. We'll require the model to upscale both dimensions @@ -211,16 +243,22 @@ def invoke(self, context: InvocationContext) -> ImageOutput: if is_upscale_model: # This is an upscale model, so we should keep upscaling until we reach the target size. - iterations = 1 while pil_image.width < target_width or pil_image.height < target_height: - pil_image = self.upscale_image(pil_image, self.tile_size, spandrel_model, context.util.is_canceled) - iterations += 1 + iteration += 1 + context.util.signal_progress(self._get_progress_message(iteration)) + pil_image = self.upscale_image( + pil_image, + self.tile_size, + spandrel_model, + context.util.is_canceled, + functools.partial(step_callback, iteration), + ) # Sanity check to prevent excessive or infinite loops. All known upscaling models are at least 2x. # Our max scale is 16x, so with a 2x model, we should never exceed 16x == 2^4 -> 4 iterations. # We'll allow one extra iteration "just in case" and bail at 5 upscaling iterations. In practice, # we should never reach this limit. - if iterations >= 5: + if iteration >= 5: context.logger.warning( "Upscale loop reached maximum iteration count of 5, stopping upscaling early." 
) @@ -251,3 +289,10 @@ def invoke(self, context: InvocationContext) -> ImageOutput: image_dto = context.images.save(image=pil_image) return ImageOutput.build(image_dto) + + @classmethod + def _get_progress_message(cls, iteration: int, step: int | None = None, total_steps: int | None = None) -> str: + if step is not None and total_steps is not None: + return f"Processing iteration {iteration}, tile {step}/{total_steps}" + + return f"Processing iteration {iteration}" From 5ccaac29638920ba352c9c8d96fa0cf3c5602c4f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:22:57 +1000 Subject: [PATCH 08/10] fix(app): issue w/ import forward ref --- invokeai/app/services/events/events_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/services/events/events_base.py b/invokeai/app/services/events/events_base.py index 3fe7c1ae945..71afddbc257 100644 --- a/invokeai/app/services/events/events_base.py +++ b/invokeai/app/services/events/events_base.py @@ -63,7 +63,7 @@ def emit_invocation_progress( invocation: "BaseInvocation", message: str, percentage: float | None = None, - image: ProgressImage | None = None, + image: "ProgressImage | None" = None, ) -> None: """Emitted at periodically during an invocation""" self.dispatch(InvocationProgressEvent.build(queue_item, invocation, message, percentage, image)) From 5306d2d7e3e26580edc91b77f4c360f35346e5d3 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:23:13 +1000 Subject: [PATCH 09/10] chore(ui): typegen --- .../frontend/web/src/services/api/schema.ts | 144 +++++++++--------- 1 file changed, 69 insertions(+), 75 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index afb5500f3fe..b34e9589a4c 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -9325,81 +9325,6 @@ export type components = { */ result: components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["BoundingBoxCollectionOutput"] | components["schemas"]["BoundingBoxOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["FluxConditioningOutput"] | components["schemas"]["FluxLoRALoaderOutput"] | components["schemas"]["FluxModelLoaderOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["LatentsOutput"] | 
components["schemas"]["LoRALoaderOutput"] | components["schemas"]["LoRASelectorOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["ModelIdentifierOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["VAEOutput"]; }; - /** - * InvocationDenoiseProgressEvent - * @description Event model for invocation_denoise_progress - */ - InvocationDenoiseProgressEvent: { - /** - * Timestamp - * @description The timestamp of the event - */ - timestamp: number; - /** - * Queue Id - * @description The ID of the queue - */ - queue_id: string; - /** - * Item Id - * @description The ID of the queue item - */ - item_id: number; - /** - * Batch Id - * @description The ID of the queue batch - */ - batch_id: string; - /** - * Origin - * @description The origin of the queue item - * @default null - */ - origin: string | null; - /** - * Destination - * @description The destination of the queue item - * @default null - */ - destination: string | null; - /** - * Session Id - * @description The ID of the session (aka graph execution state) - */ - session_id: string; - /** - * Invocation - * @description The ID of the invocation - */ - invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | 
components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | 
components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; - /** - * Invocation Source Id - * @description The ID of the prepared invocation's source node - */ - invocation_source_id: 
string; - /** @description The progress image sent at each step during processing */ - progress_image: components["schemas"]["ProgressImage"]; - /** - * Step - * @description The current step of the invocation - */ - step: number; - /** - * Total Steps - * @description The total number of steps in the invocation - */ - total_steps: number; - /** - * Order - * @description The order of the invocation in the session - */ - order: number; - /** - * Percentage - * @description The percentage of completion of the invocation - */ - percentage: number; - }; /** * InvocationErrorEvent * @description Event model for invocation_error @@ -9647,6 +9572,75 @@ export type components = { vae_loader: components["schemas"]["VAEOutput"]; zoe_depth_image_processor: components["schemas"]["ImageOutput"]; }; + /** + * InvocationProgressEvent + * @description Event model for invocation_progress + */ + InvocationProgressEvent: { + /** + * Timestamp + * @description The timestamp of the event + */ + timestamp: number; + /** + * Queue Id + * @description The ID of the queue + */ + queue_id: string; + /** + * Item Id + * @description The ID of the queue item + */ + item_id: number; + /** + * Batch Id + * @description The ID of the queue batch + */ + batch_id: string; + /** + * Origin + * @description The origin of the queue item + * @default null + */ + origin: string | null; + /** + * Destination + * @description The destination of the queue item + * @default null + */ + destination: string | null; + /** + * Session Id + * @description The ID of the session (aka graph execution state) + */ + session_id: string; + /** + * Invocation + * @description The ID of the invocation + */ + invocation: components["schemas"]["AddInvocation"] | components["schemas"]["AlphaMaskToTensorInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BoundingBoxInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["CannyEdgeDetectionInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CanvasV2MaskAndCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ColorMapInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ContentShuffleInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["DWOpenposeDetectionInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | 
components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["DepthAnythingDepthEstimationInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["FLUXLoRACollectionLoader"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FluxDenoiseInvocation"] | components["schemas"]["FluxLoRALoaderInvocation"] | components["schemas"]["FluxModelLoaderInvocation"] | components["schemas"]["FluxTextEncoderInvocation"] | components["schemas"]["FluxVaeDecodeInvocation"] | components["schemas"]["FluxVaeEncodeInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["GroundingDinoInvocation"] | components["schemas"]["HEDEdgeDetectionInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["HeuristicResizeInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageMaskToTensorInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["InvertTensorMaskInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["LineartAnimeEdgeDetectionInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LineartEdgeDetectionInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LoRACollectionLoader"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["LoRASelectorInvocation"] | components["schemas"]["MLSDDetectionInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | 
components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["MaskTensorToImageInvocation"] | components["schemas"]["MediaPipeFaceDetectionInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["ModelIdentifierInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["NormalMapInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PiDiNetEdgeDetectionInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RectangleMaskInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLLoRACollectionLoader"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SegmentAnythingInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["SpandrelImageToImageAutoscaleInvocation"] | components["schemas"]["SpandrelImageToImageInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["TiledMultiDiffusionDenoiseLatents"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"]; + /** + * Invocation Source Id + * @description The ID of the prepared invocation's source node + */ + invocation_source_id: string; + /** + * Message + * @description A message to display + */ + message: string; + /** + * Percentage + * @description The percentage of the progress (omit to indicate indeterminate progress) + * @default null + */ + percentage: number | 
null; + /** + * @description An image representing the current state of the progress + * @default null + */ + image: components["schemas"]["ProgressImage"] | null; + }; /** * InvocationStartedEvent * @description Event model for invocation_started From 3cd8ccc668349c4941b190aac4a0b7258b935ba4 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 21 Sep 2024 19:27:04 +1000 Subject: [PATCH 10/10] feat(ui): use updated progress event in frontend --- .../konva/CanvasProgressImageModule.ts | 19 ++++++++++----- .../nodes/CurrentImage/CurrentImageNode.tsx | 10 ++------ .../src/services/events/setEventListeners.tsx | 24 ++++++++++++------- .../web/src/services/events/stores.ts | 4 ++-- .../frontend/web/src/services/events/types.ts | 2 +- 5 files changed, 33 insertions(+), 26 deletions(-) diff --git a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasProgressImageModule.ts b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasProgressImageModule.ts index efd3069dffa..4c819f7a05f 100644 --- a/invokeai/frontend/web/src/features/controlLayers/konva/CanvasProgressImageModule.ts +++ b/invokeai/frontend/web/src/features/controlLayers/konva/CanvasProgressImageModule.ts @@ -8,6 +8,11 @@ import { atom } from 'nanostores'; import type { Logger } from 'roarr'; import { selectCanvasQueueCounts } from 'services/api/endpoints/queue'; import type { S } from 'services/api/types'; +import type { O } from 'ts-toolbelt'; + +type ProgressEventWithImage = O.NonNullable; +const isProgressEventWithImage = (val: S['InvocationProgressEvent']): val is ProgressEventWithImage => + Boolean(val.image); export class CanvasProgressImageModule extends CanvasModuleBase { readonly type = 'progress_image'; @@ -26,7 +31,7 @@ export class CanvasProgressImageModule extends CanvasModuleBase { imageElement: HTMLImageElement | null = null; subscriptions = new Set<() => void>(); - $lastProgressEvent = atom(null); + $lastProgressEvent = atom(null); hasActiveGeneration: boolean = false; mutex: Mutex = new Mutex(); @@ -62,10 +67,13 @@ export class CanvasProgressImageModule extends CanvasModuleBase { } setSocketEventListeners = (): (() => void) => { - const progressListener = (data: S['InvocationDenoiseProgressEvent']) => { + const progressListener = (data: S['InvocationProgressEvent']) => { if (data.destination !== 'canvas') { return; } + if (!isProgressEventWithImage(data)) { + return; + } if (!this.hasActiveGeneration) { return; } @@ -76,13 +84,13 @@ export class CanvasProgressImageModule extends CanvasModuleBase { this.$lastProgressEvent.set(null); }; - this.manager.socket.on('invocation_denoise_progress', progressListener); + this.manager.socket.on('invocation_progress', progressListener); this.manager.socket.on('connect', clearProgress); this.manager.socket.on('connect_error', clearProgress); this.manager.socket.on('disconnect', clearProgress); return () => { - this.manager.socket.off('invocation_denoise_progress', progressListener); + this.manager.socket.off('invocation_progress', progressListener); this.manager.socket.off('connect', clearProgress); this.manager.socket.off('connect_error', clearProgress); this.manager.socket.off('disconnect', clearProgress); @@ -111,9 +119,8 @@ export class CanvasProgressImageModule extends CanvasModuleBase { this.isLoading = true; const { x, y, width, height } = this.manager.stateApi.getBbox().rect; - const { dataURL } = event.progress_image; try { - this.imageElement = await loadImage(dataURL); + this.imageElement = await 
loadImage(event.image.dataURL); if (this.konva.image) { this.konva.image.setAttrs({ image: this.imageElement, diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx index 555bcbf1230..61a97622206 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/CurrentImage/CurrentImageNode.tsx @@ -19,16 +19,10 @@ const CurrentImageNode = (props: NodeProps) => { const imageDTO = useAppSelector(selectLastSelectedImage); const lastProgressEvent = useStore($lastProgressEvent); - if (lastProgressEvent?.progress_image) { + if (lastProgressEvent?.image) { return ( - + ); } diff --git a/invokeai/frontend/web/src/services/events/setEventListeners.tsx b/invokeai/frontend/web/src/services/events/setEventListeners.tsx index 0db040667a3..c983779a9dd 100644 --- a/invokeai/frontend/web/src/services/events/setEventListeners.tsx +++ b/invokeai/frontend/web/src/services/events/setEventListeners.tsx @@ -12,7 +12,7 @@ import { zNodeStatus } from 'features/nodes/types/invocation'; import ErrorToastDescription, { getTitleFromErrorType } from 'features/toast/ErrorToastDescription'; import { toast } from 'features/toast/toast'; import { t } from 'i18next'; -import { forEach } from 'lodash-es'; +import { forEach, isNil, round } from 'lodash-es'; import { api, LIST_TAG } from 'services/api'; import { modelsApi } from 'services/api/endpoints/models'; import { queueApi, queueItemsAdapter } from 'services/api/endpoints/queue'; @@ -81,13 +81,19 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis } }); - socket.on('invocation_denoise_progress', (data) => { - const { invocation_source_id, invocation, step, total_steps, progress_image, origin, percentage } = data; + socket.on('invocation_progress', (data) => { + const { invocation_source_id, invocation, image, origin, percentage, message } = data; - log.trace( - { data } as SerializableObject, - `Denoise ${Math.round(percentage * 100)}% (${invocation.type}, ${invocation_source_id})` - ); + let _message = 'Invocation progress'; + if (message) { + _message += `: ${message}`; + } + if (!isNil(percentage)) { + _message += ` ${round(percentage * 100, 2)}%`; + } + _message += ` (${invocation.type}, ${invocation_source_id})`; + + log.trace({ data } as SerializableObject, _message); $lastProgressEvent.set(data); @@ -95,8 +101,8 @@ export const setEventListeners = ({ socket, store, setIsConnected }: SetEventLis const nes = deepClone($nodeExecutionStates.get()[invocation_source_id]); if (nes) { nes.status = zNodeStatus.enum.IN_PROGRESS; - nes.progress = (step + 1) / total_steps; - nes.progressImage = progress_image ?? null; + nes.progress = percentage; + nes.progressImage = image ?? 
null;
         upsertExecutionState(nes.nodeId, nes);
       }
     }
   });
diff --git a/invokeai/frontend/web/src/services/events/stores.ts b/invokeai/frontend/web/src/services/events/stores.ts
index 9144cb957ef..c951e5f9f83 100644
--- a/invokeai/frontend/web/src/services/events/stores.ts
+++ b/invokeai/frontend/web/src/services/events/stores.ts
@@ -6,7 +6,7 @@ import type { ManagerOptions, SocketOptions } from 'socket.io-client';
 export const $socket = atom(null);
 export const $socketOptions = map<Partial<ManagerOptions & SocketOptions>>({});
 export const $isConnected = atom(false);
-export const $lastProgressEvent = atom<S['InvocationDenoiseProgressEvent'] | null>(null);
+export const $lastProgressEvent = atom<S['InvocationProgressEvent'] | null>(null);
 export const $hasProgress = computed($lastProgressEvent, (val) => Boolean(val));
-export const $progressImage = computed($lastProgressEvent, (val) => val?.progress_image ?? null);
+export const $progressImage = computed($lastProgressEvent, (val) => val?.image ?? null);
 export const $isProgressFromCanvas = computed($lastProgressEvent, (val) => val?.destination === 'canvas');
diff --git a/invokeai/frontend/web/src/services/events/types.ts b/invokeai/frontend/web/src/services/events/types.ts
index 714d94eb77f..52d9710b3c2 100644
--- a/invokeai/frontend/web/src/services/events/types.ts
+++ b/invokeai/frontend/web/src/services/events/types.ts
@@ -7,7 +7,7 @@ type ClientEmitSubscribeBulkDownload = { bulk_download_id: string };
 type ClientEmitUnsubscribeBulkDownload = ClientEmitSubscribeBulkDownload;
 
 export type ServerToClientEvents = {
-  invocation_denoise_progress: (payload: S['InvocationDenoiseProgressEvent']) => void;
+  invocation_progress: (payload: S['InvocationProgressEvent']) => void;
   invocation_complete: (payload: S['InvocationCompleteEvent']) => void;
   invocation_error: (payload: S['InvocationErrorEvent']) => void;
   invocation_started: (payload: S['InvocationStartedEvent']) => void;
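End to end, the series gives this flow: a node calls `context.util.signal_progress(...)`, the backend dispatches an `invocation_progress` event, and the frontend handlers above update `$lastProgressEvent`, node execution state, and the canvas preview. A representative payload, matching the `InvocationProgressEvent` schema from patches 01 and 09 (all values below are hypothetical, and the large `invocation` field is elided):

```py
payload = {
    "timestamp": 1726900000,
    "queue_id": "default",
    "item_id": 123,
    "batch_id": "hypothetical-batch-id",
    "origin": None,
    "destination": "canvas",  # the canvas progress module filters on this
    "session_id": "hypothetical-session-id",
    "invocation_source_id": "denoise_latents",
    "message": "Denoising",
    "percentage": 0.5,  # None would render an indeterminate progress bar
    "image": {  # a ProgressImage, or None when there is no preview
        "width": 512,
        "height": 512,
        "dataURL": "data:image/jpeg;base64,...",
    },
}
```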