Commit

wip
jaydrennan committed Mar 15, 2024
1 parent f0b7f31 commit 7a2efb3
Showing 4 changed files with 133 additions and 48 deletions.
1 change: 1 addition & 0 deletions imaginairy/api/generate_refiners.py
@@ -97,6 +97,7 @@ def latent_logger(latents):
weights_config=prompt.model_weights,
for_inpainting=prompt.should_use_inpainting
and prompt.inpaint_method == "finetune",
for_ipadapter=prompt.should_use_ipadapter,
dtype=dtype,
)
lc.model = sd
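
For context, a minimal sketch of the full call after this change (all values other than the new flag appear in the hunk above; nothing here is a new API). The prompt's should_use_ipadapter property, added in imaginairy/schema.py below, decides whether the IP-Adapter branch in model_manager.py is taken.

# Sketch of the updated call site in generate_refiners.py.
# should_use_ipadapter is True when the prompt has an image_prompt set
# (see the schema.py change below).
sd = get_diffusion_model_refiners(
    weights_config=prompt.model_weights,
    for_inpainting=prompt.should_use_inpainting
    and prompt.inpaint_method == "finetune",
    for_ipadapter=prompt.should_use_ipadapter,  # new flag in this commit
    dtype=dtype,
)
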
114 changes: 95 additions & 19 deletions imaginairy/cli/shared.py
@@ -102,6 +102,7 @@ def _imagine_cmd(
configure_logging(log_level)

init_images = [init_image] if isinstance(init_image, str) else init_image
image_prompts = [image_prompt] if isinstance(image_prompt, str) else image_prompt

from imaginairy.utils import glob_expand_paths

@@ -112,6 +113,13 @@
msg = f"Could not find any images matching the glob pattern(s) {init_image}. Are you sure the file(s) exists?"
raise ValueError(msg)

num_preexpanded_image_prompts = len(image_prompts)
image_prompts = glob_expand_paths(image_prompts)

if len(image_prompts) < num_preexpanded_image_prompts:
msg = f"Could not find any images matching the glob pattern(s) {image_prompt}. Are you sure the file(s) exists?"
raise ValueError(msg)

total_image_count = len(prompt_texts) * max(len(init_images), 1) * repeats
img_msg = ""
if len(init_images) > 0:
@@ -123,27 +131,32 @@
from imaginairy.api import imagine_image_files
from imaginairy.schema import ImaginePrompt, LazyLoadingImage

new_init_images = []
for _init_image in init_images:
if _init_image and _init_image.startswith("http"):
_init_image = LazyLoadingImage(url=_init_image)
elif _init_image.startswith("textimg="):
from imaginairy.utils import named_resolutions
from imaginairy.utils.text_image import image_from_textimg_str

resolved_width, resolved_height = named_resolutions.normalize_image_size(
size
)
_init_image = image_from_textimg_str(
_init_image, resolved_width, resolved_height
)
else:
_init_image = LazyLoadingImage(filepath=_init_image)
new_init_images.append(_init_image)
init_images = new_init_images
# new_init_images = []
# for _init_image in init_images:
# if _init_image and _init_image.startswith("http"):
# _init_image = LazyLoadingImage(url=_init_image)
# elif _init_image.startswith("textimg="):
# from imaginairy.utils import named_resolutions
# from imaginairy.utils.text_image import image_from_textimg_str
#
# resolved_width, resolved_height = named_resolutions.normalize_image_size(
# size
# )
# _init_image = image_from_textimg_str(
# _init_image, resolved_width, resolved_height
# )
# else:
# _init_image = LazyLoadingImage(filepath=_init_image)
# new_init_images.append(_init_image)
# init_images = new_init_images
init_images = images_to_lazyloaders(init_images, size)
if not init_images:
init_images = [None]

image_prompts = images_to_lazyloaders(image_prompts, size)
if not image_prompts:
image_prompts = [None]

if mask_image:
if mask_image.startswith("http"):
mask_image = LazyLoadingImage(url=mask_image)
@@ -188,6 +201,46 @@ def _imagine_cmd(
prompt_strength=prompt_strength,
init_image=_init_image,
init_image_strength=init_image_strength,
# image_prompt=image_prompt,
# image_prompt_strength=image_prompt_strength,
control_inputs=control_inputs,
seed=seed,
solver_type=solver,
steps=steps,
size=size,
mask_image=mask_image,
mask_prompt=mask_prompt,
mask_mode=mask_mode,
mask_modify_original=mask_modify_original,
outpaint=outpaint,
upscale=upscale,
fix_faces=fix_faces,
fix_faces_fidelity=fix_faces_fidelity,
tile_mode=_tile_mode,
allow_compose_phase=allow_compose_phase,
model_weights=model_weights_path,
caption_text=caption_text,
composition_strength=composition_strength,
)
from imaginairy.utils.prompt_schedules import (
parse_schedule_strs,
prompt_mutator,
)

if arg_schedules:
schedules = parse_schedule_strs(arg_schedules)
for new_prompt in prompt_mutator(prompt, schedules):
prompts.append(new_prompt)
else:
prompts.append(prompt)

for image_prompt in image_prompts:
prompt = ImaginePrompt(
prompt=next(prompt_iterator),
negative_prompt=negative_prompt,
prompt_strength=prompt_strength,
# init_image=_init_image,
# init_image_strength=init_image_strength,
image_prompt=image_prompt,
image_prompt_strength=image_prompt_strength,
control_inputs=control_inputs,
@@ -256,6 +309,29 @@ def _imagine_cmd(
logger.info(f"[compilation] saved to: {new_filename}")


def images_to_lazyloaders(images, size):
"""Convert image paths, URLs, and textimg= strings into lazily loaded image objects."""
from imaginairy.schema import LazyLoadingImage

lazyloaders = []

for image in images:
if image and image.startswith("http"):
image = LazyLoadingImage(url=image)
elif image.startswith("textimg="):
from imaginairy.utils import named_resolutions
from imaginairy.utils.text_image import image_from_textimg_str

resolved_width, resolved_height = named_resolutions.normalize_image_size(
size
)
image = image_from_textimg_str(
image, resolved_width, resolved_height
)
else:
image = LazyLoadingImage(filepath=image)
lazyloaders.append(image)
return lazyloaders


def add_options(options):
def _add_options(func):
for option in reversed(options):
@@ -319,7 +395,7 @@ def temp_f():
click.option(
"--image-prompt",
metavar="PATH|URL",
help="Starting image.",
help="Image to be used as part of the image and test prompt.",
multiple=True,
),
click.option(
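
A short, hypothetical usage sketch for the new images_to_lazyloaders helper above (the file names, URL, and size are placeholders, not part of the commit): local paths and URLs become LazyLoadingImage objects, while textimg= entries are rendered to an image at the resolved size.

# Hypothetical example; paths, URL, and size are made up for illustration.
init_images = images_to_lazyloaders(
    ["dog.jpg", "https://example.com/cat.png", "textimg=hello world"],
    size="512x512",
)
image_prompts = images_to_lazyloaders(["style-reference.jpg"], size="512x512")
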
4 changes: 4 additions & 0 deletions imaginairy/schema.py
@@ -782,6 +782,10 @@ def should_use_inpainting(self) -> bool:
def should_use_inpainting_weights(self) -> bool:
return self.should_use_inpainting and self.inpaint_method == "finetune"

@property
def should_use_ipadapter(self) -> bool:
return bool(self.image_prompt)

@property
def model_architecture(self) -> config.ModelArchitecture:
return self.model_weights.architecture
62 changes: 33 additions & 29 deletions imaginairy/utils/model_manager.py
@@ -191,6 +191,7 @@ def _get_diffusion_model(
def get_diffusion_model_refiners(
weights_config: iconfig.ModelWeightsConfig,
for_inpainting=False,
for_ipadapter=False,
dtype=None,
) -> LatentDiffusionModel:
"""Load a diffusion model."""
@@ -204,41 +205,44 @@ def get_diffusion_model_refiners(
# ensures a "fresh" copy that doesn't have additional injected parts
sd = sd.structural_copy()

# inject ip-adapter (img to img prompt)
from PIL import Image
if for_ipadapter:
# inject ip-adapter (img to img prompt)
from PIL import Image

from imaginairy.vendored.refiners.fluxion.utils import (
load_from_safetensors,
no_grad,
)
from imaginairy.vendored.refiners.foundationals.latent_diffusion import (
SDXLIPAdapter,
)
from imaginairy.vendored.refiners.fluxion.utils import (
load_from_safetensors,
no_grad,
)
from imaginairy.vendored.refiners.foundationals.latent_diffusion import (
SDXLIPAdapter,
)

image_prompt = Image.open(
"/imaginAIry/docs/assets/000032_337692011_PLMS40_PS7.5_a_photo_of_a_dog.jpg"
)
image_prompt = Image.open(
"/Users/jaydrennan/projects/imaginAIry/docs/assets/mona-lisa-headshot-anim.gif"
)

ip_adapter = SDXLIPAdapter(
target=sd.unet,
weights=load_from_safetensors(
"/imaginAIry/imaginairy/utils/ip-adapter_sdxl_vit-h.safetensors"
),
)
ip_adapter.clip_image_encoder.load_from_safetensors(
"/imaginAIry/imaginairy/utils/clip_image.safetensors"
)
ip_adapter.inject()
ip_adapter = SDXLIPAdapter(
target=sd.unet,
weights=load_from_safetensors(
"/imaginAIry/imaginairy/utils/ip-adapter_sdxl_vit-h.safetensors"
),
)

scale = 0.4
ip_adapter.set_scale(scale)
print(f"SCALE: {scale}")

with no_grad():
clip_image_embedding = ip_adapter.compute_clip_image_embedding(
ip_adapter.preprocess_image(image_prompt)
ip_adapter.clip_image_encoder.load_from_safetensors(
"/imaginAIry/imaginairy/utils/clip_image.safetensors"
)
ip_adapter.set_clip_image_embedding(clip_image_embedding)
ip_adapter.inject()

scale = 0.4
ip_adapter.set_scale(scale)
print(f"SCALE: {scale}")

with no_grad():
clip_image_embedding = ip_adapter.compute_clip_image_embedding(
ip_adapter.preprocess_image(image_prompt)
)
ip_adapter.set_clip_image_embedding(clip_image_embedding)

sd.set_self_attention_guidance(enable=True)

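The hunk above hard-codes the adapter weights, the CLIP image encoder weights, the prompt image, and the scale. A hedged sketch of the same wiring with those pulled out as parameters (the function name and parameter names are placeholders, not part of the commit; the calls themselves are the ones used in the hunk):

def _inject_ip_adapter(sd, image_prompt, weights_path, clip_weights_path, scale=0.4):
    # Sketch only: same sequence of calls as the WIP hunk above, with the
    # hard-coded paths, prompt image, and scale passed in instead.
    from imaginairy.vendored.refiners.fluxion.utils import (
        load_from_safetensors,
        no_grad,
    )
    from imaginairy.vendored.refiners.foundationals.latent_diffusion import (
        SDXLIPAdapter,
    )

    ip_adapter = SDXLIPAdapter(
        target=sd.unet,
        weights=load_from_safetensors(weights_path),
    )
    ip_adapter.clip_image_encoder.load_from_safetensors(clip_weights_path)
    ip_adapter.inject()
    ip_adapter.set_scale(scale)

    with no_grad():
        clip_image_embedding = ip_adapter.compute_clip_image_embedding(
            ip_adapter.preprocess_image(image_prompt)
        )
    ip_adapter.set_clip_image_embedding(clip_image_embedding)
    return sd
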
