Merge pull request #2439 from lllyasviel/develop
release 2.2.1
mashb1t authored Mar 4, 2024
2 parents 4ea3baf + ee96b85 commit 28cdc2f
Showing 10 changed files with 42 additions and 39 deletions.
2 changes: 1 addition & 1 deletion fooocus_version.py
@@ -1 +1 @@
version = '2.2.0'
version = '2.2.1'
2 changes: 1 addition & 1 deletion language/en.json
@@ -379,7 +379,7 @@
"Metadata": "Metadata",
"Apply Metadata": "Apply Metadata",
"Metadata Scheme": "Metadata Scheme",
"Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.",
"Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
"fooocus (json)": "fooocus (json)",
"a1111 (plain text)": "a1111 (plain text)"
}
36 changes: 20 additions & 16 deletions modules/async_worker.py
@@ -830,17 +830,21 @@ def callback(step, x0, x, total_steps, y):
('Negative Prompt', 'negative_prompt', task['log_negative_prompt']),
('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']),
('Styles', 'styles', str(raw_style_selections)),
('Performance', 'performance', performance_selection.value),
('Resolution', 'resolution', str((width, height))),
('Guidance Scale', 'guidance_scale', guidance_scale),
('Sharpness', 'sharpness', sharpness),
('ADM Guidance', 'adm_guidance', str((
modules.patch.patch_settings[pid].positive_adm_scale,
modules.patch.patch_settings[pid].negative_adm_scale,
modules.patch.patch_settings[pid].adm_scaler_end))),
('Base Model', 'base_model', base_model_name),
('Refiner Model', 'refiner_model', refiner_model_name),
('Refiner Switch', 'refiner_switch', refiner_switch)]
('Performance', 'performance', performance_selection.value)]

if performance_selection.steps() != steps:
d.append(('Steps', 'steps', steps))

d += [('Resolution', 'resolution', str((width, height))),
('Guidance Scale', 'guidance_scale', guidance_scale),
('Sharpness', 'sharpness', sharpness),
('ADM Guidance', 'adm_guidance', str((
modules.patch.patch_settings[pid].positive_adm_scale,
modules.patch.patch_settings[pid].negative_adm_scale,
modules.patch.patch_settings[pid].adm_scaler_end))),
('Base Model', 'base_model', base_model_name),
('Refiner Model', 'refiner_model', refiner_model_name),
('Refiner Switch', 'refiner_switch', refiner_switch)]

if refiner_model_name != 'None':
if overwrite_switch > 0:
@@ -857,17 +861,17 @@ def callback(step, x0, x, total_steps, y):
if freeu_enabled:
d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2))))

for li, (n, w) in enumerate(loras):
if n != 'None':
d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}'))

metadata_parser = None
if save_metadata_to_images:
metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
metadata_parser.set_data(task['log_positive_prompt'], task['positive'],
task['log_negative_prompt'], task['negative'],
steps, base_model_name, refiner_model_name, loras)

for li, (n, w) in enumerate(loras):
if n != 'None':
d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}'))

d.append(('Metadata Scheme', 'metadata_scheme', metadata_scheme.value if save_metadata_to_images else save_metadata_to_images))
d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version))
img_paths.append(log(x, d, metadata_parser, output_format))

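The async_worker.py change above splits the metadata list d so that a 'Steps' entry is logged only when the effective step count differs from the performance preset's default, and it appends the LoRA entries before the metadata parser is configured. A minimal standalone sketch of the conditional-append pattern; preset_steps and user_steps are hypothetical stand-ins for performance_selection.steps() and the resolved steps value:

# Sketch of the steps-override logging introduced in this hunk (assumed values).
preset_steps = 30   # stand-in for performance_selection.steps()
user_steps = 24     # stand-in for the resolved `steps` after any override

d = [('Performance', 'performance', 'Speed')]

# Record 'Steps' only when it deviates from the preset default,
# so default runs keep the shorter history-log entry.
if preset_steps != user_steps:
    d.append(('Steps', 'steps', user_steps))

d += [('Resolution', 'resolution', str((1152, 896)))]
print(d)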
2 changes: 1 addition & 1 deletion modules/config.py
@@ -264,7 +264,7 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_
)
default_max_lora_number = get_config_item_or_set_default(
key='default_max_lora_number',
default_value=len(default_loras),
default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5,
validator=lambda x: isinstance(x, int) and x >= 1
)
default_cfg_scale = get_config_item_or_set_default(
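The config.py change hardens the default for default_max_lora_number: instead of always taking len(default_loras), which is 0 for an empty list, it falls back to 5 LoRA slots whenever default_loras is empty or not a list. A small sketch of just that fallback expression; max_lora_default is a hypothetical helper and the sample inputs are made up, while the isinstance check and the value 5 come from the diff:

def max_lora_default(default_loras):
    # Non-empty list: one slot per configured LoRA, as before.
    # Anything else (None, [], malformed value): fall back to 5 slots.
    return len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5

print(max_lora_default([('None', 1.0)] * 5))  # 5
print(max_lora_default([]))                   # 5 (len([]) would have been 0)
print(max_lora_default(None))                 # 5 (len(None) would have raised TypeError)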
14 changes: 4 additions & 10 deletions modules/html.py
@@ -112,10 +112,6 @@
margin-left: -5px !important;
}
.lora_enable {
flex-grow: 1 !important;
}
.lora_enable label {
height: 100%;
}
@@ -128,12 +124,10 @@
display: none;
}
.lora_model {
flex-grow: 5 !important;
}
.lora_weight {
flex-grow: 5 !important;
@-moz-document url-prefix() {
.lora_weight input[type=number] {
width: 80px;
}
}
'''
4 changes: 2 additions & 2 deletions modules/private_logger.py
@@ -26,7 +26,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)

parsed_parameters = metadata_parser.parse_string(metadata) if metadata_parser is not None else ''
parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
image = Image.fromarray(img)

if output_format == 'png':
@@ -90,7 +90,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
</script>"""
)

begin_part = f"<!DOCTYPE html><html><head><title>Fooocus Log {date_string}</title>{css_styles}</head><body>{js}<p>Fooocus Log {date_string} (private)</p>\n<p>All images are clean, without any hidden data/meta, and safe to share with others.</p><!--fooocus-log-split-->\n\n"
begin_part = f"<!DOCTYPE html><html><head><title>Fooocus Log {date_string}</title>{css_styles}</head><body>{js}<p>Fooocus Log {date_string} (private)</p>\n<p>Metadata is embedded if enabled in the config or developer debug mode. You can find the information for each image in line Metadata Scheme.</p><!--fooocus-log-split-->\n\n"
end_part = f'\n<!--fooocus-log-split--></body></html>'

middle_part = log_cache.get(html_name, "")
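In private_logger.py, parse_string now receives metadata.copy() rather than the list itself, presumably so the parser can consume or reorder its input without disturbing the d list that the HTML log still iterates afterwards. A generic illustration of why the defensive copy matters; the consuming parser below is hypothetical and not the real MetadataParser API:

def parse_string(items):
    # Hypothetical parser that destructively pops entries from its input.
    out = {}
    while items:
        label, key, value = items.pop()
        out[key] = value
    return out

metadata = [('Prompt', 'prompt', 'a photo of a cat'), ('Sharpness', 'sharpness', 2.0)]
parsed = parse_string(metadata.copy())  # the copy is consumed, not the original
print(parsed)
print(metadata)  # still intact for the HTML log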
3 changes: 1 addition & 2 deletions modules/sdxl_styles.py
@@ -94,9 +94,8 @@ def get_words(arrays, totalMult, index):
return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index)



def apply_arrays(text, index):
arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text)
arrays = re.findall(r'\[\[(.*?)\]\]', text)
if len(arrays) == 0:
return text

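The sdxl_styles.py change relaxes the array regex from a character whitelist to a non-greedy catch-all, so [[...]] entries that contain prompt weights (colons, dots, parentheses) are still detected; this is the "Allow prompt weights in array syntax" item in the 2.2.1 update log below. A quick comparison of the two patterns on a made-up prompt:

import re

text = 'a [[cat:1.2, dog, (wet fur:0.8)]] in the rain'

old = re.findall(r'\[\[([\s,\w-]+)\]\]', text)  # ':' '.' '(' are outside the whitelist, so no match
new = re.findall(r'\[\[(.*?)\]\]', text)        # non-greedy catch-all up to the closing ]]

print(old)  # []
print(new)  # ['cat:1.2, dog, (wet fur:0.8)']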
6 changes: 6 additions & 0 deletions update_log.md
@@ -1,3 +1,9 @@
# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1)

* Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox)
* Allow prompt weights in array syntax
* Add steps override and metadata scheme to history log

# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0)

* Isolate every image generation to truly allow multi-user usage
8 changes: 4 additions & 4 deletions webui.py
@@ -355,13 +355,13 @@ def update_history_link():
for i, (n, v) in enumerate(modules.config.default_loras):
with gr.Row():
lora_enabled = gr.Checkbox(label='Enable', value=True,
elem_classes=['lora_enable', 'min_check'])
elem_classes=['lora_enable', 'min_check'], scale=1)
lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
choices=['None'] + modules.config.lora_filenames, value=n,
elem_classes='lora_model')
elem_classes='lora_model', scale=5)
lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
maximum=modules.config.default_loras_max_weight, step=0.01, value=v,
elem_classes='lora_weight')
elem_classes='lora_weight', scale=5)
lora_ctrls += [lora_enabled, lora_model, lora_weight]

with gr.Row():
@@ -438,7 +438,7 @@ def update_history_link():
save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
info='Adds parameters to generated images allowing manual regeneration.')
metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme,
info='Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.',
info='Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.',
visible=modules.config.default_save_metadata_to_images)

save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme],
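Together with the flex-grow CSS dropped from modules/html.py above, the LoRA row in webui.py now gets its proportions from Gradio's own scale argument (1 for the enable checkbox, 5 each for the model dropdown and the weight slider), leaving only the Firefox-specific width fix for the number input in CSS. A stripped-down, standalone sketch of that layout idea (not Fooocus code; it assumes a Gradio version whose components accept scale, and the choices and value ranges are placeholders):

import gradio as gr

# Relative widths come from the components' `scale` arguments
# instead of custom flex-grow rules.
with gr.Blocks() as demo:
    with gr.Row():
        lora_enabled = gr.Checkbox(label='Enable', value=True, scale=1)
        lora_model = gr.Dropdown(label='LoRA 1', choices=['None'], value='None', scale=5)
        lora_weight = gr.Slider(label='Weight', minimum=-2.0, maximum=2.0, step=0.01, value=1.0, scale=5)

if __name__ == '__main__':
    demo.launch()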
4 changes: 2 additions & 2 deletions wildcards/animal.txt
@@ -18,7 +18,7 @@ Chihuahua
Chimpanzee
Chinchilla
Chipmunk
Comodo Dragon
Komodo Dragon
Cow
Coyote
Crocodile
@@ -97,4 +97,4 @@ Whale
Wolf
Wombat
Yak
Zebra
Zebra
