[Enhance] add outpainting to gradio_PowerPaint.py (#2084)
* Update gradio_PowerPaint.py

* Update gradio_PowerPaint.py

* Update gradio_PowerPaint.py

* Update gradio_PowerPaint.py

* Update README.md

* Update README.md

* Update gradio_PowerPaint.py
zhuang2002 authored Dec 9, 2023
1 parent 71d0e95 commit 9ee23d5
Showing 2 changed files with 124 additions and 21 deletions.
4 changes: 2 additions & 2 deletions projects/powerpaint/README.md
@@ -1,10 +1,10 @@
# A Task is Worth One Word: Learning with Task Prompts for High-Quality Versatile Image Inpainting

### [Project Page](https://powerpaint.github.io/) | [Paper](https://arxiv.org/abs/2312.03594) | [OpenXLab](https://openxlab.org.cn/apps/detail/rangoliu/PowerPaint#basic-information)
### [Project Page](https://powerpaint.github.io/) | [Paper](https://arxiv.org/abs/2312.03594) | [Online Demo(OpenXlab)](https://openxlab.org.cn/apps/detail/rangoliu/PowerPaint#basic-information)

This README provides a step-by-step guide to download the repository, set up the required virtual environment named "PowerPaint" using conda, and run PowerPaint with or without ControlNet.

**Stronger Model Weights and Online Demo Coming Soon!**
**Stronger Model Weights Coming Soon!**

<img src='https://github.com/open-mmlab/mmagic/assets/12782558/acd01391-c73f-4997-aafd-0869aebcc915'/>

141 changes: 122 additions & 19 deletions projects/powerpaint/gradio_PowerPaint.py
@@ -82,6 +82,7 @@ def get_depth_map(image):


def add_task(prompt, negative_prompt, control_type):
# print(control_type)
if control_type == 'object-removal':
promptA = prompt + ' P_ctxt'
promptB = prompt + ' P_ctxt'
@@ -92,6 +93,11 @@ def add_task(prompt, negative_prompt, control_type):
promptB = prompt + ' P_ctxt'
negative_promptA = negative_prompt + ' P_shape'
negative_promptB = negative_prompt + ' P_ctxt'
elif control_type == 'image-outpainting':
promptA = prompt + ' P_ctxt'
promptB = prompt + ' P_ctxt'
negative_promptA = negative_prompt + ' P_obj'
negative_promptB = negative_prompt + ' P_obj'
else:
promptA = prompt + ' P_obj'
promptB = prompt + ' P_obj'
@@ -102,16 +108,63 @@ def add_task(prompt, negative_prompt, control_type):


def predict(input_image, prompt, fitting_degree, ddim_steps, scale, seed,
negative_prompt, task):
promptA, promptB, negative_promptA, negative_promptB = add_task(
prompt, negative_prompt, task)
negative_prompt, task, vertical_expansion_ratio,
horizontal_expansion_ratio):
size1, size2 = input_image['image'].convert('RGB').size
if size1 < size2:
input_image['image'] = input_image['image'].convert('RGB').resize(
(640, int(size2 / size1 * 640)))

if task != 'image-outpainting':
if size1 < size2:
input_image['image'] = input_image['image'].convert('RGB').resize(
(640, int(size2 / size1 * 640)))
else:
input_image['image'] = input_image['image'].convert('RGB').resize(
(int(size1 / size2 * 640), 640))
else:
input_image['image'] = input_image['image'].convert('RGB').resize(
(int(size1 / size2 * 640), 640))
if size1 < size2:
input_image['image'] = input_image['image'].convert('RGB').resize(
(512, int(size2 / size1 * 512)))
else:
input_image['image'] = input_image['image'].convert('RGB').resize(
(int(size1 / size2 * 512), 512))

if (vertical_expansion_ratio is not None) and (horizontal_expansion_ratio
is not None): # noqa
o_W, o_H = input_image['image'].convert('RGB').size
c_W = int(horizontal_expansion_ratio * o_W)
c_H = int(vertical_expansion_ratio * o_H)

expand_img = np.ones((c_H, c_W, 3), dtype=np.uint8) * 127
original_img = np.array(input_image['image'])
expand_img[int((c_H - o_H) / 2.0):int((c_H - o_H) / 2.0) + o_H,
int((c_W - o_W) / 2.0):int((c_W - o_W) / 2.0) +
o_W, :] = original_img

blurry_gap = 10

expand_mask = np.ones((c_H, c_W, 3), dtype=np.uint8) * 255
if vertical_expansion_ratio == 1 and horizontal_expansion_ratio != 1:
expand_mask[int((c_H - o_H) / 2.0):int((c_H - o_H) / 2.0) + o_H,
int((c_W - o_W) / 2.0) +
blurry_gap:int((c_W - o_W) / 2.0) + o_W -
blurry_gap, :] = 0
elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio != 1:
expand_mask[int((c_H - o_H) / 2.0) +
blurry_gap:int((c_H - o_H) / 2.0) + o_H - blurry_gap,
int((c_W - o_W) / 2.0) +
blurry_gap:int((c_W - o_W) / 2.0) + o_W -
blurry_gap, :] = 0
elif vertical_expansion_ratio != 1 and horizontal_expansion_ratio == 1:
expand_mask[int((c_H - o_H) / 2.0) +
blurry_gap:int((c_H - o_H) / 2.0) + o_H - blurry_gap,
int((c_W - o_W) /
2.0):int((c_W - o_W) / 2.0) + o_W, :] = 0

input_image['image'] = Image.fromarray(expand_img)
input_image['mask'] = Image.fromarray(expand_mask)

promptA, promptB, negative_promptA, negative_promptB = add_task(
prompt, negative_prompt, task)
# print(promptA, promptB, negative_promptA, negative_promptB)
img = np.array(input_image['image'].convert('RGB'))

W = int(np.shape(img)[0] - np.shape(img)[0] % 8)
@@ -143,7 +196,7 @@ def predict(input_image, prompt, fitting_degree, ddim_steps, scale, seed,
(result_m.astype('float') * (1 - mask_np.astype('float') / 512.0) +
mask_np.astype('float') / 512.0 * red).astype('uint8'))
m_img = input_image['mask'].convert('RGB').filter(
ImageFilter.GaussianBlur(radius=4))
ImageFilter.GaussianBlur(radius=3))
m_img = np.asarray(m_img) / 255.0
img_np = np.asarray(input_image['image'].convert('RGB')) / 255.0
ours_np = np.asarray(result) / 255.0
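The outpainting branch above pastes the input onto a larger gray canvas and builds a matching mask, keeping a 10-pixel `blurry_gap` of original pixels inside the masked border so the seam is regenerated along with the new area. A condensed standalone sketch of that preparation, for orientation only (the function name `expand_canvas` is illustrative and not part of the diff):

```python
import numpy as np
from PIL import Image


def expand_canvas(image, vertical_ratio, horizontal_ratio, blurry_gap=10):
    """Illustrative restatement of the canvas/mask preparation inside predict()."""
    o_W, o_H = image.size
    c_W, c_H = int(horizontal_ratio * o_W), int(vertical_ratio * o_H)
    top, left = (c_H - o_H) // 2, (c_W - o_W) // 2

    # Gray canvas with the original image pasted in the centre.
    canvas = np.full((c_H, c_W, 3), 127, dtype=np.uint8)
    canvas[top:top + o_H, left:left + o_W] = np.array(image.convert('RGB'))

    # Mask: 255 where new content is generated, 0 over the known region,
    # shrunk by blurry_gap on each side that is being expanded.
    mask = np.full((c_H, c_W, 3), 255, dtype=np.uint8)
    if vertical_ratio == 1 and horizontal_ratio == 1:
        # The diff leaves the mask fully white in this degenerate case.
        return Image.fromarray(canvas), Image.fromarray(mask)
    y0, y1, x0, x1 = top, top + o_H, left, left + o_W
    if vertical_ratio != 1:
        y0, y1 = y0 + blurry_gap, y1 - blurry_gap
    if horizontal_ratio != 1:
        x0, x1 = x0 + blurry_gap, x1 - blurry_gap
    mask[y0:y1, x0:x1] = 0
    return Image.fromarray(canvas), Image.fromarray(mask)
```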
@@ -161,13 +214,19 @@ def predict_controlnet(input_image, input_control_image, control_type, prompt,
promptB = prompt + ' P_obj'
negative_promptA = negative_prompt + ' P_obj'
negative_promptB = negative_prompt + ' P_obj'
size1, size2 = input_image['image'].convert('RGB').size

if size1 < size2:
input_image['image'] = input_image['image'].convert('RGB').resize(
(640, int(size2 / size1 * 640)))
else:
input_image['image'] = input_image['image'].convert('RGB').resize(
(int(size1 / size2 * 640), 640))
img = np.array(input_image['image'].convert('RGB'))
W = int(np.shape(img)[0] - np.shape(img)[0] % 8)
H = int(np.shape(img)[1] - np.shape(img)[1] % 8)
input_image['image'] = input_image['image'].resize((H, W))
input_image['mask'] = input_image['mask'].resize((H, W))
print(np.shape(np.array(input_image['mask'].convert('RGB'))))
print(np.shape(np.array(input_image['image'].convert('RGB'))))

global current_control
global pipe
@@ -251,7 +310,8 @@ def predict_controlnet(input_image, input_control_image, control_type, prompt,
def infer(input_image, text_guided_prompt, text_guided_negative_prompt,
shape_guided_prompt, shape_guided_negative_prompt, fitting_degree,
ddim_steps, scale, seed, task, enable_control, input_control_image,
control_type):
control_type, vertical_expansion_ratio, horizontal_expansion_ratio,
outpaint_prompt, outpaint_negative_prompt):
if task == 'text-guided':
prompt = text_guided_prompt
negative_prompt = text_guided_negative_prompt
@@ -261,19 +321,24 @@ def infer(input_image, text_guided_prompt, text_guided_negative_prompt,
elif task == 'object-removal':
prompt = ''
negative_prompt = ''
scale = 10.0
elif task == 'image-outpainting':
prompt = outpaint_prompt
negative_prompt = outpaint_negative_prompt
return predict(input_image, prompt, fitting_degree, ddim_steps, scale,
seed, negative_prompt, task, vertical_expansion_ratio,
horizontal_expansion_ratio)
else:
task = 'text-guided'
prompt = text_guided_prompt
negative_prompt = text_guided_negative_prompt

if enable_control:
if enable_control and task == 'text-guided':
return predict_controlnet(input_image, input_control_image,
control_type, prompt, ddim_steps, scale,
seed, negative_prompt)
else:
return predict(input_image, prompt, fitting_degree, ddim_steps, scale,
seed, negative_prompt, task)
seed, negative_prompt, task, None, None)


def select_tab_text_guided():
@@ -284,6 +349,10 @@ def select_tab_object_removal():
return 'object-removal'


def select_tab_image_outpainting():
return 'image-outpainting'


def select_tab_shape_guided():
return 'shape-guided'

@@ -305,7 +374,10 @@ def select_tab_shape_guided():
gr.Markdown('### Input image and draw mask')
input_image = gr.Image(source='upload', tool='sketch', type='pil')

task = gr.Radio(['text-guided', 'object-removal', 'shape-guided'],
task = gr.Radio([
'text-guided', 'object-removal', 'shape-guided',
'image-outpainting'
],
show_label=False,
visible=False)

@@ -337,6 +409,32 @@ def select_tab_shape_guided():
tab_object_removal.select(
fn=select_tab_object_removal, inputs=None, outputs=task)

# Object image outpainting
with gr.Tab('Image outpainting') as tab_image_outpainting:
enable_object_removal = gr.Checkbox(
label='Enable image outpainting',
value=True,
interactive=False)
outpaint_prompt = gr.Textbox(label='Outpainting_prompt')
outpaint_negative_prompt = gr.Textbox(
label='Outpainting_negative_prompt')
horizontal_expansion_ratio = gr.Slider(
label='horizontal expansion ratio',
minimum=1,
maximum=4,
step=0.05,
value=1,
)
vertical_expansion_ratio = gr.Slider(
label='vertical expansion ratio',
minimum=1,
maximum=4,
step=0.05,
value=1,
)
tab_image_outpainting.select(
fn=select_tab_image_outpainting, inputs=None, outputs=task)

# Shape-guided object inpainting
with gr.Tab('Shape-guided object inpainting') as tab_shape_guided:
enable_shape_guided = gr.Checkbox(
@@ -351,7 +449,7 @@ def select_tab_shape_guided():
minimum=0,
maximum=1,
step=0.05,
randomize=True,
value=1,
)
tab_shape_guided.select(
fn=select_tab_shape_guided, inputs=None, outputs=task)
@@ -362,6 +460,10 @@ def select_tab_shape_guided():
label='Steps', minimum=1, maximum=50, value=45, step=1)
scale = gr.Slider(
label='Guidance Scale',
info='For object removal, \
it is recommended to set the value at 10 or above, \
while for image outpainting, \
it is advisable to set it at 18 or above.',
minimum=0.1,
maximum=30.0,
value=7.5,
@@ -386,9 +488,10 @@ def select_tab_shape_guided():
input_image, text_guided_prompt, text_guided_negative_prompt,
shape_guided_prompt, shape_guided_negative_prompt, fitting_degree,
ddim_steps, scale, seed, task, enable_control, input_control_image,
control_type
control_type, vertical_expansion_ratio, horizontal_expansion_ratio,
outpaint_prompt, outpaint_negative_prompt
],
outputs=[inpaint_result, gallery])

demo.queue()
demo.launch(share=False, server_name='0.0.0.0', server_port=7890)
demo.launch(share=False, server_name='0.0.0.0', server_port=7860)
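The two new sliders feed `vertical_expansion_ratio` and `horizontal_expansion_ratio` through `infer` into `predict`, and the guidance-scale hint above suggests a value of 18 or more for outpainting. A hypothetical direct call for orientation only, assuming the pipeline objects set up earlier in `gradio_PowerPaint.py` are already loaded; the image path, prompt, and ratios are placeholders, and the two-value unpacking mirrors the `[inpaint_result, gallery]` outputs wired in the UI:

```python
from PIL import Image

# Expand the picture 1.6x horizontally only; the mask is built inside predict().
input_image = {'image': Image.open('scene.jpg'), 'mask': None}
inpaint_result, gallery = predict(
    input_image,
    prompt='a wide sandy beach at sunset',
    fitting_degree=1.0,
    ddim_steps=45,
    scale=18.0,  # UI hint: >= 18 for image outpainting
    seed=42,
    negative_prompt='',
    task='image-outpainting',
    vertical_expansion_ratio=1.0,
    horizontal_expansion_ratio=1.6)
```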
