From 4a55f5086f9d8a78c23561309fe22a0b61d056dc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Tue, 26 Sep 2023 19:14:20 +0800
Subject: [PATCH 1/8] Add files via upload

---
 applications/gradio_text2image.py | 68 +++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 applications/gradio_text2image.py

diff --git a/applications/gradio_text2image.py b/applications/gradio_text2image.py
new file mode 100644
index 000000000..324fb65af
--- /dev/null
+++ b/applications/gradio_text2image.py
@@ -0,0 +1,68 @@
+from paddlemix.appflow import Appflow
+from ppdiffusers.utils import load_image
+import paddle
+import imageio
+
+from PIL import Image
+import gradio as gr
+import traceback
+
+# upscaling
+def ups_fun(low_res_img, prompt):
+    low_res_img = Image.fromarray(low_res_img.astype('uint8')).convert('RGB')
+    app = Appflow(app='image2image_text_guided_upscaling',models=['stabilityai/stable-diffusion-x4-upscaler'])
+    image = app(prompt=prompt,image=low_res_img)['result']
+    return image
+
+# text_guided_generation
+def tge_fun(image, prompt_pos, prompt_neg):
+    image = Image.fromarray(image.astype('uint8')).convert('RGB')
+    app = Appflow(app='image2image_text_guided_generation',models=['Linaqruf/anything-v3.0'])
+    image = app(prompt=prompt_pos,negative_prompt=prompt_neg,image=image)['result']
+    return image
+
+# dual_text_and_image_guided_generation
+def dge_fun(image, prompt):
+    image = Image.fromarray(image.astype('uint8')).convert('RGB')
+    app = Appflow(app='dual_text_and_image_guided_generation',models=['shi-labs/versatile-diffusion'])
+    image = app(prompt=prompt,image=image)['result']
+    return image
+
+# video_generation
+def vge_fun(prompt):
+    app = Appflow(app='text_to_video_generation',models=['damo-vilab/text-to-video-ms-1.7b'])
+    video_frames = app(prompt=prompt,num_inference_steps=25)['result']
+    imageio.mimsave("gen_video.gif", video_frames, duration=8)
+    return "gen_video.gif"
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Appflow应用:text2image")
+    with gr.Tab("文本引导的图像放大"):
+        with gr.Row():
+            ups_image_in = gr.Image(label = "输入图片")
+            ups_image_out = gr.Image(label = "输出图片")
+        ups_text_in = gr.Text(label = "Prompt")
+        ups_button = gr.Button()
+        ups_button.click(fn=ups_fun, inputs = [ups_image_in, ups_text_in], outputs = [ups_image_out])
+    with gr.Tab("文本引导的图像变换"):
+        with gr.Row():
+            tge_image_in = gr.Image(label = "输入图片")
+            tge_image_out = gr.Image(label = "输出图片")
+        tge_text_pos_in = gr.Text(label = "Positive Prompt")
+        tge_text_neg_in = gr.Text(label = "Negative Prompt")
+        tge_button = gr.Button()
+        tge_button.click(fn=tge_fun, inputs = [tge_image_in, tge_text_pos_in, tge_text_neg_in], outputs = [tge_image_out])
+    with gr.Tab("文本图像双引导图像生成"):
+        with gr.Row():
+            dge_image_in = gr.Image(label = "输入图片")
+            dge_image_out = gr.Image(label = "输出图片")
+        dge_text_in = gr.Text(label = "Prompt")
+        dge_button = gr.Button()
+        dge_button.click(fn=dge_fun, inputs = [dge_image_in, dge_text_in], outputs = [dge_image_out])
+    with gr.Tab("文本条件的视频生成"):
+        vge_text_in = gr.Text(label = "Prompt")
+        vge_video_out = gr.Video(label = "输出视频")
+        vge_button = gr.Button()
+        vge_button.click(fn=vge_fun, inputs = [vge_text_in], outputs = [vge_video_out])
+
+demo.launch()

From 534e07e548f9baa23de4524dd811ed77f5563e1b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Wed, 27 Sep 2023 23:00:10 +0800
Subject: [PATCH 2/8] Add files via upload

---
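Reviewer note (below the scissors line, not part of the commit): the sketch that follows is one way the auto-label Appflow pipeline added in this patch might be exercised outside Gradio. It assumes the same Appflow construction and result keys ('labels', 'boxes', 'seg_masks', 'image') that gradio_autolable.py relies on; "demo.jpg" and the prompt text are placeholders only.

    from paddlemix.appflow import Appflow
    from ppdiffusers.utils import load_image

    # Same three-model pipeline the Gradio app builds at import time.
    task = Appflow(app="auto_label",
                   models=["paddlemix/blip2-caption-opt2.7b",
                           "GroundingDino/groundingdino-swint-ogc",
                           "Sam/SamVitH-1024"])

    image = load_image("demo.jpg")  # placeholder input image
    result = task(image=image, blip2_prompt="describe the image")

    # These are the keys the app reads when converting the result to annotation JSON.
    for label, box in zip(result["labels"], result["boxes"]):
        print(label, box)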
 applications/gradio_autolable.py | 116 +++++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)
 create mode 100644 applications/gradio_autolable.py

diff --git a/applications/gradio_autolable.py b/applications/gradio_autolable.py
new file mode 100644
index 000000000..94825bf07
--- /dev/null
+++ b/applications/gradio_autolable.py
@@ -0,0 +1,116 @@
+from paddlemix.appflow import Appflow
+from ppdiffusers.utils import load_image
+import paddle
+import cv2
+
+import json
+from zipfile import ZipFile
+import numpy as np
+from PIL import Image, ImageDraw
+import gradio as gr
+import traceback
+
+task = Appflow(app="auto_label",
+               models=["paddlemix/blip2-caption-opt2.7b","GroundingDino/groundingdino-swint-ogc","Sam/SamVitH-1024"])
+
+def auto_label(img, prompt):
+    result = task(image=img,blip2_prompt = prompt)
+    return result
+
+def result2json(result, filename):
+    label_data = {'version': '0.0.0',
+                  'flags': {} ,
+                  'shapes': [],
+                  'imagePath': filename,
+                  'imageHeight': result['image'].size[1],
+                  'imageWidth': result['image'].size[0]}
+
+    for i in range(len(result['labels'])):
+        # label去掉末尾的置信度
+        label = result['labels'][i]
+        spl_idx = -1
+        for j in range(len(label)):
+            if label[j] == '(':
+                spl_idx = j
+        if spl_idx == -1:
+            label = label
+        else:
+            label = label[:spl_idx]
+
+        # 增加bbox
+        rect = result['boxes'][i]
+        xmin, ymin, xmax, ymax = rect
+        label_data['shapes'].append(
+            {'label': label,
+             'points': [[xmin, ymin],[xmax, ymax]],
+             'group_id': None,
+             'shape_type': 'rectangle',
+             'flags': {}
+             }
+        )
+
+        # 记录polygen
+        seg_mask = result['seg_masks'][i].numpy()[0]
+        mask_img = seg_mask.astype('uint8')*255
+        contours, _ = cv2.findContours(mask_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        points = []
+        for contour in contours:
+            for point in contour:
+                points.append(point[0].tolist())
+
+        # 增加polygen
+        rect = result['boxes'][i]
+        xmin, ymin, xmax, ymax = rect
+        label_data['shapes'].append(
+            {'label': label,
+             'points': points,
+             'group_id': None,
+             'shape_type': 'polygon',
+             'flags': {}
+             }
+        )
+
+    return label_data
+
+def al_fun(img, prompt):
+    img = Image.fromarray(img.astype('uint8')).convert('RGB')
+    result = auto_label(img, prompt)
+    label_data = result2json(result, " ")
+    # 绘制
+    draw = ImageDraw.Draw(img)
+    for i in range(len(result)):
+        rect = result['boxes'][i].tolist()
+        draw.rectangle(rect)
+    return img, label_data
+
+def al_file_fun(file_in, prompt):
+    with ZipFile("labeled.zip", "w") as zipObj:
+        for _, imgname in enumerate(file_in):
+            image_pil = load_image(imgname.name)
+            result = auto_label(image_pil, prompt)
+            label_data = result2json(result, imgname.name.split("/")[-1])
+            with open(imgname.name.split("/")[-1]+'.josn','w') as f:
+                json.dump(result, f, indent=4)
+            zipObj.write(imgname.name.split("/")[-1]+'.josn')
+    return "labeled.zip"
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# 自动标注(AutoLabel)")
+    with gr.Tab("单张图片标注"):
+        with gr.Row():
+            al_image_in = gr.Image(label = "输入图片")
+            al_image_out = gr.Image(label = "标注图片")
+        al_text_in = gr.Text(label = "Prompt")
+        al_text_out = gr.Text(label = "标注信息")
+        al_button = gr.Button()
+        al_button.click(fn=al_fun, inputs = [al_image_in, al_text_in], outputs = [al_image_out, al_text_out])
+    with gr.Tab("批量标注"):
+        with gr.Row():
+            al_file_in = gr.Files(label = "上传多张图片", file_types=['.jpg', '.png', '.jpeg', '.JPG', '.PNG', '.JPEG'])
+            al_file_out = gr.File(label = "标注结果")
+        al_file_text_in = gr.Text(label = "Prompt")
+        al_file_button = gr.Button()
+        al_file_button.click(fn=al_file_fun, inputs = [al_file_in, al_file_text_in], outputs = [al_file_out])
+
+demo.launch()

From f2f598850a1a89d39ed4f29d066ea7ce5739d4ae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Thu, 28 Sep 2023 17:14:13 +0800
Subject: [PATCH 3/8] Update gradio_autolable.py

---
 applications/gradio_autolable.py | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/applications/gradio_autolable.py b/applications/gradio_autolable.py
index 94825bf07..490fec21d 100644
--- a/applications/gradio_autolable.py
+++ b/applications/gradio_autolable.py
@@ -3,6 +3,7 @@
 import paddle
 import cv2
 
+import os
 import json
 from zipfile import ZipFile
 import numpy as np
@@ -90,10 +91,26 @@ def al_file_fun(file_in, prompt):
             result = auto_label(image_pil, prompt)
             label_data = result2json(result, imgname.name.split("/")[-1])
             with open(imgname.name.split("/")[-1]+'.josn','w') as f:
-                json.dump(result, f, indent=4)
+                json.dump(label_data, f, indent=4)
             zipObj.write(imgname.name.split("/")[-1]+'.josn')
     return "labeled.zip"
 
+def al_path_fun(path_in, prompt):
+    with ZipFile("labeled.zip", "w") as zipObj:
+        for root, _, files in os.walk(path_in, topdown=False):
+            for name in files:
+                if name.split('.')[-1] in ['jpg', 'png', 'jpeg', 'JPG', 'PNG', 'JPEG']:
+                    img_path = os.path.join(root, name)
+                    json_path = os.path.join(root, name+'.json')
+
+                    image_pil = load_image(img_path)
+                    result = auto_label(image_pil, prompt)
+                    label_data = result2json(result, img_path)
+                    with open(json_path,'w') as f:
+                        json.dump(label_data, f, indent=4)
+                    zipObj.write(json_path)
+    return "labeled.zip"
+
 
 with gr.Blocks() as demo:
     gr.Markdown("# 自动标注(AutoLabel)")
@@ -105,12 +122,18 @@ def al_file_fun(file_in, prompt):
         al_text_out = gr.Text(label = "标注信息")
         al_button = gr.Button()
         al_button.click(fn=al_fun, inputs = [al_image_in, al_text_in], outputs = [al_image_out, al_text_out])
-    with gr.Tab("批量标注"):
+    with gr.Tab("上传文件批量标注"):
         with gr.Row():
             al_file_in = gr.Files(label = "上传多张图片", file_types=['.jpg', '.png', '.jpeg', '.JPG', '.PNG', '.JPEG'])
             al_file_out = gr.File(label = "标注结果")
         al_file_text_in = gr.Text(label = "Prompt")
         al_file_button = gr.Button()
         al_file_button.click(fn=al_file_fun, inputs = [al_file_in, al_file_text_in], outputs = [al_file_out])
+    with gr.Tab("指定路径下批量标注"):
+        al_path_in = gr.Text(label = "待标注图片所在目录")
+        al_path_text_in = gr.Text(label = "Prompt")
+        al_path_out = gr.File(label = "标注结果")
+        al_path_button = gr.Button()
+        al_path_button.click(fn=al_path_fun, inputs = [al_path_in, al_path_text_in], outputs = [al_path_out])
 
 demo.launch()

From 3345a783cec36ce69287bfda49d8fd80613d3645 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Thu, 28 Sep 2023 19:47:48 +0800
Subject: [PATCH 4/8] Update gradio_autolable.py

---
 applications/gradio_autolable.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/applications/gradio_autolable.py b/applications/gradio_autolable.py
index 490fec21d..99bf69a5f 100644
--- a/applications/gradio_autolable.py
+++ b/applications/gradio_autolable.py
@@ -79,7 +79,7 @@ def al_fun(img, prompt):
     label_data = result2json(result, " ")
     # 绘制
     draw = ImageDraw.Draw(img)
-    for i in range(len(result)):
+    for i in range(len(result['boxes'])):
         rect = result['boxes'][i].tolist()
         draw.rectangle(rect)
     return img, label_data

From 7297230f5992b5e843788eb4fb97a62edc4875fb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Thu, 28 Sep 2023 19:56:29 +0800
Subject: [PATCH 5/8] Update gradio_autolable.py

---
 applications/gradio_autolable.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/applications/gradio_autolable.py b/applications/gradio_autolable.py
index 99bf69a5f..49bb66bc7 100644
--- a/applications/gradio_autolable.py
+++ b/applications/gradio_autolable.py
@@ -39,7 +39,7 @@ def result2json(result, filename):
             label = label[:spl_idx]
 
         # 增加bbox
-        rect = result['boxes'][i]
+        rect = result['boxes'][i].tolist()
         xmin, ymin, xmax, ymax = rect
         label_data['shapes'].append(
             {'label': label,

From 0325c13f6cec5fd6afeb43b076c333623530c5ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Wed, 11 Oct 2023 21:35:32 +0800
Subject: [PATCH 6/8] Update gradio_autolable.py

---
 applications/gradio_autolable.py | 106 ++++++++++++++++++++++---------
 1 file changed, 77 insertions(+), 29 deletions(-)

diff --git a/applications/gradio_autolable.py b/applications/gradio_autolable.py
index 49bb66bc7..c2b765e90 100644
--- a/applications/gradio_autolable.py
+++ b/applications/gradio_autolable.py
@@ -6,18 +6,24 @@
 import os
 import json
 from zipfile import ZipFile
+import zipfile
 import numpy as np
 from PIL import Image, ImageDraw
 import gradio as gr
 import traceback
+import math
+import tempfile
+
 
 task = Appflow(app="auto_label",
                models=["paddlemix/blip2-caption-opt2.7b","GroundingDino/groundingdino-swint-ogc","Sam/SamVitH-1024"])
 
+
 def auto_label(img, prompt):
     result = task(image=img,blip2_prompt = prompt)
     return result
 
+
 def result2json(result, filename):
     label_data = {'version': '0.0.0',
                   'flags': {} ,
@@ -49,7 +55,7 @@ def result2json(result, filename):
             'flags': {}
             }
         )
-    
+
         # 记录polygen
         seg_mask = result['seg_masks'][i].numpy()[0]
         mask_img = seg_mask.astype('uint8')*255
@@ -73,43 +79,79 @@ def result2json(result, filename):
 
     return label_data
 
+
+def generate_mask(img, result_masks):
+    divide_part = int(255/(math.ceil(len(result_masks)/3)+1))
+    np_img = np.array(img)
+    for i in range(len(result_masks)):
+        color = [0,0,0]
+        c = i%3
+        p = i//3+1
+        color[c] = divide_part*p
+        mask = result_masks[i]
+        M = mask.numpy()[0]
+        np_img[M] = color
+        print(color)
+    img = Image.fromarray(np_img)
+    return img
+
+
 def al_fun(img, prompt):
     img = Image.fromarray(img.astype('uint8')).convert('RGB')
     result = auto_label(img, prompt)
-    label_data = result2json(result, " ")
-    # 绘制
+    label_data = result2json(result, "tmpimg")
+    # Draw BBox
     draw = ImageDraw.Draw(img)
     for i in range(len(result['boxes'])):
         rect = result['boxes'][i].tolist()
-        draw.rectangle(rect)
-    return img, label_data
+        draw.rectangle(rect, width=10)
+    # Draw Mask
+    mask_img = generate_mask(result['image'], result['seg_masks'])
+    # Write File
+    labeled_file = os.path.join(tmpdir,'labeled_date.json')
+    with open(labeled_file,'w') as f:
+        json.dump(label_data, f, indent=4)
+    return img, mask_img, labeled_file
+
 
 def al_file_fun(file_in, prompt):
-    with ZipFile("labeled.zip", "w") as zipObj:
+    out_zip_file = os.path.join(tmpdir, "labeled.zip")
+    with ZipFile(out_zip_file, "w") as zipObj:
         for _, imgname in enumerate(file_in):
-            image_pil = load_image(imgname.name)
+            image_pil = Image.open(imgname.name)
             result = auto_label(image_pil, prompt)
             label_data = result2json(result, imgname.name.split("/")[-1])
-            with open(imgname.name.split("/")[-1]+'.josn','w') as f:
+            labeled_file = os.path.join(tmpdir,imgname.name.split("/")[-1]+'.josn')
+            with open(labeled_file,'w') as f:
                 json.dump(label_data, f, indent=4)
-            zipObj.write(imgname.name.split("/")[-1]+'.josn')
-    return "labeled.zip"
+            zipObj.write(labeled_file)
+    return out_zip_file
+
 
-def al_path_fun(path_in, prompt):
-    with ZipFile("labeled.zip", "w") as zipObj:
-        for root, _, files in os.walk(path_in, topdown=False):
+def al_zip_fun(zip_in, prompt):
+    for _, zipname in enumerate(zip_in):
+        with open('test.txt', 'a') as f:
+            f.write(zipname.name+'\n')
+            f.write(zipname.name+'\n')
+        zipfile.ZipFile(zipname.name).extractall(tmpdir)
+    with open('test.txt', 'a') as f:
+        f.write('\n after extract \n')
+    out_zip_file = os.path.join(tmpdir, "labeled.zip")
+    with ZipFile(out_zip_file, "w") as zipObj:
+        for root, _, files in os.walk(tmpdir, topdown=False):
             for name in files:
                 if name.split('.')[-1] in ['jpg', 'png', 'jpeg', 'JPG', 'PNG', 'JPEG']:
                     img_path = os.path.join(root, name)
                     json_path = os.path.join(root, name+'.json')
 
-                    image_pil = load_image(img_path)
+                    image_pil = Image.open(img_path)
                     result = auto_label(image_pil, prompt)
                     label_data = result2json(result, img_path)
                     with open(json_path,'w') as f:
                         json.dump(label_data, f, indent=4)
                     zipObj.write(json_path)
-    return "labeled.zip"
+                    os.remove(img_path)
+    return out_zip_file
 
 
 with gr.Blocks() as demo:
@@ -117,23 +159,29 @@ def al_path_fun(path_in, prompt):
     with gr.Tab("单张图片标注"):
         with gr.Row():
             al_image_in = gr.Image(label = "输入图片")
-            al_image_out = gr.Image(label = "标注图片")
-        al_text_in = gr.Text(label = "Prompt")
-        al_text_out = gr.Text(label = "标注信息")
+            al_image_out1 = gr.Image(label = "BBox标注图片")
+            al_image_out2 = gr.Image(label = "Mask标注图片")
+        al_text_in = gr.Text(label = "Prompt", value="describe the image")
+        al_file_out_ = gr.File(label = "标注文件")
         al_button = gr.Button()
-        al_button.click(fn=al_fun, inputs = [al_image_in, al_text_in], outputs = [al_image_out, al_text_out])
-    with gr.Tab("上传文件批量标注"):
+        al_button.click(fn=al_fun, inputs = [al_image_in, al_text_in], outputs = [al_image_out1, al_image_out2, al_file_out_])
+    with gr.Tab("上传多张图片批量标注"):
         with gr.Row():
             al_file_in = gr.Files(label = "上传多张图片", file_types=['.jpg', '.png', '.jpeg', '.JPG', '.PNG', '.JPEG'])
             al_file_out = gr.File(label = "标注结果")
-        al_file_text_in = gr.Text(label = "Prompt")
+        al_file_text_in = gr.Text(label = "Prompt", value="describe the image")
         al_file_button = gr.Button()
         al_file_button.click(fn=al_file_fun, inputs = [al_file_in, al_file_text_in], outputs = [al_file_out])
-    with gr.Tab("指定路径下批量标注"):
-        al_path_in = gr.Text(label = "待标注图片所在目录")
-        al_path_text_in = gr.Text(label = "Prompt")
-        al_path_out = gr.File(label = "标注结果")
-        al_path_button = gr.Button()
-        al_path_button.click(fn=al_path_fun, inputs = [al_path_in, al_path_text_in], outputs = [al_path_out])
-
-demo.launch()
+    with gr.Tab("上传压缩包批量标注"):
+        with gr.Row():
+            al_zip_in = gr.Files(label = "上传压缩包", file_types=['.zip'])
+            al_zip_out = gr.File(label = "标注结果")
+        al_zip_text_in = gr.Text(label = "Prompt", value="describe the image")
+        al_zip_button = gr.Button()
+        al_zip_button.click(fn=al_zip_fun, inputs = [al_zip_in, al_zip_text_in], outputs = [al_zip_out])
+
+
+# for download file, use the tempfile
+global tmpdir
+with tempfile.TemporaryDirectory(dir='.') as tmpdir:
+    demo.launch()

From fb39c6f1ad78f67075ad114a4493e03549749f74 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Fri, 20 Oct 2023 18:03:10 +0800
Subject: [PATCH 7/8] Update applications/gradio_text2image.py

---
 applications/gradio_text2image.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/applications/gradio_text2image.py b/applications/gradio_text2image.py
index 324fb65af..b144fcf9c 100644
--- a/applications/gradio_text2image.py
+++ b/applications/gradio_text2image.py
@@ -52,13 +52,6 @@ def vge_fun(prompt):
         tge_text_neg_in = gr.Text(label = "Negative Prompt")
         tge_button = gr.Button()
         tge_button.click(fn=tge_fun, inputs = [tge_image_in, tge_text_pos_in, tge_text_neg_in], outputs = [tge_image_out])
-    with gr.Tab("文本图像双引导图像生成"):
-        with gr.Row():
-            dge_image_in = gr.Image(label = "输入图片")
-            dge_image_out = gr.Image(label = "输出图片")
-        dge_text_in = gr.Text(label = "Prompt")
-        dge_button = gr.Button()
-        dge_button.click(fn=dge_fun, inputs = [dge_image_in, dge_text_in], outputs = [dge_image_out])
     with gr.Tab("文本条件的视频生成"):
         vge_text_in = gr.Text(label = "Prompt")
         vge_video_out = gr.Video(label = "输出视频")

From 91c1edfc338032ec41b39424419ad9e9d7912df3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Fri, 20 Oct 2023 18:03:22 +0800
Subject: [PATCH 8/8] Update applications/gradio_text2image.py

---
 applications/gradio_text2image.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/applications/gradio_text2image.py b/applications/gradio_text2image.py
index b144fcf9c..1b9ee6f14 100644
--- a/applications/gradio_text2image.py
+++ b/applications/gradio_text2image.py
@@ -21,13 +21,6 @@ def tge_fun(image, prompt_pos, prompt_neg):
     image = app(prompt=prompt_pos,negative_prompt=prompt_neg,image=image)['result']
     return image
 
-# dual_text_and_image_guided_generation
-def dge_fun(image, prompt):
-    image = Image.fromarray(image.astype('uint8')).convert('RGB')
-    app = Appflow(app='dual_text_and_image_guided_generation',models=['shi-labs/versatile-diffusion'])
-    image = app(prompt=prompt,image=image)['result']
-    return image
-
 # video_generation
 def vge_fun(prompt):
     app = Appflow(app='text_to_video_generation',models=['damo-vilab/text-to-video-ms-1.7b'])
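Series note (not part of any commit): PATCH 7/8 and 8/8 drop the dual text-and-image guided tab, leaving the upscaling, text-guided generation and video tabs in gradio_text2image.py. The sketch below is a minimal way the remaining upscaling flow from PATCH 1/8 might be called without the Gradio UI; it assumes the pipeline returns a PIL image (as the demo implies by feeding the result straight to gr.Image), and "low_res.png", the prompt and "upscaled.png" are placeholders only.

    from PIL import Image
    from paddlemix.appflow import Appflow

    # Same pipeline ups_fun() constructs; built once here instead of on every click.
    app = Appflow(app='image2image_text_guided_upscaling',
                  models=['stabilityai/stable-diffusion-x4-upscaler'])

    low_res_img = Image.open("low_res.png").convert("RGB")  # placeholder input
    upscaled = app(prompt="a white cat", image=low_res_img)['result']
    upscaled.save("upscaled.png")  # assumes a PIL.Image result, as the demo suggests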