forked from comfyanonymous/ComfyUI
-
Notifications
You must be signed in to change notification settings - Fork 11
/
remote_api_example.py
144 lines (133 loc) · 4.68 KB
/
remote_api_example.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
# This sample shows how to execute a ComfyUI workflow against a remote ComfyUI server or the server running on your
# local machine. It will return the bytes of the image in the workflow.
#
# This script does not need to run within a ComfyUI directory. Instead, this can be used inside your own
# Python application or located elsewhere. It should **not** be in the Git repository directory.
#
# First, you will need to install ComfyUI. You do not need the ComfyUI repository or all of the ComfyUI dependencies to
# run a script against a server on your machine or elsewhere. You can install the convenient client and types with:
# pip install --no-deps git+https://github.com/hiddenswitch/ComfyUI.git
# pip install aiohttp[speedups]
#
# Now you should develop your workflow. Start ComfyUI as normal; navigate to "Settings" in the menu, and check "Enable
# Dev mode Options". Then click "Save (API Format)". Copy and paste the contents of the saved API-format file here:
_PROMPT_FROM_WEB_UI = {
"3": {
"class_type": "KSampler",
"inputs": {
"cfg": 8,
"denoise": 1,
"latent_image": [
"5",
0
],
"model": [
"4",
0
],
"negative": [
"7",
0
],
"positive": [
"6",
0
],
"sampler_name": "euler",
"scheduler": "normal",
"seed": 8566257,
"steps": 20
}
},
"4": {
"class_type": "CheckpointLoaderSimple",
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.safetensors"
}
},
"5": {
"class_type": "EmptyLatentImage",
"inputs": {
"batch_size": 1,
"height": 512,
"width": 512
}
},
"6": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "masterpiece best quality girl"
}
},
"7": {
"class_type": "CLIPTextEncode",
"inputs": {
"clip": [
"4",
1
],
"text": "bad hands"
}
},
"8": {
"class_type": "VAEDecode",
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
}
},
"9": {
"class_type": "SaveImage",
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
}
}
}
# Observe this is an ordinary dictionary. The JSON that was saved from the workflow is compatible with Python syntax.
#
# Because you will be accessing a remote ComfyUI server, it should remain running somewhere. It can be your local machine,
# or some other machine. Do what makes sense for your application.
#
# We'll now write the entrypoint of our script. This is an `async def main()` because async helps us start and stop the
# code object that will run your workflow, just like pressing the Queue Prompt button.
async def main():
    """Queue the example workflow on a ComfyUI server and save the resulting PNG.

    Expects a ComfyUI server listening at http://localhost:8188 (start one with
    ``comfyui --listen``, or point ``server_address`` at a remote host).

    Writes the returned image bytes to ``image.png`` in the current directory.
    """
    import copy

    # Deep-copy the template so the module-level dict is never mutated.
    prompt_dict = copy.deepcopy(_PROMPT_FROM_WEB_UI)
    # Change the positive prompt text (node "6" is the positive CLIPTextEncode).
    prompt_dict["6"]["inputs"]["text"] = "masterpiece best quality man"
    # Pin the KSampler seed for reproducible output.
    prompt_dict["3"]["inputs"]["seed"] = 5
    # Validate the prompt against the ComfyUI schema before sending it.
    from comfy.api.components.schema.prompt import Prompt
    prompt = Prompt.validate(prompt_dict)
    # Create the client that talks to the (local or remote) ComfyUI server.
    from comfy.client.aio_client import AsyncRemoteComfyClient
    client = AsyncRemoteComfyClient(server_address="http://localhost:8188")
    # Queue the prompt and await the bytes of the PNG saved by the SaveImage node.
    png_image_bytes = await client.queue_prompt(prompt)
    # BUG FIX: the file must be opened for writing binary ("wb"); the original
    # "rb" opened it read-only, so f.write() raised io.UnsupportedOperation.
    with open("image.png", "wb") as f:
        f.write(png_image_bytes)
# Script entry point: main() is a coroutine, so it must be driven by an
# event loop rather than called directly.
if __name__ == "__main__":
    import asyncio

    asyncio.run(main())