1
0
forked from cgvr/DeltaVR

port InvokeAI API client to Unity, use it in ImageGenerationBox

This commit is contained in:
2025-12-22 13:19:28 +02:00
parent 236f0758df
commit 1b3b3db1bf
11 changed files with 1033 additions and 74 deletions

View File

@@ -34,18 +34,6 @@ def get_client() -> httpx.AsyncClient:
return http_client
async def text_to_image_invoke_ai(prompt, output_path):
    """Generate a 512x512 image for *prompt* and save it to *output_path*.

    Uses the module-level INVOKEAI_MODEL_KEY as the main model.
    Available model keys can be listed via
    GET http://INVOKEAI_BASE_URL:9090/api/v2/models/?model_type=main
    """
    request = {
        "prompt": prompt,
        "width": 512,
        "height": 512,
        "model_key": INVOKEAI_MODEL_KEY,
    }
    image_url = await generate_image(request)
    print("got image url: ", image_url)
    download_file(image_url, output_path)
async def wait_for_completion(batch_id: str, queue_id: str = DEFAULT_QUEUE_ID, timeout: int = 300) -> dict:
"""Wait for a batch to complete and return the most recent image."""
client = get_client()
@@ -107,60 +95,7 @@ async def wait_for_completion(batch_id: str, queue_id: str = DEFAULT_QUEUE_ID, t
# Wait before checking again
await asyncio.sleep(1)
async def generate_image(arguments: dict):
    """Build a text-to-image graph from *arguments*, enqueue it, and wait for the result.

    Required key: "prompt". All other keys are optional and fall back to the
    defaults below. Returns the absolute URL of the generated image.
    Raises RuntimeError if the completed batch contains no image output.
    """
    prompt = arguments["prompt"]

    # Optional parameters and their fallbacks; collected in the keyword
    # order expected by create_text2img_graph.
    optional_defaults = {
        "negative_prompt": "",
        "model_key": None,
        "lora_key": None,
        "lora_weight": 1.0,
        "vae_key": None,
        "width": 512,
        "height": 512,
        "steps": 30,
        "cfg_scale": 7.5,
        "scheduler": "euler",
        "seed": None,
    }
    params = {name: arguments.get(name, fallback)
              for name, fallback in optional_defaults.items()}

    print(f"Generating image with prompt: {prompt[:50]}...")

    graph = await create_text2img_graph(prompt=prompt, **params)

    # Submit the graph and block until the queue reports completion.
    enqueue_result = await enqueue_graph(graph)
    batch_id = enqueue_result["batch"]["batch_id"]
    print(f"Enqueued batch {batch_id}, waiting for completion...")
    completed = await wait_for_completion(batch_id)

    # Scan the node outputs for the first image and resolve its URL.
    if "result" in completed and "outputs" in completed["result"]:
        for output in completed["result"]["outputs"].values():
            if output.get("type") == "image_output":
                relative_url = await get_image_url(output["image"]["image_name"])
                return urljoin(INVOKEAI_BASE_URL, relative_url)

    raise RuntimeError("Failed to generate image!")
def download_file(url, filepath):
response = requests.get(url)
@@ -486,4 +421,72 @@ async def create_text2img_graph(
"edges": edges
}
return graph
return graph
async def generate_image(arguments: dict):
    """Generate an image with InvokeAI and return its absolute URL.

    *arguments* must contain "prompt"; every other generation parameter
    (size, steps, cfg_scale, scheduler, seed, model/LoRA/VAE keys) is
    optional with sensible defaults. Raises RuntimeError when the finished
    batch yields no image output.
    """
    prompt = arguments["prompt"]
    print(f"Generating image with prompt: {prompt[:50]}...")

    # Assemble the text-to-image graph; .get() supplies defaults inline.
    graph = await create_text2img_graph(
        prompt=prompt,
        negative_prompt=arguments.get("negative_prompt", ""),
        model_key=arguments.get("model_key"),
        lora_key=arguments.get("lora_key"),
        lora_weight=arguments.get("lora_weight", 1.0),
        vae_key=arguments.get("vae_key"),
        width=arguments.get("width", 512),
        height=arguments.get("height", 512),
        steps=arguments.get("steps", 30),
        cfg_scale=arguments.get("cfg_scale", 7.5),
        scheduler=arguments.get("scheduler", "euler"),
        seed=arguments.get("seed"),
    )

    # Enqueue, then poll until the batch finishes.
    enqueued = await enqueue_graph(graph)
    batch_id = enqueued["batch"]["batch_id"]
    print(f"Enqueued batch {batch_id}, waiting for completion...")
    completed = await wait_for_completion(batch_id)

    # The first node output of type "image_output" carries the image name.
    if "result" in completed and "outputs" in completed["result"]:
        outputs = completed["result"]["outputs"]
        for node_output in outputs.values():
            if node_output.get("type") == "image_output":
                image_name = node_output["image"]["image_name"]
                image_url = await get_image_url(image_name)
                return urljoin(INVOKEAI_BASE_URL, image_url)

    raise RuntimeError("Failed to generate image!")
async def text_to_image_invoke_ai(prompt, output_path):
    """Render *prompt* as a 512x512 image and download it to *output_path*.

    The main model is selected by the module constant INVOKEAI_MODEL_KEY;
    see GET http://INVOKEAI_BASE_URL:9090/api/v2/models/?model_type=main
    for the keys available on the server.
    """
    url = await generate_image(
        {
            "prompt": prompt,
            "width": 512,
            "height": 512,
            "model_key": INVOKEAI_MODEL_KEY,
        }
    )
    print("got image url: ", url)
    download_file(url, output_path)