Reimplement function from invokeai_mcp_server to make requests to a local InvokeAI instance

This commit is contained in:
henrisel 2025-11-10 18:19:16 +02:00
parent d2e1c7b56f
commit fdd4ff827e
3 changed files with 77 additions and 3 deletions

View File

@ -0,0 +1,72 @@
import asyncio
import json
from urllib.parse import urljoin

from invokeai_mcp_server import (
    create_text2img_graph,
    enqueue_graph,
    get_image_url,
    wait_for_completion,
)
# Base URL of the local InvokeAI server (9090 is InvokeAI's default port).
INVOKEAI_BASE_URL = "http://127.0.0.1:9090"
async def generate_image(arguments: dict) -> str:
    """Generate an image via the local InvokeAI instance.

    Builds a text-to-image graph from *arguments*, enqueues it, waits for
    the batch to complete, then prints and returns a status message with
    the generated image's name and URL.

    Args:
        arguments: Request parameters. Required key: "prompt". Optional
            keys with defaults: "negative_prompt" (""), "width" (512),
            "height" (512), "steps" (30), "cfg_scale" (7.5),
            "scheduler" ("euler"), "lora_weight" (1.0). Optional keys
            defaulting to None: "seed", "model_key", "lora_key",
            "vae_key".

    Returns:
        The status message that was printed (success details, or a
        fallback message when the completed batch had no image output).

    Raises:
        KeyError: If "prompt" is missing from *arguments*, or the
            enqueue result lacks the expected batch structure.
    """
    # Extract parameters, applying InvokeAI-friendly defaults.
    prompt = arguments["prompt"]
    negative_prompt = arguments.get("negative_prompt", "")
    width = arguments.get("width", 512)
    height = arguments.get("height", 512)
    steps = arguments.get("steps", 30)
    cfg_scale = arguments.get("cfg_scale", 7.5)
    scheduler = arguments.get("scheduler", "euler")
    seed = arguments.get("seed")
    model_key = arguments.get("model_key")
    lora_key = arguments.get("lora_key")
    lora_weight = arguments.get("lora_weight", 1.0)
    vae_key = arguments.get("vae_key")

    # Build the text-to-image graph for the InvokeAI API.
    graph = await create_text2img_graph(
        prompt=prompt,
        negative_prompt=negative_prompt,
        model_key=model_key,
        lora_key=lora_key,
        lora_weight=lora_weight,
        vae_key=vae_key,
        width=width,
        height=height,
        steps=steps,
        cfg_scale=cfg_scale,
        scheduler=scheduler,
        seed=seed,
    )

    # Enqueue the graph and block until the batch finishes.
    result = await enqueue_graph(graph)
    batch_id = result["batch"]["batch_id"]
    completed = await wait_for_completion(batch_id)

    # Find the first image output in the completed batch. Previously the
    # loop neither returned nor broke, so every image output was printed
    # and the function always fell through with no return value.
    outputs = completed.get("result", {}).get("outputs", {})
    for node_id, output in outputs.items():
        if output.get("type") == "image_output":
            image_name = output["image"]["image_name"]
            image_url = await get_image_url(image_name)
            text = (
                f"Image generated successfully!\n\nImage Name: {image_name}"
                f"\nImage URL: {image_url}\n\nYou can view the image at: "
                f"{urljoin(INVOKEAI_BASE_URL, f'/api/v1/images/i/{image_name}/full')}"
            )
            print(text)
            return text

    # Fallback: the batch completed but no image_output node was found.
    # This branch was previously commented out, silently doing nothing.
    text = (
        "Image generation completed but output format was unexpected. "
        f"Batch ID: {batch_id}\n\nResult: {json.dumps(completed, indent=2)}"
    )
    print(text)
    return text
async def main() -> None:
    """Demo entry point: generate one image with a fixed prompt."""
    args = {
        "prompt": "a golden katana with a fancy pommel",
    }
    await generate_image(args)


if __name__ == "__main__":
    # Guard the event-loop startup so importing this module does not
    # immediately fire a generation request against the local server.
    asyncio.run(main())

Binary file not shown.

View File

@ -11,6 +11,8 @@ public class ModelGenerationPipelineStarter : MonoBehaviour
private MeshRenderer meshRenderer;
public string inputPrompt;
// Start is called before the first frame update
void Start()
{
@ -48,7 +50,7 @@ public class ModelGenerationPipelineStarter : MonoBehaviour
{
return await Task.Run(() =>
{
string inputPrompt = "tasty golden apple, photorealistic, smooth background";
// Path to your virtual environment's python.exe
string pythonExe = @"D:\users\henrisel\DeltaVR3DModelGeneration\3d-generation-pipeline\.venv\Scripts\python.exe";