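"""Text-to-3D generation pipeline.

Refines a free-form user prompt with a Cloudflare Workers AI chat model,
generates an image with flux-1-schnell, and then hands the image to a local
image-to-3D script (run.py inside MODEL_FOLDER).

Usage (script name is a placeholder):
    python <this_script>.py --prompt "a slightly curved sword with bright orange crossguard"
"""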
import os
import base64
import requests
import argparse
import subprocess
from pathlib import Path
from datetime import datetime
from dotenv import load_dotenv

load_dotenv()

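# Required configuration, read from the environment (populated from .env by load_dotenv)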
ACCOUNT_ID = os.environ["CLOUDFLARE_ACCOUNT_ID"]
API_TOKEN = os.environ["CLOUDFLARE_API_TOKEN"]
PIPELINE_FOLDER = os.environ["PIPELINE_FOLDER"]
MODEL_FOLDER = os.environ["MODEL_FOLDER"]


def get_timestamp():
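    """Return a filesystem-friendly timestamp such as 2025-01-31-12-00-00."""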
    return datetime.now().strftime("%Y-%m-%d-%H-%M-%S")


def text_to_image(prompt, output_path):
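    """Generate an image for `prompt` with Cloudflare Workers AI (flux-1-schnell)
    and write the base64-decoded result to `output_path`."""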
    MODEL = "@cf/black-forest-labs/flux-1-schnell"
    URL = f"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/{MODEL}"

    payload = {
        "prompt": prompt,
    }

    headers = {
        "Authorization": f"Bearer {API_TOKEN}",
        "Content-Type": "application/json",
    }

    resp = requests.post(URL, json=payload, headers=headers, timeout=60)
    resp.raise_for_status()

    data = resp.json()
    b64 = data.get("result", {}).get("image")  # avoid KeyError so the check below can report the payload
    if not b64:
        raise RuntimeError(f"Unexpected response structure: {data}")

    img_bytes = base64.b64decode(b64)

    with open(output_path, "wb") as f:
        f.write(img_bytes)


def refine_text_prompt(prompt):
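    """Ask a Workers AI chat model to condense the user's free-form request
    into a short description usable as an image-generation prompt."""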
    MODEL = "@cf/meta/llama-3.2-3b-instruct"
    URL = f"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/{MODEL}"

    instructions = """
User is talking about some object. Your task is to generate a short and concise description of it. Use only user's own words, keep it as short as possible.
Example:
User: 'Umm, okay, I would like a really cool sword, with for example a bright orange crossguard. And also it should be slightly curved.'
You: 'a slightly curved sword with bright orange crossguard'
"""

    response = requests.post(
        URL,
        headers={"Authorization": f"Bearer {API_TOKEN}"},
        json={
            "messages": [
                {"role": "system", "content": instructions},
                {"role": "user", "content": prompt},
            ]
        },
        timeout=60,
    )
    response.raise_for_status()
    data = response.json()
    return data["result"]["response"]


def image_to_3d(image_path, output_path):
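    """Run the local image-to-3D script (run.py in MODEL_FOLDER) in its own
    virtual environment. The paths assume a Windows venv layout."""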
    venv_python = MODEL_FOLDER + r"\.venv\Scripts\python.exe"
    script_path = MODEL_FOLDER + r"\run.py"

    args = [image_path, "--output-dir", output_path]
    command = [venv_python, script_path] + args

    try:
        # Run the subprocess
        result = subprocess.run(command, capture_output=True, text=True)

        # Print output and errors
        print("STDOUT:\n", result.stdout)
        print("STDERR:\n", result.stderr)
        print("Return Code:", result.returncode)

    except Exception as e:
        print(f"Error occurred: {e}")


def main():
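    """Run the full pipeline: refine the prompt, generate an image, then a 3D model."""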
    parser = argparse.ArgumentParser(description="Text to 3D model pipeline")
    parser.add_argument("--prompt", type=str, required=True, help="User text prompt")
    args = parser.parse_args()

    user_prompt = args.prompt
    print(f"User prompt: {user_prompt}")
    refined_prompt = refine_text_prompt(user_prompt)
    print(f"Refined prompt: {refined_prompt}")

    timestamp = get_timestamp()
    pipeline_folder = Path(PIPELINE_FOLDER)

    image_path = pipeline_folder / "images" / f"{timestamp}.jpg"
    image_path.parent.mkdir(parents=True, exist_ok=True)  # make sure images/ exists
    text_to_image(refined_prompt, image_path)
    print(f"Generated image file: {image_path}")

    model_path = pipeline_folder / "models" / timestamp
    model_path.mkdir(parents=True, exist_ok=True)  # make sure the model output folder exists
    image_to_3d(image_path, model_path)
    print(f"Generated 3D model output: {model_path}")


if __name__ == "__main__":
    main()