forked from cgvr/DeltaVR

19 Commits

| SHA1 | Message | Date |
|------------|----------------------------------------------------|----------------------------|
| 13c1e8a0f6 | Changes in sound attributions | 2025-12-27 12:17:15 +00:00 |
| 24543cce38 | Upload files to "Doc/designs" | 2025-12-16 13:59:03 +00:00 |
| 9af96fed99 | Upload files to "Doc/clips" | 2025-12-16 13:45:02 +00:00 |
| ac87f2f8ef | Update README.md (Just a small reference change) | 2025-12-07 22:21:10 +00:00 |
| 856ff3ca40 | Update README.md (Adding myself into credits.) | 2025-12-07 17:10:52 +00:00 |
| 693b3a572e | Upload files to "Doc/designs" | 2025-12-02 13:37:36 +00:00 |
| 8977957054 | Upload files to "Doc/clips" | 2025-12-02 13:36:51 +00:00 |
| b563be1158 | Upload files to "Doc/clips" | 2025-11-18 14:08:38 +00:00 |
| 616532e69c | Upload files to "Doc/clips" | 2025-11-18 14:03:31 +00:00 |
| 450efe675a | Upload files to "Doc/clips" | 2025-11-18 14:00:18 +00:00 |
| cce7492556 | Upload files to "Doc/designs" | 2025-11-17 19:20:02 +00:00 |
| e197206d0a | Upload files to "Doc/designs" | 2025-11-17 19:07:21 +00:00 |
| dc7aa3b9b9 | Upload files to "Doc/designs" | 2025-11-17 18:03:37 +00:00 |
| 54d44afcec | Upload files to "Doc/designs" | 2025-11-04 17:32:58 +00:00 |
| 15c2e62e92 | Upload files to "Doc/clips" | 2025-11-04 17:28:46 +00:00 |
| c4fafd1dd3 | Delete Doc/clips/Hand-Collider-Prototype-Clip.gif | 2025-11-04 17:26:11 +00:00 |
| 1c03f1773b | Upload files to "Doc/clips" | 2025-11-04 17:25:57 +00:00 |
| ef3bc5da39 | Upload files to "Doc/designs" | 2025-10-21 14:03:55 +00:00 |
| ed66253b06 | Upload files to "Doc/clips" | 2025-10-21 13:59:00 +00:00 |
62 changed files with 111 additions and 1655 deletions

View File

@@ -1,5 +0,0 @@
3D_GENERATION_URL=
MODEL_FOLDER=
CLOUDFLARE_ACCOUNT_ID=
CLOUDFLARE_API_TOKEN=

View File

@@ -1,7 +0,0 @@
.venv
.env
__pycache__
images/
models/
logs/
notebooks/test_resources/

View File

@@ -1,6 +0,0 @@
### TODO
* Article on text-to-3D prompt engineering: "Sel3DCraft: Interactive Visual Prompts for User-Friendly Text-to-3D Generation"
* TRELLIS: postprocessing_utils: texture baking mode 'opt' vs 'fast': currently hardcoded to 'opt'; enabling 'fast' raises an error
### Notes
* TRELLIS: added the ability to pass the texture-baking optimisation total steps as an argument (`texture_opt_total_steps`), replacing the hardcoded 2500. This change is not tracked in Git, because it modifies this external build: https://github.com/IgorAherne/trellis-stable-projectorz/releases/tag/latest
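
A minimal sketch of how the added argument is assumed to be passed to the generation service. The parameter names match the pipeline's own `generate_no_preview` call; the input image and step count below are illustrative, not authoritative:

```python
import base64
import os

import requests

# Sketch only: the endpoint shape follows the pipeline's generate_no_preview call.
API_URL = os.environ["3D_GENERATION_URL"]

with open("input.jpg", "rb") as f:  # illustrative input image
    image_base64 = base64.b64encode(f.read()).decode("utf-8")

params = {
    "image_base64": image_base64,      # base64 image, without a 'data:image/...' prefix
    "texture_baking_mode": "opt",      # 'fast' currently raises an error (see TODO above)
    "texture_opt_total_steps": 1000,   # replaces the hardcoded 2500
    "output_format": "glb",
}
requests.post(f"{API_URL}/generate_no_preview", data=params).raise_for_status()
```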

View File

@@ -1,61 +0,0 @@
import base64
import os

import requests
from dotenv import load_dotenv

load_dotenv()

ACCOUNT_ID = os.environ["CLOUDFLARE_ACCOUNT_ID"]
API_TOKEN = os.environ["CLOUDFLARE_API_TOKEN"]


def text_to_image_cloudflare(prompt, output_path):
    # Text-to-image via Cloudflare Workers AI (FLUX.1 schnell); saves the
    # decoded image to output_path.
    MODEL = "@cf/black-forest-labs/flux-1-schnell"
    URL = f"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/{MODEL}"
    payload = {
        "prompt": prompt,
    }
    headers = {
        "Authorization": f"Bearer {API_TOKEN}",
        "Content-Type": "application/json",
    }
    resp = requests.post(URL, json=payload, headers=headers, timeout=60)
    resp.raise_for_status()
    data = resp.json()
    # Use .get() so an unexpected response shape raises the RuntimeError below
    # instead of a bare KeyError.
    b64 = data.get("result", {}).get("image")
    if not b64:
        raise RuntimeError(f"Unexpected response structure: {data}")
    img_bytes = base64.b64decode(b64)
    with open(output_path, "wb") as f:
        f.write(img_bytes)
    return True


def refine_text_prompt(prompt):
    # Condense a free-form spoken request into a short object description.
    MODEL = "@cf/meta/llama-3.2-3b-instruct"
    URL = f"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/{MODEL}"
    instructions = """
The user is talking about some object. Your task is to generate a short and concise description of it. Use only the user's own words; keep it as short as possible.
Example:
User: 'Umm, okay, I would like a really cool sword, with for example a bright orange crossguard. And also it should be slightly curved.'
You: 'a slightly curved sword with bright orange crossguard'
"""
    response = requests.post(
        URL,
        headers={"Authorization": f"Bearer {API_TOKEN}"},
        json={
            "messages": [
                {"role": "system", "content": instructions},
                {"role": "user", "content": prompt},
            ]
        },
    )
    response.raise_for_status()
    data = response.json()
    return data["result"]["response"]
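

# --- Usage sketch (added for illustration; not part of the original module) ---
# Refine a spoken request into a compact prompt, then render it. The example
# prompt and output path below are illustrative only.
if __name__ == "__main__":
    spoken = "Umm, I'd like a small wooden shield with an iron rim."
    short_prompt = refine_text_prompt(spoken)
    print("Refined prompt:", short_prompt)
    text_to_image_cloudflare(short_prompt, "test_resources/shield.jpg")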

View File

@@ -1,86 +0,0 @@
import requests
from urllib.parse import urljoin

from invokeai_mcp_server import create_text2img_graph, enqueue_graph, wait_for_completion, get_image_url

INVOKEAI_BASE_URL = "http://127.0.0.1:9090"


async def generate_image(arguments: dict):
    # Extract parameters
    prompt = arguments["prompt"]
    negative_prompt = arguments.get("negative_prompt", "")
    width = arguments.get("width", 512)
    height = arguments.get("height", 512)
    steps = arguments.get("steps", 30)
    cfg_scale = arguments.get("cfg_scale", 7.5)
    scheduler = arguments.get("scheduler", "euler")
    seed = arguments.get("seed")
    model_key = arguments.get("model_key")
    lora_key = arguments.get("lora_key")
    lora_weight = arguments.get("lora_weight", 1.0)
    vae_key = arguments.get("vae_key")

    print(f"Generating image with prompt: {prompt[:50]}...")

    # Create graph
    graph = await create_text2img_graph(
        prompt=prompt,
        negative_prompt=negative_prompt,
        model_key=model_key,
        lora_key=lora_key,
        lora_weight=lora_weight,
        vae_key=vae_key,
        width=width,
        height=height,
        steps=steps,
        cfg_scale=cfg_scale,
        scheduler=scheduler,
        seed=seed
    )

    # Enqueue and wait for completion
    result = await enqueue_graph(graph)
    batch_id = result["batch"]["batch_id"]
    print(f"Enqueued batch {batch_id}, waiting for completion...")
    completed = await wait_for_completion(batch_id)

    # Extract image name from result
    if "result" in completed and "outputs" in completed["result"]:
        outputs = completed["result"]["outputs"]
        # Find the image output
        for node_id, output in outputs.items():
            if output.get("type") == "image_output":
                image_name = output["image"]["image_name"]
                image_url = await get_image_url(image_name)
                return urljoin(INVOKEAI_BASE_URL, image_url)
    raise RuntimeError("Failed to generate image!")


def download_file(url, filepath):
    response = requests.get(url)
    if response.status_code == 200:
        with open(filepath, "wb") as file:
            file.write(response.content)
    else:
        raise RuntimeError(f"Failed to download image. Status code: {response.status_code}")


async def text_to_image_invoke_ai(prompt, output_path):
    # See available model keys via GET http://127.0.0.1:9090/api/v2/models/?model_type=main
    args = {
        "prompt": prompt,
        "width": 512,
        "height": 512,
        "model_key": "79401292-0a6b-428d-b7d7-f1e86caeba2b"  # Juggernaut XL v9
        # "model_key": "735f6485-6703-498f-929e-07cf0bbbd179"  # Dreamshaper 8
    }
    image_url = await generate_image(args)
    print("got image url: ", image_url)
    download_file(image_url, output_path)
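

# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes a local InvokeAI server is running at INVOKEAI_BASE_URL; the prompt
# is taken from the repo's notebook and the output path is illustrative.
if __name__ == "__main__":
    import asyncio
    asyncio.run(text_to_image_invoke_ai("a golden katana with a fancy pommel", "images/katana.jpg"))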

View File

@@ -1,97 +0,0 @@
import subprocess
import os
import time
import base64

import requests
from dotenv import load_dotenv

load_dotenv()

MODEL_FOLDER = os.environ["MODEL_FOLDER"]
API_URL = os.environ["3D_GENERATION_URL"]


def image_to_3d_subprocess(image_path, output_path):
    # Run the model's own venv Python on its run.py script.
    venv_python = MODEL_FOLDER + r"\.venv\Scripts\python.exe"
    script_path = MODEL_FOLDER + r"\run.py"
    args = [image_path, "--output-dir", output_path]
    command = [venv_python, script_path] + args
    try:
        # Run the subprocess
        result = subprocess.run(command, capture_output=True, text=True)
        # Print output and errors
        print("STDOUT:\n", result.stdout)
        print("STDERR:\n", result.stderr)
        print("Return Code:", result.returncode)
    except Exception as e:
        print(f"Error occurred: {e}")


def generate_no_preview(image_base64: str):
    """Generate a 3D model from a single base64-encoded image without previews.

    Args:
        image_base64: Base64 string of the image (without 'data:image/...' prefix)
    """
    try:
        # Set generation parameters
        params = {
            'image_base64': image_base64,
            'seed': 42,
            'ss_guidance_strength': 7.5,
            'ss_sampling_steps': 10,
            'slat_guidance_strength': 7.5,
            'slat_sampling_steps': 10,
            'mesh_simplify_ratio': 0.99,
            'texture_size': 1024,
            # 'texture_baking_mode': 'opt',
            'texture_opt_total_steps': 1000,
            'output_format': 'glb'
        }
        # Start generation
        print("Starting generation...")
        response = requests.post(f"{API_URL}/generate_no_preview", data=params)
        response.raise_for_status()
        # Poll status until complete
        while True:
            status = requests.get(f"{API_URL}/status").json()
            print(f"Progress: {status['progress']}%")
            if status['status'] == 'COMPLETE':
                break
            elif status['status'] == 'FAILED':
                raise Exception(f"Generation failed: {status['message']}")
            time.sleep(1)
        # Download the model
        print("Downloading model...")
        response = requests.get(f"{API_URL}/download/model")
        response.raise_for_status()
        return response.content
    except Exception as e:
        print(f"Error: {str(e)}")
        return None


def image_to_3d_api(image_path, output_path):
    with open(image_path, 'rb') as image_file:
        image_data = image_file.read()
    base64_encoded = base64.b64encode(image_data).decode('utf-8')
    model_binary = generate_no_preview(base64_encoded)
    # generate_no_preview returns None on failure; fail loudly instead of
    # writing an empty file.
    if model_binary is None:
        raise RuntimeError("3D model generation failed; nothing to write.")
    output_file = f"{output_path}.glb"
    with open(output_file, 'wb') as f:
        f.write(model_binary)
    return output_file
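

# --- Usage sketch (added for illustration; not part of the original module) ---
# Paths are illustrative; image_to_3d_api appends ".glb" to the output path itself.
if __name__ == "__main__":
    glb_file = image_to_3d_api("test_resources/style_test_3.jpg", "models/style_test_3")
    print("Saved:", glb_file)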

View File

@@ -1,153 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "1dc6faae",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import base64\n",
"import requests\n",
"from dotenv import load_dotenv"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b3107275",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv()\n",
"\n",
"ACCOUNT_ID = os.environ[\"CLOUDFLARE_ACCOUNT_ID\"]\n",
"API_TOKEN = os.environ[\"CLOUDFLARE_API_TOKEN\"]"
]
},
{
"cell_type": "markdown",
"id": "999adf95",
"metadata": {},
"source": [
"## Text to image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "40b35163",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Saved: test_resources/resolution_test_1.jpg (315728 bytes)\n"
]
}
],
"source": [
"# https://developers.cloudflare.com/workers-ai/models/flux-1-schnell/\n",
"\n",
"MODEL = \"@cf/black-forest-labs/flux-1-schnell\"\n",
"URL = f\"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/{MODEL}\"\n",
"\n",
"payload = {\n",
" \"prompt\": \"cyborg crocodile, realistic style, single object, front and side fully visible, plain neutral background, clear details, soft studio lighting, true-to-scale\",\n",
"}\n",
"\n",
"headers = {\n",
" \"Authorization\": f\"Bearer {API_TOKEN}\",\n",
" \"Content-Type\": \"application/json\",\n",
"}\n",
"\n",
"resp = requests.post(URL, json=payload, headers=headers, timeout=60)\n",
"resp.raise_for_status()\n",
"\n",
"data = resp.json()\n",
"b64 = data[\"result\"][\"image\"]\n",
"if not b64:\n",
" raise RuntimeError(f\"Unexpected response structure: {data}\")\n",
"\n",
"img_bytes = base64.b64decode(b64)\n",
"\n",
"out_path = \"test_resources/resolution_test_1.jpg\"\n",
"with open(out_path, \"wb\") as f:\n",
" f.write(img_bytes)\n",
"\n",
"print(f\"Saved: {out_path} ({len(img_bytes)} bytes)\")"
]
},
{
"cell_type": "markdown",
"id": "14a874c4",
"metadata": {},
"source": [
"## Text prompt refinement"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "485f6f46",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\"dark wooden battleaxe with bronze blade\"\n"
]
}
],
"source": [
"MODEL = \"@cf/meta/llama-3.2-3b-instruct\"\n",
"URL = f\"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/{MODEL}\"\n",
"\n",
"instructions = \"\"\"\n",
"User is talking about some object. Your task is to generate a short and concise description of it. Use only user's own words, keep it as short as possible.\n",
"Example:\n",
"User: 'Umm, okay, I would like a really cool sword, with for example a bright orange crossguard. And also it should be slightly curved.'\n",
"You: 'a slightly curved sword with bright orange crossguard'\n",
"\"\"\"\n",
"prompt = \"Umm, alright, can you please give me an epic battleaxe? It should have a dark wooden shaft and bronze blade.\"\n",
"\n",
"response = requests.post(URL,\n",
" headers={\"Authorization\": f\"Bearer {API_TOKEN}\"},\n",
" json={\n",
" \"messages\": [\n",
" {\"role\": \"system\", \"content\": instructions},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" }\n",
")\n",
"data = response.json()\n",
"result_text = data[\"result\"][\"response\"]\n",
"print(result_text)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,122 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "50e24baa",
"metadata": {},
"outputs": [],
"source": [
"from invokeai_mcp_server import create_text2img_graph, enqueue_graph, wait_for_completion, get_image_url\n",
"from urllib.parse import urljoin\n",
"\n",
"import asyncio"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0407cd9a",
"metadata": {},
"outputs": [],
"source": [
"INVOKEAI_BASE_URL = \"http://127.0.0.1:9090\"\n",
"\n",
"\n",
"async def generate_image(arguments: dict):\n",
"\n",
" # Extract parameters\n",
" prompt = arguments[\"prompt\"]\n",
" negative_prompt = arguments.get(\"negative_prompt\", \"\")\n",
" width = arguments.get(\"width\", 512)\n",
" height = arguments.get(\"height\", 512)\n",
" steps = arguments.get(\"steps\", 30)\n",
" cfg_scale = arguments.get(\"cfg_scale\", 7.5)\n",
" scheduler = arguments.get(\"scheduler\", \"euler\")\n",
" seed = arguments.get(\"seed\")\n",
" model_key = arguments.get(\"model_key\")\n",
" lora_key = arguments.get(\"lora_key\")\n",
" lora_weight = arguments.get(\"lora_weight\", 1.0)\n",
" vae_key = arguments.get(\"vae_key\")\n",
"\n",
" #logger.info(f\"Generating image with prompt: {prompt[:50]}...\")\n",
"\n",
" # Create graph\n",
" graph = await create_text2img_graph(\n",
" prompt=prompt,\n",
" negative_prompt=negative_prompt,\n",
" model_key=model_key,\n",
" lora_key=lora_key,\n",
" lora_weight=lora_weight,\n",
" vae_key=vae_key,\n",
" width=width,\n",
" height=height,\n",
" steps=steps,\n",
" cfg_scale=cfg_scale,\n",
" scheduler=scheduler,\n",
" seed=seed\n",
" )\n",
"\n",
" # Enqueue and wait for completion\n",
" result = await enqueue_graph(graph)\n",
" batch_id = result[\"batch\"][\"batch_id\"]\n",
"\n",
" #logger.info(f\"Enqueued batch {batch_id}, waiting for completion...\")\n",
"\n",
" completed = await wait_for_completion(batch_id)\n",
"\n",
" # Extract image name from result\n",
" if \"result\" in completed and \"outputs\" in completed[\"result\"]:\n",
" outputs = completed[\"result\"][\"outputs\"]\n",
" # Find the image output\n",
" for node_id, output in outputs.items():\n",
" if output.get(\"type\") == \"image_output\":\n",
" image_name = output[\"image\"][\"image_name\"]\n",
" image_url = await get_image_url(image_name)\n",
"\n",
" text=f\"Image generated successfully!\\n\\nImage Name: {image_name}\\nImage URL: {image_url}\\n\\nYou can view the image at: {urljoin(INVOKEAI_BASE_URL, f'/api/v1/images/i/{image_name}/full')}\"\n",
" print(text)\n",
"\n",
" # Fallback if we couldn't find image output\n",
" #text=f\"Image generation completed but output format was unexpected. Batch ID: {batch_id}\\n\\nResult: {json.dumps(completed, indent=2)}\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6cf9d879",
"metadata": {},
"outputs": [],
"source": [
"async def main():\n",
" args = {\n",
" \"prompt\": \"a golden katana with a fancy pommel\"\n",
" }\n",
" await generate_image(args)\n",
"\n",
"asyncio.run(main())"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,152 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "d55eb3ce",
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"import base64\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "77b23cd8",
"metadata": {},
"outputs": [],
"source": [
"# API endpoint\n",
"BASE_URL = \"http://127.0.0.1:7960\"\n",
"\n",
"def generate_no_preview(image_base64: str):\n",
" \"\"\"Generate 3D model from a single base64-encoded image without previews.\n",
" \n",
" Args:\n",
" image_base64: Base64 string of the image (without 'data:image/...' prefix)\n",
" \"\"\"\n",
" try:\n",
" # Set generation parameters\n",
" params = {\n",
" 'image_base64': image_base64,\n",
" 'seed': 42,\n",
" 'ss_guidance_strength': 7.5,\n",
" 'ss_sampling_steps': 30,\n",
" 'slat_guidance_strength': 7.5,\n",
" 'slat_sampling_steps': 30,\n",
" 'mesh_simplify_ratio': 0.95,\n",
" 'texture_size': 1024,\n",
" 'output_format': 'glb'\n",
" }\n",
" \n",
" # Start generation\n",
" print(\"Starting generation...\")\n",
" response = requests.post(f\"{BASE_URL}/generate_no_preview\", data=params)\n",
" print(\"Response status:\", response.status_code)\n",
" response.raise_for_status()\n",
" \n",
" # Poll status until complete\n",
" while True:\n",
" status = requests.get(f\"{BASE_URL}/status\").json()\n",
" print(f\"Progress: {status['progress']}%\")\n",
" \n",
" if status['status'] == 'COMPLETE':\n",
" break\n",
" elif status['status'] == 'FAILED':\n",
" raise Exception(f\"Generation failed: {status['message']}\")\n",
" \n",
" time.sleep(1)\n",
" \n",
" # Download the model\n",
" print(\"Downloading model...\")\n",
" response = requests.get(f\"{BASE_URL}/download/model\")\n",
" response.raise_for_status()\n",
" print(\"Model downloaded.\")\n",
" \n",
" return response.content\n",
" \n",
" except Exception as e:\n",
" print(f\"Error: {str(e)}\")\n",
" return None"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "eb122295",
"metadata": {},
"outputs": [],
"source": [
"def generate_model(image_path, output_path):\n",
" with open(image_path, 'rb') as image_file:\n",
" image_data = image_file.read()\n",
"\n",
" base64_encoded = base64.b64encode(image_data).decode('utf-8')\n",
" model = generate_no_preview(base64_encoded)\n",
" \n",
" with open(output_path, 'wb') as f:\n",
" f.write(model)\n",
" print(f\"Model saved to {output_path}\")\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "2ce7dfdf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Starting generation...\n",
"Response status: 200\n",
"Progress: 100%\n",
"Downloading model...\n",
"Model downloaded.\n",
"Model saved to test_resources/style_test_3_model.glb\n"
]
}
],
"source": [
"\n",
"image_path = 'test_resources/style_test_3.jpg'\n",
"output_path = \"test_resources/style_test_3_model.glb\"\n",
"\n",
"generate_model(image_path, output_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1224d13",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,165 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "4826c91d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'2025-10-18-16-35-47'"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from datetime import datetime\n",
"\n",
"datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "9419e692",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"STDOUT:\n",
" Device used: cuda\n",
"After Remesh 9998 19996\n",
"\n",
"STDERR:\n",
" D:\\users\\henrisel\\stable-fast-3d\\.venv\\lib\\site-packages\\timm\\models\\layers\\__init__.py:48: FutureWarning: Importing from timm.models.layers is deprecated, please import via timm.layers\n",
" warnings.warn(f\"Importing from {__name__} is deprecated, please import via timm.layers\", FutureWarning)\n",
"\n",
" 0%| | 0/1 [00:00<?, ?it/s]\n",
" 0%| | 0/1 [00:00<?, ?it/s]\n",
"Traceback (most recent call last):\n",
" File \"D:\\users\\henrisel\\stable-fast-3d\\run.py\", line 122, in <module>\n",
" mesh, glob_dict = model.run_image(\n",
" File \"D:\\users\\henrisel\\stable-fast-3d\\sf3d\\system.py\", line 286, in run_image\n",
" meshes, global_dict = self.generate_mesh(\n",
" File \"D:\\users\\henrisel\\stable-fast-3d\\sf3d\\system.py\", line 369, in generate_mesh\n",
" rast = self.baker.rasterize(\n",
" File \"D:\\users\\henrisel\\stable-fast-3d\\.venv\\lib\\site-packages\\texture_baker\\baker.py\", line 28, in rasterize\n",
" return torch.ops.texture_baker_cpp.rasterize(\n",
" File \"D:\\users\\henrisel\\stable-fast-3d\\.venv\\lib\\site-packages\\torch\\_ops.py\", line 1243, in __call__\n",
" return self._op(*args, **kwargs)\n",
"NotImplementedError: Could not run 'texture_baker_cpp::rasterize' with arguments from the 'CUDA' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build process (if using custom build). If you are a Facebook employee using PyTorch on mobile, please visit https://fburl.com/ptmfixes for possible resolutions. 'texture_baker_cpp::rasterize' is only available for these backends: [CPU, Meta, BackendSelect, Python, FuncTorchDynamicLayerBackMode, Functionalize, Named, Conjugate, Negative, ZeroTensor, ADInplaceOrView, AutogradOther, AutogradCPU, AutogradCUDA, AutogradXLA, AutogradMPS, AutogradXPU, AutogradHPU, AutogradLazy, AutogradMTIA, AutogradMAIA, AutogradMeta, Tracer, AutocastCPU, AutocastMTIA, AutocastMAIA, AutocastXPU, AutocastMPS, AutocastCUDA, FuncTorchBatched, BatchedNestedTensor, FuncTorchVmapMode, Batched, VmapMode, FuncTorchGradWrapper, PythonTLSSnapshot, FuncTorchDynamicLayerFrontMode, PreDispatch, PythonDispatcher].\n",
"\n",
"CPU: registered at texture_baker\\csrc\\baker.cpp:543 [kernel]\n",
"Meta: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\MetaFallbackKernel.cpp:23 [backend fallback]\n",
"BackendSelect: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\BackendSelectFallbackKernel.cpp:3 [backend fallback]\n",
"Python: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\PythonFallbackKernel.cpp:194 [backend fallback]\n",
"FuncTorchDynamicLayerBackMode: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\functorch\\DynamicLayer.cpp:479 [backend fallback]\n",
"Functionalize: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\FunctionalizeFallbackKernel.cpp:375 [backend fallback]\n",
"Named: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\NamedRegistrations.cpp:7 [backend fallback]\n",
"Conjugate: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\ConjugateFallback.cpp:17 [backend fallback]\n",
"Negative: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\native\\NegateFallback.cpp:18 [backend fallback]\n",
"ZeroTensor: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\ZeroTensorFallback.cpp:86 [backend fallback]\n",
"ADInplaceOrView: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:104 [backend fallback]\n",
"AutogradOther: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:63 [backend fallback]\n",
"AutogradCPU: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:67 [backend fallback]\n",
"AutogradCUDA: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:75 [backend fallback]\n",
"AutogradXLA: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:87 [backend fallback]\n",
"AutogradMPS: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:95 [backend fallback]\n",
"AutogradXPU: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:71 [backend fallback]\n",
"AutogradHPU: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:108 [backend fallback]\n",
"AutogradLazy: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:91 [backend fallback]\n",
"AutogradMTIA: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:79 [backend fallback]\n",
"AutogradMAIA: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:83 [backend fallback]\n",
"AutogradMeta: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\VariableFallbackKernel.cpp:99 [backend fallback]\n",
"Tracer: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\torch\\csrc\\autograd\\TraceTypeManual.cpp:294 [backend fallback]\n",
"AutocastCPU: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\autocast_mode.cpp:322 [backend fallback]\n",
"AutocastMTIA: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\autocast_mode.cpp:466 [backend fallback]\n",
"AutocastMAIA: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\autocast_mode.cpp:504 [backend fallback]\n",
"AutocastXPU: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\autocast_mode.cpp:542 [backend fallback]\n",
"AutocastMPS: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\autocast_mode.cpp:209 [backend fallback]\n",
"AutocastCUDA: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\autocast_mode.cpp:165 [backend fallback]\n",
"FuncTorchBatched: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\functorch\\LegacyBatchingRegistrations.cpp:731 [backend fallback]\n",
"BatchedNestedTensor: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\functorch\\LegacyBatchingRegistrations.cpp:758 [backend fallback]\n",
"FuncTorchVmapMode: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\functorch\\VmapModeRegistrations.cpp:27 [backend fallback]\n",
"Batched: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\LegacyBatchingRegistrations.cpp:1075 [backend fallback]\n",
"VmapMode: fallthrough registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\VmapModeRegistrations.cpp:33 [backend fallback]\n",
"FuncTorchGradWrapper: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\functorch\\TensorWrapper.cpp:210 [backend fallback]\n",
"PythonTLSSnapshot: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\PythonFallbackKernel.cpp:202 [backend fallback]\n",
"FuncTorchDynamicLayerFrontMode: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\functorch\\DynamicLayer.cpp:475 [backend fallback]\n",
"PreDispatch: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\PythonFallbackKernel.cpp:206 [backend fallback]\n",
"PythonDispatcher: registered at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\core\\PythonFallbackKernel.cpp:198 [backend fallback]\n",
"\n",
"\n",
"Return Code: 1\n"
]
}
],
"source": [
"import subprocess\n",
"\n",
"MODEL_FOLDER = r\"D:\\users\\henrisel\\stable-fast-3d\"\n",
"PROJECT_FOLDER = r\"D:\\users\\henrisel\\DeltaVR3DModelGeneration\\3d-generation-pipeline\"\n",
"\n",
"# Path to the Python interpreter in the other virtual environment\n",
"venv_python = MODEL_FOLDER + r\"\\.venv\\Scripts\\python.exe\"\n",
"\n",
"# Path to the .py file you want to run\n",
"script_path = MODEL_FOLDER + r\"\\run.py\"\n",
"\n",
"# Optional: arguments to pass to the script\n",
"args = [MODEL_FOLDER + r\"\\demo_files\\examples\\chair1.png\", \"--output-dir\", PROJECT_FOLDER + r\"\\images\"]\n",
"\n",
"# Build the command\n",
"command = [venv_python, script_path] + args\n",
"\n",
"try:\n",
" # Run the subprocess\n",
" result = subprocess.run(command, capture_output=True, text=True)\n",
"\n",
" # Print output and errors\n",
" print(\"STDOUT:\\n\", result.stdout)\n",
" print(\"STDERR:\\n\", result.stderr)\n",
" print(\"Return Code:\", result.returncode)\n",
"\n",
"except Exception as e:\n",
" print(f\"Error occurred: {e}\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee480ba6",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,7 +0,0 @@
#torch==2.8.0+cu129 https://pytorch.org/get-started/previous-versions/
transformers==4.57.0
git+https://github.com/huggingface/diffusers.git
accelerate==1.10.1
huggingface_hub[hf_xet]==1.1.10
sentencepiece==0.2.1
protobuf==6.32.1

View File

@@ -1,62 +0,0 @@
import argparse
import asyncio
import logging
import time
from pathlib import Path
from datetime import datetime

from dotenv import load_dotenv

from generate_image_local import text_to_image_invoke_ai
from generate_model_local import image_to_3d_api

load_dotenv()


def get_timestamp():
    return datetime.now().strftime("%Y-%m-%d-%H-%M-%S")


def setup_logger(base_folder, timestamp):
    log_dir = base_folder / Path("logs")
    log_dir.mkdir(parents=True, exist_ok=True)
    logging.basicConfig(
        filename=log_dir / f"{timestamp}.log",
        level=logging.INFO,
        force=True
    )


async def main():
    parser = argparse.ArgumentParser(description="Text to 3D model pipeline")
    parser.add_argument("--prompt", type=str, required=True, help="User text prompt")
    args = parser.parse_args()
    input_prompt = args.prompt
    print(f"Input prompt: {input_prompt}")
    image_generation_prompt = input_prompt + ", realistic style, single object, front and side fully visible, plain neutral background, clear details, soft studio lighting, true-to-scale"

    pipeline_folder = Path(__file__).resolve().parent
    timestamp = get_timestamp()
    setup_logger(pipeline_folder, timestamp)

    # Stage 1: text -> image
    time_checkpoint = time.time()
    image_path = pipeline_folder / "images" / f"{timestamp}.jpg"
    # TODO: use Invoke AI or Cloudflare, depending on env var
    # text_to_image_cloudflare(image_generation_prompt, image_path)
    await text_to_image_invoke_ai(image_generation_prompt, image_path)
    image_generation_time = time.time() - time_checkpoint
    time_checkpoint = time.time()
    logging.info(f"Image generation time: {round(image_generation_time, 1)} s")
    print(f"Generated image file: {image_path}")

    # Stage 2: image -> 3D model
    model_path = pipeline_folder / "models" / timestamp
    model_file = image_to_3d_api(image_path, model_path)
    model_generation_time = time.time() - time_checkpoint
    logging.info(f"Model generation time: {round(model_generation_time, 1)} s")
    print(f"Generated 3D model file: {model_file}")


if __name__ == "__main__":
    asyncio.run(main())
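

# --- Sketch: one way to resolve the TODO above (added for illustration) ---
# Choose the image backend from an environment variable. IMAGE_BACKEND and the
# generate_image_cloudflare module name are assumptions, not names from this repo.
import os
from generate_image_cloudflare import text_to_image_cloudflare  # hypothetical module name


async def text_to_image(prompt, image_path):
    if os.environ.get("IMAGE_BACKEND", "invokeai") == "cloudflare":
        text_to_image_cloudflare(prompt, image_path)  # blocking Cloudflare helper
    else:
        await text_to_image_invoke_ai(prompt, image_path)  # default: local InvokeAI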

View File

@@ -1,8 +0,0 @@
fileFormatVersion: 2
guid: 23fe3883e9cc804429bc54fb860d18f1
folderAsset: yes
DefaultImporter:
  externalObjects: {}
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -1,7 +0,0 @@
fileFormatVersion: 2
guid: f6c028f06eda5904eae3f7a7418b8416
DefaultImporter:
  externalObjects: {}
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -7,7 +7,7 @@ TextureImporter:
  mipmaps:
    mipMapMode: 0
    enableMipMap: 1
    sRGBTexture: 0
    sRGBTexture: 1
    linearTexture: 0
    fadeOut: 0
    borderMipMap: 0
@@ -54,7 +54,7 @@ TextureImporter:
  alphaUsage: 1
  alphaIsTransparency: 0
  spriteTessellationDetail: -1
  textureType: 1
  textureType: 0
  textureShape: 1
  singleChannelComponent: 0
  flipbookRows: 1

View File

@@ -1,7 +1,6 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using _PROJECT.Scripts.Bow;
using FishNet.Object;
using FishNet.Object.Synchronizing;
@@ -37,8 +36,6 @@ public class ArcheryRange : NetworkBehaviour
    private float _nextTargetTime;
    private bool _roundActive;
    private string targetModelName;
    private readonly List<XROrigin> _presentPlayers = new();
    private XROrigin _scoredPlayer;
@@ -115,32 +112,27 @@ public class ArcheryRange : NetworkBehaviour
        }
    }

    async private void SpawnTarget()
    private void SpawnTarget()
    {
        if (!IsServer) return;
        var randomPos = targetStartPosition.position + new Vector3(
            Random.Range(minRandomOffset.x, maxRandomOffset.x),
            (float) Math.Round(Random.Range(minRandomOffset.y, maxRandomOffset.y)),
            Random.Range(minRandomOffset.z, maxRandomOffset.z)
        );
            (float)Math.Round(Random.Range(minRandomOffset.y, maxRandomOffset.y)),
            Random.Range(minRandomOffset.z, maxRandomOffset.z));
        var target = SpawnTarget(randomPos);
        var target = await SpawnTarget(randomPos);
        _targets.Add(target);
    }

    async private Task<ArcheryTarget> SpawnTarget(Vector3 randomPos)
    private ArcheryTarget SpawnTarget(Vector3 randomPos)
    {
        var targetObject = Instantiate(targetPrefab, randomPos, Quaternion.identity, null);
        GameObject targetReplacement = await PipelineManager.Instance.SpawnModel(targetModelName);
        // TODO: replace the target prefab's child with the generated model
        targetReplacement.transform.parent = targetObject.transform;
        targetReplacement.transform.position = targetObject.transform.position;
        ArcheryTarget target = targetObject.GetComponent<ArcheryTarget>();
        var prefab = Instantiate(targetPrefab, randomPos, Quaternion.identity, null);
        ArcheryTarget target = prefab.GetComponent<ArcheryTarget>();
        target.endPosition = targetEndPosition.position;
        target.addScore = AddScore;
        Spawn(targetObject);
        Spawn(prefab);
        return target;
    }
@@ -156,7 +148,7 @@ public class ArcheryRange : NetworkBehaviour
        _targets = new List<ArcheryTarget>();
        if (_maxScore < _score) _maxScore = _score;
        if (_presentPlayers.Count != 0) // If there are players in the area.
        if(_presentPlayers.Count != 0) // If there are players in the area.
        {
            // Gives the score to the player who has stayed in the area the longest. It would be better to give it to the player that fired the starting arrow, but I'm not spending 10 hours on this.
@@ -183,12 +175,9 @@ public class ArcheryRange : NetworkBehaviour
        SetTimeLeftText("");
    }

    async public void StartRound()
    public void StartRound()
    {
        if (!IsServer) return;
        targetModelName = await PipelineManager.Instance.GenerateModelAsync("unicorn with golden horn and long fluffy tail and butterfly wings");
        _roundEndTime = Time.time + roundLength;
        _nextTargetTime = Time.time;
        _roundActive = true;

View File

@@ -1,7 +1,10 @@
using System;
using _PROJECT.Scripts.Bow;
using _PROJECT.Scripts.Bow.Extra;
using FishNet.Object;
using FishNet.Object.Synchronizing;
using UnityEngine;
using Random = UnityEngine.Random;

public class ArcheryTarget : NetworkBehaviour, IArrowHittable
{
@@ -10,10 +13,12 @@ public class ArcheryTarget : NetworkBehaviour, IArrowHittable
    public Vector3 endPosition;
    public float forwardSpeed = 2f;
    public Action<float> addScore;
    private bool _flipDirection;

    private void Awake()
    {
        _flipDirection = Random.value > 0.5f;
    }

    // Update is called once per frame
@@ -23,12 +28,11 @@ public class ArcheryTarget : NetworkBehaviour, IArrowHittable
        float step = forwardSpeed * Time.deltaTime;
        var position = transform.position;
        if (Math.Abs(position.x - endPosition.x) < 0.1)
        {
            Destroy(gameObject);
        }
        if (Math.Abs(position.x - endPosition.x) < 0.1) Destroy(gameObject);
        transform.position = Vector3.MoveTowards(position, new Vector3(endPosition.x, position.y, position.z), step);
        transform.position = Vector3.MoveTowards(position,
            new Vector3(endPosition.x, position.y, position.z), step);
    }

    public void Hit(Arrow arrow)
View File

@@ -1,133 +0,0 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!21 &2100000
Material:
  serializedVersion: 8
  m_ObjectHideFlags: 0
  m_CorrespondingSourceObject: {fileID: 0}
  m_PrefabInstance: {fileID: 0}
  m_PrefabAsset: {fileID: 0}
  m_Name: Green
  m_Shader: {fileID: 4800000, guid: 933532a4fcc9baf4fa0491de14d08ed7, type: 3}
  m_Parent: {fileID: 0}
  m_ModifiedSerializedProperties: 0
  m_ValidKeywords: []
  m_InvalidKeywords: []
  m_LightmapFlags: 4
  m_EnableInstancingVariants: 0
  m_DoubleSidedGI: 0
  m_CustomRenderQueue: -1
  stringTagMap:
    RenderType: Opaque
  disabledShaderPasses: []
  m_LockedProperties:
  m_SavedProperties:
    serializedVersion: 3
    m_TexEnvs:
    - _BaseMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _BumpMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _DetailAlbedoMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _DetailMask:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _DetailNormalMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _EmissionMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _MainTex:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _MetallicGlossMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _OcclusionMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _ParallaxMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _SpecGlossMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - unity_Lightmaps:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - unity_LightmapsInd:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - unity_ShadowMasks:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    m_Ints: []
    m_Floats:
    - _AlphaClip: 0
    - _AlphaToMask: 0
    - _Blend: 0
    - _BlendModePreserveSpecular: 1
    - _BumpScale: 1
    - _ClearCoatMask: 0
    - _ClearCoatSmoothness: 0
    - _Cull: 2
    - _Cutoff: 0.5
    - _DetailAlbedoMapScale: 1
    - _DetailNormalMapScale: 1
    - _DstBlend: 0
    - _DstBlendAlpha: 0
    - _EnvironmentReflections: 1
    - _GlossMapScale: 0
    - _Glossiness: 0
    - _GlossyReflections: 0
    - _Metallic: 0
    - _OcclusionStrength: 1
    - _Parallax: 0.005
    - _QueueOffset: 0
    - _ReceiveShadows: 1
    - _Smoothness: 0.5
    - _SmoothnessTextureChannel: 0
    - _SpecularHighlights: 1
    - _SrcBlend: 1
    - _SrcBlendAlpha: 1
    - _Surface: 0
    - _WorkflowMode: 1
    - _ZWrite: 1
    m_Colors:
    - _BaseColor: {r: 0, g: 1, b: 0.0381248, a: 1}
    - _Color: {r: 0, g: 1, b: 0.0381248, a: 1}
    - _EmissionColor: {r: 0, g: 0, b: 0, a: 1}
    - _SpecColor: {r: 0.19999996, g: 0.19999996, b: 0.19999996, a: 1}
    m_BuildTextureStacks: []
--- !u!114 &6221994712197478572
MonoBehaviour:
  m_ObjectHideFlags: 11
  m_CorrespondingSourceObject: {fileID: 0}
  m_PrefabInstance: {fileID: 0}
  m_PrefabAsset: {fileID: 0}
  m_GameObject: {fileID: 0}
  m_Enabled: 1
  m_EditorHideFlags: 0
  m_Script: {fileID: 11500000, guid: d0353a89b1f911e48b9e16bdc9f2e058, type: 3}
  m_Name:
  m_EditorClassIdentifier:
  version: 7

View File

@@ -1,8 +0,0 @@
fileFormatVersion: 2
guid: 937c5f357ed270843bd43d1f7d5d475b
NativeFormatImporter:
  externalObjects: {}
  mainObjectFileID: 2100000
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -1,133 +0,0 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!114 &-7093071968994914494
MonoBehaviour:
  m_ObjectHideFlags: 11
  m_CorrespondingSourceObject: {fileID: 0}
  m_PrefabInstance: {fileID: 0}
  m_PrefabAsset: {fileID: 0}
  m_GameObject: {fileID: 0}
  m_Enabled: 1
  m_EditorHideFlags: 0
  m_Script: {fileID: 11500000, guid: d0353a89b1f911e48b9e16bdc9f2e058, type: 3}
  m_Name:
  m_EditorClassIdentifier:
  version: 7
--- !u!21 &2100000
Material:
  serializedVersion: 8
  m_ObjectHideFlags: 0
  m_CorrespondingSourceObject: {fileID: 0}
  m_PrefabInstance: {fileID: 0}
  m_PrefabAsset: {fileID: 0}
  m_Name: Red
  m_Shader: {fileID: 4800000, guid: 933532a4fcc9baf4fa0491de14d08ed7, type: 3}
  m_Parent: {fileID: 0}
  m_ModifiedSerializedProperties: 0
  m_ValidKeywords: []
  m_InvalidKeywords: []
  m_LightmapFlags: 4
  m_EnableInstancingVariants: 0
  m_DoubleSidedGI: 0
  m_CustomRenderQueue: -1
  stringTagMap:
    RenderType: Opaque
  disabledShaderPasses: []
  m_LockedProperties:
  m_SavedProperties:
    serializedVersion: 3
    m_TexEnvs:
    - _BaseMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _BumpMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _DetailAlbedoMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _DetailMask:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _DetailNormalMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _EmissionMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _MainTex:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _MetallicGlossMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _OcclusionMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _ParallaxMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - _SpecGlossMap:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - unity_Lightmaps:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - unity_LightmapsInd:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    - unity_ShadowMasks:
        m_Texture: {fileID: 0}
        m_Scale: {x: 1, y: 1}
        m_Offset: {x: 0, y: 0}
    m_Ints: []
    m_Floats:
    - _AlphaClip: 0
    - _AlphaToMask: 0
    - _Blend: 0
    - _BlendModePreserveSpecular: 1
    - _BumpScale: 1
    - _ClearCoatMask: 0
    - _ClearCoatSmoothness: 0
    - _Cull: 2
    - _Cutoff: 0.5
    - _DetailAlbedoMapScale: 1
    - _DetailNormalMapScale: 1
    - _DstBlend: 0
    - _DstBlendAlpha: 0
    - _EnvironmentReflections: 1
    - _GlossMapScale: 0
    - _Glossiness: 0
    - _GlossyReflections: 0
    - _Metallic: 0
    - _OcclusionStrength: 1
    - _Parallax: 0.005
    - _QueueOffset: 0
    - _ReceiveShadows: 1
    - _Smoothness: 0.5
    - _SmoothnessTextureChannel: 0
    - _SpecularHighlights: 1
    - _SrcBlend: 1
    - _SrcBlendAlpha: 1
    - _Surface: 0
    - _WorkflowMode: 1
    - _ZWrite: 1
    m_Colors:
    - _BaseColor: {r: 1, g: 0, b: 0, a: 1}
    - _Color: {r: 1, g: 0, b: 0, a: 1}
    - _EmissionColor: {r: 0, g: 0, b: 0, a: 1}
    - _SpecColor: {r: 0.19999996, g: 0.19999996, b: 0.19999996, a: 1}
    m_BuildTextureStacks: []

View File

@@ -1,8 +0,0 @@
fileFormatVersion: 2
guid: 707a698b0ec80454a8c68700bca72941
NativeFormatImporter:
  externalObjects: {}
  mainObjectFileID: 2100000
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -1,8 +0,0 @@
fileFormatVersion: 2
guid: 0ec3982ba49c4b84ea95332cb090e115
folderAsset: yes
DefaultImporter:
  externalObjects: {}
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -1,52 +0,0 @@
using UnityEngine;

public class ModelGenerationTestBox : MonoBehaviour
{
    public Material activeMaterial;
    public Material inactiveMaterial;
    public Transform modelSpawnPoint;
    public VoiceTranscriptionTestBox voiceTranscriptionTestBox;

    private MeshRenderer meshRenderer;

    // Start is called before the first frame update
    void Start()
    {
        meshRenderer = GetComponent<MeshRenderer>();
    }

    // Update is called once per frame
    void Update()
    {
    }

    // Entering the box triggers the full pipeline: take the latest transcription,
    // generate a model from it, and attach the result to the spawn point.
    async void OnTriggerEnter(Collider other)
    {
        KbmController controller = other.GetComponent<KbmController>();
        if (controller != null)
        {
            meshRenderer.material = activeMaterial;
            string inputPrompt = voiceTranscriptionTestBox.currentTextOutput;
            string modelPath = await PipelineManager.Instance.GenerateModelAsync(inputPrompt);
            //LoadModel("D:\\henrisel\\DeltaVR3DModelGeneration\\3d-generation-pipeline\\models\\2025-11-17-16-13-33\\mesh.glb");
            GameObject spawnedObject = await PipelineManager.Instance.SpawnModel(modelPath);
            spawnedObject.transform.parent = modelSpawnPoint;
            spawnedObject.transform.position = modelSpawnPoint.position;
        }
    }

    private void OnTriggerExit(Collider other)
    {
        KbmController controller = other.GetComponent<KbmController>();
        if (controller != null)
        {
            meshRenderer.material = inactiveMaterial;
        }
    }
}

View File

@@ -1,11 +0,0 @@
fileFormatVersion: 2
guid: 46e67223dce9b7a4783ed36b8ed65f19
MonoImporter:
  externalObjects: {}
  serializedVersion: 2
  defaultReferences: []
  executionOrder: 0
  icon: {instanceID: 0}
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -1,104 +0,0 @@
using GLTFast;
using System.Diagnostics;
using System.Threading.Tasks;
using UnityEngine;

public class PipelineManager : MonoBehaviour
{
    public static PipelineManager Instance { get; private set; }

    private void Awake()
    {
        Instance = this;
    }

    // Start is called before the first frame update
    void Start()
    {
    }

    // Update is called once per frame
    void Update()
    {
    }

    // Runs the Python text-to-3D pipeline in its own venv and returns the
    // path of the generated .glb, parsed from the script's stdout.
    public async Task<string> GenerateModelAsync(string inputPrompt)
    {
        return await Task.Run(() =>
        {
            // Path to your virtual environment's python.exe
            string pythonExe = @"D:\users\henrisel\DeltaVR3DModelGeneration\3d-generation-pipeline\.venv\Scripts\python.exe";
            // Path to your Python script
            string scriptPath = @"D:\users\henrisel\DeltaVR3DModelGeneration\3d-generation-pipeline\start_pipeline.py";
            // Arguments to pass to the script
            string arguments = $"{scriptPath} --prompt \"{inputPrompt}\"";

            ProcessStartInfo psi = new ProcessStartInfo
            {
                FileName = pythonExe,
                Arguments = arguments,
                UseShellExecute = false,
                RedirectStandardOutput = true,
                RedirectStandardError = true,
                CreateNoWindow = true
            };

            using (Process process = new Process())
            {
                process.StartInfo = psi;
                process.OutputDataReceived += (sender, e) => UnityEngine.Debug.Log(e.Data);
                process.ErrorDataReceived += (sender, e) => UnityEngine.Debug.LogError(e.Data);
                process.Start();
                string output = process.StandardOutput.ReadToEnd();
                string error = process.StandardError.ReadToEnd();
                process.WaitForExit();

                // Extract model path from output
                foreach (string line in output.Split('\n'))
                {
                    if (line.StartsWith("Generated 3D model file: "))
                    {
                        return line.Replace("Generated 3D model file: ", "").Trim();
                    }
                }
                throw new System.Exception("Failed to generate 3D model!");
            }
        });
    }

    // Loads a GLB from disk with glTFast, instantiates it, and adds a convex
    // mesh collider plus a rigidbody so the spawned model can be interacted with.
    public async Task<GameObject> SpawnModel(string modelPath)
    {
        var gltf = new GltfImport();
        bool loadSuccess = await gltf.Load(modelPath);
        if (loadSuccess)
        {
            GameObject spawnedObject = new GameObject("spawned model");
            bool spawnSuccess = await gltf.InstantiateMainSceneAsync(spawnedObject.transform);
            if (spawnSuccess)
            {
                Transform spawnedObjectMainTransform = spawnedObject.transform.GetChild(0).transform;
                GameObject spawnedObjectBody = spawnedObjectMainTransform.GetChild(0).transform.gameObject;
                MeshCollider collider = spawnedObjectBody.AddComponent<MeshCollider>();
                collider.convex = true;
                MeshRenderer renderer = spawnedObjectBody.GetComponent<MeshRenderer>();
                renderer.material.SetFloat("metallicFactor", 0);
                spawnedObjectMainTransform.gameObject.AddComponent<Rigidbody>();
                return spawnedObject;
            }
        }
        throw new System.Exception("Failed to spawn GameObject from model " + modelPath);
    }
}

View File

@@ -1,11 +0,0 @@
fileFormatVersion: 2
guid: 19e82e42c38cf2d4b912baa8d60c5407
MonoImporter:
  externalObjects: {}
  serializedVersion: 2
  defaultReferences: []
  executionOrder: 0
  icon: {instanceID: 0}
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -1,94 +0,0 @@
using System.Diagnostics;
using TMPro;
using UnityEngine;
using Whisper;
using Whisper.Utils;

public class VoiceTranscriptionTestBox : MonoBehaviour
{
    public Material activeMaterial;
    public Material inactiveMaterial;
    private MeshRenderer meshRenderer;

    public WhisperManager whisper;
    public MicrophoneRecord microphoneRecord;
    public TextMeshProUGUI outputText;

    private string _buffer;
    public string currentTextOutput;

    private void Awake()
    {
        whisper.OnNewSegment += OnNewSegment;
        whisper.OnProgress += OnProgressHandler;
        microphoneRecord.OnRecordStop += OnRecordStop;
    }

    // Start is called before the first frame update
    void Start()
    {
        meshRenderer = GetComponent<MeshRenderer>();
    }

    // Update is called once per frame
    void Update()
    {
    }

    // Record while the player stands inside the box; transcribe on exit.
    void OnTriggerEnter(Collider other)
    {
        KbmController controller = other.GetComponent<KbmController>();
        if (controller != null)
        {
            meshRenderer.material = activeMaterial;
            microphoneRecord.StartRecord();
        }
    }

    private void OnTriggerExit(Collider other)
    {
        KbmController controller = other.GetComponent<KbmController>();
        if (controller != null)
        {
            meshRenderer.material = inactiveMaterial;
            microphoneRecord.StopRecord();
        }
    }

    private async void OnRecordStop(AudioChunk recordedAudio)
    {
        _buffer = "";

        var sw = new Stopwatch();
        sw.Start();

        var res = await whisper.GetTextAsync(recordedAudio.Data, recordedAudio.Frequency, recordedAudio.Channels);
        if (res == null)
            return;

        var time = sw.ElapsedMilliseconds;
        var rate = recordedAudio.Length / (time * 0.001f);
        UnityEngine.Debug.Log($"Time: {time} ms\nRate: {rate:F1}x");

        var text = res.Result;
        currentTextOutput = text;
        outputText.text = text;
    }

    private void OnProgressHandler(int progress)
    {
        UnityEngine.Debug.Log($"Progress: {progress}%");
    }

    private void OnNewSegment(WhisperSegment segment)
    {
        _buffer += segment.Text;
        UnityEngine.Debug.Log(_buffer + "...");
    }
}

View File

@@ -1,11 +0,0 @@
fileFormatVersion: 2
guid: d28857190597d9a46a8ddf3cf902cc81
MonoImporter:
  externalObjects: {}
  serializedVersion: 2
  defaultReferences: []
  executionOrder: 0
  icon: {instanceID: 0}
  userData:
  assetBundleName:
  assetBundleVariant:

View File

@@ -1,57 +0,0 @@
using System.Diagnostics;
using UnityEngine;
using Whisper;
using Whisper.Utils;

public class VoiceTranscription : MonoBehaviour
{
    public WhisperManager whisper;
    public MicrophoneRecord microphoneRecord;

    private string _buffer;

    private void Awake()
    {
        whisper.OnNewSegment += OnNewSegment;
        microphoneRecord.OnRecordStop += OnRecordStop;
    }

    // Start is called before the first frame update
    void Start()
    {
    }

    // Update is called once per frame
    void Update()
    {
    }

    private async void OnRecordStop(AudioChunk recordedAudio)
    {
        _buffer = "";

        var sw = new Stopwatch();
        sw.Start();

        var res = await whisper.GetTextAsync(recordedAudio.Data, recordedAudio.Frequency, recordedAudio.Channels);
        if (res == null)
            return;

        var time = sw.ElapsedMilliseconds;
        var rate = recordedAudio.Length / (time * 0.001f);
        UnityEngine.Debug.Log($"Time: {time} ms\nRate: {rate:F1}x");

        var text = res.Result;
        UnityEngine.Debug.Log(text);
    }

    private void OnNewSegment(WhisperSegment segment)
    {
        _buffer += segment.Text;
        UnityEngine.Debug.Log(_buffer + "...");
    }
}

View File

@@ -1,11 +0,0 @@
fileFormatVersion: 2
guid: af83274dbfe8bab4599dda694e2545c2
MonoImporter:
  externalObjects: {}
  serializedVersion: 2
  defaultReferences: []
  executionOrder: 0
  icon: {instanceID: 0}
  userData:
  assetBundleName:
  assetBundleVariant:

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
Doc/clips/Door-Grabbing-Clip.gif LFS Normal file

Binary file not shown.

BIN
Doc/clips/Doors-Issue-Clip.gif LFS Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
Doc/clips/Ghost-Hand-Clip.gif LFS Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
Doc/designs/Old-VR-Doorknob.png LFS Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
Doc/designs/Real-Doorknob.jpg LFS Normal file

Binary file not shown.

BIN
Doc/designs/Two-Elevators.png LFS Normal file

Binary file not shown.

View File

@@ -4,7 +4,6 @@
"com.unity.2d.tilemap": "1.0.0",
"com.unity.ai.navigation": "1.1.1",
"com.unity.cinemachine": "2.9.5",
"com.unity.cloud.gltfast": "6.14.1",
"com.unity.collab-proxy": "2.0.1",
"com.unity.ext.nunit": "1.0.6",
"com.unity.feature.vr": "1.0.0",
@@ -28,7 +27,6 @@
"com.unity.xr.mock-hmd": "1.3.1-preview.1",
"com.unity.xr.oculus": "3.2.3",
"com.unity.xr.openxr": "1.7.0",
"com.whisper.unity": "https://github.com/Macoron/whisper.unity.git?path=/Packages/com.whisper.unity",
"com.unity.modules.ai": "1.0.0",
"com.unity.modules.androidjni": "1.0.0",
"com.unity.modules.animation": "1.0.0",

View File

@@ -31,12 +31,11 @@
"url": "https://packages.unity.com"
},
"com.unity.burst": {
"version": "1.8.24",
"version": "1.8.3",
"depth": 1,
"source": "registry",
"dependencies": {
"com.unity.mathematics": "1.2.1",
"com.unity.modules.jsonserialize": "1.0.0"
"com.unity.mathematics": "1.2.1"
},
"url": "https://packages.unity.com"
},
@@ -49,19 +48,6 @@
},
"url": "https://packages.unity.com"
},
"com.unity.cloud.gltfast": {
"version": "6.14.1",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.burst": "1.8.24",
"com.unity.collections": "1.2.4",
"com.unity.mathematics": "1.2.6",
"com.unity.modules.jsonserialize": "1.0.0",
"com.unity.modules.unitywebrequest": "1.0.0"
},
"url": "https://packages.unity.com"
},
"com.unity.collab-proxy": {
"version": "2.0.1",
"depth": 0,
@@ -69,16 +55,6 @@
"dependencies": {},
"url": "https://packages.unity.com"
},
"com.unity.collections": {
"version": "1.2.4",
"depth": 1,
"source": "registry",
"dependencies": {
"com.unity.burst": "1.6.6",
"com.unity.test-framework": "1.1.31"
},
"url": "https://packages.unity.com"
},
"com.unity.editorcoroutines": {
"version": "1.0.0",
"depth": 1,
@@ -388,13 +364,6 @@
},
"url": "https://packages.unity.com"
},
"com.whisper.unity": {
"version": "https://github.com/Macoron/whisper.unity.git?path=/Packages/com.whisper.unity",
"depth": 0,
"source": "git",
"dependencies": {},
"hash": "529a628a915a97799e89e061af9cb7c71407124d"
},
"com.unity.modules.ai": {
"version": "1.0.0",
"depth": 0,

View File

@@ -66,23 +66,17 @@ Multiplayer and cross-play functionality. [Bachelor's Thesis](https://comserv.cs
**Raimond Tunnel**<br/>
Project management, visual design.
**Timur Nizamov**<br/>
Technical sound design.
Developed in the [Computer Graphics and Virtual Reality Study Lab](https://cgvr.cs.ut.ee/) of the [Institute of Computer Science, University of Tartu](https://cs.ut.ee).
### Used Attributions
| Description | License | Source | Author |
|-----------------------------------------------------|----------------------------------------------|---------------------------------------------------------------------------------------------|------------------|
| Bold's car driving sound | Attribution NonCommercial 3.0 | [Link](https://freesound.org/people/Pfujimoto/sounds/14371/) | Pfujimoto |
| Bold's car braking sound | Attribution 3.0 | [Link](https://freesound.org/people/200154michaela/sounds/542448/) | 200154michaela |
| Bold's car horn sound | Attribution 4.0 | [Link](https://freesound.org/people/ceberation/sounds/235506/) | ceberation |
| Server rack model | Royalty Free, No AI License | [Link](https://www.cgtrader.com/free-3d-models/electronics/computer/simple-server-model) | anymelok |
| Server rack humming sound | Attribution 4.0 | [Link](https://freesound.org/people/jameswrowles/sounds/248217/) | jameswrowles |
| Fire suppression button press sound | Creative Commons 0 | [Link](https://freesound.org/people/LamaMakesMusic/sounds/403556/) | LamaMakesMusic |
| Fire suppression alarm sound | Attribution 3.0 | [Link](https://freesound.org/people/jobro/sounds/33737/) | jobro |
| Fire-suppressing gas release sound | Creative Commons 0 | [Link](https://freesound.org/people/mrmccormack/sounds/182359/) | mrmccormack |
| Coughing sound in response to fire-suppressing gas | Attribution 4.0 | [Link](https://freesound.org/people/qubodup/sounds/739416/) | qubodup |
| Robot movement sound | Creative Commons 0 | [Link](https://freesound.org/people/Brazilio123/sounds/661435/) | Brazilio123 |
| Portal humming sound | Attribution 4.0 | [Link](https://freesound.org/people/zimbot/sounds/122972/) | zimbot |
| Spacewalk UFO sound | Attribution NonCommercial 4.0 | [Link](https://freesound.org/people/Speedenza/sounds/209366/) | Speedenza |
| Keyboard icons | Creative Commons Attribution-NoDerivs 3.0 | [Link](https://icons8.com/) | icons8 |