# LLaMA-Mesh Gradio demo — chat-based 3D mesh generation with llama.cpp.
import os
import gradio as gr
import copy
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
from trimesh.exchange.gltf import export_glb
import trimesh
import numpy as np
import tempfile
# Initialize Llama model from Hugging Face.
# hf_hub_download fetches the GGUF weights once (cached locally by
# huggingface_hub) and returns the file path; llama-cpp-python then loads it.
# REPO_ID / MODEL_FILE environment variables override the default model.
model = Llama(
    model_path=hf_hub_download(
        repo_id=os.environ.get("REPO_ID", "bartowski/LLaMA-Mesh-GGUF"),
        filename=os.environ.get("MODEL_FILE", "LLaMA-Mesh-Q4_K_L.gguf"),
    )
)
# HTML banner rendered at the top of the demo page.
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">LLaMA-Mesh</h1>
<p>LLaMA-Mesh: Unifying 3D Mesh Generation with Language Models.</p>
<p>Supports up to 4096 tokens. Run locally for 8k token context.</p>
<p>To generate another mesh, click "clear" and start a new dialog.</p>
</div>
'''
# Attribution footer rendered at the bottom of the page.
LICENSE = """
<p/>--- Built with Meta Llama 3.1 8B ---
"""
# Placeholder HTML for an empty chat area.
# NOTE(review): PLACEHOLDER is never passed to any component below —
# presumably intended for gr.Chatbot(placeholder=...); confirm and wire up or remove.
PLACEHOLDER = """
<div style="padding: 30px; text-align: center;">
<h1 style="font-size: 28px; opacity: 0.55;">LLaMA-Mesh</h1>
<p style="font-size: 18px; opacity: 0.65;">Create 3D meshes by chatting.</p>
</div>
"""
# Page-level CSS: centers headings and styles the duplicate-space button.
css = """
h1 {
text-align: center;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
def generate_text(message, history, max_tokens=2048, temperature=0.9, top_p=0.95):
    """Stream a chat completion from the local llama.cpp model.

    Yields the accumulated response text after each streamed chunk so the
    Gradio ChatInterface can render the reply incrementally.

    Args:
        message: The latest user message.
        history: Prior turns supplied by gr.ChatInterface — either
            [user, assistant] pairs or OpenAI-style {"role", "content"} dicts,
            depending on the Gradio version/configuration.
        max_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.
    """
    # Bug fix: the original ignored `history`, so every turn started a brand-new
    # conversation. Rebuild the full message list so multi-turn context works.
    messages = []
    for turn in history or []:
        if isinstance(turn, dict):
            # messages-format history (gradio type="messages")
            messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            # pair-format history: (user_message, assistant_message)
            user_msg, assistant_msg = turn[0], turn[1]
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = model.create_chat_completion(
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        stream=True,
    )
    accumulated = ""
    for streamed in response:
        delta = streamed["choices"][0].get("delta", {})
        # Some chunks (e.g. the initial role chunk) carry no content, and
        # "content" may be present but None — coerce both cases to "".
        accumulated += delta.get("content") or ""
        yield accumulated
def apply_gradient_color(mesh_text):
    """Color an OBJ mesh with a height-based red/blue gradient and export GLB.

    The mesh text is written to a temporary .obj file, loaded with trimesh,
    given per-vertex RGBA colors interpolating red (top) to blue (bottom)
    along the y axis, and exported next to the temp file as a .glb.

    Args:
        mesh_text: A 3D mesh in Wavefront OBJ format, as a string.

    Returns:
        Path to the generated .glb file (suitable for gr.Model3D).
    """
    # Bug fix: the original created the NamedTemporaryFile, leaked its open
    # handle, and reopened the path separately. Write through the managed
    # handle instead; delete=False is intentional since we load by path after.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".obj", delete=False
    ) as obj_file:
        obj_file.write(mesh_text)
        obj_path = obj_file.name

    mesh = trimesh.load_mesh(obj_path, file_type='obj')

    vertices = mesh.vertices
    y_values = vertices[:, 1]
    y_min = y_values.min()
    y_span = y_values.max() - y_min
    # Bug fix: guard against a flat mesh (all vertices at one height),
    # which previously divided by zero.
    if y_span > 0:
        y_normalized = (y_values - y_min) / y_span
    else:
        y_normalized = np.zeros_like(y_values)

    colors = np.zeros((len(vertices), 4))
    colors[:, 0] = y_normalized        # red grows with height
    colors[:, 2] = 1 - y_normalized    # blue shrinks with height
    colors[:, 3] = 1.0                 # fully opaque
    mesh.visual.vertex_colors = colors

    glb_path = obj_path.replace(".obj", ".glb")
    with open(glb_path, "wb") as f:
        f.write(export_glb(mesh))
    return glb_path
# Build the Gradio UI: streaming chat interface on top, OBJ-to-GLB mesh
# visualizer below.
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    # Chat backed by generate_text; the three sliders map positionally to its
    # max_tokens / temperature / top_p keyword arguments.
    chatbot = gr.ChatInterface(
        generate_text,
        title="LLaMA-Mesh | GGUF Integration",
        description="Supports generating 3D meshes with LLaMA-GGUF.",
        examples=[
            ['Create a 3D model of a wooden hammer'],
            ['Create a 3D model of a pyramid in OBJ format'],
            ['Create a 3D model of a table.'],
        ],
        cache_examples=False,
        additional_inputs=[
            gr.Slider(minimum=2048, maximum=8192, value=4096, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=1.5, value=0.9, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        ],
    )
    gr.Markdown("### 3D Mesh Visualization")
    # Users paste OBJ text generated in the chat; the button runs
    # apply_gradient_color to produce a colored GLB for the Model3D viewer.
    mesh_input = gr.Textbox(
        label="3D Mesh Input",
        placeholder="Paste your 3D mesh in OBJ format here...",
        lines=5,
    )
    visualize_button = gr.Button("Visualize 3D Mesh")
    output_model = gr.Model3D(label="3D Mesh Visualization")
    visualize_button.click(
        fn=apply_gradient_color,
        inputs=[mesh_input],
        outputs=[output_model]
    )
    gr.Markdown(LICENSE)
# Launch the demo
if __name__ == "__main__":
    # Entry point: start the Gradio server (blocks until shutdown).
    # Fix: removed a stray trailing "|" artifact that made the pasted
    # source a syntax error.
    demo.launch()