prithivMLmods committed on
Commit
6803172
·
verified ·
1 Parent(s): 6a44e02

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -4
app.py CHANGED
@@ -6,6 +6,7 @@ import time
6
  import asyncio
7
  import tempfile
8
  from threading import Thread
 
9
 
10
  import gradio as gr
11
  import spaces
@@ -36,6 +37,16 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
36
  seed = random.randint(0, MAX_SEED)
37
  return seed
38
 
 
 
 
 
 
 
 
 
 
 
39
  class Model:
40
  def __init__(self):
41
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -117,7 +128,6 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
117
 
118
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
119
 
120
- # Load the text-only model and tokenizer (for pure text chat)
121
  model_id = "prithivMLmods/FastThink-0.5B-Tiny"
122
  tokenizer = AutoTokenizer.from_pretrained(model_id)
123
  model = AutoModelForCausalLM.from_pretrained(
@@ -236,7 +246,6 @@ def generate_image_fn(
236
  image_paths = [save_image(img) for img in images]
237
  return image_paths, seed
238
 
239
-
240
  @spaces.GPU(duration=120, enable_queue=True)
241
  def generate_3d_fn(
242
  prompt: str,
@@ -287,7 +296,13 @@ def generate(
287
  num_steps=64,
288
  randomize_seed=True,
289
  )
290
- yield gr.File(glb_path, label="3D Model (GLB)")
 
 
 
 
 
 
291
  return
292
 
293
  # --- Image Generation branch ---
@@ -415,4 +430,4 @@ demo = gr.ChatInterface(
415
 
416
  if __name__ == "__main__":
417
  # To create a public link, set share=True in launch().
418
- demo.queue(max_size=20).launch(share=True)
 
6
  import asyncio
7
  import tempfile
8
  from threading import Thread
9
+ import base64
10
 
11
  import gradio as gr
12
  import spaces
 
37
  seed = random.randint(0, MAX_SEED)
38
  return seed
39
 
40
def glb_to_data_url(glb_path: str) -> str:
    """Encode the GLB file at *glb_path* as a base64 ``data:`` URL.

    The returned URL can be used directly as the ``src`` attribute of an
    HTML <model-viewer> tag, avoiding any need to serve the file.
    """
    # Read the binary glTF payload and base64-encode it in one pass.
    with open(glb_path, "rb") as glb_file:
        encoded = base64.b64encode(glb_file.read()).decode("utf-8")
    return "data:model/gltf-binary;base64," + encoded
49
+
50
  class Model:
51
  def __init__(self):
52
  self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
128
 
129
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
130
 
 
131
  model_id = "prithivMLmods/FastThink-0.5B-Tiny"
132
  tokenizer = AutoTokenizer.from_pretrained(model_id)
133
  model = AutoModelForCausalLM.from_pretrained(
 
246
  image_paths = [save_image(img) for img in images]
247
  return image_paths, seed
248
 
 
249
  @spaces.GPU(duration=120, enable_queue=True)
250
  def generate_3d_fn(
251
  prompt: str,
 
296
  num_steps=64,
297
  randomize_seed=True,
298
  )
299
+ # Convert the GLB file to a base64 data URL and embed it in an HTML <model-viewer> tag.
300
+ data_url = glb_to_data_url(glb_path)
301
+ html_output = f'''
302
+ <model-viewer src="{data_url}" alt="3D Model" auto-rotate camera-controls style="width: 100%; height: 400px;"></model-viewer>
303
+ <script type="module" src="https://unpkg.com/@google/model-viewer/dist/model-viewer.min.js"></script>
304
+ '''
305
+ yield gr.HTML(html_output)
306
  return
307
 
308
  # --- Image Generation branch ---
 
430
 
431
  if __name__ == "__main__":
432
  # To create a public link, set share=True in launch().
433
+ demo.queue(max_size=20).launch(share=True)