Update app.py
app.py CHANGED
@@ -47,24 +47,6 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-# Updated function with optimized progress UI
-def progress_bar_html(message: str) -> str:
-    return f"""
-    <div style="display: flex; align-items: center; justify-content: center; margin: 10px 0;">
-        <span style="margin-right: 10px; font-weight: bold; color: #333;">{message}</span>
-        <div style="position: relative; width: 200px; height: 10px; background-color: #e0e0e0; border-radius: 5px; overflow: hidden;">
-            <div style="position: absolute; width: 100%; height: 100%; background: linear-gradient(90deg, #76c7c0, #4caf50); animation: loading 2s ease-in-out infinite;"></div>
-        </div>
-    </div>
-    <style>
-    @keyframes loading {{
-        0% {{ transform: translateX(-100%); }}
-        50% {{ transform: translateX(0%); }}
-        100% {{ transform: translateX(100%); }}
-    }}
-    </style>
-    """
-
 # Load text-only model and tokenizer
 model_id = "prithivMLmods/FastThink-0.5B-Tiny"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
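The old teal-and-green progress_bar_html helper is removed here; a simplified replacement is added further down (see the @@ -146,6 +128,26 @@ hunk), so every later reference to progress_bar_html resolves to the new dark-red bar.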
@@ -80,7 +62,7 @@ TTS_VOICES = [
     "en-US-GuyNeural",  # @tts2
 ]
 
-MODEL_ID = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
+MODEL_ID = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
 processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
 model_m = Qwen2VLForConditionalGeneration.from_pretrained(
     MODEL_ID,
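Both sides of the MODEL_ID change show identical text; the difference is invisible in this rendering (most likely trailing whitespace), so the Qwen2-VL OCR checkpoint is loaded exactly as before.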
@@ -146,6 +128,26 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
+def progress_bar_html(label: str) -> str:
+    """
+    Returns an HTML snippet for a thin progress bar with a label.
+    The progress bar is styled as a dark red animated bar.
+    """
+    return f'''
+<div style="display: flex; align-items: center;">
+    <span style="margin-right: 10px; font-size: 14px;">{label}</span>
+    <div style="width: 110px; height: 5px; background-color: #f0f0f0; border-radius: 2px; overflow: hidden;">
+        <div style="width: 100%; height: 100%; background-color: darkred; animation: loading 1.5s linear infinite;"></div>
+    </div>
+</div>
+<style>
+@keyframes loading {{
+    0% {{ transform: translateX(-100%); }}
+    100% {{ transform: translateX(100%); }}
+}}
+</style>
+'''
+
 @spaces.GPU(duration=60, enable_queue=True)
 def generate_image_fn(
     prompt: str,
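Two details worth noting in the new helper. First, the doubled braces escape literal braces inside the f-string, so the CSS @keyframes block survives interpolation; only {label} is substituted. Second, the snippet relies on Gradio's generator protocol: each value a chat function yields replaces the previously rendered one, so yielding this HTML first shows an animated bar until real output arrives. A minimal sketch of that contract, assuming only that the Chatbot renders inline HTML (as this Space relies on); demo_stream and the sleep are stand-ins for the model calls:

import time
import gradio as gr

def demo_stream(message, history):
    # First yield: the animated bar is shown while work happens
    yield progress_bar_html("Thinking...")
    time.sleep(2)  # stand-in for real generation latency
    # Next yield replaces the bar with the actual reply
    yield f"Echo: {message}"

gr.ChatInterface(demo_stream).launch()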
@@ -214,11 +216,11 @@ def generate(
     text = input_dict["text"]
     files = input_dict.get("files", [])
 
-    # Handle image generation command
     if text.strip().lower().startswith("@image"):
+        # Remove the "@image" tag and use the rest as prompt
        prompt = text[len("@image"):].strip()
         # Show animated progress bar for image generation
-        yield
+        yield progress_bar_html("Generating Image")
         image_paths, used_seed = generate_image_fn(
             prompt=prompt,
             negative_prompt="",
@@ -232,7 +234,7 @@ def generate(
             use_resolution_binning=True,
             num_images=1,
         )
-        #
+        # Once done, yield the generated image
         yield gr.Image(image_paths[0])
         return  # Exit early
 
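Together these two hunks complete the @image branch: strip the prefix, yield the progress bar, yield the finished image, and return so the request never reaches the chat models. The control flow is a plain prefix dispatch inside a generator; a self-contained sketch, where the bracketed strings are stand-ins for generate_image_fn and the LLM path:

def respond(message: str):
    # Prefix dispatch, mirroring generate() above: "@image" takes its own
    # path and returns early; everything else falls through to chat.
    text = message.strip()
    if text.lower().startswith("@image"):
        prompt = text[len("@image"):].strip()
        yield f"[image path] prompt={prompt!r}"  # stand-in for generate_image_fn(...)
        return  # early exit: image requests never reach the LLM
    yield f"[chat path] {text!r}"  # stand-in for the streaming reply

print(list(respond("@image a red fox")))  # ["[image path] prompt='a red fox'"]
print(list(respond("hello")))             # ["[chat path] 'hello'"]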
@@ -252,7 +254,6 @@ def generate(
     conversation = clean_chat_history(chat_history)
     conversation.append({"role": "user", "content": text})
 
-    # For multimodal chat with files (e.g. image + text)
     if files:
         if len(files) > 1:
             images = [load_image(image) for image in files]
@@ -275,17 +276,13 @@ def generate(
         thread.start()
 
         buffer = ""
-        # Show progress bar for
-        yield
+        # Show animated progress bar for multimodal generation
+        yield progress_bar_html("Thinking...")
         for new_text in streamer:
             buffer += new_text
             buffer = buffer.replace("<|im_end|>", "")
             time.sleep(0.01)
-
-            interim_html = f"<div>{buffer}</div><div>{progress_bar_html('Thinking...')}</div>"
-            yield gr.HTML(interim_html)
-        # Final output without the progress bar
-        yield gr.HTML(f"<div>{buffer}</div>")
+            yield buffer
     else:
         input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
         if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
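The multimodal branch previously rebuilt a gr.HTML wrapper on every token, re-embedding the progress bar in each interim chunk. Now the bar is yielded once up front and each iteration yields the plain accumulated buffer; Gradio replaces the displayed message on every yield, so the bar vanishes as soon as the first token lands and the text then grows in place. The loop reduces to an accumulate-and-yield pattern (a standalone sketch; the list literal stands in for the model's streamer):

def stream(chunks):
    # Accumulate-and-yield: each yield replaces the previously shown
    # message, so the reply appears to grow in place.
    buffer = ""
    for chunk in chunks:
        buffer += chunk.replace("<|im_end|>", "")  # drop the end-of-turn marker
        yield buffer

for partial in stream(["Hel", "lo<|im_end|>"]):
    print(partial)  # prints "Hel", then "Hello"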
@@ -304,19 +301,18 @@ def generate(
             "num_beams": 1,
             "repetition_penalty": repetition_penalty,
         }
-
-
+        t = Thread(target=model.generate, kwargs=generation_kwargs)
+        t.start()
 
         outputs = []
-        # Show progress bar for
-        yield
+        # Show animated progress bar for text generation
+        yield progress_bar_html("Thinking...")
         for new_text in streamer:
             outputs.append(new_text)
-
-
+            yield "".join(outputs)
+
         final_response = "".join(outputs)
-
-        yield gr.HTML(f"<div>{final_response}</div>")
+        yield final_response
 
     # If TTS was requested, convert the final response to speech.
     if is_tts and voice:
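The key addition here is the pair of Thread lines: model.generate() blocks until generation finishes, so it must run on a background thread while the foreground loop drains the TextIteratorStreamer, and the new yield inside the loop streams partial text just as the multimodal branch does. The final reply is now yielded as a plain string rather than wrapped in gr.HTML, matching the streaming yields before it. A self-contained sketch of the pattern, assuming only the standard transformers API (gpt2 is a stand-in checkpoint):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tok("The quick brown fox", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=20)

# generate() blocks until done, so it runs on a worker thread while
# this thread consumes decoded text as soon as it is available.
Thread(target=model.generate, kwargs=generation_kwargs).start()

outputs = []
for new_text in streamer:
    outputs.append(new_text)
print("".join(outputs))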