prithivMLmods committed on
Commit a3d42ff · verified · 1 Parent(s): 07f7d2f

Update app.py

Files changed (1):
  1. app.py +6 -6
app.py CHANGED

@@ -91,8 +91,8 @@ def progress_bar_html(label: str) -> str:
     return f'''
     <div style="display: flex; align-items: center;">
         <span style="margin-right: 10px; font-size: 14px;">{label}</span>
-        <div style="width: 110px; height: 5px; background-color: #FFF0F5; border-radius: 2px; overflow: hidden;">
-            <div style="width: 100%; height: 100%; background-color: #FF69B4; animation: loading 1.5s linear infinite;"></div>
+        <div style="width: 110px; height: 5px; background-color: #DC143C; border-radius: 2px; overflow: hidden;">
+            <div style="width: 100%; height: 100%; background-color: #FF0000; animation: loading 1.5s linear infinite;"></div>
         </div>
     </div>
     <style>
@@ -104,7 +104,7 @@ def progress_bar_html(label: str) -> str:
     '''
 
 
-@spaces.GPU(duration=60, enable_queue=True)
+@spaces.GPU
 def generate(input_dict: dict, chat_history: list[dict],
              max_new_tokens: int = 1024,
              temperature: float = 0.6,
@@ -160,7 +160,7 @@ def generate(input_dict: dict, chat_history: list[dict],
         thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
         thread.start()
         buffer = ""
-        yield progress_bar_html("Processing video with Qwen2VL")
+        yield progress_bar_html("Processing video with Callisto OCR3")
         for new_text in streamer:
             buffer += new_text
             buffer = buffer.replace("<|im_end|>", "")
@@ -190,7 +190,7 @@ def generate(input_dict: dict, chat_history: list[dict],
         thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
         thread.start()
         buffer = ""
-        yield progress_bar_html("Thinking...")
+        yield progress_bar_html("Processing image with Callisto OCR3")
         for new_text in streamer:
             buffer += new_text
             buffer = buffer.replace("<|im_end|>", "")
@@ -219,7 +219,7 @@ def generate(input_dict: dict, chat_history: list[dict],
         t = Thread(target=model.generate, kwargs=generation_kwargs)
         t.start()
         outputs = []
-        yield progress_bar_html("Processing...")
+        yield progress_bar_html("Processing With Pocket Llama 3B")
         for new_text in streamer:
            outputs.append(new_text)
            yield "".join(outputs)