m7n committed on
Commit
dbd8935
·
1 Parent(s): a682539

Refactor GPU function definitions in app.py to improve structure and ensure warmup function is called before embedding creation.

Browse files
Files changed (1) hide show
  1. app.py +30 -33
app.py CHANGED
@@ -92,16 +92,7 @@ def is_running_in_hf_space():
92
  from spaces.zero.client import _get_token
93
 
94
 
95
- @spaces.GPU(duration=1) # ← forces the detector to see a GPU-aware fn
96
- def _warmup():
97
- print("Warming up...")
98
-
99
- _warmup()
100
 
101
- @spaces.GPU(duration=30)
102
- def create_embeddings_30(texts_to_embedd):
103
- """Create embeddings for the input texts using the loaded model."""
104
- return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
105
 
106
 
107
  #if is_running_in_hf_space():
@@ -194,33 +185,39 @@ def no_op_decorator(func):
194
  # decorator_to_use = spaces.GPU() if is_running_in_hf_space() else no_op_decorator
195
  # #duration=120
196
 
 
 
 
197
 
198
- if is_running_in_hf_space():
199
- @spaces.GPU(duration=30)
200
- def create_embeddings_30(texts_to_embedd):
201
- """Create embeddings for the input texts using the loaded model."""
202
- return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
203
-
204
- @spaces.GPU(duration=59)
205
- def create_embeddings_59(texts_to_embedd):
206
- """Create embeddings for the input texts using the loaded model."""
207
- return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
208
-
209
- @spaces.GPU(duration=120)
210
- def create_embeddings_120(texts_to_embedd):
211
- """Create embeddings for the input texts using the loaded model."""
212
- return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
213
-
214
- @spaces.GPU(duration=299)
215
- def create_embeddings_299(texts_to_embedd):
216
- """Create embeddings for the input texts using the loaded model."""
217
- return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
 
 
 
218
 
219
 
220
- else:
221
- def create_embeddings(texts_to_embedd):
222
- """Create embeddings for the input texts using the loaded model."""
223
- return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
224
 
225
 
226
 
 
92
  from spaces.zero.client import _get_token
93
 
94
 
 
 
 
 
 
95
 
 
 
 
 
96
 
97
 
98
  #if is_running_in_hf_space():
 
185
  # decorator_to_use = spaces.GPU() if is_running_in_hf_space() else no_op_decorator
186
  # #duration=120
187
 
188
+ @spaces.GPU(duration=1) # ← forces the detector to see a GPU-aware fn
189
+ def _warmup():
190
+ print("Warming up...")
191
 
192
+ _warmup()
193
+
194
+
195
+ # if is_running_in_hf_space():
196
+ @spaces.GPU(duration=30)
197
+ def create_embeddings_30(texts_to_embedd):
198
+ """Create embeddings for the input texts using the loaded model."""
199
+ return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
200
+
201
+ @spaces.GPU(duration=59)
202
+ def create_embeddings_59(texts_to_embedd):
203
+ """Create embeddings for the input texts using the loaded model."""
204
+ return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
205
+
206
+ @spaces.GPU(duration=120)
207
+ def create_embeddings_120(texts_to_embedd):
208
+ """Create embeddings for the input texts using the loaded model."""
209
+ return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
210
+
211
+ @spaces.GPU(duration=299)
212
+ def create_embeddings_299(texts_to_embedd):
213
+ """Create embeddings for the input texts using the loaded model."""
214
+ return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
215
 
216
 
217
+ # else:
218
+ # def create_embeddings(texts_to_embedd):
219
+ # """Create embeddings for the input texts using the loaded model."""
220
+ # return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
221
 
222
 
223