m7n committed on
Commit
a236b7f
·
1 Parent(s): 8734842

Refactor app.py to enhance spaces import handling, provide a fallback for non-Space environments, and restore necessary function calls for resource initialization.

Browse files
Files changed (1) hide show
  1. app.py +33 -20
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import time
2
  print(f"Starting up: {time.strftime('%Y-%m-%d %H:%M:%S')}")
3
  # source openalex_env_map/bin/activate
@@ -87,9 +88,28 @@ is_running_in_hf_zero_gpu()
87
  def is_running_in_hf_space():
88
  return "SPACE_ID" in os.environ
89
 
90
- if is_running_in_hf_space():
91
- import spaces # necessary to run on Zero.
 
 
 
 
92
  from spaces.zero.client import _get_token
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
  #if is_running_in_hf_space():
95
  #import spaces # necessary to run on Zero.
@@ -160,10 +180,10 @@ MODEL_NAME = "m7n/discipline-tuned_specter_2_024"
160
  start_time = time.time()
161
  print("Initializing resources...")
162
 
163
- #download_required_files(REQUIRED_FILES)
164
- #basedata_df = setup_basemap_data(BASEMAP_PATH)
165
- #mapper = setup_mapper(MAPPER_PARAMS_PATH)
166
- #model = setup_embedding_model(MODEL_NAME)
167
 
168
  print(f"Resources initialized in {time.time() - start_time:.2f} seconds")
169
 
@@ -181,15 +201,14 @@ def no_op_decorator(func):
181
  # decorator_to_use = spaces.GPU() if is_running_in_hf_space() else no_op_decorator
182
  # #duration=120
183
 
184
-
185
  @spaces.GPU(duration=1) # ← forces the detector to see a GPU-aware fn
186
  def _warmup():
187
  print("Warming up...")
188
 
189
-
190
-
191
  _warmup()
192
 
 
 
193
  @spaces.GPU(duration=30)
194
  def create_embeddings_30(texts_to_embedd):
195
  """Create embeddings for the input texts using the loaded model."""
@@ -198,7 +217,7 @@ def create_embeddings_30(texts_to_embedd):
198
  @spaces.GPU(duration=59)
199
  def create_embeddings_59(texts_to_embedd):
200
  """Create embeddings for the input texts using the loaded model."""
201
- return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)#
202
 
203
  @spaces.GPU(duration=120)
204
  def create_embeddings_120(texts_to_embedd):
@@ -211,10 +230,10 @@ def create_embeddings_299(texts_to_embedd):
211
  return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
212
 
213
 
214
- #else:
215
- # def create_embeddings(texts_to_embedd):
216
- # """Create embeddings for the input texts using the loaded model."""
217
- # return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
218
 
219
 
220
 
@@ -901,12 +920,6 @@ with gr.Blocks(theme=theme, css="""
901
  # demo.launch(server_name="0.0.0.0", server_port=7860, share=True,allowed_paths=["/static"])
902
 
903
  # Mount Gradio app to FastAPI
904
-
905
-
906
-
907
-
908
-
909
-
910
  if is_running_in_hf_space():
911
  app = gr.mount_gradio_app(app, demo, path="/",ssr_mode=False) # setting to false for now.
912
  else:
 
1
+ #import spaces #
2
  import time
3
  print(f"Starting up: {time.strftime('%Y-%m-%d %H:%M:%S')}")
4
  # source openalex_env_map/bin/activate
 
88
  def is_running_in_hf_space():
89
  return "SPACE_ID" in os.environ
90
 
91
+ # #if is_running_in_hf_space():
92
+ # from spaces.zero.client import _get_token
93
+
94
+
95
+ try:
96
+ import spaces
97
  from spaces.zero.client import _get_token
98
+ HAS_SPACES = True
99
+ except (ImportError, ModuleNotFoundError):
100
+ HAS_SPACES = False
101
+
102
+ # Provide a harmless fallback so decorators don’t explode
103
+ if not HAS_SPACES:
104
+ class _Dummy:
105
+ def GPU(self, *a, **k):
106
+ def deco(f): # no-op decorator
107
+ return f
108
+ return deco
109
+ spaces = _Dummy() # fake module object
110
+ def _get_token(request): # stub, never called off-Space
111
+ return ""
112
+
113
 
114
  #if is_running_in_hf_space():
115
  #import spaces # necessary to run on Zero.
 
180
  start_time = time.time()
181
  print("Initializing resources...")
182
 
183
+ download_required_files(REQUIRED_FILES)
184
+ basedata_df = setup_basemap_data(BASEMAP_PATH)
185
+ mapper = setup_mapper(MAPPER_PARAMS_PATH)
186
+ model = setup_embedding_model(MODEL_NAME)
187
 
188
  print(f"Resources initialized in {time.time() - start_time:.2f} seconds")
189
 
 
201
  # decorator_to_use = spaces.GPU() if is_running_in_hf_space() else no_op_decorator
202
  # #duration=120
203
 
 
204
  @spaces.GPU(duration=1) # ← forces the detector to see a GPU-aware fn
205
  def _warmup():
206
  print("Warming up...")
207
 
 
 
208
  _warmup()
209
 
210
+
211
+ # if is_running_in_hf_space():
212
  @spaces.GPU(duration=30)
213
  def create_embeddings_30(texts_to_embedd):
214
  """Create embeddings for the input texts using the loaded model."""
 
217
  @spaces.GPU(duration=59)
218
  def create_embeddings_59(texts_to_embedd):
219
  """Create embeddings for the input texts using the loaded model."""
220
+ return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
221
 
222
  @spaces.GPU(duration=120)
223
  def create_embeddings_120(texts_to_embedd):
 
230
  return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
231
 
232
 
233
+ # else:
234
+ # def create_embeddings(texts_to_embedd):
235
+ # """Create embeddings for the input texts using the loaded model."""
236
+ # return model.encode(texts_to_embedd, show_progress_bar=True, batch_size=192)
237
 
238
 
239
 
 
920
  # demo.launch(server_name="0.0.0.0", server_port=7860, share=True,allowed_paths=["/static"])
921
 
922
  # Mount Gradio app to FastAPI
 
 
 
 
 
 
923
  if is_running_in_hf_space():
924
  app = gr.mount_gradio_app(app, demo, path="/",ssr_mode=False) # setting to false for now.
925
  else: