Update app.py
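The commit removes a few redundant section comments and repoints LOG_DIR at "/logs" (noted in the added line as a universally writable directory) so the Space can write its usage_log.jsonl.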
app.py CHANGED

@@ -11,7 +11,6 @@ import torch
 import os
 import logging

-# Ensure proper display for debugging
 pd.set_option('display.max_colwidth', 1000)

 # Patch torch.load to always load on CPU
@@ -20,11 +19,10 @@ def cpu_load(*args, **kwargs):
     return original_torch_load(*args, map_location=torch.device('cpu'), **kwargs)
 torch.load = cpu_load

-# Flask app setup
 app = Flask(__name__)

 # Logging setup
-LOG_DIR = "/
+LOG_DIR = "/logs" # Use a universally writable directory
 LOG_FILE = os.path.join(LOG_DIR, "usage_log.jsonl")
 os.makedirs(LOG_DIR, exist_ok=True)
 logging.basicConfig(
@@ -33,7 +31,6 @@ logging.basicConfig(
     format='%(asctime)s [%(levelname)s] %(message)s'
 )

-# Define pipelines
 PIPELINES = [
     {'id': 8, 'name': 'Embedded using BioWordVec', 'filename': "pipeline_ex3_s4.joblib"},
     {'id': 1, 'name': 'Baseline', 'filename': "pipeline_ex1_s1.joblib"},
@@ -47,7 +44,6 @@ PIPELINES = [

 pipeline_metadata = [{'id': p['id'], 'name': p['name']} for p in PIPELINES]

-# Helper functions
 def load_pipeline_from_hub(filename):
     cache_dir = "/tmp/hf_cache"
     os.environ["HF_HUB_CACHE"] = cache_dir
@@ -80,7 +76,7 @@ def log_interaction(user_input, model_name, predictions):
     except Exception as e:
         print(f"[ERROR] Could not write log entry: {e}")

-
+
 @app.route('/')
 def index():
     return render_template('index.html', pipelines=pipeline_metadata)
@@ -103,6 +99,6 @@ def get_data():

     return render_template('index.html', results=results, name=name, pipelines=pipeline_metadata)

-
+
 if __name__ == '__main__':
     app.run(host="0.0.0.0", port=7860)
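For context, the hunk at new line 19 shows only the tail of the CPU-loading patch. The full idiom it implies looks roughly like the sketch below; the original_torch_load = torch.load assignment is an assumption inferred from the visible return original_torch_load(...) call and is not itself shown in the diff.

import torch

# Assumed setup: keep a handle to the unpatched loader before replacing it.
original_torch_load = torch.load

def cpu_load(*args, **kwargs):
    # Force every checkpoint to deserialize onto the CPU, even if it was saved from a GPU.
    return original_torch_load(*args, map_location=torch.device('cpu'), **kwargs)

torch.load = cpu_load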
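The body of load_pipeline_from_hub is elided in the diff; only the cache setup is visible. A minimal sketch of what it presumably does, assuming the joblib pipelines are fetched from a Hub repository with huggingface_hub (the repo id below is a placeholder, not taken from the diff):

import os
import joblib
from huggingface_hub import hf_hub_download

def load_pipeline_from_hub(filename):
    cache_dir = "/tmp/hf_cache"  # writable at Space runtime, as in the diff
    os.environ["HF_HUB_CACHE"] = cache_dir
    # Placeholder repo id: the actual repository is not visible in the diff.
    local_path = hf_hub_download(
        repo_id="<user>/<pipelines-repo>",
        filename=filename,
        cache_dir=cache_dir,
    )
    return joblib.load(local_path)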
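Likewise, only the exception handler of log_interaction appears in the diff. Since LOG_FILE points at usage_log.jsonl, the function presumably appends one JSON object per call; the field names in this sketch are illustrative assumptions, while the signature and the except clause come from the diff.

import json
from datetime import datetime, timezone

LOG_FILE = "/logs/usage_log.jsonl"  # as configured at module level in app.py

def log_interaction(user_input, model_name, predictions):
    # Assumed log entry layout; only the function signature and the
    # error handling below are visible in the diff.
    entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "model": model_name,
        "input": user_input,
        "predictions": predictions,
    }
    try:
        with open(LOG_FILE, "a") as f:
            f.write(json.dumps(entry) + "\n")
    except Exception as e:
        print(f"[ERROR] Could not write log entry: {e}")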