root
committed on
Commit
·
654f7e8
1
Parent(s):
baade64
ss
Browse files- app.py +6 -8
- requirements.txt +1 -1
app.py
CHANGED
@@ -38,12 +38,6 @@ st.set_page_config(
|
|
38 |
with st.sidebar:
|
39 |
st.title("⚙️ Configuration")
|
40 |
|
41 |
-
# Ranking weights
|
42 |
-
st.subheader("Ranking Weights")
|
43 |
-
semantic_weight = st.slider("Semantic Similarity Weight", 0.0, 1.0, 0.7, 0.1)
|
44 |
-
keyword_weight = 1.0 - semantic_weight
|
45 |
-
st.write(f"Keyword Weight: {keyword_weight:.1f}")
|
46 |
-
|
47 |
# Advanced options
|
48 |
st.subheader("Advanced Options")
|
49 |
top_k = st.selectbox("Number of results to display", [1,2,3,4,5], index=4)
|
@@ -98,8 +92,10 @@ def load_embedding_model():
|
|
98 |
"""Load and cache the BGE embedding model"""
|
99 |
print("[Cache] Attempting to load Embedding Model (BAAI/bge-large-en-v1.5)...")
|
100 |
try:
|
|
|
|
|
101 |
with st.spinner("🔄 Loading BAAI/bge-large-en-v1.5 model..."):
|
102 |
-
model = SentenceTransformer('BAAI/bge-large-en-v1.5')
|
103 |
st.success("✅ Embedding model loaded successfully!")
|
104 |
print("[Cache] Embedding Model (BAAI/bge-large-en-v1.5) LOADED.")
|
105 |
return model
|
@@ -112,9 +108,11 @@ def load_cross_encoder():
|
|
112 |
"""Load and cache the Cross-Encoder model"""
|
113 |
print("[Cache] Attempting to load Cross-Encoder Model (ms-marco-MiniLM-L6-v2)...")
|
114 |
try:
|
|
|
|
|
115 |
with st.spinner("🔄 Loading Cross-Encoder ms-marco-MiniLM-L6-v2..."):
|
116 |
from sentence_transformers import CrossEncoder
|
117 |
-
model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2')
|
118 |
st.success("✅ Cross-Encoder model loaded successfully!")
|
119 |
print("[Cache] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED.")
|
120 |
return model
|
|
|
38 |
with st.sidebar:
|
39 |
st.title("⚙️ Configuration")
|
40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
# Advanced options
|
42 |
st.subheader("Advanced Options")
|
43 |
top_k = st.selectbox("Number of results to display", [1,2,3,4,5], index=4)
|
|
|
92 |
"""Load and cache the BGE embedding model"""
|
93 |
print("[Cache] Attempting to load Embedding Model (BAAI/bge-large-en-v1.5)...")
|
94 |
try:
|
95 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
96 |
+
print(f"[Cache] Using device: {device} for embedding model")
|
97 |
with st.spinner("🔄 Loading BAAI/bge-large-en-v1.5 model..."):
|
98 |
+
model = SentenceTransformer('BAAI/bge-large-en-v1.5', device=device)
|
99 |
st.success("✅ Embedding model loaded successfully!")
|
100 |
print("[Cache] Embedding Model (BAAI/bge-large-en-v1.5) LOADED.")
|
101 |
return model
|
|
|
108 |
"""Load and cache the Cross-Encoder model"""
|
109 |
print("[Cache] Attempting to load Cross-Encoder Model (ms-marco-MiniLM-L6-v2)...")
|
110 |
try:
|
111 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
112 |
+
print(f"[Cache] Using device: {device} for cross-encoder model")
|
113 |
with st.spinner("🔄 Loading Cross-Encoder ms-marco-MiniLM-L6-v2..."):
|
114 |
from sentence_transformers import CrossEncoder
|
115 |
+
model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L6-v2', device=device)
|
116 |
st.success("✅ Cross-Encoder model loaded successfully!")
|
117 |
print("[Cache] Cross-Encoder Model (ms-marco-MiniLM-L6-v2) LOADED.")
|
118 |
return model
|
requirements.txt
CHANGED
@@ -14,6 +14,6 @@ huggingface-hub==0.30.0
|
|
14 |
bitsandbytes==0.44.1
|
15 |
accelerate==0.27.2
|
16 |
datasets==2.18.0
|
17 |
-
sentence-transformers
|
18 |
plotly==5.18.0
|
19 |
einops
|
|
|
14 |
bitsandbytes==0.44.1
|
15 |
accelerate==0.27.2
|
16 |
datasets==2.18.0
|
17 |
+
sentence-transformers>=3.0.0,<4.0.0
|
18 |
plotly==5.18.0
|
19 |
einops
|