Update app.py
app.py CHANGED
@@ -17,7 +17,7 @@ GREETING = (
 # Constants
 EMBEDDING_MODEL_NAME = "all-MiniLM-L12-v2"
 LLM_MODEL_NAME = "Qwen/Qwen2.5-0.5B-Instruct"
-PUBLICATIONS_TO_RETRIEVE =
+PUBLICATIONS_TO_RETRIEVE = 10
 
 
 def embedding(
@@ -77,7 +77,7 @@ def reply(message: str, history: list[str]) -> str:
     # Preprocess message
 
     pipe = transformers.pipeline(
-        "text2text-generation", model="Qwen/Qwen2.5-7B-Instruct", device="cuda"
+        "text2text-generation", model="Qwen/Qwen2.5-7B-Instruct", device="cuda", return_full_text=False
     )
 
     message = preprocess(message, PUBLICATIONS_TO_RETRIEVE)
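For reference, a minimal sketch (not the app's own reply() implementation) of how the two changed settings behave with a transformers generation pipeline. The task name, model size, prompt text, and max_new_tokens below are assumptions for illustration, and return_full_text is passed per call here rather than at pipeline construction as in the commit.

# Illustrative sketch only; names and values not taken from the commit are assumptions.
import transformers

PUBLICATIONS_TO_RETRIEVE = 10  # value introduced by this commit

# Smaller 0.5B checkpoint used here purely to keep the example lightweight.
pipe = transformers.pipeline(
    "text-generation",
    model="Qwen/Qwen2.5-0.5B-Instruct",
)

# Hypothetical prompt built from the top PUBLICATIONS_TO_RETRIEVE retrieved publications.
prompt = "Summarize the following publications: ..."

outputs = pipe(
    prompt,
    max_new_tokens=128,
    return_full_text=False,  # return only the generated continuation, not the echoed prompt
)
print(outputs[0]["generated_text"])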