Commit a557e7e committed by maximka608
Parent(s): 104ce4f

fix

Files changed:
- app.py (+1/-1)
- utils/llm.py (+2/-2)
app.py CHANGED
@@ -64,7 +64,7 @@ def main(query, search_types, llm_api_key):
     if "Vector" in search_types:
         vector_search = knowledge_base.search_by_embedding(embedding, 5)[0].tolist()
     if "BM25" in search_types:
-        bm25_search = knowledge_base.search_by_BM25(query,
+        bm25_search = knowledge_base.search_by_BM25(query, 3)
 
     docs = combine_docs(vector_search + bm25_search, texts)
     prompt = create_prompt(query, docs)
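The commit pins the BM25 result count to 3, mirroring the explicit 5 already passed to `search_by_embedding`. The repo's `search_by_BM25` implementation isn't shown in this diff, so the following is a hypothetical sketch using the rank_bm25 package, assuming the method returns document indices (which matches how `main` concatenates `vector_search + bm25_search` before `combine_docs` resolves them against `texts`):

```python
# Hypothetical sketch of a BM25 keyword search shaped like
# knowledge_base.search_by_BM25(query, top_k); the class name,
# tokenization, and return type are assumptions, not the repo's code.
from rank_bm25 import BM25Okapi

class KnowledgeBase:
    def __init__(self, texts):
        self.texts = texts
        # BM25Okapi expects pre-tokenized documents; whitespace split for brevity
        self.bm25 = BM25Okapi([t.lower().split() for t in texts])

    def search_by_BM25(self, query, top_k):
        # Score every document against the query, keep the top_k indices
        scores = self.bm25.get_scores(query.lower().split())
        ranked = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
        return ranked[:top_k]
```

Returning indices rather than documents keeps the two retrieval paths interchangeable: vector hits and keyword hits can be concatenated and deduplicated before being mapped back to the source texts.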
utils/llm.py CHANGED
@@ -5,9 +5,9 @@ class LLM:
     def __init__(self, api_key):
         os.environ["COHERE_API_KEY"] = api_key
 
-    def generate_response(self, prompt, temperature=0.
+    def generate_response(self, prompt, temperature=0.4, max_tokens=1000):
         response = completion(
-            model="command-r",
+            model="command-r-plus",
             messages=[{"content": prompt, "role": "user"}],
             temperature=temperature,
             max_tokens=max_tokens
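After this change, `generate_response` defaults to a temperature of 0.4, caps responses at 1000 tokens, and targets Cohere's command-r-plus instead of command-r. The bare `completion(...)` call together with the COHERE_API_KEY environment variable suggests LiteLLM; here is a minimal sketch of the class as it stands post-commit, where the `litellm` import and the OpenAI-style response accessor are assumptions rather than lines shown in the diff:

```python
# Sketch of utils/llm.py after the commit. The import and the return
# line are assumptions (LiteLLM exposes an OpenAI-style response object);
# the body of generate_response mirrors the diff above.
import os
from litellm import completion

class LLM:
    def __init__(self, api_key):
        # LiteLLM picks up the Cohere credential from the environment
        os.environ["COHERE_API_KEY"] = api_key

    def generate_response(self, prompt, temperature=0.4, max_tokens=1000):
        response = completion(
            model="command-r-plus",
            messages=[{"content": prompt, "role": "user"}],
            temperature=temperature,
            max_tokens=max_tokens,
        )
        # First choice's message text, OpenAI-compatible shape
        return response.choices[0].message.content
```

A lower default temperature makes the RAG answers more deterministic, and the explicit `max_tokens` bound keeps responses from running long.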