Commit f1ab357
Parent: 08ec439
Update src/backend/chatbot.py
Files changed: src/backend/chatbot.py (+2 -2)
src/backend/chatbot.py (CHANGED)

@@ -35,7 +35,7 @@ def init_llm(model, demo_lite):
     if demo_lite == False:
         print("BP 5 : running full demo")
         if model == "Llama2-7b_CPP":
-            model_path = "
+            model_path = "../../models/llama-2-7b-chat.Q4_K_M.gguf"
             print("model path: ", model_path)
             llm = LlamaCPP(
                 # You can pass in the URL to a GGML model to download it automatically
@@ -57,7 +57,7 @@ def init_llm(model, demo_lite):
                 verbose=True,
             )
         elif model == "deci-7b_CPP":
-            model_path = "/Users/dheym/Library/CloudStorage/OneDrive-Personal/Documents/side_projects/GRDN/src/models/decilm-7b-uniform-gqa-q8_0.gguf"
+            #model_path = "/Users/dheym/Library/CloudStorage/OneDrive-Personal/Documents/side_projects/GRDN/src/models/decilm-7b-uniform-gqa-q8_0.gguf"
             print("model path: ", model_path)
             llm = LlamaCPP(
                 # You can pass in the URL to a GGML model to download it automatically
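Note on the new path: "../../models/llama-2-7b-chat.Q4_K_M.gguf" is resolved against the current working directory, not against chatbot.py itself, so it only finds the weights when the app is launched from src/backend/. Below is a minimal sketch of anchoring the lookup to the module location instead, assuming chatbot.py sits at src/backend/chatbot.py and the GGUF files live in a models/ directory at the repository root; the helper name is illustrative and not part of this commit.

from pathlib import Path

# Hypothetical helper: build the model path from this file's location
# rather than from the process's working directory.
def resolve_model_path(filename: str) -> str:
    # parents[2] climbs backend/ -> src/ -> repository root
    # (assumption: models/ sits next to src/ at that level).
    repo_root = Path(__file__).resolve().parents[2]
    return str(repo_root / "models" / filename)

model_path = resolve_model_path("llama-2-7b-chat.Q4_K_M.gguf")
print("model path: ", model_path)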