Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -92,21 +92,21 @@ def getLLMModel(LLMID):
     if LLMID == 1:
         llm = Replicate(
             model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
-            model_kwargs={"temperature": 0.2,
+            model_kwargs={"temperature": 0.2,"max_new_tokens":2500})
         print("LLAMA2 13B LLM Selected")
     elif LLMID == 2:
         llm = Replicate(
             model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
-            model_kwargs={"temperature": 0.2,
+            model_kwargs={"temperature": 0.2,"max_new_tokens":2500})
         print("LLAMA2 7B LLM Selected")
     elif LLMID == 3:
         llm = Replicate(model="meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e",
-                        model_kwargs={"temperature": 0.2,
+                        model_kwargs={"temperature": 0.2,"max_new_tokens":2500})
         print("LLAMA2 7B Chat LLM Selected")
     elif LLMID == 4:
         llm = Replicate(
             model="a16z-infra/mistral-7b-instruct-v0.1:83b6a56e7c828e667f21fd596c338fd4f0039b46bcfa18d973e8e70e455fda70",
-            model_kwargs={"temperature": 0.2,
+            model_kwargs={"temperature": 0.2,"max_new_tokens":2500})
         print("Mistral AI LLM Selected")
     else:
         llm = OpenAI(temperature=0.0)
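For context, here is a minimal, self-contained sketch of what getLLMModel might look like after this commit. The diff only covers lines 92-112 of app.py, so everything outside the if/elif chain is an assumption: the langchain.llms imports (which match the Replicate and OpenAI wrapper names called in the diff), the REPLICATE_API_TOKEN setup, and the trailing return are inferred, not taken from the repository.

import os

# Assumption: Replicate and OpenAI here are the LangChain LLM wrappers;
# the diff calls them but does not show the import lines.
from langchain.llms import Replicate, OpenAI

# The Replicate wrapper reads its API token from the environment:
# os.environ["REPLICATE_API_TOKEN"] = "..."  # required for LLMID 1-4

def getLLMModel(LLMID):
    # Each Replicate branch pins a specific model version hash and, after
    # this commit, caps generation at 2500 new tokens at temperature 0.2.
    if LLMID == 1:
        llm = Replicate(
            model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        print("LLAMA2 13B LLM Selected")
    elif LLMID == 2:
        llm = Replicate(
            model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        # Label kept verbatim from the diff; this branch actually selects the 70B model.
        print("LLAMA2 7B LLM Selected")
    elif LLMID == 3:
        llm = Replicate(
            model="meta/llama-2-7b-chat:8e6975e5ed6174911a6ff3d60540dfd4844201974602551e10e9e87ab143d81e",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        print("LLAMA2 7B Chat LLM Selected")
    elif LLMID == 4:
        llm = Replicate(
            model="a16z-infra/mistral-7b-instruct-v0.1:83b6a56e7c828e667f21fd596c338fd4f0039b46bcfa18d973e8e70e455fda70",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 2500})
        print("Mistral AI LLM Selected")
    else:
        # Fallback: OpenAI completion model with deterministic sampling.
        llm = OpenAI(temperature=0.0)
    return llm  # assumed; the diff does not show the function's return

The substance of the commit is the completed model_kwargs dict: every Replicate branch now passes "max_new_tokens": 2500 alongside the temperature, where the removed lines end mid-dict as extracted.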