Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -14,6 +14,7 @@ from langchain.prompts import PromptTemplate
|
|
14 |
from langchain.schema.runnable import RunnablePassthrough
|
15 |
from langchain.chains import LLMChain
|
16 |
import transformers
|
|
|
17 |
|
18 |
import transformers
|
19 |
from transformers import pipeline
|
@@ -23,14 +24,13 @@ import transformers
|
|
23 |
model_name='mistralai/Mistral-7B-Instruct-v0.1'
|
24 |
from huggingface_hub import login
|
25 |
login(token=st.secrets["HF_TOKEN"])
|
26 |
-
from ctransformers import AutoModelForCausalLM, AutoTokenizer
|
27 |
|
28 |
# model loading.
|
29 |
model = AutoModelForCausalLM.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
|
30 |
model_file="mistral-7b-instruct-v0.1.Q5_K_M.gguf",
|
31 |
model_type="mistral",
|
32 |
max_new_tokens=1048,
|
33 |
-
temperature=0.
|
34 |
hf=True
|
35 |
)
|
36 |
|
|
|
# NOTE(review): reconstructed from a scraped diff view of app.py — this is the
# post-commit state of original file lines 14-36. File lines 21-23 fell outside
# the diff hunk and are NOT reproduced here; confirm against the full file.
from langchain.schema.runnable import RunnablePassthrough
from langchain.chains import LLMChain
from ctransformers import AutoModelForCausalLM, AutoTokenizer

# NOTE(review): the commit imported `transformers` twice (new-file lines 16 and
# 19); deduplicated to a single import here.
import transformers
from transformers import pipeline

model_name = 'mistralai/Mistral-7B-Instruct-v0.1'

# Authenticate against the Hugging Face Hub. `st` (streamlit) is imported above
# this chunk; the token is read from the Streamlit secrets store, never
# hard-coded.
from huggingface_hub import login
login(token=st.secrets["HF_TOKEN"])

# model loading.
# Quantized GGUF build of Mistral-7B-Instruct via ctransformers. hf=True asks
# ctransformers for a transformers-compatible model object so it can be used
# with a HF pipeline downstream. temperature=0.01 gives near-greedy decoding
# (the pre-commit `temperature=0.` was a syntax error — missing trailing comma
# — which this commit fixed).
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
    model_file="mistral-7b-instruct-v0.1.Q5_K_M.gguf",
    model_type="mistral",
    max_new_tokens=1048,
    temperature=0.01,
    hf=True,
)