Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ from langchain.prompts import PromptTemplate
 # Use a pipeline as a high-level helper
 from transformers import pipeline
 
-
+model_from_hugging_face = pipeline("text-generation", model="TheBloke/Llama-2-7B-Chat-GGML")
 # # from langchain.llms import CTransformers
 # from langchain_community.llms import CTransformers
 
@@ -23,7 +23,7 @@ def getLLMResponse(form_input,email_sender,email_recipient,email_style):
 
 # CTransformers is a Python library that provides bindings for transformer models implemented in C/C++ using the GGML library
 
-llm = CTransformers(
+llm = CTransformers(model_from_hugging_face,  # models/llama-2-7b-chat.ggmlv3.q8_0.bin, https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
                     model_type='llama',
                     config={'max_new_tokens': 256,
                             'temperature': 0.01})
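Two caveats about the committed change: transformers' pipeline cannot load a GGML repository such as TheBloke/Llama-2-7B-Chat-GGML (it ships only quantized .bin files, not PyTorch/safetensors weights), and CTransformers expects a model path or Hugging Face repo id as its first argument, not a pipeline object. Below is a minimal sketch of the conventional CTransformers usage suggested by the commented-out import and the model file named in the comment; the test prompt is illustrative only, and it assumes the langchain-community and ctransformers packages are installed.

# Minimal sketch: load the GGML checkpoint through langchain_community's
# CTransformers wrapper instead of transformers.pipeline.
from langchain_community.llms import CTransformers

llm = CTransformers(
    # Either a local file such as 'models/llama-2-7b-chat.ggmlv3.q8_0.bin',
    # or a Hugging Face repo id plus the specific GGML file inside it:
    model='TheBloke/Llama-2-7B-Chat-GGML',
    model_file='llama-2-7b-chat.ggmlv3.q8_0.bin',
    model_type='llama',
    config={'max_new_tokens': 256,
            'temperature': 0.01})

print(llm.invoke('Write a short test email.'))  # illustrative prompt

Downloading the .bin file once and pointing model at the local path avoids re-fetching the weights on every restart.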