Hemasagar committed on
Commit
1ee02b9
·
verified ·
1 Parent(s): 5cb30ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -4,7 +4,7 @@ from langchain.prompts import PromptTemplate
4
  # Use a pipeline as a high-level helper
5
  from transformers import pipeline
6
 
7
- CTransformers = pipeline("text-generation", model="TheBloke/Llama-2-7B-Chat-GGML")
8
  # # from langchain.llms import CTransformers
9
  # from langchain_community.llms import CTransformers
10
 
@@ -23,7 +23,7 @@ def getLLMResponse(form_input,email_sender,email_recipient,email_style):
23
 
24
  #C Transformers is the Python library that provides bindings for transformer models implemented in C/C++ using the GGML library
25
 
26
- llm = CTransformers(model='models/llama-2-7b-chat.ggmlv3.q8_0.bin', #https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
27
  model_type='llama',
28
  config={'max_new_tokens': 256,
29
  'temperature': 0.01})
 
4
  # Use a pipeline as a high-level helper
5
  from transformers import pipeline
6
 
7
+ model_from_hugging_face = pipeline("text-generation", model="TheBloke/Llama-2-7B-Chat-GGML")
8
  # # from langchain.llms import CTransformers
9
  # from langchain_community.llms import CTransformers
10
 
 
23
 
24
  #C Transformers is the Python library that provides bindings for transformer models implemented in C/C++ using the GGML library
25
 
26
+ llm = CTransformers(model_from_hugging_face # models/llama-2-7b-chat.ggmlv3.q8_0.bin, #https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
27
  model_type='llama',
28
  config={'max_new_tokens': 256,
29
  'temperature': 0.01})