pratikshahp committed on
Commit
63741c5
·
verified ·
1 Parent(s): 6bc3e62

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -10
app.py CHANGED
@@ -7,21 +7,16 @@ from langchain_community.vectorstores import Chroma
7
  from langchain_community.embeddings import HuggingFaceEmbeddings
8
  from langchain_text_splitters import RecursiveCharacterTextSplitter
9
  import os
10
- from dotenv import load_dotenv
11
- # Load model directly
12
-
13
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
14
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
15
 
16
  # Load environment variables
17
- load_dotenv()
18
  # hf_api_key = os.getenv("HF_TOKEN")
19
-
20
- #model_name = "openai-community/gpt2"
21
  # model_name = "google/gemma-2-9b"
22
 
23
- #tokenizer = AutoTokenizer.from_pretrained(model_name)
24
- #model = AutoModelForCausalLM.from_pretrained(model_name) # ,use_auth_token=hf_api_key)
25
 
26
 
27
  def get_llm_response(input_prompt, content, prompt):
 
7
  from langchain_community.embeddings import HuggingFaceEmbeddings
8
  from langchain_text_splitters import RecursiveCharacterTextSplitter
9
  import os
10
+ #from dotenv import load_dotenv
 
 
 
 
11
 
12
  # Load environment variables
13
+ #load_dotenv()
14
  # hf_api_key = os.getenv("HF_TOKEN")
15
+ model_name = "openai-community/gpt2"
 
16
  # model_name = "google/gemma-2-9b"
17
 
18
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
19
+ model = AutoModelForCausalLM.from_pretrained(model_name) # ,use_auth_token=hf_api_key)
20
 
21
 
22
  def get_llm_response(input_prompt, content, prompt):