pratikshahp committed (verified)
Commit 6bc3e62 · 1 Parent(s): 37310e1

Update app.py

Files changed (1): app.py (+8 -3)
app.py CHANGED
@@ -2,21 +2,26 @@ import gradio as gr
 import fitz # PyMuPDF
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
+
 from langchain_community.vectorstores import Chroma
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 import os
 from dotenv import load_dotenv
+# Load model directly
+
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
 
 # Load environment variables
 load_dotenv()
 # hf_api_key = os.getenv("HF_TOKEN")
 
-model_name = "openai-community/gpt2"
+#model_name = "openai-community/gpt2"
 # model_name = "google/gemma-2-9b"
 
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name) # ,use_auth_token=hf_api_key)
+#tokenizer = AutoTokenizer.from_pretrained(model_name)
+#model = AutoModelForCausalLM.from_pretrained(model_name) # ,use_auth_token=hf_api_key)
 
 
 def get_llm_response(input_prompt, content, prompt):
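
Note that meta-llama/Llama-2-7b-chat-hf is a gated checkpoint on the Hugging Face Hub, so from_pretrained generally needs an access token that has been granted access to that repo (the commented-out hf_api_key / HF_TOKEN lines in the diff hint at this). A minimal sketch of token-based loading, assuming HF_TOKEN is set in the Space's environment or in a .env file, not the committed code itself:

import os
from dotenv import load_dotenv
from transformers import AutoTokenizer, AutoModelForCausalLM

# Read the Hugging Face access token (assumes HF_TOKEN is defined in .env or the environment)
load_dotenv()
hf_api_key = os.getenv("HF_TOKEN")

model_name = "meta-llama/Llama-2-7b-chat-hf"
# `token` supersedes the older `use_auth_token` argument in recent transformers releases
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_api_key)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_api_key)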