AI-trainer1 committed
Commit 329bb1f · verified · Parent: dae8a2c

Update app.py

Files changed (1)
  1. app.py +11 -11
app.py CHANGED
@@ -12,8 +12,8 @@ from langchain_core.prompts import ChatPromptTemplate
 import os
 from dotenv import load_dotenv
 from helper import SYSTEM_PROMPT
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
-# from langchain.embeddings import HuggingFaceEmbeddings # open source free embedding
+# from langchain_google_genai import GoogleGenerativeAIEmbeddings
+from langchain.embeddings import HuggingFaceEmbeddings # open source free embedding
 load_dotenv()
 
 
@@ -23,9 +23,9 @@ class PDFQAProcessor:
 
     llm = ChatGroq(
         # model_name="deepseek-r1-distill-llama-70b",
-        model_name="llama3-70b-8192",
+        model_name="llama-3.3-70b-versatile",
         temperature=0.1,
-        max_tokens=3000,
+        max_tokens=8192,
         api_key = os.getenv('GROQ_API_KEY')
         )
 
@@ -37,15 +37,15 @@ class PDFQAProcessor:
 
     question_answer_chain = create_stuff_documents_chain(llm, prompt)
 
-    # EMBEDDING_MODEL = "intfloat/e5-large-v2"
+    EMBEDDING_MODEL = "intfloat/e5-large-v2"
 
-    # embeddings = HuggingFaceEmbeddings(
-    #     model_name=EMBEDDING_MODEL,
-    #     model_kwargs={'device': 'cpu'},
-    #     encode_kwargs={'normalize_embeddings': True}
-    # )
+    embeddings = HuggingFaceEmbeddings(
+        model_name=EMBEDDING_MODEL,
+        model_kwargs={'device': 'cpu'},
+        encode_kwargs={'normalize_embeddings': True}
+    )
 
-    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+    # embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
     CHUNK_SIZE = 700
     CHUNK_OVERLAP = 150
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=CHUNK_SIZE,chunk_overlap = CHUNK_OVERLAP)
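
Below is a minimal sketch (not part of the commit) of how the configuration enabled in this diff could be exercised end to end. The Groq model, HuggingFace embedding, and chunking values are taken from the changes above; the FAISS vector store, the sample input text, and the retrieval call are assumptions, since the rest of app.py is not shown here.

import os

from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain.embeddings import HuggingFaceEmbeddings  # open-source embedding, as enabled in this commit
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS  # assumed store; app.py may use a different one

load_dotenv()

# LLM settings taken from the diff above.
llm = ChatGroq(
    model_name="llama-3.3-70b-versatile",
    temperature=0.1,
    max_tokens=8192,
    api_key=os.getenv("GROQ_API_KEY"),
)

# Embedding settings taken from the diff above.
embeddings = HuggingFaceEmbeddings(
    model_name="intfloat/e5-large-v2",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)

# Chunking parameters from the diff; the input text is a placeholder for extracted PDF text.
splitter = RecursiveCharacterTextSplitter(chunk_size=700, chunk_overlap=150)
docs = splitter.create_documents(["Replace this with text extracted from the uploaded PDF."])

# Build an index and retrieve; requires the faiss-cpu package (an assumption).
store = FAISS.from_documents(docs, embeddings)
retriever = store.as_retriever()
print(retriever.invoke("test query"))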