jchen8000 committed on
Commit
055baa9
·
verified ·
1 Parent(s): 4fe1364

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -2
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import gradio as gr
2
  from langchain_community.document_loaders import PyPDFLoader
3
  from langchain_text_splitters import RecursiveCharacterTextSplitter
@@ -13,6 +14,18 @@ from langchain_core.runnables import RunnablePassthrough
13
  # Initialize the FAISS vector store
14
  vector_store = None
15
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  # Function to handle PDF upload and indexing
17
  def index_pdf(pdf):
18
  global vector_store
@@ -48,10 +61,28 @@ def chatbot_query(query):
48
  return response
49
 
50
 
 
 
51
  def generate_response(query, history, model, temperature, max_tokens, top_p, seed):
52
- response = query + "\n"
53
- response = response + model + "\n"
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  return response
56
 
57
 
 
1
+ import random
2
  import gradio as gr
3
  from langchain_community.document_loaders import PyPDFLoader
4
  from langchain_text_splitters import RecursiveCharacterTextSplitter
 
14
  # Initialize the FAISS vector store
15
  vector_store = None
16
 
17
+ template = \
18
+ """Use the following pieces of context to answer the question at the end.
19
+ If you don't know the answer, just say that you don't know, don't try to make up an answer.
20
+ Always say "Thanks for asking!" at the end of the answer.
21
+
22
+ {context}
23
+
24
+ Question: {question}
25
+
26
+ Answer:
27
+ """
28
+
29
  # Function to handle PDF upload and indexing
30
  def index_pdf(pdf):
31
  global vector_store
 
61
  return response
62
 
63
 
64
+
65
+
66
def generate_response(query, history, model, temperature, max_tokens, top_p, seed):
    """Answer *query* using a RAG chain over the indexed PDF.

    Args:
        query: The user's question.
        history: Chat history supplied by the Gradio ChatInterface (unused here).
        model: Groq model name to run the completion with.
        temperature: Sampling temperature forwarded to the model.
        max_tokens: Maximum completion length forwarded to the model.
        top_p: Nucleus-sampling parameter forwarded to the model.
        seed: Sampling seed; 0 means "pick a random seed".

    Returns:
        The generated answer string, or an instruction to index a PDF first.
    """
    # Retrieval is impossible until index_pdf() has populated the store.
    if vector_store is None:
        return "Please upload and index a PDF at the Indexing tab."

    # Seed 0 is the UI's "random" sentinel: draw a concrete seed so the
    # request is reproducible if the same seed is reused.
    if seed == 0:
        seed = random.randint(1, 100000)

    # BUG FIX: temperature, max_tokens, top_p and the drawn seed were accepted
    # but never forwarded to the model, so the UI sliders had no effect.
    llm = ChatGroq(
        groq_api_key=userdata.get('GROQ_API_KEY'),
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        model_kwargs={"top_p": top_p, "seed": seed},
    )

    custom_rag_prompt = PromptTemplate.from_template(template)

    # NOTE(review): `retriever` was referenced but not defined in this scope;
    # derive it from the vector store checked above. If a module-level
    # `retriever` exists elsewhere, this is equivalent — confirm against the
    # full file.
    retriever = vector_store.as_retriever()

    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | custom_rag_prompt
        | llm
        | StrOutputParser()
    )

    return rag_chain.invoke(query)
87
 
88