catastropiyush committed on
Commit
56e58ae
·
verified ·
1 Parent(s): 97c741d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -13
app.py CHANGED
@@ -12,11 +12,11 @@ load_dotenv()
12
 
13
  # Configure the Llama index settings
14
  Settings.llm = HuggingFaceInferenceAPI(
15
- model_name="google/gemma-1.1-7b-it",
16
- tokenizer_name="google/gemma-1.1-7b-it",
17
- context_window=3000,
18
  token=os.getenv("HF_TOKEN"),
19
- max_new_tokens=512,
20
  generate_kwargs={"temperature": 0.1},
21
  )
22
  Settings.embed_model = HuggingFaceEmbedding(
@@ -49,7 +49,7 @@ def handle_query(query):
49
  chat_text_qa_msgs = [
50
  (
51
  "user",
52
- """You are a Q&A assistant named CHATTO, created by Suriya. You have a specific response programmed for when users specifically ask about your creator, Suriya. The response is: "I was created by Suriya, an enthusiast in Artificial Intelligence. He is dedicated to solving complex problems and delivering innovative solutions. With a strong focus on machine learning, deep learning, Python, generative AI, NLP, and computer vision, Suriya is passionate about pushing the boundaries of AI to explore new possibilities." For all other inquiries, your main goal is to provide answers as accurately as possible, based on the instructions and context you have been given. If a question does not match the provided context or is outside the scope of the document, kindly advise the user to ask questions within the context of the document.
53
  Context:
54
  {context_str}
55
  Question:
@@ -71,24 +71,22 @@ def handle_query(query):
71
 
72
 
73
  # Streamlit app initialization
74
- st.title("(PDF) Information and Inference🗞️")
75
- st.markdown("Retrieval-Augmented Generation")
76
- st.markdown("start chat ...🚀")
77
 
78
  if 'messages' not in st.session_state:
79
- st.session_state.messages = [{'role': 'assistant', "content": 'Hello! Upload a PDF and ask me anything about its content.'}]
80
 
81
  with st.sidebar:
82
- st.title("Menu:")
83
- uploaded_file = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button")
84
  if st.button("Submit & Process"):
85
- with st.spinner("Processing..."):
86
  filepath = "data/saved_pdf.pdf"
87
  with open(filepath, "wb") as f:
88
  f.write(uploaded_file.getbuffer())
89
  # displayPDF(filepath) # Display the uploaded PDF
90
  data_ingestion() # Process PDF every time new file is uploaded
91
- st.success("Done")
92
 
93
  user_prompt = st.chat_input("Ask me anything about the content of the PDF:")
94
  if user_prompt:
 
12
 
13
  # Configure the Llama index settings
14
  Settings.llm = HuggingFaceInferenceAPI(
15
+ model_name="meta-llama/Meta-Llama-3-8B-Instruct",
16
+ tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
17
+ context_window=4000,
18
  token=os.getenv("HF_TOKEN"),
19
+ max_new_tokens=2048,
20
  generate_kwargs={"temperature": 0.1},
21
  )
22
  Settings.embed_model = HuggingFaceEmbedding(
 
49
  chat_text_qa_msgs = [
50
  (
51
  "user",
52
+ """You are a Q/A Scientific Assistant.Be very careful and answer in detail.
53
  Context:
54
  {context_str}
55
  Question:
 
71
 
72
 
73
  # Streamlit app initialization
74
+ st.title("RAG Extractor")
 
 
75
 
76
  if 'messages' not in st.session_state:
77
+ st.session_state.messages = [{'role': 'assistant', "content": 'Hello I am Pingu! Upload a PDF and ask me anything about its content.'}]
78
 
79
  with st.sidebar:
80
+ st.title("Input")
81
+ uploaded_file = st.file_uploader("Upload your PDF Files and then click on the Submit & Process Button")
82
  if st.button("Submit & Process"):
83
+ with st.spinner("Loading..."):
84
  filepath = "data/saved_pdf.pdf"
85
  with open(filepath, "wb") as f:
86
  f.write(uploaded_file.getbuffer())
87
  # displayPDF(filepath) # Display the uploaded PDF
88
  data_ingestion() # Process PDF every time new file is uploaded
89
+ st.success("PDF is ready!")
90
 
91
  user_prompt = st.chat_input("Ask me anything about the content of the PDF:")
92
  if user_prompt: