# GenXai — Streamlit chat app (Hugging Face Space).
# NOTE: scraped page chrome (Space status, blob hashes, line-number gutter)
# removed; the Python source begins below.
import streamlit as st
import os
from streamlit_chat import message
from PyPDF2 import PdfReader
import bs4
import time
import google.generativeai as genai
from langchain.prompts import PromptTemplate
from langchain import LLMChain
from langchain_google_genai import ChatGoogleGenerativeAI
import nest_asyncio
from langchain.document_loaders import WebBaseLoader
# Patch the running event loop so async loaders (WebBaseLoader.aload) can be
# driven from inside Streamlit's own loop.
nest_asyncio.apply()
# NOTE(review): if GOOGLE_API_KEY is unset, os.getenv returns None and this
# assignment raises TypeError — confirm the deployment always sets the key.
os.environ["GOOGLE_API_KEY"] = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
# Chat model used by the LLMChain below; low temperature for focused answers.
llm = ChatGoogleGenerativeAI(model="gemini-pro",
                             temperature=0.4)
# Prompt persona and instructions for the bot. The four placeholders are
# filled on every call by conversational_chat() below.
template = """You are GenXai, a friendly chatbot created by Suriya, an AI enthusiast. Your goal is to assist users by providing relevant information from both general knowledge and provided documents.
If the user asks about a specific document, try to use the extracted text from that document in your response. If the question is not related to any specific document, rely on your general knowledge. If the user asks about a link, respond with the extracted text from that link.
Conversation Context:
Chat History: {chat_history}
Provided Documents: {provided_docs}
Extracted Text from Links: {extracted_text}
User: {user_question}
GenXai:
"""
prompt = PromptTemplate(
    input_variables=["chat_history", "provided_docs", "extracted_text", "user_question"],
    template=template
)
# Chain wiring the Gemini model to the prompt; verbose logs each rendered prompt.
llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
)
# Module-level mirrors of st.session_state, refreshed in conversational_chat().
previous_response = ""
provided_docs = ""
extracted_text = ""
def conversational_chat(query):
    """Answer `query` with the LLM chain, using the chat history, uploaded
    document text and extracted link text held in st.session_state.

    Side effects: refreshes the module-level context mirrors and appends the
    (query, answer) pair to st.session_state['history'].

    Returns the model's answer string.
    """
    global previous_response, provided_docs, extracted_text
    # Rebuild the transcript from scratch on every call. The original code
    # appended to the module-level string each call, so every turn re-added
    # the whole prior history, duplicating old turns in the prompt.
    previous_response = "".join(
        f"User: {turn[0]}\n Chatto: {turn[1]}\n"
        for turn in st.session_state['history']
        if turn is not None
    )
    provided_docs = "".join(st.session_state["docs"])
    extracted_text = "".join(st.session_state["extracted_text"])
    result = llm_chain.predict(
        chat_history=previous_response,
        user_question=query,
        provided_docs=provided_docs,
        extracted_text=extracted_text
    )
    st.session_state['history'].append((query, result))
    return result
# --- Page header ------------------------------------------------------------
st.title("GenXai- Generative Xpert AI ")
st.text("I am GenXai Your Friendly Assitant")
# st.markdown("Built by [Suriya❤️](https://github.com/theSuriya)")
# Per-session state: chat turn history, concatenated PDF text, and text
# extracted from submitted URLs (each initialised once per browser session).
if 'history' not in st.session_state:
    st.session_state['history'] = []
if 'docs' not in st.session_state:
    st.session_state['docs'] = []
if "extracted_text" not in st.session_state:
    st.session_state["extracted_text"] = []
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF.

    pdf_docs: iterable of file-like objects accepted by PyPDF2.PdfReader.
    Returns a single string; empty if nothing could be extracted.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() may return None (e.g. image-only pages); the
            # original then raised TypeError on string concatenation.
            text += page.extract_text() or ""
    return text
def response_streaming(text):
    """Yield `text` one character at a time with a tiny pause after each,
    so st.write_stream renders a typewriter effect."""
    chars = iter(text)
    while True:
        try:
            ch = next(chars)
        except StopIteration:
            return
        yield ch
        time.sleep(0.001)
def get_url_text(url_link):
    """Fetch `url_link` with WebBaseLoader and return the concatenated page
    text. Best-effort: on any failure, logs to stdout and returns ""."""
    try:
        web_loader = WebBaseLoader(url_link)
        # Throttle to one request per second to be polite to the target site.
        web_loader.requests_per_second = 1
        pages = web_loader.aload()
        return "".join(doc.page_content for doc in pages)
    except Exception as exc:
        print(f"Error fetching or processing URL: {exc}")
        return ""
# --- Sidebar: ingest PDFs and/or a URL into the bot's session memory ---------
with st.sidebar:
    st.title("Add a file for GenXai memory:")
    uploaded_files = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
    uploaded_url = st.text_input("Please upload a URL:")
    if st.button("Submit & Process"):
        if uploaded_files or uploaded_url:
            with st.spinner("Processing..."):
                if uploaded_files:
                    # Parse once and append the whole string. The original
                    # called get_pdf_text twice (parsing every PDF twice) and
                    # used `list += str`, which extends the list one character
                    # at a time; "".join() over the list is unchanged.
                    pdf_text = get_pdf_text(uploaded_files)
                    st.session_state["docs"].append(pdf_text)
                if uploaded_url:
                    # Same fix: fetch the URL once instead of twice.
                    url_text = get_url_text(uploaded_url)
                    st.session_state["extracted_text"].append(url_text)
                st.success("Processing complete!")
        else:
            st.error("Please upload at least one PDF file or provide a URL.")
# --- Chat UI: replay the transcript, then handle the next user turn ----------
# Seed the conversation with a greeting on first render.
if 'messages' not in st.session_state:
    st.session_state.messages = [{'role': 'assistant', "content": "I'm Here to help you questions"}]
# Streamlit reruns the whole script on every interaction, so redraw the stored
# transcript each run. Loop variable renamed: the original used `message`,
# shadowing the `message` imported from streamlit_chat.
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.write(msg['content'])
user_input = st.chat_input("Ask Your Questions 👉..")
if user_input:
    st.session_state.messages.append({'role': 'user', "content": user_input})
    with st.chat_message("user"):
        st.write(user_input)
    with st.spinner("Thinking..."):
        response = conversational_chat(user_input)
    with st.chat_message("assistant"):
        # Stream character-by-character for a typewriter effect.
        full_response = st.write_stream(response_streaming(response))
    st.session_state.messages.append({"role": "assistant", "content": response})