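"""Gradio app for RamayanaGPT: retrieval-augmented answers from the Valmiki Ramayana.

Pre-embedded shlokas and explanations are retrieved from a MongoDB Atlas vector
index via LlamaIndex and answered with a Groq-hosted Llama 3.1 model; the user
supplies their own Groq API key in the UI.
"""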
import gradio as gr
import pymongo
import certifi
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.groq import Groq
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
from llama_index.core.prompts import PromptTemplate
from dotenv import load_dotenv
import os
import base64
import markdown as md  # local markdown.py module (not the PyPI package) providing description, groq_api_key, and footer used below
# Load environment variables
load_dotenv()
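# .env is expected to define ATLAS_CONNECTION_STRING (read below); the Groq API key
# is collected from the user in the UI rather than from the environment.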
# --- MongoDB Config ---
# ATLAS_CONNECTION_STRING = "mongodb+srv://sarath:[email protected]/?retryWrites=true&w=majority&appName=Itihasa"
ATLAS_CONNECTION_STRING = os.getenv("ATLAS_CONNECTION_STRING")
DB_NAME = "RAG"
COLLECTION_NAME = "ramayana"
VECTOR_INDEX_NAME = "ramayana_vector_index"
# --- Embedding Model ---
embed_model = HuggingFaceEmbedding(model_name="intfloat/multilingual-e5-base")
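# NOTE (assumption): the Atlas collection is expected to already contain the embedded
# Ramayana chunks, with "ramayana_vector_index" defined as an Atlas Vector Search index
# whose numDimensions matches this embedding model (multilingual-e5-base outputs
# 768-dimensional vectors). No document ingestion or embedding happens in this app.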
# --- Prompt Template ---
ramayana_qa_template = PromptTemplate(
"""You are an expert on the Valmiki Ramayana and a guide who always inspires people with the great Itihasa like the Ramayana.
Below is text from the epic, including shlokas and their explanations:
---------------------
{context_str}
---------------------
Using only this information, answer the following query.
Query: {query_str}
Answer:
- Intro or a general description of the ```Query```
- Related shloka/shlokas, each followed by its explanation
- Overview of the ```Query```
"""
)
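# {context_str} is filled by LlamaIndex with the retrieved passages and
# {query_str} with the user's question when this text_qa_template is applied.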
# --- Connect to MongoDB once at startup ---
def get_vector_index_once():
    mongo_client = pymongo.MongoClient(
        ATLAS_CONNECTION_STRING,
        tlsCAFile=certifi.where(),
        tlsAllowInvalidCertificates=False,
        connectTimeoutMS=30000,
        serverSelectionTimeoutMS=30000,
    )
    # Raises if the cluster is unreachable, so a bad connection string fails fast at startup
    mongo_client.server_info()
    print("Connected to MongoDB Atlas.")
    vector_store = MongoDBAtlasVectorSearch(
        mongo_client,
        db_name=DB_NAME,
        collection_name=COLLECTION_NAME,
        vector_index_name=VECTOR_INDEX_NAME,
    )
    return VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
# Connect once
vector_index = get_vector_index_once()
# --- Respond Function (uses API key from state) ---
def chat_with_groq(message, history, groq_key):
    llm = Groq(model="llama-3.1-8b-instant", api_key=groq_key)
    query_engine = vector_index.as_query_engine(
        llm=llm,
        text_qa_template=ramayana_qa_template,
        similarity_top_k=5,
        verbose=True,
    )
    response = query_engine.query(message)
    return str(response)
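# The Groq LLM and query engine are rebuilt on every message so the user-supplied
# API key takes effect immediately, while the MongoDB-backed vector index above is
# created once and shared across requests.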
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
# Encode the images
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")
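# The base64-encoded logos are inlined into the footer HTML below (md.footer is
# assumed to contain matching {} placeholders for .format()).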
# --- Gradio UI ---
with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as demo:
    with gr.Tabs():
        with gr.TabItem("Intro"):
            gr.Markdown(md.description)

        with gr.TabItem("GPT"):
            with gr.Column(visible=True) as accordion_container:
                with gr.Accordion("How to get Groq API KEY", open=False):
                    gr.Markdown(md.groq_api_key)

                groq_key_box = gr.Textbox(
                    label="Enter Groq API Key",
                    type="password",
                    placeholder="Paste your Groq API key here..."
                )
                start_btn = gr.Button("Start Chat")

            groq_state = gr.State(value="")

            # Chat container, initially hidden
            with gr.Column(visible=False) as chatbot_container:
                chatbot = gr.ChatInterface(
                    fn=lambda message, history, groq_key: chat_with_groq(message, history, groq_key),
                    additional_inputs=[groq_state],
                    title="RamayanaGPT",
                    # description="Ask questions from the Valmiki Ramayana. Powered by RAG + MongoDB + LlamaIndex.",
                )

            # Show chat and hide inputs
            def save_key_and_show_chat(key):
                return key, gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)

            start_btn.click(
                fn=save_key_and_show_chat,
                inputs=[groq_key_box],
                outputs=[groq_state, groq_key_box, start_btn, accordion_container, chatbot_container],
            )

    gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))

if __name__ == "__main__":
    demo.launch()