# Hugging Face file-viewer page residue, commented out so this file parses
# as Python: "Pijush2023's picture / Update app.py / 2848ceb verified /
# raw / history blame / 3.04 kB"
import os
import gradio as gr
from langchain_redis import RedisConfig, RedisVectorStore
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_groq import ChatGroq
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain_huggingface import HuggingFaceEmbeddings
# Set API keys (fail fast with a KeyError at startup if either is missing).
openai_api_key = os.environ["OPENAI_API_KEY"]
groq_api_key = os.environ["GROQ_API_KEY"]

# Redis connection string. Prefer an environment override so the credential
# does not have to live in source control; the literal below is kept only as
# a backward-compatible fallback.
# SECURITY NOTE(review): this fallback embeds a live Redis password in the
# source — rotate the credential and move it fully into the environment.
REDIS_URL = os.environ.get(
    "REDIS_URL",
    "redis://:KWq0uAoBYjBGErKvyMvexMqB9ep7v2Ct@redis-11044.c266.us-east-1-3.ec2.redns.redis-cloud.com:11044",
)
# Redis vector-index configuration: the index name plus the metadata schema
# stored alongside each document (fields typed "tag" or "text" per the
# RediSearch field types).
config = RedisConfig(
    index_name="radar_data_index",
    redis_url=REDIS_URL,
    metadata_schema=[
        {"name": "category", "type": "tag"},
        {"name": "name", "type": "text"},
        {"name": "address", "type": "text"},
        {"name": "phone", "type": "text"},
    ],
)
# Embedding model used to vectorize queries. NOTE(review): presumably the
# same embedding model was used when the Redis index was built — verify,
# otherwise similarity search results will be meaningless.
embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])

# Vector store backed by the Redis index configured above (despite the
# original comment, these are OpenAI embeddings, not Hugging Face ones).
vector_store = RedisVectorStore(embeddings, config=config)
# Retriever returns the 2 most similar documents for each query.
retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 2})

# Groq-hosted chat model used for answer generation.
llm = ChatGroq(model="llama-3.2-1b-preview")
# Single-turn "human" prompt: Annie persona instructions followed by the
# retrieved context and the user question. The {question} and {context}
# placeholders are filled in by the RAG chain at invocation time.
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "human",
            """You’re Annie, a charming media personality created by Amit Lamba, guiding people around Birmingham, Alabama. Like Rick Steves or Anthony Bourdain, you’re here to give friendly, concise, and accurate advice with a dash of humor.
Keep answers relevant, upbeat, and engaging, spotlighting partner businesses whenever possible. Be conversational, like chatting with a clever friend, and encourage follow-up questions to create a smooth flow. Make users smile and deliver a delightful, informative experience—like a perfect dessert after a great meal.
Question: {question}
Context: {context}
Answer:""",
        ),
    ]
)
def format_docs(docs):
    """Flatten retrieved documents into one prompt-ready string.

    Each document's ``page_content`` is taken verbatim and the pieces are
    separated by a blank line.
    """
    contents = [document.page_content for document in docs]
    return "\n\n".join(contents)
# RAG pipeline (LCEL): retrieve the top-k documents and flatten them into
# {context}, pass the raw question through as {question}, render the prompt,
# generate with the Groq LLM, and parse the reply down to a plain string.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
def rag_chain_response(question):
    """Run the RAG chain on a user question and return the answer string."""
    return rag_chain.invoke(question)
# --- Gradio UI: question box on the left, answer box on the right ----------
with gr.Blocks(theme="rawrsor1/Everforest") as app:
    with gr.Row():
        with gr.Column(scale=1):
            question_box = gr.Textbox(
                placeholder="Type your question here...",
                label="Your Question",
                lines=2,
                max_lines=2,
            )
        with gr.Column(scale=2):
            answer_box = gr.Textbox(
                lines=10,
                max_lines=10,
            )
    with gr.Row():
        ask_button = gr.Button("Submit")

    # Clicking Submit sends the question through the RAG chain and writes
    # the generated answer into the output box.
    ask_button.click(
        rag_chain_response, inputs=question_box, outputs=answer_box
    )

app.launch(show_error=True)