# NOTE(review): this span contained non-code residue from the hosting page
# ("Spaces: Runtime error" banner, file size, git-blame hashes, line-number
# gutter). Converted to a comment so the module can be parsed.
import os
import gradio as gr
from langchain_redis import RedisConfig, RedisVectorStore
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_groq import ChatGroq
from langchain_community.embeddings import OpenAIEmbeddings
# Set API keys (both must be present in the environment; KeyError otherwise).
openai_api_key = os.environ["OPENAI_API_KEY"]
groq_api_key = os.environ["GROQ_API_KEY"]

# Redis connection string.
# SECURITY: the Redis password was hard-coded in this URL (and is now in
# source-control history — rotate it). It is read from REDIS_PASSWORD when
# set, falling back to the previous literal so existing deployments keep
# working unchanged.
_REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD", "KWq0uAoBYjBGErKvyMvexMqB9ep7v2Ct")
REDIS_URL = (
    f"redis://:{_REDIS_PASSWORD}"
    "@redis-11044.c266.us-east-1-3.ec2.redns.redis-cloud.com:11044"
)
# Redis vector-index configuration: the index name plus the metadata fields
# stored alongside each embedded document ("tag" fields are exact-match
# filters; "text" fields are full-text searchable).
config = RedisConfig(
    index_name="radar_data_index",
    redis_url=REDIS_URL,
    metadata_schema=[
        {"name": "category", "type": "tag"},
        {"name": "name", "type": "text"},
        {"name": "address", "type": "text"},
        {"name": "phone", "type": "text"},
    ],
)
# Embedding model used both to index documents and to embed queries.
embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
# Redis vector store backed by the OpenAI embeddings defined above.
vector_store = RedisVectorStore(embeddings, config=config)
# Similarity retriever returning the top-2 closest documents per query.
retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 2})
# Chat LLM served by Groq (reads GROQ_API_KEY from the environment).
llm = ChatGroq(model="llama-3.2-1b-preview")
# Persona prompt for the RAG chain. The template expects two variables:
# {question} (the user's query) and {context} (retrieved documents flattened
# by format_docs). The template text is runtime behavior — kept verbatim.
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "human",
            """You’re Annie, a charming media personality created by Amit Lamba, guiding people around Birmingham, Alabama. Like Rick Steves or Anthony Bourdain, you’re here to give friendly, concise, and accurate advice with a dash of humor.
Keep answers relevant, upbeat, and engaging, spotlighting partner businesses whenever possible. Be conversational, like chatting with a clever friend, and encourage follow-up questions to create a smooth flow. Make users smile and deliver a delightful, informative experience—like a perfect dessert after a great meal.
Question: {question}
Context: {context}
Answer:""",
        ),
    ]
)
def format_docs(docs):
    """Flatten retrieved documents into one context string.

    Each document's ``page_content`` is concatenated, separated by a
    blank line, for interpolation into the prompt's {context} slot.
    """
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)
# RAG pipeline (LCEL composition): retrieve the top-k documents and flatten
# them into {context}, pass the raw query through as {question}, fill the
# prompt, call the LLM, and parse the reply down to a plain string.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
def generate_follow_ups(response):
    """Build two simple follow-up questions from a model response.

    Uses the first and last whitespace-separated words of *response* as
    seeds. Returns an empty list for an empty or whitespace-only response
    (the original indexed ``response.split()[0]`` unconditionally and
    raised IndexError in that case).

    This is placeholder logic; it could be replaced with a model-based
    follow-up generator without changing the call sites.
    """
    words = response.split()
    if not words:  # guard: nothing to seed follow-ups from
        return []
    return [
        f"What more can you tell me about {words[0]}?",
        f"Can you elaborate on {words[-1]}?",
    ]
def rag_chain_response(messages, user_message):
    """Answer a user message via the RAG chain and update the chat state.

    Appends (user_message, answer) to *messages* and returns the updated
    history, a list of suggested follow-up questions, and "" so the
    caller's input textbox is cleared.
    """
    answer = rag_chain.invoke(user_message)
    messages.append((user_message, answer))
    # Follow-ups are derived from the fresh answer; "" clears the textbox.
    return messages, generate_follow_ups(answer), ""
def follow_up_click(messages, follow_up_question):
    """Handle a follow-up selection by treating it as a new user query.

    Appends (follow_up_question, answer) to *messages* and returns the
    updated history plus a refreshed list of follow-up suggestions.
    """
    answer = rag_chain.invoke(follow_up_question)
    messages.append((follow_up_question, answer))
    # New suggestions are seeded from the new answer.
    return messages, generate_follow_ups(answer)
# Define the Gradio app.
# BUGFIX: the original used gr.ButtonGroup, which is not a Gradio component
# and raises AttributeError at import time (the app never started). A
# gr.Radio plays the same role: its choices are the follow-up questions and
# its selected value is the question text the handler expects.
with gr.Blocks(theme="rawrsor1/Everforest") as app:
    chatbot = gr.Chatbot([], elem_id="RADAR", bubble_full_width=False)
    question_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here...")
    submit_btn = gr.Button("Submit")
    # Starts empty; populated with suggestions after each answer.
    follow_up_btns = gr.Radio(label="Follow-Up Questions", choices=[])

    def _respond(history, user_message):
        """Adapter: run the RAG handler, pack follow-ups as Radio choices."""
        history, follow_ups, cleared = rag_chain_response(history, user_message)
        return history, gr.update(choices=follow_ups, value=None), cleared

    def _follow_up(history, selected_question):
        """Adapter: answer the selected follow-up, refresh the choices."""
        history, follow_ups = follow_up_click(history, selected_question)
        return history, gr.update(choices=follow_ups, value=None)

    # Enter key and Submit button share the same handler.
    question_input.submit(
        _respond,
        inputs=[chatbot, question_input],
        outputs=[chatbot, follow_up_btns, question_input],
    )
    submit_btn.click(
        _respond,
        inputs=[chatbot, question_input],
        outputs=[chatbot, follow_up_btns, question_input],
    )
    # .select fires only on user interaction, so programmatically updating
    # the choices above does not re-trigger this handler.
    follow_up_btns.select(
        _follow_up,
        inputs=[chatbot, follow_up_btns],
        outputs=[chatbot, follow_up_btns],
    )

# Launch the Gradio app (show_error surfaces handler exceptions in the UI).
app.launch(show_error=True)