import os
import gradio as gr
from langchain_redis import RedisConfig, RedisVectorStore
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_groq import ChatGroq
from langchain_openai import OpenAIEmbeddings


# Read API keys from the environment
openai_api_key = os.environ["OPENAI_API_KEY"]
groq_api_key = os.environ["GROQ_API_KEY"]

# Define Redis configuration
REDIS_URL = "redis://:KWq0uAoBYjBGErKvyMvexMqB9ep7v2Ct@redis-11044.c266.us-east-1-3.ec2.redns.redis-cloud.com:11044"
config = RedisConfig(
    index_name="radar_data_index",
    redis_url=REDIS_URL,
    metadata_schema=[
        {"name": "category", "type": "tag"},
        {"name": "name", "type": "text"},
        {"name": "address", "type": "text"},
        {"name": "phone", "type": "text"},
    ],
)
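# The metadata_schema above tells RedisVectorStore which metadata fields to index
# alongside the embeddings: "tag" fields allow exact-match filtering (e.g. by
# category), while "text" fields are full-text searchable.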


# Initialize OpenAI embeddings
embeddings = OpenAIEmbeddings(api_key=openai_api_key)

# Initialize the Redis vector store and expose it as a top-k similarity retriever
vector_store = RedisVectorStore(embeddings, config=config)
retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 2})
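
# The index is assumed to be populated ahead of time. As a rough sketch only
# (placeholder data, not real listings), documents matching the schema above
# could be added like this:
#
# vector_store.add_texts(
#     texts=["Example Eatery is a cozy brunch spot in downtown Birmingham."],
#     metadatas=[{
#         "category": "restaurant",
#         "name": "Example Eatery",
#         "address": "123 Main St, Birmingham, AL",
#         "phone": "555-0100",
#     }],
# )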


# Define the language model
llm = ChatGroq(model="llama-3.2-1b-preview", api_key=groq_api_key)

# Define prompt
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "human",
            """You’re Annie, a charming media personality created by Amit Lamba, guiding people around Birmingham, Alabama. Like Rick Steves or Anthony Bourdain, you’re here to give friendly, concise, and accurate advice with a dash of humor.
Keep answers relevant, upbeat, and engaging, spotlighting partner businesses whenever possible. Be conversational, like chatting with a clever friend, and encourage follow-up questions to create a smooth flow. Make users smile and deliver a delightful, informative experience—like a perfect dessert after a great meal.
Question: {question}
Context: {context}
Answer:""",
        ),
    ]
)

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

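# Compose the RAG chain with LCEL: the retriever fetches the top-k matching
# documents and format_docs joins them into one context string, while
# RunnablePassthrough forwards the user's question unchanged; both feed the
# prompt, then the Groq model, and StrOutputParser extracts plain text.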
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

# Response handler for the Gradio app
def rag_chain_response(question):
    return rag_chain.invoke(question)
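
# Quick sanity check outside Gradio (hypothetical question; uncomment to try):
# print(rag_chain_response("What's a fun weekend activity in Birmingham?"))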

with gr.Blocks(theme="rawrsor1/Everforest") as app:
    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(
                placeholder="Type your question here...",
                label="Your Question",
                lines=2,
                max_lines=2,
            )
        with gr.Column(scale=2):
            response_output = gr.Textbox(
                label="Response",
                lines=10,
                max_lines=10,
            )
    with gr.Row():
        submit_btn = gr.Button("Submit")
    submit_btn.click(
        rag_chain_response, inputs=user_input, outputs=response_output
    )

app.launch(show_error=True)