File size: 2,804 Bytes
642842b
 
 
 
 
984282e
642842b
984282e
642842b
 
984282e
642842b
 
984282e
642842b
 
 
 
984282e
642842b
984282e
642842b
984282e
642842b
 
984282e
642842b
 
 
 
 
984282e
642842b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
984282e
 
642842b
 
984282e
642842b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
984282e
642842b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import os
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from dotenv import load_dotenv
import gradio as gr
import openai

# Load environment variables from .env file
load_dotenv()

# Set OpenAI API key on the openai module.
# NOTE(review): raises KeyError at import time if OPENAI_API_KEY is absent from
# the environment/.env — presumably an intentional fail-fast; confirm.
# NOTE(review): langchain_openai reads OPENAI_API_KEY from the environment on
# its own, so this assignment looks redundant — verify before removing.
openai.api_key = os.environ['OPENAI_API_KEY']

# Constants
CHROMA_PATH = "chroma"  # directory holding the persisted Chroma vector store
# RAG prompt: instructs the model to answer strictly from the retrieved
# {context}; {question} is the raw user query.
PROMPT_TEMPLATE = """
Answer the question based only on the following context:

{context}

---

Answer the question based on the above context: {question}
"""

# Function to process user input and generate response
def generate_response(query_text, history):
    """Answer *query_text* via RAG over the Chroma store and append the
    exchange to the Gradio chat *history*.

    Parameters
    ----------
    query_text : str
        The user's question as typed into the textbox.
    history : list
        Gradio chatbot history; mutated in place by appending
        (speaker_label, text) tuples.

    Returns
    -------
    tuple
        ``(history, "")`` — the empty string clears the input textbox.
    """
    # Guard: ignore empty/whitespace submissions instead of querying the DB.
    if not query_text or not query_text.strip():
        return history, ""

    # Build the embedding function and Chroma client once and cache them on
    # the function object — the original re-created both on every message,
    # which is needlessly expensive per call.
    if not hasattr(generate_response, "_db"):
        embedding_function = OpenAIEmbeddings()
        generate_response._db = Chroma(
            persist_directory=CHROMA_PATH,
            embedding_function=embedding_function,
        )
    db = generate_response._db

    # Retrieve the top-3 chunks with relevance scores.
    results = db.similarity_search_with_relevance_scores(query_text, k=3)
    if len(results) == 0 or results[0][1] < 0.7:
        # Best hit below the 0.7 relevance floor — refuse rather than
        # hallucinate an answer from irrelevant context.
        response_text = "🤔 Unable to find matching results."
    else:
        # Join the retrieved chunks into one context string for the prompt.
        context_text = "\n\n---\n\n".join([doc.page_content for doc, _score in results])
        prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
        prompt = prompt_template.format(context=context_text, question=query_text)

        # Generate response
        model = ChatOpenAI(model="gpt-4o")
        response_text = model.invoke(prompt).content
        # sources = [doc.metadata.get("source", None) for doc, _score in results]
        # response_text += f"\n\n**Sources:** {', '.join(sources)}"

    history.append(("You 🗣️", query_text))
    history.append(("Biomedical Informatics Assistant 🤖", response_text))
    return history, ""

# Gradio Interface
# Flat script: build the Blocks UI, wire the submit handler, and attach CSS.
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center; color: white;'>AI-Powered Chat Interface for Biomedical Informatics 🤖</h1>")
    
    # Chat transcript; generate_response appends (speaker_label, text) tuples.
    # NOTE(review): gr.Chatbot's tuple format conventionally carries
    # (user_message, bot_message) pairs — here the first element is a speaker
    # label instead; confirm this renders as intended.
    chatbot = gr.Chatbot(elem_id="chatbot")

    with gr.Row():
        with gr.Column(scale=7):
            # Single-line input; pressing Enter fires the submit handler below.
            query_text = gr.Textbox(
                show_label=False, 
                placeholder="Type your question here ✍️...", 
                lines=1,
                elem_id="input_box"
            )


    # Set up interactions
    # Outputs: updated chat history plus "" to clear the input textbox.
    query_text.submit(generate_response, [query_text, chatbot], [chatbot, query_text])

    # Custom CSS
    # NOTE(review): assigning demo.css after construction works in some Gradio
    # versions; newer releases expect gr.Blocks(css=...) — verify it applies.
    demo.css = """
    #input_box {
        font-size: 18px;
        padding: 10px;
    }
    #chatbot .message {
        font-size: 18px;
    }
    #chatbot .user {
        background-color: #333;
        color: white;
        font-size: 32px;
    }
    #chatbot .assistant {
        background-color: #007BFF;
        color: white;
        font-size: 32px;
    }
    body {
        background-color: #ffffff;
    }
    """
    
if __name__ == "__main__":
    # share=True asks Gradio to create a temporary public link.
    demo.launch(share=True)