File size: 5,572 Bytes
b7f426b
7e42f7f
b7f426b
480a83d
7e42f7f
dc55189
016c388
 
725f763
7e42f7f
c95d4c7
 
 
480a83d
c95d4c7
 
 
 
 
480a83d
c95d4c7
 
480a83d
 
 
c95d4c7
 
480a83d
c95d4c7
 
 
 
 
 
 
 
 
 
 
 
 
 
480a83d
c95d4c7
 
dc55189
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4cd8d7e
016c388
c95d4c7
4cd8d7e
 
 
c95d4c7
 
 
 
042d9eb
016c388
4cd8d7e
016c388
4cd8d7e
016c388
4cd8d7e
 
9176bca
4cd8d7e
016c388
4cd8d7e
016c388
4cd8d7e
 
b7f426b
4cd8d7e
016c388
4cd8d7e
 
016c388
4cd8d7e
016c388
4cd8d7e
 
016c388
4cd8d7e
725f763
4cd8d7e
 
725f763
4cd8d7e
725f763
4cd8d7e
 
725f763
016c388
c95d4c7
042d9eb
445701c
dc55189
a2dcfac
c95d4c7
b7f426b
dc55189
a2dcfac
4cd8d7e
dc55189
725f763
9176bca
 
 
 
 
 
 
 
 
4cd8d7e
9176bca
 
 
dc55189
4cd8d7e
dc55189
4cd8d7e
 
9176bca
 
4cd8d7e
445701c
4cd8d7e
 
 
445701c
4cd8d7e
445701c
9176bca
b7f426b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
import os
from urllib.parse import quote_plus

import gradio as gr
from huggingface_hub import InferenceClient
from sqlalchemy import create_engine

# One InferenceClient per model provider; all authenticate with the same
# HF_TOKEN environment variable (KeyError at import time if it is unset).
llama_client = InferenceClient(provider="sambanova", api_key=os.environ["HF_TOKEN"])
minimax_client = InferenceClient(provider="novita", api_key=os.environ["HF_TOKEN"])
mistral_client = InferenceClient(provider="together", api_key=os.environ["HF_TOKEN"])

# Module-level cache for the database connection; managed by get_sql_connection().
db_connection = None  

def get_sqlalchemy_connection():
    """Open a new connection to the MSSQL server via SQLAlchemy + pymssql.

    Reads SQL_SERVER / SQL_DATABASE / SQL_USERNAME / SQL_PASSWORD from the
    environment (any may be unset; connection will then simply fail).

    Returns:
        An open SQLAlchemy Connection on success, or None on failure.
    """
    server = os.getenv("SQL_SERVER")
    database = os.getenv("SQL_DATABASE")
    username = os.getenv("SQL_USERNAME")
    password = os.getenv("SQL_PASSWORD")

    # URL-encode the credentials: a username/password containing '@', ':',
    # '/' or '%' would otherwise corrupt the connection URL.
    connection_url = (
        f"mssql+pymssql://{quote_plus(username or '')}:{quote_plus(password or '')}"
        f"@{server}/{database}"
    )

    try:
        engine = create_engine(connection_url)
        conn = engine.connect()
        print("βœ… SQLAlchemy + pymssql connection successful")
        return conn
    except Exception as e:
        # Best-effort: callers treat None as "database unavailable".
        print(f"❌ SQLAlchemy connection failed: {e}")
        return None

def get_sql_connection():
    """Return a cached, live database connection, reconnecting when needed.

    Uses the module-level `db_connection` as a cache. If the cached
    connection is broken, it is dropped and a fresh one is opened.

    Returns:
        An open SQLAlchemy Connection, or None if reconnection failed.
    """
    global db_connection

    if db_connection is not None:
        try:
            # BUG FIX: a SQLAlchemy Connection has no .cursor() method, so the
            # previous probe raised AttributeError on every call and the cache
            # never survived a single turn. Probe liveness with a cheap
            # driver-level query instead (Connection.exec_driver_sql,
            # available in SQLAlchemy 1.4+ — confirm against pinned version).
            db_connection.exec_driver_sql("SELECT 1")
            return db_connection
        except Exception as e:
            print(f"❌ SQL connection failed: {e}")
            db_connection = None  # drop the broken connection so we reconnect

    # Reconnect if there was no (working) cached connection
    db_connection = get_sqlalchemy_connection()
    return db_connection
    
def format_chat_history(chat_history):
    """Render a chat-history list as a Markdown transcript string.

    Each message is a dict with "role" and "content"; content may be a plain
    string or, for multimodal (LLaMA text+image) turns, a list of parts with
    either a "text" or an "image_url" entry.
    """
    rendered = []
    for message in chat_history:
        speaker = message["role"].capitalize()
        body = message["content"]
        if isinstance(body, list):  # multimodal payload: one line per part
            for part in body:
                if "text" in part:
                    rendered.append(f"**{speaker}:** {part['text']}\n\n")
                elif "image_url" in part:
                    rendered.append(f"**{speaker}:** πŸ–ΌοΈ Image: {part['image_url']['url']}\n\n")
        else:
            rendered.append(f"**{speaker}:** {body}\n\n")
    return "".join(rendered).strip()

# Main chat handler: routes one user turn to the selected provider.
def chat_with_model(model_choice, prompt, image_url, chat_history):
    """Send one user turn to the selected model and return updated UI state.

    Args:
        model_choice: One of the dropdown labels below; selects the provider.
        image_url: Optional image URL; only used by the LLaMA branch.
        chat_history: List of OpenAI-style message dicts (mutated in place).

    Returns:
        A 4-tuple matching the Gradio outputs: (Markdown transcript,
        updated chat_history state, "" to clear the prompt box, "" to clear
        the image-URL box). On error the first element is an ❌ message.
    """
    if not prompt:
        return "❌ Please enter a text prompt.", chat_history, "", ""

    if chat_history is None:
        chat_history = []

    # NOTE(review): the connection is only used as an availability gate —
    # no query is issued anywhere in this handler; confirm whether DB
    # logging/persistence was intended here.
    conn = get_sql_connection()
    if conn is None:
        return "❌ Failed to connect to database.", chat_history, "", ""

    try:
        # === LLaMA 4 === (only branch that accepts an optional image part)
        if model_choice == "LLaMA 4 (SambaNova)":
            user_msg = [{"type": "text", "text": prompt}]
            if image_url:
                user_msg.append({"type": "image_url", "image_url": {"url": image_url}})
            chat_history.append({"role": "user", "content": user_msg})

            response = llama_client.chat.completions.create(
                model="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
                messages=chat_history
            )
            bot_msg = response.choices[0].message.content
            chat_history.append({"role": "assistant", "content": bot_msg})

        # === MiniMax === (text-only)
        elif model_choice == "MiniMax M1 (Novita)":
            chat_history.append({"role": "user", "content": prompt})
            response = minimax_client.chat.completions.create(
                model="MiniMaxAI/MiniMax-M1-80k",
                messages=chat_history
            )
            bot_msg = response.choices[0].message.content
            chat_history.append({"role": "assistant", "content": bot_msg})

        # === Mistral === (text-only)
        elif model_choice == "Mistral Mixtral-8x7B (Together)":
            chat_history.append({"role": "user", "content": prompt})
            response = mistral_client.chat.completions.create(
                model="mistralai/Mixtral-8x7B-Instruct-v0.1",
                messages=chat_history
            )
            bot_msg = response.choices[0].message.content
            chat_history.append({"role": "assistant", "content": bot_msg})

        else:
            return "❌ Unsupported model selected.", chat_history, "", ""

        return format_chat_history(chat_history), chat_history, "", ""

    except Exception as e:
        # NOTE(review): on provider failure the user message has already been
        # appended, so it stays in the history for the next attempt.
        return f"❌ Error: {e}", chat_history, "", ""

# Gradio interface: wires the model dropdown, prompt/image inputs and the
# two buttons to chat_with_model. Output order must match that function's
# 4-tuple: (transcript, state, cleared prompt, cleared image URL).
with gr.Blocks() as demo:
    gr.Markdown("## πŸ€– Multi-Model Context-Aware Chatbot")
    gr.Markdown("Supports LLaMA 4 (with optional image), MiniMax, and Mistral. Memory is preserved for multi-turn dialog.")

    # Dropdown values must match the string literals tested in chat_with_model.
    model_dropdown = gr.Dropdown(
        choices=[
            "LLaMA 4 (SambaNova)",
            "MiniMax M1 (Novita)",
            "Mistral Mixtral-8x7B (Together)"
        ],
        value="LLaMA 4 (SambaNova)",
        label="Select Model"
    )

    prompt_input = gr.Textbox(label="Text Prompt", placeholder="Ask something...", lines=2)
    image_url_input = gr.Textbox(label="Optional Image URL (for LLaMA only)", placeholder="https://example.com/image.jpg")

    submit_btn = gr.Button("πŸ’¬ Generate Response")
    reset_btn = gr.Button("πŸ”„ Reset Conversation")
    output_box = gr.Markdown(label="Chat History", value="")
    # Per-session conversation state: list of OpenAI-style message dicts.
    state = gr.State([])

    submit_btn.click(
        fn=chat_with_model,
        inputs=[model_dropdown, prompt_input, image_url_input, state],
        outputs=[output_box, state, prompt_input, image_url_input]
    )

    # Reset: clears the transcript, state, and both input boxes.
    reset_btn.click(
        fn=lambda: ("🧹 Conversation reset. You can start a new one.", [], "", ""),
        inputs=[],
        outputs=[output_box, state, prompt_input, image_url_input]
    )

demo.launch()