File size: 3,344 Bytes
66ff3e9
 
8658e51
b944409
 
991d86a
2a864c8
66ff3e9
991d86a
 
 
2a23908
 
 
 
 
c1a4bd7
 
 
 
 
991d86a
 
4cb67db
c62ab32
 
4cb67db
c62ab32
4cb67db
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4394f5b
4cb67db
 
 
 
 
 
 
 
 
 
 
 
c62ab32
 
4cb67db
 
c62ab32
 
 
d7f2c99
 
 
4cb67db
 
e731b38
4cb67db
d7f2c99
4cb67db
 
 
 
 
 
 
 
 
 
 
 
 
 
ec9dfba
4cb67db
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66ff3e9
4cb67db
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
import gradio as gr
import requests
import json
import os

# Retrieve the OpenRouter API Key from the Space secrets.
# NOTE(review): if the secret is missing this is None and every request will
# be sent with "Bearer None" -- consider failing fast at startup.
API_KEY = os.getenv("OpenRouter_API_KEY")

# Define available models for selection in the UI dropdown.
# Paid-tier model slugs first, then the ":free" rate-limited tiers.
MODEL_OPTIONS = [
    "openai/gpt-4o-mini-2024-07-18",
    "meta-llama/llama-3.1-405b-instruct",
    "nvidia/llama-3.1-nemotron-70b-instruct",
    "qwen/qwen-2.5-7b-instruct",
    "mistralai/mistral-large-2411",
    "microsoft/phi-3-medium-128k-instruct",
    "meta-llama/llama-3.1-405b-instruct:free",
    "nousresearch/hermes-3-llama-3.1-405b:free",
    "mistralai/mistral-7b-instruct:free",
    "microsoft/phi-3-medium-128k-instruct:free",
    "liquid/lfm-40b:free"
]

# Initialize history: module-level log of interaction dicts
# ({"input", "selected_model", "response"}), appended to by generate_text.
# NOTE(review): shared across ALL users/sessions of the app, not per-session.
history = []

def generate_text(input_text, selected_model, history_state):
    """Send *input_text* to the OpenRouter chat-completions API and return
    the model's reply along with a formatted transcript of all interactions.

    Parameters
    ----------
    input_text : str
        The user's prompt.
    selected_model : str
        An OpenRouter model slug (one of MODEL_OPTIONS).
    history_state : object
        Gradio State input. Currently unused: the module-level ``history``
        list is the source of truth, since the Interface defines no State
        output that could persist this value.

    Returns
    -------
    tuple[str, str]
        (model reply or error message, human-readable history transcript)
    """
    global history

    def _format_history():
        # One paragraph per interaction, oldest first.
        return "\n".join(
            f"Input: {entry['input']}\n"
            f"Model: {entry['selected_model']}\n"
            f"Response: {entry['response']}\n"
            for entry in history
        )

    payload = {
        "model": selected_model,  # Use selected model
        "messages": [{"role": "user", "content": input_text}],
        "top_p": 1,
        "temperature": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
        "repetition_penalty": 1,
        "top_k": 0,
    }

    try:
        response = requests.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={"Authorization": f"Bearer {API_KEY}"},
            json=payload,  # requests serializes and sets Content-Type for us
            timeout=120,   # don't hang the UI forever on a stuck request
        )
    except requests.RequestException as exc:
        # Network-level failure (DNS, refused connection, timeout, ...).
        return f"Error: request failed ({exc})", _format_history()

    # Handle HTTP errors; keep the existing history visible in the UI
    # (the original returned the raw state object here, which rendered
    # inconsistently in the History textbox).
    if response.status_code != 200:
        return f"Error: {response.status_code}, {response.text}", _format_history()

    # Parse and return the content of the response.
    try:
        response_json = response.json()
        result = (
            response_json.get("choices", [{}])[0]
            .get("message", {})
            .get("content", "No content returned.")
        )
    except (ValueError, IndexError):
        # ValueError covers JSON decode errors (json.JSONDecodeError is a
        # subclass); IndexError guards an empty "choices" list.
        result = "Error: Unable to parse response."

    # Record the interaction in the module-level history.
    history.append({
        "input": input_text,
        "selected_model": selected_model,
        "response": result,
    })

    return result, _format_history()

# Create Gradio interface with a dropdown for model selection.
# Layout: two inputs the user interacts with (prompt + model picker), plus a
# hidden State; two text outputs (latest reply + full transcript).
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, label="Input Text", placeholder="Enter your query here"),
        # First model in MODEL_OPTIONS is the default selection.
        gr.Dropdown(choices=MODEL_OPTIONS, label="Select Model", value=MODEL_OPTIONS[0]),
        # NOTE(review): this State is passed to generate_text as history_state,
        # but there is no matching gr.State() output, so it never updates --
        # the function relies on the module-level `history` list instead.
        gr.State()
    ],
    outputs=[
        gr.Textbox(label="Response", placeholder="Response will be shown here"),
        # Read-only transcript string built by generate_text.
        gr.Textbox(label="History", placeholder="Interaction history will be shown here", lines=10, interactive=False)
    ],
    title="Chat with OpenRouter Models"
)

# Insert custom CSS for scrollable sections.
# NOTE(review): gr.Interface has no `add_component` method, so the original
# call raised AttributeError before launch() was ever reached. Interface
# (a Blocks subclass) exposes a `css` attribute that is applied when the app
# renders, so assign the raw stylesheet there -- without the <style> wrapper,
# which is HTML, not CSS.
# NOTE(review): the selectors below (#output-comparisons / #output-history)
# do not match any elem_id set on the components above; confirm the intended
# targets and pass elem_id="..." to the corresponding gr.Textbox if needed.
iface.css = """
#output-comparisons {
    height: 300px;
    overflow: auto;
    border: 1px solid #ddd;
    padding: 10px;
}
#output-history {
    height: 300px;
    overflow: auto;
    border: 1px solid #ddd;
    padding: 10px;
}
"""

iface.launch()