File size: 2,689 Bytes
66ff3e9
 
8658e51
b944409
 
991d86a
2a864c8
66ff3e9
991d86a
 
 
2a23908
 
 
 
 
c1a4bd7
 
 
 
 
991d86a
 
c62ab32
 
 
2a864c8
c62ab32
991d86a
 
 
 
 
 
 
 
 
2a864c8
991d86a
 
 
 
 
 
 
 
 
2a864c8
991d86a
 
 
 
 
 
 
 
 
4394f5b
c62ab32
 
 
 
2a864c8
c62ab32
 
 
2a864c8
f67b086
2a864c8
 
 
 
 
 
 
 
 
 
 
 
 
66ff3e9
2a864c8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import gradio as gr
import requests
import json
import os

# Retrieve the OpenRouter API Key from the Space secrets.
# NOTE(review): os.getenv returns None when the secret is unset — requests
# would then be sent with "Bearer None" and fail with 401; verify the secret
# name matches the Space configuration.
API_KEY = os.getenv("OpenRouter_API_KEY")

# OpenRouter model identifiers offered in the UI checkbox group.
# Entries suffixed ":free" are the no-cost tiers of the same models.
MODEL_OPTIONS = [
    "openai/gpt-4o-mini-2024-07-18",
    "meta-llama/llama-3.1-405b-instruct",
    "nvidia/llama-3.1-nemotron-70b-instruct",
    "qwen/qwen-2.5-7b-instruct",
    "mistralai/mistral-large-2411",
    "microsoft/phi-3-medium-128k-instruct",
    "meta-llama/llama-3.1-405b-instruct:free",
    "nousresearch/hermes-3-llama-3.1-405b:free",
    "mistralai/mistral-7b-instruct:free",
    "microsoft/phi-3-medium-128k-instruct:free",
    "liquid/lfm-40b:free"
]

# In-memory record of every past comparison (input, models, outputs).
# Module-level and mutable: shared across all users of the Space and lost
# on restart.
history = []

def generate_comparisons_with_history(input_text, selected_models):
    """Send *input_text* to each selected OpenRouter model and record results.

    Parameters
    ----------
    input_text : str
        The user prompt forwarded verbatim to every selected model.
    selected_models : list[str]
        OpenRouter model identifiers (as listed in ``MODEL_OPTIONS``).

    Returns
    -------
    tuple[dict, list]
        ``(results, history)`` where *results* maps each model name to its
        generated text (or an ``"Error: ..."`` message) and *history* is the
        full accumulated list of past comparison entries.
    """
    global history
    results = {}
    for model in selected_models:
        try:
            # json= lets requests serialize the payload and set the
            # Content-Type header itself; the timeout keeps one stalled
            # model request from blocking the Gradio UI indefinitely.
            response = requests.post(
                url="https://openrouter.ai/api/v1/chat/completions",
                headers={"Authorization": f"Bearer {API_KEY}"},
                json={
                    "model": model,  # Use the current model
                    "messages": [{"role": "user", "content": input_text}],
                    "top_p": 1,
                    "temperature": 1,
                    "frequency_penalty": 0,
                    "presence_penalty": 0,
                    "repetition_penalty": 1,
                    "top_k": 0,
                },
                timeout=60,
            )
        except requests.RequestException as exc:
            # A network failure for one model should not abort the whole
            # comparison — record it and move on to the next model.
            results[model] = f"Error: request failed ({exc})"
            continue

        # Parse the response
        if response.status_code == 200:
            try:
                response_json = response.json()
                # Guard against both a missing "choices" key and an empty
                # "choices" list (the original default only covered the
                # missing-key case and would raise IndexError on []).
                choices = response_json.get("choices") or [{}]
                results[model] = choices[0].get("message", {}).get(
                    "content", "No content returned."
                )
            except ValueError:
                # response.json() raises a ValueError subclass on bad JSON
                # across all requests versions.
                results[model] = "Error: Unable to parse response."
        else:
            results[model] = f"Error: {response.status_code}, {response.text}"

    # Add input and results to history
    history.append({
        "input": input_text,
        "selected_models": selected_models,
        "outputs": results,
    })

    return results, history

# Create Gradio interface with multiple model selection and history.
# Inputs map positionally to the function's (input_text, selected_models)
# parameters; outputs map to its (results, history) return tuple.
iface = gr.Interface(
    fn=generate_comparisons_with_history,
    inputs=[
        gr.Textbox(lines=2, label="Input Text", placeholder="Enter your query here"),
        # Pre-selects the first model so a bare submit still queries something.
        gr.CheckboxGroup(choices=MODEL_OPTIONS, label="Select Models", value=[MODEL_OPTIONS[0]])
    ],
    outputs=[
        gr.JSON(label="Model Comparisons"),
        gr.JSON(label="History")
    ],
    title="Compare Outputs and Maintain History"
)

# Starts the web server; blocks until the app is shut down.
iface.launch()