# NOTE(review): the original capture began with Hugging Face Space page-header
# text ("Spaces: / Sleeping / Sleeping") — a scrape artifact, not source code.
import gradio as gr
import requests
import json
import os

# Retrieve the OpenRouter API key from the Space secrets.
# NOTE(review): os.getenv returns None when the secret is unset, which would
# silently send "Authorization: Bearer None" — confirm the secret name matches
# the Space settings.
API_KEY = os.getenv("OpenRouter_API_KEY")

# OpenRouter model identifiers the user can select from (":free" variants
# are the no-cost tiers).
MODEL_OPTIONS = [
    "openai/gpt-4o-mini-2024-07-18",
    "meta-llama/llama-3.1-405b-instruct",
    "nvidia/llama-3.1-nemotron-70b-instruct",
    "qwen/qwen-2.5-7b-instruct",
    "mistralai/mistral-large-2411",
    "microsoft/phi-3-medium-128k-instruct",
    "meta-llama/llama-3.1-405b-instruct:free",
    "nousresearch/hermes-3-llama-3.1-405b:free",
    "mistralai/mistral-7b-instruct:free",
    "microsoft/phi-3-medium-128k-instruct:free",
    "liquid/lfm-40b:free",
]

# In-memory request history: list of {"input", "selected_models", "outputs"}
# dicts, appended to by generate_model_outputs_with_history.
history = []
def generate_model_outputs_with_history(input_text, selected_models):
    """Send *input_text* to each selected OpenRouter model and record history.

    Args:
        input_text: The user prompt, forwarded as a single "user" message.
        selected_models: Iterable of OpenRouter model identifier strings.

    Returns:
        A tuple of (outputs, history) where *outputs* is a list of response
        strings ordered like *selected_models* (failures become "Error: ..."
        strings rather than raising), and *history* is the module-level
        history list with this call's entry appended.
    """
    global history
    results = {}
    for model in selected_models:
        try:
            response = requests.post(
                url="https://openrouter.ai/api/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {API_KEY}",
                    "Content-Type": "application/json",
                },
                # Use the json= parameter instead of data=json.dumps(...);
                # requests serializes and sets the content type itself.
                json={
                    "model": model,
                    "messages": [{"role": "user", "content": input_text}],
                    "top_p": 1,
                    "temperature": 1,
                    "frequency_penalty": 0,
                    "presence_penalty": 0,
                    "repetition_penalty": 1,
                    "top_k": 0,
                },
                # Without a timeout a stalled connection would hang the UI
                # indefinitely; report it as a per-model error instead.
                timeout=60,
            )
        except requests.RequestException as exc:
            results[model] = f"Error: request failed ({exc})"
            continue

        if response.status_code == 200:
            try:
                response_json = response.json()
                results[model] = (
                    response_json.get("choices", [{}])[0]
                    .get("message", {})
                    .get("content", "No content returned.")
                )
            # requests raises a ValueError subclass on malformed JSON.
            except (json.JSONDecodeError, ValueError):
                results[model] = "Error: Unable to parse response."
        else:
            results[model] = f"Error: {response.status_code}, {response.text}"

    # Record this call (prompt, chosen models, per-model outputs).
    history.append(
        {
            "input": input_text,
            "selected_models": selected_models,
            "outputs": results,
        }
    )
    return [results.get(model, "No output") for model in selected_models], history
# Create a dynamic number of outputs based on model selection | |
def create_outputs(selected_models):
    """Build one read-only Textbox component per selected model name."""
    return [
        gr.Textbox(label=f"Output from {model}", interactive=False)
        for model in selected_models
    ]
# Gradio interface with dynamic outputs and history | |
# Gradio UI: prompt + model picker in, per-model outputs and history out.
# NOTE(review): the original tried to create output Textboxes dynamically and
# called gr.State.set(), which does not exist; event outputs must be declared
# at build time, so results are rendered through fixed JSON components instead.
with gr.Blocks() as demo:
    with gr.Row():
        input_text = gr.Textbox(
            lines=2, label="Input Text", placeholder="Enter your query here"
        )
        selected_models = gr.CheckboxGroup(
            choices=MODEL_OPTIONS, label="Select Models", value=[MODEL_OPTIONS[0]]
        )

    generate_button = gr.Button("Generate Outputs")
    outputs_view = gr.JSON(label="Model Outputs")
    history_view = gr.JSON(label="History")

    def _run_models(text, models):
        # Map the ordered output list back to {model: output} for display.
        outputs, hist = generate_model_outputs_with_history(text, models)
        return dict(zip(models, outputs)), hist

    generate_button.click(
        fn=_run_models,
        inputs=[input_text, selected_models],
        outputs=[outputs_view, history_view],
    )

demo.launch()