import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import spaces

# Dictionary to store loaded models and tokenizers
loaded_models = {}

# List of available models (update with your preferred models)
models = ["gpt2", "gpt2-medium", "gpt2-large", "EleutherAI/gpt-neo-1.3B"]
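# Note: larger checkpoints such as EleutherAI/gpt-neo-1.3B need several GB of
# GPU memory; keep this list to models that fit the Space's hardware.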


def load_model(model_name):
    if model_name not in loaded_models:
        print(f"Loading model: {model_name}")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name).to("cuda" if torch.cuda.is_available() else "cpu")
        loaded_models[model_name] = (model, tokenizer)
    return loaded_models[model_name]

# @spaces.GPU allocates a GPU for the duration of the call on Hugging Face
# ZeroGPU Spaces; outside a ZeroGPU environment the decorator has no effect.
@spaces.GPU
def get_model_response(model_name, message):
    model, tokenizer = load_model(model_name)
    inputs = tokenizer(message, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            num_return_sequences=1,
            do_sample=True,  # temperature only takes effect when sampling is enabled
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2-style models define no pad token
        )

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


def chat(message, history, model1, model2):
    response1 = get_model_response(model1, message)
    response2 = get_model_response(model2, message)
    # Append the new exchange so earlier turns stay visible in the chat window.
    history = history or []
    return history + [(message, f"{model1}: {response1}\n\n{model2}: {response2}")]


def vote(direction, history):
    if history:
        last_interaction = history[-1]
        vote_text = f"\n\nUser voted: {'πŸ‘' if direction == 'up' else 'πŸ‘Ž'}"
        updated_interaction = (last_interaction[0], last_interaction[1] + vote_text)
        return history[:-1] + [updated_interaction]
    return history


with gr.Blocks() as demo:
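    # Layout: two model dropdowns side by side, a shared chat window,
    # a message box, and upvote/downvote buttons for the last exchange.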
    gr.Markdown("# Hugging Face Model Comparison Chat")

    with gr.Row():
        model1_dropdown = gr.Dropdown(choices=models, label="Model 1", value=models[0])
        model2_dropdown = gr.Dropdown(choices=models, label="Model 2", value=models[1])

    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Your message")
    clear = gr.Button("Clear")

    with gr.Row():
        upvote = gr.Button("πŸ‘ Upvote")
        downvote = gr.Button("πŸ‘Ž Downvote")

    msg.submit(chat, [msg, chatbot, model1_dropdown, model2_dropdown], chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)
    # Event inputs must be Gradio components, so the vote direction is bound via a lambda.
    upvote.click(lambda history: vote("up", history), chatbot, chatbot)
    downvote.click(lambda history: vote("down", history), chatbot, chatbot)

if __name__ == "__main__":
    demo.launch()