hanzla javaid committed on
Commit
02b0952
·
1 Parent(s): c66c032
Files changed (1)
  1. app.py +63 -8
app.py CHANGED
@@ -1,14 +1,69 @@
  import gradio as gr
- import spaces
  import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import spaces
+
+ # Dictionary to store loaded models and tokenizers
+ loaded_models = {}
+
+ # List of available models (update with your preferred models)
+ models = ["gpt2", "gpt2-medium", "gpt2-large", "EleutherAI/gpt-neo-1.3B"]
+

- zero = torch.Tensor([0]).cuda()
- print(zero.device) # <-- 'cpu' 🤔
+ def load_model(model_name):
+     if model_name not in loaded_models:
+         print(f"Loading model: {model_name}")
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+         model = AutoModelForCausalLM.from_pretrained(model_name).to("cuda" if torch.cuda.is_available() else "cpu")
+         loaded_models[model_name] = (model, tokenizer)
+     return loaded_models[model_name]

  @spaces.GPU
- def greet(n):
-     print(zero.device) # <-- 'cuda:0' 🤗
-     return f"Hello {zero + n} Tensor"
+ def get_model_response(model_name, message):
+     model, tokenizer = load_model(model_name)
+     inputs = tokenizer(message, return_tensors="pt").to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_length=100, num_return_sequences=1, do_sample=True, temperature=0.7)
+
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+
+ def chat(message, history, model1, model2):
+     response1 = get_model_response(model1, message)
+     response2 = get_model_response(model2, message)
+     return (history or []) + [(message, f"{model1}: {response1}\n\n{model2}: {response2}")]
+
+
+ def vote(direction, history):
+     if history:
+         last_interaction = history[-1]
+         vote_text = f"\n\nUser voted: {'👍' if direction == 'up' else '👎'}"
+         updated_interaction = (last_interaction[0], last_interaction[1] + vote_text)
+         return history[:-1] + [updated_interaction]
+     return history
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Hugging Face Model Comparison Chat")
+
+     with gr.Row():
+         model1_dropdown = gr.Dropdown(choices=models, label="Model 1", value=models[0])
+         model2_dropdown = gr.Dropdown(choices=models, label="Model 2", value=models[1])
+
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox(label="Your message")
+     clear = gr.Button("Clear")
+
+     with gr.Row():
+         upvote = gr.Button("👍 Upvote")
+         downvote = gr.Button("👎 Downvote")
+
+     msg.submit(chat, [msg, chatbot, model1_dropdown, model2_dropdown], chatbot)
+     clear.click(lambda: None, None, chatbot, queue=False)
+     upvote.click(lambda history: vote("up", history), chatbot, chatbot)
+     downvote.click(lambda history: vote("down", history), chatbot, chatbot)

- demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
- demo.launch()
+ if __name__ == "__main__":
+     demo.launch()
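
Gradio event inputs must be components, not literal values, which is why the new code binds the vote direction with a lambda before handing vote to click. A functools.partial works just as well; a minimal sketch using the same vote, upvote, downvote, and chatbot names from the diff:

    from functools import partial

    # partial pins the direction argument; Gradio supplies the chatbot
    # history as the remaining input and writes the return value back.
    upvote.click(partial(vote, "up"), chatbot, chatbot)
    downvote.click(partial(vote, "down"), chatbot, chatbot)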
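
To sanity-check the generation path before pushing, a hypothetical local smoke test (assumptions: the file above is saved as app.py; the gradio, torch, transformers, and spaces packages are installed; and outside a ZeroGPU Space the @spaces.GPU decorator leaves the function running on whatever device is available). The if __name__ == "__main__" guard means importing app builds the UI without launching it:

    # smoke_test.py — hypothetical helper, not part of the commit
    from app import get_model_response

    # "gpt2" is the smallest entry in the models list, so it downloads fastest
    print(get_model_response("gpt2", "Hello, world"))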