Alibrown committed on
Commit
401addd
·
verified ·
1 Parent(s): f145050

Rename _app.py to app.py

Browse files
Files changed (2) hide show
  1. _app.py +0 -102
  2. app.py +141 -0
_app.py DELETED
@@ -1,102 +0,0 @@
1
- import gradio as gr
2
- from transformers import pipeline
3
-
4
# Define all pipelines
def load_pipelines():
    """Eagerly build every supported pipeline, keyed by UI display name.

    Note: this instantiates nine models at once, which is memory-heavy;
    per-model lazy loading is preferable when resources are tight.

    Returns:
        dict: display name -> ready ``transformers`` pipeline.
    """
    pipelines = {
        "GPT-2 Original": pipeline("text-generation", model="gpt2"),
        "GPT-2 Medium": pipeline("text-generation", model="gpt2-medium"),
        "DistilGPT-2": pipeline("text-generation", model="distilgpt2"),
        "German GPT-2": pipeline("text-generation", model="german-nlp-group/german-gpt2"),
        "German Wechsel GPT-2": pipeline("text-generation", model="benjamin/gpt2-wechsel-german"),
        # T5 is an encoder-decoder model: it must be loaded with the
        # "text2text-generation" task, not "text-generation".
        "T5 Base": pipeline("text2text-generation", model="t5-base"),
        "T5 Large": pipeline("text2text-generation", model="t5-large"),
        "Text Classification": pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english"),
        "Sentiment Analysis": pipeline("sentiment-analysis", model="nlptown/bert-base-multilingual-uncased-sentiment")
    }
    return pipelines
18
-
19
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    model_name,
    max_tokens,
    temperature,
    top_p,
):
    """Route one chat turn to the pipeline selected in the UI.

    Args:
        message: new user message.
        history: prior (user, assistant) pairs from gr.ChatInterface.
        system_message: system prompt prepended to generation prompts.
        model_name: display name key into load_pipelines().
        max_tokens: maximum number of NEW tokens to generate.
        temperature: sampling temperature.
        top_p: nucleus-sampling probability mass.

    Returns:
        The assistant reply, or an error string on failure.
    """
    # Load pipelines
    pipelines = load_pipelines()
    pipe = pipelines.get(model_name)

    if not pipe:
        return "Error: Model not found."

    # For text generation models
    if model_name in ["GPT-2 Original", "GPT-2 Medium", "DistilGPT-2",
                      "German GPT-2", "German Wechsel GPT-2",
                      "T5 Base", "T5 Large"]:
        # Prepare full prompt
        full_history = ' '.join([f"{msg[0]} {msg[1] or ''}" for msg in history]) if history else ''
        full_prompt = f"{system_message}\n{full_history}\nUser: {message}\nAssistant:"

        try:
            response = pipe(
                full_prompt,
                # max_new_tokens counts generated tokens directly; the old
                # max_length=len(full_prompt)+max_tokens conflated CHARACTERS
                # with tokens and could blow past the model context window.
                max_new_tokens=max_tokens,
                # temperature/top_p are ignored by greedy decoding unless
                # sampling is explicitly enabled.
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                num_return_sequences=1
            )[0]['generated_text']

            # Extract just the new assistant response
            assistant_response = response[len(full_prompt):].strip()
            return assistant_response
        except Exception as e:
            return f"Generation error: {e}"

    # For classification and sentiment models
    elif model_name == "Text Classification":
        try:
            result = pipe(message)[0]
            return f"Classification: {result['label']} (Confidence: {result['score']:.2f})"
        except Exception as e:
            return f"Classification error: {e}"

    elif model_name == "Sentiment Analysis":
        try:
            result = pipe(message)[0]
            return f"Sentiment: {result['label']} (Confidence: {result['score']:.2f})"
        except Exception as e:
            return f"Sentiment analysis error: {e}"
72
-
73
def create_chat_interface():
    """Create Gradio ChatInterface with model selection."""
    model_choices = [
        "GPT-2 Original", "GPT-2 Medium", "DistilGPT-2",
        "German GPT-2", "German Wechsel GPT-2",
        "T5 Base", "T5 Large",
        "Text Classification", "Sentiment Analysis",
    ]
    # Extra widgets appear under the chat box and are passed to respond()
    # positionally after (message, history).
    extra_controls = [
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Dropdown(model_choices, value="GPT-2 Original", label="Select Model"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                  label="Top-p (nucleus sampling)"),
    ]
    return gr.ChatInterface(respond, additional_inputs=extra_controls)
99
-
100
if __name__ == "__main__":
    # Build the UI and launch it with a public share link.
    create_chat_interface().launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+ import os
4
+ from huggingface_hub import login
5
+
6
# Hugging Face login function
def hf_login(token):
    """Authenticate against the Hugging Face Hub; return a status string."""
    if not token:
        return "No token provided"
    try:
        login(token)
    except Exception as e:
        return f"Login error: {str(e)}"
    return "Successfully logged in to Hugging Face Hub"
15
+
16
# Define all pipelines with lazy loading
# Display name -> (pipeline task, model id). Data-driven so adding a
# model is a one-line change instead of a new elif branch.
_MODEL_REGISTRY = {
    "GPT-2 Original": ("text-generation", "gpt2"),
    "GPT-2 Medium": ("text-generation", "gpt2-medium"),
    "DistilGPT-2": ("text-generation", "distilgpt2"),
    "German GPT-2": ("text-generation", "german-nlp-group/german-gpt2"),
    "German Wechsel GPT-2": ("text-generation", "benjamin/gpt2-wechsel-german"),
    # T5 is encoder-decoder, hence the text2text-generation task.
    "T5 Base": ("text2text-generation", "t5-base"),
    "T5 Large": ("text2text-generation", "t5-large"),
    "Text Classification": ("text-classification", "distilbert-base-uncased-finetuned-sst-2-english"),
    "Sentiment Analysis": ("sentiment-analysis", "nlptown/bert-base-multilingual-uncased-sentiment"),
}

# Process-wide cache: without it every chat turn re-instantiated (and
# potentially re-downloaded) the full model, which is extremely slow.
_PIPELINE_CACHE = {}

def get_pipeline(model_name):
    """Lazy load pipeline only when needed, caching it after first use.

    Args:
        model_name: one of the _MODEL_REGISTRY display names.

    Returns:
        A ready ``transformers`` pipeline for *model_name*.

    Raises:
        Exception: wraps any loading failure, including unknown model
            names, preserving the original error-message format.
    """
    cached = _PIPELINE_CACHE.get(model_name)
    if cached is not None:
        return cached
    try:
        if model_name not in _MODEL_REGISTRY:
            raise ValueError(f"Unknown model: {model_name}")
        task, model_id = _MODEL_REGISTRY[model_name]
        pipe = pipeline(task, model=model_id)
    except Exception as e:
        raise Exception(f"Error loading model {model_name}: {str(e)}")
    _PIPELINE_CACHE[model_name] = pipe
    return pipe
42
+
43
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    model_name,
    max_tokens,
    temperature,
    top_p,
):
    """Handle one chat turn by dispatching to the selected pipeline.

    Args:
        message: new user message.
        history: prior (user, assistant) pairs from gr.ChatInterface.
        system_message: system prompt prepended to generation prompts.
        model_name: display name understood by get_pipeline().
        max_tokens: maximum number of NEW tokens to generate.
        temperature: sampling temperature.
        top_p: nucleus-sampling probability mass.

    Returns:
        The assistant reply, or an "Error: ..." string on any failure.
    """
    try:
        # Get the appropriate pipeline
        pipe = get_pipeline(model_name)

        # For text generation models
        if model_name in ["GPT-2 Original", "GPT-2 Medium", "DistilGPT-2",
                          "German GPT-2", "German Wechsel GPT-2"]:
            # Prepare full prompt
            full_history = ' '.join([f"User: {msg[0]}\nAssistant: {msg[1] or ''}" for msg in history]) if history else ''
            full_prompt = f"{system_message}\n{full_history}\nUser: {message}\nAssistant:"

            response = pipe(
                full_prompt,
                # max_new_tokens counts generated tokens directly; the old
                # max_length=len(full_prompt.split())+max_tokens conflated
                # whitespace-separated words with tokens, so the budget was
                # wrong for any prompt whose tokenization differs from its
                # word count (i.e. virtually all of them).
                max_new_tokens=max_tokens,
                # temperature/top_p are silently ignored by greedy decoding
                # unless sampling is explicitly enabled.
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                num_return_sequences=1
            )[0]['generated_text']

            # generated_text echoes the prompt; strip it so only the newly
            # generated assistant reply is returned.
            assistant_response = response[len(full_prompt):].strip()
            return assistant_response

        # For T5 models
        elif model_name in ["T5 Base", "T5 Large"]:
            # T5 doesn't handle chat history the same way, so simplify
            input_text = f"{message}"

            response = pipe(
                input_text,
                max_new_tokens=max_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                num_return_sequences=1
            )[0]['generated_text']

            return response

        # For classification and sentiment models
        elif model_name == "Text Classification":
            result = pipe(message)[0]
            return f"Classification: {result['label']} (Confidence: {result['score']:.2f})"

        elif model_name == "Sentiment Analysis":
            result = pipe(message)[0]
            return f"Sentiment: {result['label']} (Confidence: {result['score']:.2f})"

    except Exception as e:
        return f"Error: {str(e)}"
101
+
102
def create_interface():
    """Build the Blocks UI: a collapsible HF-login panel plus the chat interface."""
    model_choices = [
        "GPT-2 Original", "GPT-2 Medium", "DistilGPT-2",
        "German GPT-2", "German Wechsel GPT-2",
        "T5 Base", "T5 Large",
        "Text Classification", "Sentiment Analysis",
    ]
    with gr.Blocks(title="Hugging Face Models Demo") as demo:
        gr.Markdown("# Hugging Face Models Chat Interface")

        # Optional Hub authentication, collapsed by default.
        with gr.Accordion("Hugging Face Login", open=False):
            with gr.Row():
                hf_token = gr.Textbox(label="Enter Hugging Face Token", type="password")
                login_btn = gr.Button("Login")
            login_output = gr.Textbox(label="Login Status")
            login_btn.click(hf_login, inputs=[hf_token], outputs=[login_output])

        # Extra widgets are passed to respond() positionally after
        # (message, history).
        gr.ChatInterface(
            respond,
            additional_inputs=[
                gr.Textbox(value="You are a helpful assistant.", label="System message"),
                gr.Dropdown(model_choices, value="GPT-2 Original", label="Select Model"),
                gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
                gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
                gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                          label="Top-p (nucleus sampling)"),
            ],
        )

    return demo
138
+
139
if __name__ == "__main__":
    # Build the UI and expose it via a public share link.
    create_interface().launch(share=True)