sohiebwedyan committed on
Commit b99026f · verified · 1 Parent(s): 23163bf

Update app.py

Files changed (1)
  1. app.py +281 -50
app.py CHANGED
@@ -1,64 +1,295 @@
  import gradio as gr
- from huggingface_hub import InferenceClient

  """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]

-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})

-     messages.append({"role": "user", "content": message})

-     response = ""

-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content

-         response += token
-         yield response


  """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
  """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )


- if __name__ == "__main__":
-     demo.launch()
+ import os
+ import torch
+ from transformers import pipeline
  import gradio as gr
+ import asyncio
+ import ipaddress
+ from typing import Optional, Tuple
+ 
+ # Let PyTorch's CUDA caching allocator grow memory segments on demand, which
+ # reduces fragmentation when several models share one GPU.
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
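+ 
+ # Load every model once at startup: three generator pipelines answer each
+ # question and a small summarizer condenses their combined output.
+ # device=0 targets the first GPU when CUDA is available; -1 keeps a pipeline on CPU.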
+ # NOTE: the "gpt2" name is historical; this pipeline actually serves Qwen/Qwen-1_8B-Chat.
+ gpt2_pipeline = pipeline("text-generation", model="Qwen/Qwen-1_8B-Chat", device=0 if torch.cuda.is_available() else -1, trust_remote_code=True)
+ Najeb_pipeline = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT", device=0 if torch.cuda.is_available() else -1)
+ llama2_pipeline = pipeline("text-generation", model="Harikrishnan46624/finetuned_llama2-1.1b-chat", device=0 if torch.cuda.is_available() else -1)
+ summarization_pipeline = pipeline("summarization", model="Falconsai/text_summarization", device=0 if torch.cuda.is_available() else -1)
+ 
+ # Rolling log of questions; the UI shows the last five.
+ previous_questions = []
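+ 
+ # Each generator wraps a blocking transformers pipeline call. asyncio.to_thread
+ # moves the call onto a worker thread so the three models can run concurrently
+ # under asyncio.gather; a bare `async def` body with no await would still run
+ # them one after another.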
+ async def generate_gpt2(question, max_length, num_beams, temperature):
+     result = await asyncio.to_thread(
+         gpt2_pipeline,
+         question,
+         max_length=max_length,
+         num_return_sequences=1,
+         num_beams=num_beams,
+         do_sample=True,
+         top_k=50,
+         top_p=0.95,
+         temperature=temperature,
+     )
+     return result[0]['generated_text']
+ 
+ async def generate_Najeb(question, max_length, num_beams, temperature):
+     result = await asyncio.to_thread(
+         Najeb_pipeline,
+         question,
+         max_length=max_length,
+         num_return_sequences=1,
+         num_beams=num_beams,
+         do_sample=True,
+         top_k=30,
+         top_p=0.85,
+         temperature=temperature,
+     )
+     return result[0]['generated_text']
+ 
+ async def generate_llama2(question, max_length, num_beams, temperature):
+     result = await asyncio.to_thread(
+         llama2_pipeline,
+         question,
+         max_length=max_length,
+         num_return_sequences=1,
+         num_beams=num_beams,
+         do_sample=True,
+         top_k=30,
+         top_p=0.9,
+         temperature=temperature,
+     )
+     return result[0]['generated_text']
+ 
+ async def generate_responses_async(question, max_length=128, num_beams=2, temperature=0.5):
+     previous_questions.append(question)
+ 
+     gpt2_task = asyncio.create_task(generate_gpt2(question, max_length, num_beams, temperature))
+     Najeb_task = asyncio.create_task(generate_Najeb(question, max_length, num_beams, temperature))
+     llama2_task = asyncio.create_task(generate_llama2(question, max_length, num_beams, temperature))
+ 
+     gpt2_response, Najeb_response, llama2_response = await asyncio.gather(gpt2_task, Najeb_task, llama2_task)
+ 
+     # Condense the three answers into one short summary.
+     combined_responses = f"GPT-2: {gpt2_response}\nNajeb: {Najeb_response}\nLLaMA 2: {llama2_response}"
+     summarized_response = summarization_pipeline(combined_responses, max_length=150, min_length=50, do_sample=False)[0]['summary_text']
+ 
+     return {
+         "GPT-2 Answer": gpt2_response,
+         "Najeb Answer": Najeb_response,
+         "LLaMA 2 Answer": llama2_response,
+         "Summarized Answer": summarized_response,
+         "Previous Questions": "\n".join(previous_questions[-5:])
+     }
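+ 
+ # Route a submission to either the model ensemble or the subnet calculator.
+ # Both branches return five values so they line up with the five Gradio outputs;
+ # the subnet branch pads the unused slots with empty strings.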
+ def handle_mode_selection(mode, input_text, max_length, num_beams, temperature):
+     if mode == "AI Question Answering":
+         result = asyncio.run(generate_responses_async(input_text, max_length, num_beams, temperature))
+         return (
+             f"**GPT-2 Model Response:**\n{result['GPT-2 Answer']}",
+             f"**Najeb Model Response:**\n{result['Najeb Answer']}",
+             f"**LLaMA 2 Model Response:**\n{result['LLaMA 2 Answer']}",
+             f"**Summarized Response:**\n{result['Summarized Answer']}",
+             f"**Previous Questions:**\n{result['Previous Questions']}"
+         )
+     else:
+         subnet_result = calculate_subnet(input_text)
+         return subnet_result, "", "", "", ""
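+ 
+ # Parse "a.b.c.d" or "a.b.c.d/nn" into the enclosing network plus the bare
+ # address; an address without a prefix length is assumed to be a /24.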
+ def get_network(ip_input: str) -> Tuple[Optional[ipaddress.IPv4Network], Optional[str]]:
+     try:
+         if ip_input.count("/") == 0:
+             ip_input += "/24"
+         net = ipaddress.IPv4Network(ip_input, strict=False)
+         ip = ip_input.split("/")[0]
+         return net, ip
+     except ValueError:
+         return None, None
+ 
+ def calculate_subnet(ip_input: str) -> str:
+     network, ip = get_network(ip_input)
+     if network is None or ip is None:
+         return "Invalid IP Address or Subnet!"
+ 
+     network_address = network.network_address
+     broadcast_address = network.broadcast_address
+     usable_hosts = list(network.hosts())
+     num_usable_hosts = len(usable_hosts)
+     usable_hosts_range = f"{usable_hosts[0]} - {usable_hosts[-1]}" if usable_hosts else "NA"
+ 
+     # Dotted-binary renderings of the input address, the network address and the mask.
+     octets = str(ip).split('.')
+     binary_octets = [bin(int(octet))[2:].zfill(8) for octet in octets]
+     bin_ip = '.'.join(binary_octets)
+ 
+     bin_addr = bin(int(network_address))[2:].zfill(32)
+     bin_addr = '.'.join([bin_addr[i:i+8] for i in range(0, len(bin_addr), 8)])
+ 
+     bin_mask = bin(int(network.netmask))[2:].zfill(32)
+     bin_mask = '.'.join([bin_mask[i:i+8] for i in range(0, len(bin_mask), 8)])
+ 
+     result = f"""
+ IP Address: {ip}
+ Address (bin): {bin_ip}
+ Network Address: {network_address}
+ Network Address (bin): {bin_addr}
+ Netmask: {network.netmask}
+ Netmask (bin): {bin_mask}
+ CIDR Notation: {network.prefixlen}
+ Broadcast Address: {broadcast_address}
+ Usable IP Range: {usable_hosts_range}
+ Number of Hosts: {network.num_addresses:,d}
+ Number of Usable Hosts: {num_usable_hosts:,d}
+ Wildcard Mask: {network.hostmask}
+ Private IP: {network.is_private}
  """
+     return result.strip()
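+ 
+ # Illustrative example (not executed by the app): calculate_subnet("192.168.1.10/28")
+ # reports network 192.168.1.0, broadcast 192.168.1.15, usable range
+ # 192.168.1.1 - 192.168.1.14, 16 addresses (14 usable) and wildcard mask 0.0.0.15.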
+ 
+ custom_css = """
+ body {
+     background-color: #f0f8ff;
+     font-family: 'Arial', sans-serif;
+     color: #333;
+ }
+ 
+ h1 {
+     text-align: center;
+     color: #0066cc;
+ }
+ 
+ p {
+     text-align: center;
+     color: #333;
+ }
+ 
+ .gradio-container {
+     width: 80%;
+     margin: auto;
+     background-color: rgba(255, 255, 255, 0.8);
+     box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+     padding: 20px;
+     border-radius: 10px;
+ }
+ 
+ .gr-button {
+     background-color: #0066cc;
+     color: white;
+     border: none;
+     border-radius: 5px;
+     padding: 10px;
+     cursor: pointer;
+     transition: background-color 0.3s ease;
+ }
+ 
+ .gr-button:hover {
+     background-color: #004c99;
+ }
+ 
+ .gr-textbox {
+     border: 2px solid #0066cc;
+     border-radius: 5px;
+     padding: 10px;
+     background-color: #fff;
+     color: #333;
+ }
+ 
+ .gr-slider {
+     color: #0066cc;
+ }
+ 
+ .gr-json {
+     background-color: rgba(240, 248, 255, 0.8);
+     border-radius: 10px;
+     padding: 10px;
+     box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+ }
+ 
+ #image-container {
+     text-align: center;
+     position: relative;
+ }
+ 
+ #image-container img {
+     width: 1400px;
+     border-radius: 10px;
+     box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+ }
+ 
+ #image-container button {
+     position: absolute;
+     top: 50%;
+     left: 50%;
+     transform: translate(-50%, -50%);
+     background-color: rgba(0, 102, 204, 0.8);
+     color: white;
+     border: none;
+     padding: 10px 20px;
+     border-radius: 5px;
+     cursor: pointer;
+     font-size: 16px;
+     transition: background-color 0.3s ease;
+ }
+ 
+ #image-container button:hover {
+     background-color: rgba(0, 76, 153, 0.8);
+ }
+ 
+ .gr-box {
+     background-color: #ffffff;
+     border: 2px solid #0066cc;
+     border-radius: 10px;
+     padding: 10px;
+     margin-top: 10px;
+ }
+ 
+ #GPT2_title, #Najeb_title, #LLaMA_title, #summary_title, #previous_title {
+     color: black;
+     font-weight: bold;
+ }
  """
+ scroll_js = """
+ <script>
+ function scrollToTop() {
+     document.getElementById('target-section').scrollIntoView({behavior: 'smooth'});
+ }
+ </script>
  """
+ 
+ iface = gr.Blocks(css=custom_css)
+ 
+ with iface:
+     gr.Markdown("<h1>Welcome to Najeb</h1><p>AI Question Answering & Subnet Calculator. Enter a question or an IP address to generate answers or calculate subnets.</p>")
+ 
+     gr.HTML(f"""
+     <div id="image-container">
+         <img src="https://news.cornell.edu/sites/default/files/styles/story_thumbnail_xlarge/public/2024-07/robot-1280x720_0.jpg?itok=AF6MakCq" alt="AI Image">
+         <button onclick="scrollToTop()">Go to Najeb</button>
+     </div>
+     {scroll_js} <!-- JS that handles the scroll button -->
+     """)
+ 
+     with gr.Row():
+         mode_selector = gr.Radio(["AI Question Answering", "Subnet Calculation"], label="Select Mode", value="AI Question Answering")
+ 
+     with gr.Row():
+         with gr.Column():
+             input_text = gr.Textbox(label="Enter your question or IP", placeholder="Type here...", lines=2)
+             max_length_slider = gr.Slider(50, 1024, 128, label="Max Length")
+             num_beams_slider = gr.Slider(1, 10, 2, label="Number of Beams", step=1)
+             temperature_slider = gr.Slider(0.1, 1.0, 0.5, label="Temperature", step=0.1)
+             submit_button = gr.Button("Submit")
+ 
+         with gr.Column():
+             gpt2_output_box = gr.Markdown(label="GPT-2 Model Response")
+             najeb_output_box = gr.Markdown(label="Najeb Model Response")
+             llama2_output_box = gr.Markdown(label="LLaMA 2 Model Response")
+             summary_output_box = gr.Markdown(label="Summarized Response")
+             previous_questions_box = gr.Markdown(label="Previous Questions")
+ 
+     submit_button.click(
+         handle_mode_selection,
+         inputs=[mode_selector, input_text, max_length_slider, num_beams_slider, temperature_slider],
+         outputs=[gpt2_output_box, najeb_output_box, llama2_output_box, summary_output_box, previous_questions_box]
+     )
+ 
+ iface.launch(share=True)
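+ 
+ # share=True asks Gradio for a public gradio.live tunnel; on Hugging Face Spaces
+ # the app is already served publicly and the flag is ignored with a warning.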