Update app.py

app.py CHANGED
@@ -9,8 +9,7 @@ from typing import Tuple
 
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 
-
-
+# Get the token from the environment
 token = os.getenv("HF_TOKEN")
 Najeb_pipeline = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT", token=token, device=0 if torch.cuda.is_available() else -1)
 gpt2_pipeline = pipeline("text-generation", model="Qwen/Qwen-1_8B-Chat", device=0 if torch.cuda.is_available() else -1, trust_remote_code=True)
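Note on the repeated `device=0 if torch.cuda.is_available() else -1` idiom: a transformers pipeline takes an integer device index, where -1 means CPU and 0 the first CUDA device. A minimal sketch with the choice factored into a helper (the helper name is illustrative, not part of this commit):

import os
import torch
from transformers import pipeline

def pick_device() -> int:
    # -1 selects the CPU; 0 selects the first CUDA device.
    return 0 if torch.cuda.is_available() else -1

# Same construction as above, with the device choice factored out.
token = os.getenv("HF_TOKEN")
najeb = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT",
                 token=token, device=pick_device())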
@@ -19,6 +18,7 @@ summarization_pipeline = pipeline("summarization", model="Falconsai/text_summari
 
 previous_questions = []
 
+# Generate responses using GPT-2
 async def generate_gpt2(question, max_length, num_beams, temperature):
     return gpt2_pipeline(
         question,
@@ -31,6 +31,7 @@ async def generate_gpt2(question, max_length, num_beams, temperature):
         temperature=temperature
     )[0]['generated_text']
 
+# Generate responses using Najeb
 async def generate_Najeb(question, max_length, num_beams, temperature):
     return Najeb_pipeline(
         question,
@@ -43,6 +44,7 @@ async def generate_Najeb(question, max_length, num_beams, temperature):
         temperature=temperature
     )[0]['generated_text']
 
+# Generate responses using LLaMA 2
 async def generate_llama2(question, max_length, num_beams, temperature):
     return llama2_pipeline(
         question,
@@ -50,22 +52,21 @@ async def generate_llama2(question, max_length, num_beams, temperature):
         num_return_sequences=1,
         num_beams=num_beams,
         do_sample=True,
-        top_k=
-        top_p=0.
+        top_k=50,
+        top_p=0.95,
         temperature=temperature
     )[0]['generated_text']
 
+# Handle the responses asynchronously
 async def generate_responses_async(question, max_length=128, num_beams=2, temperature=0.5):
     previous_questions.append(question)
 
-
     gpt2_task = asyncio.create_task(generate_gpt2(question, max_length, num_beams, temperature))
     Najeb_task = asyncio.create_task(generate_Najeb(question, max_length, num_beams, temperature))
     llama2_task = asyncio.create_task(generate_llama2(question, max_length, num_beams, temperature))
 
     gpt2_response, Najeb_response, llama2_response = await asyncio.gather(gpt2_task, Najeb_task, llama2_task)
 
-
     combined_responses = f"GPT-2: {gpt2_response}\nNajeb: {Najeb_response}\nLLaMA 2: {llama2_response}"
     summarized_response = summarization_pipeline(combined_responses, max_length=150, min_length=50, do_sample=False)[0]['summary_text']
 
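Note on the async fan-out above: transformers pipeline calls are synchronous, so each `async def` blocks the event loop while its model runs, and `asyncio.gather` still executes the three generations one after another. A minimal sketch of one way to get real overlap, assuming the same three pipeline objects defined earlier, is to push each blocking call onto a worker thread:

import asyncio

# Sketch: run the blocking pipeline calls on worker threads so the three
# models can genuinely overlap (subject to GPU contention).
async def generate_all(question: str, max_length: int = 128):
    def run(pipe):
        return pipe(question, max_length=max_length, num_return_sequences=1)[0]["generated_text"]

    return await asyncio.gather(
        asyncio.to_thread(run, gpt2_pipeline),
        asyncio.to_thread(run, Najeb_pipeline),
        asyncio.to_thread(run, llama2_pipeline),
    )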
@@ -77,6 +78,7 @@ async def generate_responses_async(question, max_length=128, num_beams=2, temper
         "Previous Questions": "\n".join(previous_questions[-5:])
     }
 
+# Choose how to process the input based on the selected mode
 def handle_mode_selection(mode, input_text, max_length, num_beams, temperature):
     if mode == "AI Question Answering":
         result = asyncio.run(generate_responses_async(input_text, max_length, num_beams, temperature))
@@ -90,6 +92,8 @@ def handle_mode_selection(mode, input_text, max_length, num_beams, temperature):
     else:
         subnet_result = calculate_subnet(input_text)
         return subnet_result, "", "", "", ""
+
+# Get the network and the IP address
 def get_network(ip_input: str) -> Tuple[ipaddress.IPv4Network, str]:
     try:
         if ip_input.count("/") == 0:
@@ -100,19 +104,18 @@ def get_network(ip_input: str) -> Tuple[ipaddress.IPv4Network, str]:
     except ValueError:
         return None, None
 
+# Calculate the subnet
 def calculate_subnet(ip_input: str) -> str:
     network, ip = get_network(ip_input)
     if network is None or ip is None:
         return "Invalid IP Address or Subnet!"
 
-
     network_address = network.network_address
     broadcast_address = network.broadcast_address
     usable_hosts = list(network.hosts())
     num_usable_hosts = len(usable_hosts)
     usable_hosts_range = f"{usable_hosts[0]} - {usable_hosts[-1]}" if usable_hosts else "NA"
 
-
     octets = str(ip).split('.')
     binary_octets = [bin(int(octet))[2:].zfill(8) for octet in octets]
     bin_ip = '.'.join(binary_octets)
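The body of `get_network` is mostly elided by this diff; the visible `ip_input.count("/")` check and the `ipaddress` annotations imply it accepts either a bare IPv4 address or CIDR notation. A sketch consistent with those lines, where the /24 default for a bare address is an assumption:

import ipaddress
from typing import Tuple

def get_network_sketch(ip_input: str) -> Tuple[ipaddress.IPv4Network, ipaddress.IPv4Address]:
    try:
        if ip_input.count("/") == 0:
            ip_input += "/24"  # assumed default prefix when none is given
        ip = ipaddress.IPv4Address(ip_input.split("/")[0])
        # strict=False lets a host address stand in for its network address
        network = ipaddress.IPv4Network(ip_input, strict=False)
        return network, ip
    except ValueError:
        return None, None

For example, get_network_sketch("192.168.1.10/28") returns the 192.168.1.0/28 network together with the host address 192.168.1.10.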
@@ -123,7 +126,6 @@ def calculate_subnet(ip_input: str) -> str:
     bin_mask = str(bin(int(network.netmask))[2:].zfill(32))
     bin_mask = '.'.join([bin_mask[i:i+8] for i in range(0, len(bin_mask), 8)])
 
-
     result = f"""
 IP Address: {ip}
 Address (bin): {bin_ip}
@@ -141,8 +143,7 @@ Private IP: {network.is_private}
 """
     return result.strip()
 
-
-
+# Define the custom styling
 custom_css = """
 body {
     background-color: #f0f8ff;
@@ -202,7 +203,6 @@ p {
     box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
 }
 
-
 #image-container {
     text-align: center;
     position: relative;
@@ -240,58 +240,36 @@ p {
     padding: 10px;
     margin-top: 10px;
 }
-#
-
-
+#GPT2_output, #Najeb_output, #Llama2_output {
+    width: 400px;
+    height: 120px;
+}
+#summary_box {
+    background-color: rgba(255, 255, 255, 0.9);
 }
 """
 
-scroll_js = """
-<script>
-function scrollToTop() {
-    document.getElementById('target-section').scrollIntoView({behavior: 'smooth'});
-}
-</script>
-"""
-
-
-
-iface = gr.Blocks(css=custom_css)
-
-with iface:
-    gr.Markdown(f"<h1>Welcome to Najeb</h1><p>AI Question & Subnet Calculator, Enter your question or IP address to generate answers or calculate subnets.</p>")
-
-    gr.HTML(f"""
-    <div id="image-container">
-        <img src="https://news.cornell.edu/sites/default/files/styles/story_thumbnail_xlarge/public/2024-07/robot-1280x720_0.jpg?itok=AF6MakCq" alt="AI Image">
-        <button onclick="scrollToTop()">Go to Najeb</button>
-    </div>
-    {scroll_js} <!-- Adding the JS to handle scrolling -->
-    """)
-
-
-    with gr.Row():
-        mode_selector = gr.Radio(["AI Question Answering", "Subnet Calculation"], label="Select Mode", value="AI Question Answering")
-
+# Create the Gradio interface
+with gr.Blocks(css=custom_css) as iface:
     with gr.Row():
         with gr.Column():
-
-
-
-
-
+            question_input = gr.Textbox(label="Enter Your Question", placeholder="Ask a question here...", lines=3)
+            mode_selector = gr.Dropdown(
+                label="Select Mode", choices=["AI Question Answering", "IP Subnet Calculation"], value="AI Question Answering"
+            )
+            max_length_slider = gr.Slider(50, 500, step=10, label="Max Length", value=150)
+            num_beams_slider = gr.Slider(1, 5, step=1, label="Number of Beams", value=2)
+            temperature_slider = gr.Slider(0.0, 1.0, step=0.05, label="Temperature", value=0.5)
 
         with gr.Column():
-
-
-
-
-
-
-
-
-            inputs=[mode_selector, input_text, max_length_slider, num_beams_slider, temperature_slider],
-            outputs=[gpt2_output_box, najeb_output_box, llama2_output_box, summary_output_box, previous_questions_box]
-        )
+            gpt2_output = gr.Textbox(label="GPT-2 Output", interactive=False)
+            Najeb_output = gr.Textbox(label="Najeb Output", interactive=False)
+            Llama2_output = gr.Textbox(label="Llama2 Output", interactive=False)
+            summarized_output = gr.Textbox(label="Summarized Answer", interactive=False)
+            previous_questions_output = gr.Textbox(label="Previous Questions", interactive=False)
+
+    mode_selector.change(handle_mode_selection, inputs=[mode_selector, question_input, max_length_slider, num_beams_slider, temperature_slider],
+                         outputs=[gpt2_output, Najeb_output, Llama2_output, summarized_output, previous_questions_output])
 
+# Launch the interface
 iface.launch(share=True)
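Two notes on the rewritten UI block. `handle_mode_selection` returns a five-element tuple in both branches, and Gradio maps those values positionally onto the five output textboxes. Also, the handler is bound only to `mode_selector.change`, which fires when the dropdown value changes but not when the question text does; a `.submit` listener on the textbox is the usual companion. A stripped-down sketch of the same wiring (handler and component names here are illustrative, not from the commit):

import gradio as gr

# Illustrative handler: returns one value per bound output component.
def handle(mode, text):
    return f"[{mode}] {text}", f"{len(text)} characters"

with gr.Blocks() as demo:
    mode = gr.Dropdown(choices=["AI Question Answering", "IP Subnet Calculation"],
                       value="AI Question Answering", label="Select Mode")
    question = gr.Textbox(label="Question")
    answer = gr.Textbox(label="Answer", interactive=False)
    stats = gr.Textbox(label="Stats", interactive=False)
    # .change fires on dropdown edits; .submit fires on Enter in the textbox.
    mode.change(handle, inputs=[mode, question], outputs=[answer, stats])
    question.submit(handle, inputs=[mode, question], outputs=[answer, stats])

demo.launch()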