Update app.py
app.py CHANGED
@@ -6,15 +6,18 @@ import asyncio
 import ipaddress
 from typing import Tuple
 
-
+# Set environment variables to configure PyTorch to use the GPU if available
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 
 # Get the token from the environment
 token = os.getenv("HF_TOKEN")
-
-
-
-
+
+# Set up the pipelines for the different models using PyTorch
+device = 0 if torch.cuda.is_available() else -1
+Najeb_pipeline = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT", token=token, device=device)
+gpt2_pipeline = pipeline("text-generation", model="Qwen/Qwen-1_8B-Chat", device=device, trust_remote_code=True)
+llama2_pipeline = pipeline("text-generation", model="Harikrishnan46624/finetuned_llama2-1.1b-chat", device=device)
+summarization_pipeline = pipeline("summarization", model="Falconsai/text_summarization", device=device)
 
 previous_questions = []
 
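Note on the new pipeline setup: device=0 places a pipeline on the first CUDA GPU, while device=-1 keeps it on the CPU. A minimal sketch of how one of these pipelines is invoked once loaded, using the public gpt2 checkpoint as a stand-in for the models above (the NAJEB_BOT model needs HF_TOKEN):

    import torch
    from transformers import pipeline

    # 0 = first CUDA GPU, -1 = CPU (same convention the commit uses)
    device = 0 if torch.cuda.is_available() else -1
    generator = pipeline("text-generation", model="gpt2", device=device)

    # max_length / num_beams / temperature mirror the Gradio sliders below;
    # temperature only takes effect with sampling enabled
    result = generator("What is a /24 subnet?",
                       max_length=64, num_beams=2,
                       temperature=0.5, do_sample=True)
    print(result[0]["generated_text"])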
@@ -61,12 +64,15 @@ async def generate_llama2(question, max_length, num_beams, temperature):
 async def generate_responses_async(question, max_length=128, num_beams=2, temperature=0.5):
     previous_questions.append(question)
 
+    # Create the tasks asynchronously to generate responses from the different models
     gpt2_task = asyncio.create_task(generate_gpt2(question, max_length, num_beams, temperature))
     Najeb_task = asyncio.create_task(generate_Najeb(question, max_length, num_beams, temperature))
     llama2_task = asyncio.create_task(generate_llama2(question, max_length, num_beams, temperature))
 
+    # Collect the responses from all models
     gpt2_response, Najeb_response, llama2_response = await asyncio.gather(gpt2_task, Najeb_task, llama2_task)
 
+    # Combine the responses and summarize them
     combined_responses = f"GPT-2: {gpt2_response}\nNajeb: {Najeb_response}\nLLaMA 2: {llama2_response}"
     summarized_response = summarization_pipeline(combined_responses, max_length=150, min_length=50, do_sample=False)[0]['summary_text']
 
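The generate_gpt2 / generate_Najeb / generate_llama2 coroutines themselves sit outside this hunk. Worth noting: transformers pipelines are blocking calls, so for the three create_task calls to overlap at all, each coroutine has to hand the pipeline call off the event loop. A sketch of what such a coroutine could look like (an assumption about code not shown in this diff, reusing gpt2_pipeline from the setup above):

    import asyncio

    async def generate_gpt2(question, max_length, num_beams, temperature):
        # asyncio.to_thread (Python 3.9+) runs the blocking pipeline call in a
        # worker thread so the event loop can start the other models meanwhile
        result = await asyncio.to_thread(
            gpt2_pipeline, question,
            max_length=max_length, num_beams=num_beams,
            temperature=temperature, do_sample=True,
        )
        return result[0]["generated_text"]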
@@ -209,67 +215,40 @@ p {
 }
 
 #image-container img {
-    width:
-
-
+    width: 100%;
+    max-width: 500px;
+    margin-bottom: 10px;
 }
 
 #image-container button {
     position: absolute;
-    top:
-    left:
-
-    background-color: rgba(0, 102, 204, 0.8);
+    top: 10px;
+    left: 10px;
+    background-color: rgba(0, 0, 0, 0.5);
     color: white;
     border: none;
-    padding: 10px
-    border-radius: 5px;
+    padding: 5px 10px;
     cursor: pointer;
-    font-size: 16px;
-    transition: background-color 0.3s ease;
-}
-
-#image-container button:hover {
-    background-color: rgba(0, 76, 153, 0.8);
-}
-
-.gr-box {
-    background-color: #ffffff;
-    border: 2px solid #0066cc;
-    border-radius: 10px;
-    padding: 10px;
-    margin-top: 10px;
-}
-#GPT2_output, #Najeb_output, #Llama2_output {
-    width: 400px;
-    height: 120px;
-}
-#summary_box {
-    background-color: rgba(255, 255, 255, 0.9);
 }
 """
 
-#
-[removed lines 253-271: the previous gr.Interface(...) setup; their content is not rendered in this diff]
-    outputs=[gpt2_output, Najeb_output, Llama2_output, summarized_output, previous_questions_output])
-
-# Launch the interface
-iface.launch(share=True)
+# Set up the Gradio interface
+gr.Interface(
+    fn=handle_mode_selection,
+    inputs=[
+        gr.Dropdown(choices=["AI Question Answering", "Subnet Calculation"], label="Select Mode"),
+        gr.Textbox(label="Input", placeholder="Ask your question or enter an IP address/subnet..."),
+        gr.Slider(minimum=50, maximum=1024, step=1, value=128, label="Max Length"),
+        gr.Slider(minimum=1, maximum=10, step=1, value=2, label="Num Beams"),
+        gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.5, label="Temperature")
+    ],
+    outputs=[
+        gr.Markdown(label="GPT-2 Answer"),
+        gr.Markdown(label="Najeb Answer"),
+        gr.Markdown(label="LLaMA 2 Answer"),
+        gr.Markdown(label="Summarized Answer"),
+        gr.Markdown(label="Previous Questions")
+    ],
+    css=custom_css,
+    live=True
+).launch(debug=True)
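Two notes on the rewritten interface, by way of a hypothetical sketch (handle_mode_selection, calculate_subnet and format_history live outside this diff; the helper names are illustrative only). gr.Interface calls fn with one argument per input component and expects one return value per output component, so the function must hand back five strings for the five gr.Markdown outputs:

    def handle_mode_selection(mode, user_input, max_length, num_beams, temperature):
        if mode == "Subnet Calculation":
            # only the summary slot is meaningful for subnet math (assumption)
            return "", "", "", calculate_subnet(user_input), format_history()
        return asyncio.run(
            generate_responses_async(user_input, max_length, num_beams, temperature)
        )

Also, live=True re-runs fn on every keystroke and slider move, which is expensive when each run invokes three text-generation models; the default submit-button behavior (live=False) may be the safer choice here.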