sohiebwedyan committed · Commit 057b9bb · verified · 1 Parent(s): 091164c

Update app.py

Files changed (1): app.py (+38 -59)
app.py CHANGED
@@ -6,15 +6,18 @@ import asyncio
  import ipaddress
  from typing import Tuple
 
-
+ # Set the environment variables to configure PyTorch to use the GPU if available
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 
  # Get the token from the environment
  token = os.getenv("HF_TOKEN")
- Najeb_pipeline = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT", token=token, device=0 if torch.cuda.is_available() else -1)
- gpt2_pipeline = pipeline("text-generation", model="Qwen/Qwen-1_8B-Chat", device=0 if torch.cuda.is_available() else -1, trust_remote_code=True)
- llama2_pipeline = pipeline("text-generation", model="Harikrishnan46624/finetuned_llama2-1.1b-chat", device=0 if torch.cuda.is_available() else -1)
- summarization_pipeline = pipeline("summarization", model="Falconsai/text_summarization", device=0 if torch.cuda.is_available() else -1)
+
+ # Set up the pipelines for the different models using PyTorch
+ device = 0 if torch.cuda.is_available() else -1
+ Najeb_pipeline = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT", token=token, device=device)
+ gpt2_pipeline = pipeline("text-generation", model="Qwen/Qwen-1_8B-Chat", device=device, trust_remote_code=True)
+ llama2_pipeline = pipeline("text-generation", model="Harikrishnan46624/finetuned_llama2-1.1b-chat", device=device)
+ summarization_pipeline = pipeline("summarization", model="Falconsai/text_summarization", device=device)
 
  previous_questions = []
 
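Note: the hunk above hoists the repeated `0 if torch.cuda.is_available() else -1` expression into a single `device` flag shared by all four pipelines. A minimal, self-contained sketch of the same pattern; the sample input and print are illustrative, not part of app.py:

```python
import torch
from transformers import pipeline

# -1 tells transformers to run on CPU; 0 selects the first CUDA device.
device = 0 if torch.cuda.is_available() else -1

# Same construction as in the diff; the other three pipelines work identically.
summarizer = pipeline("summarization", model="Falconsai/text_summarization", device=device)

text = "Transformers pipelines bundle a tokenizer and a model behind one callable. " * 10
print(summarizer(text, max_length=150, min_length=50, do_sample=False)[0]["summary_text"])
```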
@@ -61,12 +64,15 @@ async def generate_llama2(question, max_length, num_beams, temperature):
  async def generate_responses_async(question, max_length=128, num_beams=2, temperature=0.5):
      previous_questions.append(question)
 
+     # Create the tasks asynchronously to generate responses from the different models
      gpt2_task = asyncio.create_task(generate_gpt2(question, max_length, num_beams, temperature))
      Najeb_task = asyncio.create_task(generate_Najeb(question, max_length, num_beams, temperature))
      llama2_task = asyncio.create_task(generate_llama2(question, max_length, num_beams, temperature))
 
+     # Gather the responses from all models
      gpt2_response, Najeb_response, llama2_response = await asyncio.gather(gpt2_task, Najeb_task, llama2_task)
 
+     # Combine the responses and summarize them
      combined_responses = f"GPT-2: {gpt2_response}\nNajeb: {Najeb_response}\nLLaMA 2: {llama2_response}"
      summarized_response = summarization_pipeline(combined_responses, max_length=150, min_length=50, do_sample=False)[0]['summary_text']
 
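Note: the `generate_*` helpers are not shown in this hunk. For the `asyncio.gather` call above to actually overlap the three generations, each helper must yield control, e.g. by pushing the blocking pipeline call onto a worker thread. A hypothetical sketch of one such helper, assuming it wraps the `gpt2_pipeline` defined at the top of app.py; the body is an assumption, not the code in the repo:

```python
import asyncio

async def generate_gpt2(question, max_length, num_beams, temperature):
    # transformers pipelines are synchronous; running the call in a worker
    # thread lets asyncio.gather overlap the three models instead of
    # serializing them on the event loop.
    result = await asyncio.to_thread(
        gpt2_pipeline,  # hypothetical reference to the pipeline created above
        question,
        max_length=max_length,
        num_beams=num_beams,
        temperature=temperature,
    )
    return result[0]["generated_text"]
```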
@@ -209,67 +215,40 @@ p {
  }
 
  #image-container img {
-     width: 1400px;
-     border-radius: 10px;
-     box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+     width: 100%;
+     max-width: 500px;
+     margin-bottom: 10px;
  }
 
  #image-container button {
      position: absolute;
-     top: 50%;
-     left: 50%;
-     transform: translate(-50%, -50%);
-     background-color: rgba(0, 102, 204, 0.8);
+     top: 10px;
+     left: 10px;
+     background-color: rgba(0, 0, 0, 0.5);
      color: white;
      border: none;
-     padding: 10px 20px;
-     border-radius: 5px;
+     padding: 5px 10px;
      cursor: pointer;
-     font-size: 16px;
-     transition: background-color 0.3s ease;
- }
-
- #image-container button:hover {
-     background-color: rgba(0, 76, 153, 0.8);
- }
-
- .gr-box {
-     background-color: #ffffff;
-     border: 2px solid #0066cc;
-     border-radius: 10px;
-     padding: 10px;
-     margin-top: 10px;
- }
- #GPT2_output, #Najeb_output, #Llama2_output {
-     width: 400px;
-     height: 120px;
- }
- #summary_box {
-     background-color: rgba(255, 255, 255, 0.9);
  }
  """
 
- # Create the Gradio interface
- with gr.Blocks(css=custom_css) as iface:
-     with gr.Row():
-         with gr.Column():
-             question_input = gr.Textbox(label="Enter Your Question", placeholder="Ask a question here...", lines=3)
-             mode_selector = gr.Dropdown(
-                 label="Select Mode", choices=["AI Question Answering", "IP Subnet Calculation"], value="AI Question Answering"
-             )
-             max_length_slider = gr.Slider(50, 500, step=10, label="Max Length", value=150)
-             num_beams_slider = gr.Slider(1, 5, step=1, label="Number of Beams", value=2)
-             temperature_slider = gr.Slider(0.0, 1.0, step=0.05, label="Temperature", value=0.5)
-
-         with gr.Column():
-             gpt2_output = gr.Textbox(label="GPT-2 Output", interactive=False)
-             Najeb_output = gr.Textbox(label="Najeb Output", interactive=False)
-             Llama2_output = gr.Textbox(label="Llama2 Output", interactive=False)
-             summarized_output = gr.Textbox(label="Summarized Answer", interactive=False)
-             previous_questions_output = gr.Textbox(label="Previous Questions", interactive=False)
-
-     mode_selector.change(handle_mode_selection, inputs=[mode_selector, question_input, max_length_slider, num_beams_slider, temperature_slider],
-                          outputs=[gpt2_output, Najeb_output, Llama2_output, summarized_output, previous_questions_output])
-
- # Launch the interface
- iface.launch(share=True)
+ # Set up the Gradio interface
+ gr.Interface(
+     fn=handle_mode_selection,
+     inputs=[
+         gr.Dropdown(choices=["AI Question Answering", "Subnet Calculation"], label="Select Mode"),
+         gr.Textbox(label="Input", placeholder="Ask your question or enter an IP address/subnet..."),
+         gr.Slider(minimum=50, maximum=1024, step=1, value=128, label="Max Length"),
+         gr.Slider(minimum=1, maximum=10, step=1, value=2, label="Num Beams"),
+         gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.5, label="Temperature")
+     ],
+     outputs=[
+         gr.Markdown(label="GPT-2 Answer"),
+         gr.Markdown(label="Najeb Answer"),
+         gr.Markdown(label="LLaMA 2 Answer"),
+         gr.Markdown(label="Summarized Answer"),
+         gr.Markdown(label="Previous Questions")
+     ],
+     css=custom_css,
+     live=True
+ ).launch(debug=True)
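Note: with the switch from `gr.Blocks` to `gr.Interface` above, `handle_mode_selection` must accept the five inputs in the order they are declared and return five values, one per `gr.Markdown` output. A hypothetical skeleton consistent with that wiring; the real function in app.py is not shown in this diff, `calculate_subnet` is an assumed `ipaddress`-based helper, and the return shape of `generate_responses_async` is likewise assumed:

```python
import asyncio

def handle_mode_selection(mode, text, max_length, num_beams, temperature):
    # Hypothetical dispatch on the dropdown value declared in the Interface.
    if mode == "Subnet Calculation":
        subnet_info = calculate_subnet(text)  # assumed helper using ipaddress
        return subnet_info, "", "", "", "\n".join(previous_questions)
    gpt2, najeb, llama2, summary = asyncio.run(
        generate_responses_async(text, max_length, num_beams, temperature)
    )
    return gpt2, najeb, llama2, summary, "\n".join(previous_questions)
```

Note also that `live=True` re-runs the handler on every input change rather than on a submit click, which is heavyweight when each run invokes three text-generation models plus a summarizer.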