sohiebwedyan committed on
Commit
7d1ffa9
·
verified ·
1 Parent(s): c7fda9c

Update app.py

Browse files
Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -15,12 +15,13 @@ token = os.getenv("HF_TOKEN")
15
  # إعداد الأنابيب للموديلات المختلفة باستخدام PyTorch
16
  device = 0 if torch.cuda.is_available() else -1
17
  Najeb_pipeline = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT", token=token, device=device)
18
- gpt2_pipeline = pipeline("text-generation", model="Qwen/Qwen-1_8B-Chat", device=device, trust_remote_code=True)
19
- #llama2_pipeline = pipeline("text-generation", model="Harikrishnan46624/finetuned_llama2-1.1b-chat", device=device)
20
  summarization_pipeline = pipeline("summarization", model="Falconsai/text_summarization", device=device)
21
 
22
  previous_questions = []
23
 
 
24
  # توليد الردود باستخدام GPT-2
25
  async def generate_gpt2(question, max_length, num_beams, temperature):
26
  return gpt2_pipeline(
 
15
  # إعداد الأنابيب للموديلات المختلفة باستخدام PyTorch
16
  device = 0 if torch.cuda.is_available() else -1
17
  Najeb_pipeline = pipeline("text-generation", model="sohiebwedyan/NAJEB_BOT", token=token, device=device)
18
+ #gpt2_pipeline = pipeline("text-generation", model="Qwen/Qwen-1_8B-Chat", device=device, trust_remote_code=True)
19
+ gpt2_pipeline = pipeline("text-generation", model="Harikrishnan46624/finetuned_llama2-1.1b-chat", device=device)
20
  summarization_pipeline = pipeline("summarization", model="Falconsai/text_summarization", device=device)
21
 
22
  previous_questions = []
23
 
24
+
25
  # توليد الردود باستخدام GPT-2
26
  async def generate_gpt2(question, max_length, num_beams, temperature):
27
  return gpt2_pipeline(