waleedmohd committed
Commit 549f4f9 · verified · 1 Parent(s): c26ed61

Update app.py

Files changed (1)
  1. app.py +42 -40
app.py CHANGED
@@ -1,64 +1,66 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
+# Use Arabic-optimized model (Free for commercial use)
+client = InferenceClient("aubmindlab/aragpt2-base")  # Arabic GPT-2 model
 
 def respond(
     message,
     history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
+    system_message="أنت مساعد مفيد يتحدث العربية",
+    max_tokens=512,
+    temperature=0.7,
+    top_p=0.95,
 ):
+    # Force Arabic responses
+    prompt = f"باللغة العربية: {message}"
+
     messages = [{"role": "system", "content": system_message}]
 
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
+    # Add conversation history
+    for user_msg, bot_msg in history[-3:]:  # Limit history for mobile performance
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if bot_msg:
+            messages.append({"role": "assistant", "content": bot_msg})
+
+    messages.append({"role": "user", "content": prompt})
 
+    # Generate Arabic response
     response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
+    for chunk in client.text_generation(
+        prompt=prompt,
+        max_new_tokens=max_tokens,
         stream=True,
         temperature=temperature,
-        top_p=top_p,
+        repetition_penalty=1.2,  # Reduce repetition common in Arabic dialects
     ):
-        token = message.choices[0].delta.content
-
-        response += token
+        response += chunk
         yield response
 
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# Mobile-optimized Arabic interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Textbox(value="أنت مساعد عربي مفيد", label="الرسالة النظام"),
+        gr.Slider(minimum=1, maximum=512, value=256, label="الحد الأقصى للكلمات"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.5, label="الابتكار"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, label="الدقة"),
     ],
+    css="""
+    .gradio-container {direction: rtl;}
+    textarea {font-family: 'Amiri', serif;}
+    """,
+    examples=[
+        ["ما هو أفضل حل للزراعة في السودان؟"],  # Sudan example
+        ["كيف يمكن تطوير الذكاء الاصطناعي في السعودية؟"]  # KSA example
+    ]
 )
 
-
 if __name__ == "__main__":
-    demo.launch()
+    # Mobile optimization settings
+    demo.launch(
+        enable_queue=True,
+        allowed_paths=["./assets"],  # For local Arabic fonts if needed
+        prevent_thread_lock=True
+    )
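For reference, below is a minimal sketch of driving the new respond() generator outside Gradio. It assumes the committed file is importable as app and that the Serverless Inference API exposes aubmindlab/aragpt2-base for streaming text generation; the sample question is the commit's own Sudan example ("What is the best solution for agriculture in Sudan?").

from app import respond  # hypothetical import of the module committed above

final = ""
for partial in respond("ما هو أفضل حل للزراعة في السودان؟", history=[]):
    final = partial  # respond() yields the growing response string
print(final)

Because text_generation() is called with stream=True and details left at its default of False, it yields plain string chunks, which is why response += chunk accumulates directly; in this version the messages list is assembled but only prompt is sent to text_generation(), so the model sees the Arabic-prefixed prompt rather than the chat history. Note also that enable_queue is a Gradio 3-era launch() argument; if the Space pins Gradio 4 or later, the equivalent is calling demo.queue() before demo.launch().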