samvb1002 committed
Commit eba41fe · verified · 1 Parent(s): 08cfacb

Update app.py

Files changed (1)
  1. app.py +7 -4
app.py CHANGED
@@ -2,15 +2,18 @@ import gradio as gr
 from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import pytesseract
 
-# Initialize chat model (You can change the model here)
-chat_model = pipeline("text-generation", model="gpt2")  # You can switch to any model of your choice
 
 
+# Load model directly
+tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabic")
+model = AutoModelForCausalLM.from_pretrained("aubmindlab/bert-base-arabic")
+
 # Chat function
 def chat_fn(history, user_input):
     conversation = {"history": history, "user": user_input}
-    response = chat_model(user_input, max_length=50, num_return_sequences=1)
-    conversation["bot"] = response[0]['generated_text']
+    # Use the model for Arabic
+    response = model.generate(input_ids=tokenizer.encode(user_input, return_tensors="pt"), max_length=50)
+    conversation["bot"] = tokenizer.decode(response[0], skip_special_tokens=True)
     history.append((user_input, conversation["bot"]))
     return history, ""
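
Note on the change: the hunk replaces the GPT-2 text-generation pipeline with a directly loaded Arabic checkpoint (the pipeline import stays behind, now unused). For context, below is a minimal sketch of how the updated chat_fn might be wired into the rest of app.py. The gr.Blocks layout, the Textbox placeholder, and the launch() call are assumptions, since the hunk only covers lines 2-19 of the file. Also, aubmindlab/bert-base-arabic is a masked-language (BERT) checkpoint; AutoModelForCausalLM should still load it (transformers maps BERT configs to BertLMHeadModel), but it was not pretrained for open-ended generation, so output quality may be limited, and a causal Arabic model such as aubmindlab/aragpt2-base would likely be a more natural fit.

# Hypothetical wiring for the updated chat_fn; the actual app.py outside this hunk may differ.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabic")
model = AutoModelForCausalLM.from_pretrained("aubmindlab/bert-base-arabic")

def chat_fn(history, user_input):
    # Encode the Arabic user message and generate a continuation.
    input_ids = tokenizer.encode(user_input, return_tensors="pt")
    output_ids = model.generate(input_ids, max_length=50)
    bot_reply = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    history.append((user_input, bot_reply))
    return history, ""  # second output clears the textbox

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # displays (user, bot) message pairs
    textbox = gr.Textbox(placeholder="Type a message")
    # On submit, pass the current history and the new message to chat_fn.
    textbox.submit(chat_fn, inputs=[chatbot, textbox], outputs=[chatbot, textbox])

demo.launch()

Compared with the removed pipeline("text-generation", model="gpt2") call, loading the tokenizer and model separately gives direct control over the generate() arguments such as max_length.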