SeyedAli committed
Commit 0b4638a · Parent: 8f78d7a

Update app.py

Files changed (1): app.py (+3 -3)
app.py CHANGED
@@ -7,7 +7,7 @@ from PIL import Image
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # English to Persian model
-fa_en_translation_tokenizer = MT5Tokenizer.from_pretrained("SeyedAli/Persian-to-English-Translation-mT5-V1").to(device)
+fa_en_translation_tokenizer = MT5Tokenizer.from_pretrained("SeyedAli/Persian-to-English-Translation-mT5-V1")
 fa_en_translation_model = MT5ForConditionalGeneration.from_pretrained("SeyedAli/Persian-to-English-Translation-mT5-V1").to(device)
 
 def run_fa_en_transaltion_model(input_string, **generator_args):
@@ -17,7 +17,7 @@ def run_fa_en_transaltion_model(input_string, **generator_args):
     return output
 
 # Persian to English model
-en_fa_translation_tokenizer = MT5Tokenizer.from_pretrained("SeyedAli/English-to-Persian-Translation-mT5-V1").to(device)
+en_fa_translation_tokenizer = MT5Tokenizer.from_pretrained("SeyedAli/English-to-Persian-Translation-mT5-V1")
 en_fa_translation_model = MT5ForConditionalGeneration.from_pretrained("SeyedAli/English-to-Persian-Translation-mT5-V1").to(device)
 
 def run_en_fa_transaltion_model(input_string, **generator_args):
@@ -27,7 +27,7 @@ def run_en_fa_transaltion_model(input_string, **generator_args):
     return output
 
 # Visual Question Answering model
-VQA_processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(device)
+VQA_processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
 VQA_model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(device)
 
 
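The fix is the standard device-placement pattern: Hugging Face tokenizers are plain Python objects, not torch.nn.Module instances, so chaining .to(device) onto MT5Tokenizer.from_pretrained raises an AttributeError. Only the model weights are moved to the device; the tensors the tokenizer returns are moved at call time instead. A minimal sketch of that pattern for the Persian-to-English model, using a hypothetical translate_fa_en helper rather than the exact body of run_fa_en_transaltion_model in app.py:

import torch
from transformers import MT5Tokenizer, MT5ForConditionalGeneration

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Tokenizer stays on CPU; only the model weights move to the device.
fa_en_tokenizer = MT5Tokenizer.from_pretrained("SeyedAli/Persian-to-English-Translation-mT5-V1")
fa_en_model = MT5ForConditionalGeneration.from_pretrained("SeyedAli/Persian-to-English-Translation-mT5-V1").to(device)

def translate_fa_en(input_string, **generator_args):
    # The BatchEncoding returned by the tokenizer does support .to(device),
    # which moves its tensors onto the same device as the model.
    inputs = fa_en_tokenizer(input_string, return_tensors="pt").to(device)
    output_ids = fa_en_model.generate(**inputs, **generator_args)
    return fa_en_tokenizer.batch_decode(output_ids, skip_special_tokens=True)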
 
 
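The ViltProcessor change follows the same reasoning: a processor bundles an image processor and a tokenizer and has no .to method, so only ViltForQuestionAnswering is placed on the device and the encoded inputs are moved per call. A sketch of how the loaded pair is typically used (the answer_vqa helper name is illustrative, not taken from app.py):

import torch
from PIL import Image
from transformers import ViltProcessor, ViltForQuestionAnswering

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Processor stays on CPU; only the model is moved.
VQA_processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
VQA_model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(device)

def answer_vqa(image: Image.Image, question: str) -> str:
    # Encode the image/question pair, then move the resulting tensors to the device.
    encoding = VQA_processor(image, question, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = VQA_model(**encoding)
    predicted_idx = outputs.logits.argmax(-1).item()
    return VQA_model.config.id2label[predicted_idx]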