anton-l HF staff committed on
Commit
27cb35e
·
1 Parent(s): e90522e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -4,7 +4,7 @@ from tqdm import tqdm
4
  from functools import partialmethod
5
  import gradio as gr
6
  from gradio.mix import Series
7
- from transformers import pipeline, FSMTForConditionalGeneration
8
  from rudalle.pipelines import generate_images
9
  from rudalle import get_rudalle_model, get_tokenizer, get_vae
10
 
@@ -13,6 +13,7 @@ tqdm.__init__ = partialmethod(tqdm.__init__, disable=True)
13
 
14
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
15
  translation_model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru", torch_dtype=torch.float16)
 
16
  translation_pipe = pipeline("translation", model=translation_model, device=0)
17
  dalle = get_rudalle_model("Malevich", pretrained=True, fp16=True, device=device)
18
  tokenizer = get_tokenizer()
 
4
  from functools import partialmethod
5
  import gradio as gr
6
  from gradio.mix import Series
7
+ from transformers import pipeline, FSMTForConditionalGeneration, FSMTTokenizer
8
  from rudalle.pipelines import generate_images
9
  from rudalle import get_rudalle_model, get_tokenizer, get_vae
10
 
 
13
 
14
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
15
  translation_model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru", torch_dtype=torch.float16)
16
+ tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
17
  translation_pipe = pipeline("translation", model=translation_model, device=0)
18
  dalle = get_rudalle_model("Malevich", pretrained=True, fp16=True, device=device)
19
  tokenizer = get_tokenizer()