anton-l (HF Staff) committed
Commit e90522e · 1 Parent(s): 854dfbc

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -4,7 +4,7 @@ from tqdm import tqdm
 from functools import partialmethod
 import gradio as gr
 from gradio.mix import Series
-from transformers import pipeline
+from transformers import pipeline, FSMTForConditionalGeneration
 from rudalle.pipelines import generate_images
 from rudalle import get_rudalle_model, get_tokenizer, get_vae
 
@@ -12,7 +12,8 @@ from rudalle import get_rudalle_model, get_tokenizer, get_vae
 tqdm.__init__ = partialmethod(tqdm.__init__, disable=True)
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-translation_pipe = pipeline("translation", model="facebook/wmt19-en-ru", torch_dtype=torch.float16, device=0)
+translation_model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru", torch_dtype=torch.float16)
+translation_pipe = pipeline("translation", model=translation_model, device=0)
 dalle = get_rudalle_model("Malevich", pretrained=True, fp16=True, device=device)
 tokenizer = get_tokenizer()
 vae = get_vae().to(device)
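
For context, a minimal standalone sketch of the pattern this commit adopts: the FSMT model is loaded in fp16 explicitly and the instance is handed to pipeline(), rather than passing torch_dtype to pipeline() with a model id. The explicit FSMTTokenizer load and the example prompt below are illustrative additions for a self-contained script, not part of app.py.

import torch
from transformers import FSMTForConditionalGeneration, FSMTTokenizer, pipeline

# Load the WMT19 en->ru model with the dtype applied at load time,
# instead of relying on pipeline() to forward torch_dtype to the model.
model = FSMTForConditionalGeneration.from_pretrained(
    "facebook/wmt19-en-ru", torch_dtype=torch.float16
)
# The tokenizer is loaded explicitly here since a model *instance* is
# passed to pipeline(); app.py leaves resolution to pipeline() itself.
tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")

# device=0 assumes a CUDA GPU, matching the Space's fp16 setup.
translate = pipeline("translation", model=model, tokenizer=tokenizer, device=0)

# Hypothetical example prompt; any English text works.
print(translate("a red square on a white background")[0]["translation_text"])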