KarmaCST's picture
Update app.py
e7bf233 verified
raw
history blame
2.11 kB
import gradio as gr
import random
import requests
from PIL import Image
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
# from dotenv import load_dotenv
# Load the translation model
# Dzongkha -> English seq2seq checkpoint and its matching tokenizer,
# downloaded from the Hugging Face Hub at startup (network side effect).
translation_model = AutoModelForSeq2SeqLM.from_pretrained("KarmaCST/nllb-200-distilled-600M-dz-to-en")
tokenizer = AutoTokenizer.from_pretrained("KarmaCST/nllb-200-distilled-600M-dz-to-en")
# Text-to-image model proxied through Gradio's hosted-model loader.
model = gr.load("models/Purz/face-projection")
# NLLB language codes: Dzongkha in Tibetan script -> English in Latin script.
src_lang="dzo_Tibt"
tgt_lang="eng_Latn"
def translate_dzongkha_to_english(text, seed=None):
    """Translate a Dzongkha prompt to English and generate an image from it.

    Parameters
    ----------
    text : str
        The prompt typed into the textbox (Dzongkha, or an English example).
    seed : int | None
        Optional seed from the slider; seeds ``random`` for reproducibility.

    Returns
    -------
    The image produced by the text-to-image model for the translated prompt.
    """
    # Pipeline is built from the module-level model/tokenizer and the
    # module-level src_lang/tgt_lang NLLB codes.
    translation_pipeline = pipeline("translation",
                                    model=translation_model,
                                    tokenizer=tokenizer,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang)
    translated_text = translation_pipeline(text)[0]['translation_text']
    # BUG FIX: the original dropped translated_text and returned None, so the
    # Interface's Image output was always empty; it also lacked a second
    # parameter even though the Interface wires up two inputs (text, seed),
    # which raised a TypeError on every submit.
    if seed is not None:
        random.seed(seed)
    return model(translated_text)
def generate_image(translated_text, seed):
    """Generate an image for an (already translated) English prompt.

    Parameters
    ----------
    translated_text : str
        English prompt fed to the text-to-image model.
    seed : int | None
        Optional seed; seeds ``random`` for reproducible generation.

    Returns
    -------
    The image returned by the module-level text-to-image ``model``.
    """
    if seed is not None:
        random.seed(seed)
    # BUG FIX: the original referenced an undefined name `text` here, which
    # raised NameError on every call; use the actual parameter instead.
    if translated_text in [example[0] for example in examples]:
        print(f"Using example: {translated_text}")
    return model(translated_text)
# Preset prompts shown beneath the textbox; the second element of each pair
# is the seed input, deliberately left unset.
examples = [
    [prompt, None]
    for prompt in (
        "Humanoid Cat Warrior, Full View",
        "Warhammer Sisterhood",
        "Future Robots war",
        "Fantasy dragon",
    )
]
# Assemble the Gradio UI: the two input widgets map positionally onto the
# handler's (text, seed) parameters, and the handler's return value feeds
# the Image output.
prompt_box = gr.Textbox(label="Type here your imagination:", placeholder="Type or click an example...")
seed_slider = gr.Slider(minimum=0, maximum=10000, step=1, label="Seed (optional)")
interface = gr.Interface(
    fn=translate_dzongkha_to_english,
    inputs=[prompt_box, seed_slider],
    outputs=gr.Image(label="Generated Image"),
    examples=examples,
    theme="NoCrypt/miku",
    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
)
interface.launch()