import random

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline


# Load the Dzongkha-to-English translation model and its tokenizer
translation_model = AutoModelForSeq2SeqLM.from_pretrained("KarmaCST/nllb-200-distilled-600M-dz-to-en")
tokenizer = AutoTokenizer.from_pretrained("KarmaCST/nllb-200-distilled-600M-dz-to-en")


# NLLB-200 / FLORES-200 language codes: Dzongkha (Tibetan script) -> English (Latin script)
src_lang = "dzo_Tibt"
tgt_lang = "eng_Latn"


# Load the hosted text-to-image model from the Hugging Face Hub
model = gr.load("models/Purz/face-projection")

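# Translate the Dzongkha prompt to English, then forward the translated prompt
# (and an optional seed) to the loaded image model.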
def generate_image(text, seed):
    # Check against the predefined Dzongkha examples before translating,
    # otherwise the (now English) text could never match them
    if text in [example[0] for example in examples]:
        print(f"Using example: {text}")

    # Translate the Dzongkha prompt to English
    translation_pipeline = pipeline("translation",
                                    model=translation_model,
                                    tokenizer=tokenizer,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang)
    text = translation_pipeline(text)[0]['translation_text']

    # Note: this only seeds Python's RNG; the hosted model call itself is not seeded
    if seed is not None:
        random.seed(seed)

    return model(text)

# Example Dzongkha prompts (prompt text, optional seed)
examples = [
    ["བྱི་ཅུང་ཚུ་གངས་རི་གི་ཐོག་ཁར་འཕུར།", None],
    ["པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་གནམ་གྲུ་འཕུར།", None],
    ["པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་ ཤིང་ཚུ་གི་བར་ན་ གནམ་གྲུ་འཕུར་བའི་འཐོང་གནང་།", None],
    ["སློབ་ཕྲུག་ཚུ་ ཆརཔ་ནང་རྐང་རྩེད་རྩེ་དེས།", None]
]

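# Wire everything into a Gradio Interface: a Dzongkha prompt plus an optional seed in,
# a generated image out.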
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Type your imagination here (in Dzongkha):", placeholder="Type or click an example..."),
        gr.Slider(minimum=0, maximum=10000, step=1, label="Seed (optional)")
    ],
    outputs=gr.Image(label="Generated Image"),
    examples=examples,
    theme="NoCrypt/miku",
    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
)

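# Start the Gradio app; when run locally, it serves at http://127.0.0.1:7860 by default.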
interface.launch()