import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import torch
from langs import LANGS
import os
import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline  # EulerDiscreteScheduler

device1 = "cuda" if torch.cuda.is_available() else "cpu"

# Stable Diffusion 2.1 pipeline for text-to-image generation
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", safety_checker=None)
pipe = pipe.to(device1)

# distilgpt2 fine-tuned to expand short inputs into Stable Diffusion prompts
pipe1 = pipeline('text-generation', model='RamAnanth1/distilgpt2-sd-prompts')

TASK = "translation"
CKPT = "facebook/nllb-200-distilled-600M"

model = AutoModelForSeq2SeqLM.from_pretrained(CKPT)
tokenizer = AutoTokenizer.from_pretrained(CKPT)

device = 0 if torch.cuda.is_available() else -1


def translate(text):
    """
    Translate the text from the source language to the target language.
    """
    src_lang = "zho_Hans"  # NLLB/FLORES-200 code for Simplified Chinese
    tgt_lang = "eng_Latn"
    max_length = 400

    translation_pipeline = pipeline(TASK,
                                    model=model,
                                    tokenizer=tokenizer,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang,
                                    max_length=max_length,
                                    device=device)

    result = translation_pipeline(text)
    return result[0]['translation_text']


# prompt
stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")


def get_images(prompt):
    gallery_dir = stable_diffusion(prompt, fn_index=2)
    img_results = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
    return img_results[0]


def get_new_prompt(img, mode):
    interrogate = clip_interrogator_2(img, mode, 12, api_name="clipi2")
    return interrogate


def infer(input):
    prompt = pipe1(input + ',', num_return_sequences=1)[0]["generated_text"]
    img = get_images(prompt)
    result = get_new_prompt(img, 'fast')
    return result[0]


# stable diffusion
def genie(prompt, negative_prompt, scale, steps, seed):
    generator = torch.Generator(device=device1).manual_seed(seed)
    images = pipe(prompt,
                  negative_prompt=negative_prompt,
                  width=768,
                  height=768,
                  num_inference_steps=steps,
                  guidance_scale=scale,
                  num_images_per_prompt=1,
                  generator=generator).images[0]
    return images


with gr.Blocks() as demo:
    gr.Markdown(
        """
        #