Spaces:
Sleeping
Sleeping
File size: 3,638 Bytes
7c1115e 93cb70c 39ba994 630f14f 93cb70c 630f14f 93fee0b 7c1115e 93fee0b 7c1115e 48c1712 93fee0b 48c1712 93fee0b 48c1712 7c1115e d21eb1e 93fee0b 48c1712 7c1115e 48c1712 93fee0b 7c1115e 48c1712 7c1115e 48c1712 93fee0b 7c1115e 48c1712 93fee0b 48c1712 1786f21 7c1115e 93fee0b 48c1712 93fee0b 7c1115e 48c1712 93cb70c 3201c4f 93fee0b 7c1115e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
import os
from huggingface_hub import login
from transformers import MarianMTModel, MarianTokenizer, pipeline
import requests
import io
from PIL import Image
import gradio as gr
# Set Hugging Face API key
# Fail fast when the token is absent: every downstream call (model download
# via from_pretrained and the Inference API requests) needs authentication.
hf_token = os.getenv("HUGGINGFACE_API_KEY")
if hf_token is None:
    raise ValueError("Hugging Face API key not found in environment variables.")

# Login to Hugging Face so Hub requests made later in this script are
# authenticated with the token above.
login(token=hf_token)
# Mapping from a source-language code (as entered by the user) to the
# Helsinki-NLP MarianMT checkpoint that translates that language to English.
language_models = dict(
    fra="Helsinki-NLP/opus-mt-fr-en",
    spa="Helsinki-NLP/opus-mt-es-en",
    tam="Helsinki-NLP/opus-mt-tam-en",
    deu="Helsinki-NLP/opus-mt-de-en",
    jpn="Helsinki-NLP/opus-mt-ja-en",
    rus="Helsinki-NLP/opus-mt-ru-en",
    kor="Helsinki-NLP/opus-mt-ko-en",
    hin="Helsinki-NLP/opus-mt-hi-en",
    ita="Helsinki-NLP/opus-mt-it-en",
    por="Helsinki-NLP/opus-mt-pt-en",
    # Add more language models as needed
)
# Function to get translator pipeline for specific language
def get_translator(language_code):
    """Return a translation pipeline for *language_code* -> English.

    Parameters
    ----------
    language_code : str
        Key into ``language_models`` (e.g. ``"fra"`` for French).

    Returns
    -------
    transformers.Pipeline
        A ``"translation"`` pipeline wrapping the MarianMT model.

    Raises
    ------
    ValueError
        If ``language_code`` has no registered model.
    """
    model_name = language_models.get(language_code)
    # Guard clause: reject unknown codes before touching the network.
    if model_name is None:
        raise ValueError(f"No translation model found for language code '{language_code}'.")
    # Download (or load from local cache) the checkpoint and wrap it in a
    # ready-to-call pipeline.
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    return pipeline("translation", model=model, tokenizer=tokenizer)
# FLUX model API settings
# Hosted Inference API endpoint for the FLUX.1-dev text-to-image model;
# requests to it are authenticated with the same token read at startup.
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
flux_headers = {"Authorization": f"Bearer {hf_token}"}
# Function for translation, creative text generation, and image creation
def translate_generate_image_and_text(input_text, src_lang_code):
    """Run the full pipeline: translate, expand creatively, render an image.

    Parameters
    ----------
    input_text : str
        Text in the source language.
    src_lang_code : str
        Language code key into ``language_models`` (e.g. ``"fra"``).

    Returns
    -------
    tuple
        ``(translated_text, creative_text, image)`` where ``image`` is a
        ``PIL.Image`` or ``None`` on image-generation failure. On any
        exception the first element becomes an error message and the other
        two are ``None`` — the Gradio UI shows the message instead of crashing.
    """
    try:
        # Step 1: Get translator and translate text
        translator = get_translator(src_lang_code)
        translation = translator(input_text, max_length=40)
        translated_text = translation[0]['translation_text']

        # Step 2: Generate creative text with Mistral model
        mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
        mistral_headers = {"Authorization": f"Bearer {hf_token}"}
        # timeout= keeps a stalled Inference API call from hanging the
        # Gradio worker indefinitely.
        mistral_response = requests.post(
            mistral_API_URL,
            headers=mistral_headers,
            json={"inputs": translated_text},
            timeout=60,
        )
        if mistral_response.status_code == 200:
            creative_text = mistral_response.json()[0]['generated_text']
        else:
            creative_text = "Error generating creative text"

        # Step 3: Generate an image with FLUX model
        flux_response = requests.post(
            flux_API_URL,
            headers=flux_headers,
            json={"inputs": creative_text},
            timeout=120,
        )
        if flux_response.status_code == 200:
            # Response body is raw image bytes; decode with Pillow.
            image = Image.open(io.BytesIO(flux_response.content))
        else:
            image = None
        return translated_text, creative_text, image
    except Exception as e:
        # Broad catch is deliberate: surface the error in the UI output
        # fields rather than raising into Gradio.
        return f"An error occurred: {str(e)}", None, None
# Gradio interface setup: two text inputs (source text and language code)
# mapped to three outputs (translation, creative text, generated image).
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs=[
        gr.Textbox(label="Enter text for translation"),
        gr.Textbox(label="Source Language Code", placeholder="e.g., 'fra' for French, 'spa' for Spanish"),
    ],
    outputs=[
        gr.Textbox(label="Translated Text"),
        gr.Textbox(label="Creative Text"),
        gr.Image(label="Generated Image"),
    ],
    title="Multilingual Translation, Creative Content, and Image Generator",
    description="Select a language and translate text to English, generate creative content, and produce an image.",
)

# Launch the Gradio app only when executed as a script, so importing this
# module (e.g. for testing) does not start a web server. Hugging Face
# Spaces runs app.py as __main__, so behavior there is unchanged.
if __name__ == "__main__":
    interface.launch()
|