# transart / app.py
import os
from huggingface_hub import login
from transformers import MarianMTModel, MarianTokenizer, pipeline
import requests
import io
from PIL import Image
import gradio as gr
# Read the Hugging Face API key from the environment
hf_token = os.getenv("HUGGINGFACE_API_KEY")
if hf_token is None:
    raise ValueError("Hugging Face API key not found in environment variables.")
# Login to Hugging Face
login(token=hf_token)
# Define models for specific languages
language_models = {
    "fra": "Helsinki-NLP/opus-mt-fr-en",
    "spa": "Helsinki-NLP/opus-mt-es-en",
    "tam": "Helsinki-NLP/opus-mt-tam-en",
    "deu": "Helsinki-NLP/opus-mt-de-en",
    "jpn": "Helsinki-NLP/opus-mt-ja-en",
    "rus": "Helsinki-NLP/opus-mt-ru-en",
    "kor": "Helsinki-NLP/opus-mt-ko-en",
    "hin": "Helsinki-NLP/opus-mt-hi-en",
    "ita": "Helsinki-NLP/opus-mt-it-en",
    "por": "Helsinki-NLP/opus-mt-pt-en",
    # Add more language models as needed
}
# Return a translation pipeline for the given source language code
def get_translator(language_code):
    model_name = language_models.get(language_code)
    if model_name:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        return pipeline("translation", model=model, tokenizer=tokenizer)
    else:
        raise ValueError(f"No translation model found for language code '{language_code}'.")
# FLUX model API settings
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
flux_headers = {"Authorization": f"Bearer {hf_token}"}
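# Note: for text-to-image models, the Hugging Face Inference API returns raw
# image bytes on success, which is why the FLUX response below is decoded with
# PIL rather than parsed as JSON.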
# Function for translation, creative text generation, and image creation
def translate_generate_image_and_text(input_text, src_lang_code):
    try:
        # Step 1: Get translator and translate text
        translator = get_translator(src_lang_code)
        translation = translator(input_text, max_length=40)
        translated_text = translation[0]['translation_text']

        # Step 2: Generate creative text with Mistral model
        mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
        mistral_headers = {"Authorization": f"Bearer {hf_token}"}
        mistral_response = requests.post(mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text})
        if mistral_response.status_code == 200:
            creative_text = mistral_response.json()[0]['generated_text']
        else:
            creative_text = "Error generating creative text"

        # Step 3: Generate an image with FLUX model
        flux_response = requests.post(flux_API_URL, headers=flux_headers, json={"inputs": creative_text})
        if flux_response.status_code == 200:
            image_bytes = flux_response.content
            image = Image.open(io.BytesIO(image_bytes))
        else:
            image = None

        return translated_text, creative_text, image
    except Exception as e:
        return f"An error occurred: {str(e)}", None, None
# Gradio interface setup
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs=[
        gr.Textbox(label="Enter text for translation"),
        gr.Textbox(label="Source Language Code", placeholder="e.g., 'fra' for French, 'spa' for Spanish")
    ],
    outputs=[
        gr.Textbox(label="Translated Text"),
        gr.Textbox(label="Creative Text"),
        gr.Image(label="Generated Image")
    ],
    title="Multilingual Translation, Creative Content, and Image Generator",
    description="Translate text from the selected source language to English, generate creative content from it, and produce an image."
)
# Launch the Gradio app
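# On Hugging Face Spaces the default launch() is sufficient; when running locally,
# launch(share=True) can be used to expose a temporary public link.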
interface.launch()