File size: 1,563 Bytes
d60a3d5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
from huggingface_hub import InferenceClient
import gradio as gr

# Initialize the inference client with the Mixtral model
# NOTE(review): this hits the hosted Hugging Face Inference API — it needs
# network access and, typically, an HF token in the environment (confirm
# auth requirements for this model). The model id is hard-coded here.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def translate_text(text, target_language):
    """Translate ``text`` into ``target_language`` via the Mixtral model.

    Parameters
    ----------
    text : str
        The source text to translate. Empty/whitespace-only input returns "".
    target_language : str
        Human-readable language name (e.g. "French") inserted into the prompt.

    Returns
    -------
    str
        The model's translation, stripped of surrounding whitespace.
    """
    # Guard: don't spend an API call on empty input.
    if not text or not text.strip():
        return ""

    # Format the prompt to include the translation instruction
    prompt = f"Translate the following text to {target_language}:\n{text}"

    # BUG FIX: InferenceClient instances are not callable — the original
    # `client(text_generation=prompt, parameters=..., options=...)` raises
    # TypeError. The task method is `text_generation`, which takes the prompt
    # positionally and generation kwargs directly, and returns the generated
    # text as a plain string (not a list of dicts).
    translated_text = client.text_generation(prompt, max_new_tokens=100)

    # Defensive cleanup: some response formats echo the prompt back; strip it
    # if present so only the translation remains.
    translated_text = translated_text.replace(prompt, '').strip()

    return translated_text

# Target languages offered in the app's dropdown.
# Append new language names here to extend coverage.
languages = "French Spanish German Italian Portuguese".split()

# Build each widget up front with a descriptive name, then assemble the
# Gradio interface from them — same widgets, labels, and wiring as before.
source_textbox = gr.Textbox(label="Text to Translate", placeholder="Enter text here...")
language_dropdown = gr.Dropdown(label="Target Language", choices=languages)
result_textbox = gr.Textbox(label="Translated Text")

iface = gr.Interface(
    fn=translate_text,
    inputs=[source_textbox, language_dropdown],
    outputs=result_textbox,
    title="Simple Translator with Mixtral",
    description="Translate text to various languages using the Mixtral model from Hugging Face.",
)

# Start the local Gradio server and serve the UI.
iface.launch()