Lenylvt commited on
Commit
f4c6da9
·
verified ·
1 Parent(s): d60a3d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -12
app.py CHANGED
@@ -5,22 +5,25 @@ import gradio as gr
5
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
6
 
7
  def translate_text(text, target_language):
8
- # Format the prompt to include the translation instruction
9
- prompt = f"Translate the following text to {target_language}:\n{text}"
10
 
11
- # Call the Mixtral model for translation
12
- response = client(text_generation=prompt, parameters={"max_new_tokens": 100}, options={"wait_for_model": True})
 
 
 
 
13
 
14
- # The Mixtral model response includes the translated text in its output
15
  translated_text = response[0]['generated_text']
16
 
17
- # Clean up the response to extract only the translated part
18
- # This step might need adjustment based on the model's output format
19
- translated_text = translated_text.replace(prompt, '').strip()
20
 
21
  return translated_text
22
 
23
- # Define the languages you want to support in your app
24
  languages = [
25
  "French",
26
  "Spanish",
@@ -30,7 +33,6 @@ languages = [
30
  # Add more languages as needed
31
  ]
32
 
33
- # Create the Gradio interface
34
  iface = gr.Interface(
35
  fn=translate_text,
36
  inputs=[
@@ -42,5 +44,4 @@ iface = gr.Interface(
42
  description="Translate text to various languages using the Mixtral model from Hugging Face."
43
  )
44
 
45
- # Launch the interface
46
- iface.launch()
 
5
# Hugging Face Inference API handle for the Mixtral 8x7B instruct model.
client = InferenceClient(model="mistralai/Mixtral-8x7B-Instruct-v0.1")
7
def translate_text(text, target_language):
    """Translate *text* into *target_language* with the Mixtral model.

    Parameters
    ----------
    text : str
        Source text to translate.
    target_language : str
        Name of the target language, e.g. "French".

    Returns
    -------
    str
        The translated text, with any echoed prompt removed.
    """
    # Plain instruction prompt; Mixtral-Instruct follows it directly.
    prompt = f"Translate the following text to {target_language}: {text}"

    # BUGFIX: InferenceClient.text_generation takes the prompt positionally
    # and generation settings as flat keyword arguments — the previous
    # `inputs=` / `parameters=` / `options=` kwargs raise TypeError.
    # It returns the generated text as a plain string, NOT a list of dicts,
    # so `response[0]['generated_text']` was also wrong.
    response = client.text_generation(prompt, max_new_tokens=100)

    # Some endpoints echo the prompt before the completion; keep only the
    # text that follows it.
    translated_text = response.split(prompt)[-1].strip()

    return translated_text
26
 
 
27
  languages = [
28
  "French",
29
  "Spanish",
 
33
  # Add more languages as needed
34
  ]
35
 
 
36
  iface = gr.Interface(
37
  fn=translate_text,
38
  inputs=[
 
44
  description="Translate text to various languages using the Mixtral model from Hugging Face."
45
  )
46
 
47
+ iface.launch()