Lenylvt committed on
Commit
21fca8b
·
verified ·
1 Parent(s): f4c6da9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -30
app.py CHANGED
@@ -1,47 +1,29 @@
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
 
4
# Initialize the inference client with the Mixtral model
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")


def translate_text(text, target_language):
    """Translate *text* into *target_language* using the Mixtral model.

    Returns the model's translation as a plain string.
    """
    prompt = f"Translate the following text to {target_language}: {text}"

    # BUG FIX: InferenceClient.text_generation() takes generation options as
    # keyword arguments (not `inputs=`/`parameters=`/`options=` dicts) and
    # returns the generated text as a plain string, so the previous
    # `response[0]['generated_text']` indexing would fail at runtime.
    response = client.text_generation(prompt, max_new_tokens=100)

    # Defensively drop an echoed prompt in case the backend returns the
    # full text (prompt + completion) instead of just the completion.
    translated_text = response.split(prompt)[-1].strip()

    return translated_text
26
-
27
# Target languages offered in the dropdown; extend this list to add more.
languages = ["French", "Spanish", "German", "Italian", "Portuguese"]
35
 
36
# Build the Gradio UI: a free-text input plus a language dropdown, wired to
# the translation function.
text_input = gr.Textbox(label="Text to Translate", placeholder="Enter text here...")
language_input = gr.Dropdown(label="Target Language", choices=languages)

iface = gr.Interface(
    fn=translate_text,
    inputs=[text_input, language_input],
    outputs=gr.Textbox(label="Translated Text"),
    title="Simple Translator with Mixtral",
    description="Translate text to various languages using the Mixtral model from Hugging Face.",
)

iface.launch()
 
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
 
 
4
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")


def translate_text(input_text, target_language):
    """Translate *input_text* into *target_language* using the Mixtral model.

    Returns the cleaned translation as a plain string.
    """
    prompt = f"Translate the following text into {target_language}: {input_text}"

    # BUG FIX: text_generation() takes generation options as keyword
    # arguments (not `parameters=`/`options=` dicts) and returns the
    # generated text as a plain string, so the previous
    # `response[0]['generated_text']` indexing would fail at runtime.
    translation = client.text_generation(prompt, max_new_tokens=256)

    # BUG FIX: only strip the prompt when it is actually echoed back.
    # Unconditionally slicing off len(prompt) characters (as before) would
    # truncate the translation when the API returns just the completion,
    # which is its default behavior.
    if translation.startswith(prompt):
        translation = translation[len(prompt):]
    return translation.strip()
 
 
 
 
 
 
 
 
 
 
 
# Build the Gradio UI: two free-text boxes (text to translate, target
# language) wired to the translation function.
source_box = gr.Textbox(
    label="Text to Translate",
    placeholder="Enter the text you want to translate here...",
)
language_box = gr.Textbox(
    label="Target Language",
    placeholder="Enter the target language (e.g., French, Spanish)...",
)

iface = gr.Interface(
    fn=translate_text,
    inputs=[source_box, language_box],
    outputs=gr.Textbox(label="Translated Text"),
    title="Simple Translator with Mixtral",
    description="Translate text to your specified language using the Mixtral model from Hugging Face.",
)

iface.launch()