# English-Welsh translator Gradio app (quantized llama.cpp model).
# NOTE: scraped page header (Spaces status, file size, commit hashes,
# line-number gutter) removed — it was not part of the source file.
import gradio as gr
import os
from llama_cpp import Llama
import logging
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Load the model at import time so the first request doesn't pay the cost.
# from_pretrained downloads the GGUF file from the Hugging Face Hub on first
# run; the glob pattern selects the q4_k_m quantization of the repo.
logger.info("Loading the model...")
llm = Llama.from_pretrained(
repo_id="AndreasThinks/mistral-7b-english-welsh-translate-GGUF",
filename="*q4_k_m.gguf",
verbose=True
)
logger.info("Model loaded successfully")
def translate(text, source_lang, target_lang):
    """Stream a translation of *text* from source_lang to target_lang.

    Generator used as a Gradio event handler: each yield is a tuple of
    (streamed output text, prompt used, source language, target language)
    matching the handler's four outputs. The final yield carries the
    cleaned translation with the prompt stripped off.
    """
    logger.info(f"Translating from {source_lang} to {target_lang}")
    if source_lang == target_lang:
        logger.info("Source and target languages are the same. No translation needed.")
        # BUG FIX: this function is a generator, so a plain `return value`
        # is swallowed (Gradio only surfaces yielded values) and the UI
        # would show nothing. Yield the passthrough result instead.
        yield text, text, source_lang, target_lang
        return
    instruction = f"Translate the text from {source_lang} to {target_lang}"
    # Alpaca-style prompt format the fine-tuned model was trained on.
    input_text = f"""
### Instruction: {instruction}
### Input: {text}
### Response:
"""
    logger.info(f"Input text: {input_text}")
    full_output = ""
    # echo=True makes the model echo the prompt back, so the streamed text
    # includes the prompt; the stop sequences prevent the model from
    # starting a new instruction/input section on its own.
    for chunk in llm(input_text, max_tokens=6000, stop=["### Input:", "### Instruction:"], echo=True, stream=True):
        full_output += chunk['choices'][0]['text']
        yield full_output, input_text, source_lang, target_lang
    # Strip the echoed prompt: everything after the last "### Response:".
    translated_text = full_output.split("### Response:")[-1].strip()
    logger.info(f"Translation completed. Output: {translated_text}")
    # BUG FIX: Gradio ignores a generator's return value, so the previous
    # `return translated_text, ...` left the raw echoed prompt on screen.
    # Yield the cleaned translation as the final UI state instead.
    yield translated_text, input_text, source_lang, target_lang
def continue_generation(translated_text, input_text, source_lang, target_lang):
    """Resume generation from a previous (possibly truncated) translation.

    Generator used as a Gradio event handler: re-feeds the original prompt
    plus the translation produced so far and streams the continuation.
    Yields (output text, prompt, source language, target language) tuples.
    """
    logger.info("Continuing generation...")
    full_text = f"{input_text}{translated_text}"
    logger.info(f"Full text for continued generation: {full_text}")
    full_output = translated_text
    # NOTE(review): echo=True re-emits the whole prompt, so the streamed
    # full_output contains prompt text in addition to the continuation —
    # confirm this interim display is intended; the final yield below is
    # cleaned up either way.
    for chunk in llm(full_text, max_tokens=8000, stop=["### Input:", "### Instruction:"], echo=True, stream=True):
        new_text = chunk['choices'][0]['text']
        full_output += new_text
        yield full_output, input_text, source_lang, target_lang
    # Strip everything up to and including the last "### Response:" marker.
    new_translated_text = full_output.split("### Response:")[-1].strip()
    updated_translated_text = translated_text + " " + new_translated_text
    logger.info(f"Generation completed. Updated output: {updated_translated_text}")
    # BUG FIX: a generator's `return` value never reaches Gradio, so the
    # merged/cleaned text computed above was previously discarded. Yield it
    # so the UI ends on the updated translation.
    yield updated_translated_text, input_text, source_lang, target_lang
# Create the Gradio interface. Component creation order inside the context
# manager determines the on-page layout, so statements are kept in order.
with gr.Blocks() as iface:
    gr.Markdown("# English-Welsh Translator")
    # BUG FIX: corrected "using a the quantized version" typo in the
    # user-facing description text.
    gr.Markdown("Translate text between English and Welsh using the quantized version of [AndreasThinks/mistral-7b-english-welsh-translate.](https://huggingface.co/AndreasThinks/mistral-7b-english-welsh-translate)")
    gr.Markdown("Please note this version of the model is heavily quantized - it will be slow and may exhibit degradations in performance. Please only test on small text chunks.")
    with gr.Row():
        input_text = gr.Textbox(label="Enter text to translate")
        output_text = gr.Textbox(label="Translated Text")
    with gr.Row():
        source_lang = gr.Radio(["English", "Welsh"], label="Source Language", value="English")
        target_lang = gr.Radio(["English", "Welsh"], label="Target Language", value="Welsh")
    translate_button = gr.Button("Translate")
    continue_button = gr.Button("Continue Generating")
    # Hidden components to store state between the translate and
    # continue-generation handlers (prompt and language selection).
    input_prompt = gr.Textbox(visible=False)
    source_lang_state = gr.Textbox(visible=False)
    target_lang_state = gr.Textbox(visible=False)
    # Streamed translation: generator handler, outputs update as it yields.
    translate_button.click(
        translate,
        inputs=[input_text, source_lang, target_lang],
        outputs=[output_text, input_prompt, source_lang_state, target_lang_state]
    )
    # Continuation re-reads the current output plus the saved prompt/state.
    continue_button.click(
        continue_generation,
        inputs=[output_text, input_prompt, source_lang_state, target_lang_state],
        outputs=[output_text, input_prompt, source_lang_state, target_lang_state]
    )
    gr.Examples(
        examples=[
            ["Hello, how are you?", "English", "Welsh"],
            ["Bore da!", "Welsh", "English"],
        ],
        inputs=[input_text, source_lang, target_lang]
    )
# Launch the app; blocks here serving requests until the server is stopped.
logger.info("Launching the Gradio interface...")
iface.launch()
logger.info("Gradio interface launched successfully")