import os

import streamlit as st
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Authenticate with Hugging Face using the HUGGINGFACE_TOKEN environment
# variable (required for gated models such as the Llama 3.1 family).
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
if huggingface_token:
    login(token=huggingface_token)
else:
    st.warning("HUGGINGFACE_TOKEN is not set; loading a gated model will fail.")

# Load the tokenizer and model once and cache them across Streamlit reruns.
@st.cache_resource
def load_model():
    model_name = "meta-llama/Meta-Llama-3.1-70B-Instruct"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="auto",          # shard across available GPUs / offload as needed
        torch_dtype=torch.float16,  # half precision to reduce memory use
    )
    return tokenizer, model

tokenizer, model = load_model()

# Supported languages
languages = ["English", "French", "Spanish", "Hindi", "Punjabi"]

# Translate text using the Llama instruct model.
def translate_text(text, input_language, output_language):
    prompt = f"Translate the following from {input_language} to {output_language}:\n\n{text}"
    # This is an instruction-tuned checkpoint, so format the request with the
    # tokenizer's chat template rather than feeding the raw prompt string.
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)  # follow whatever device device_map="auto" picked
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_new_tokens=200,
            pad_token_id=tokenizer.eos_token_id,  # Llama has no dedicated pad token
        )
    # Decode only the newly generated tokens, not the echoed prompt.
    translation = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return translation

# Streamlit app
def main():
    st.title("Language Translator")

    # Input and output language selectors
    input_language = st.selectbox("Select Input Language", languages)
    output_language = st.selectbox("Select Output Language", languages)

    # Text input box for the text to translate
    input_text = st.text_area("Enter the text to translate")

    if st.button("Translate"):
        if input_text.strip() == "":
            st.error("Please enter some text to translate.")
        elif input_language == output_language:
            st.warning("Input and output languages are the same. Please select different languages.")
        else:
            # Perform translation
            translation = translate_text(input_text, input_language, output_language)
            st.success("Translation:")
            st.write(translation)

if __name__ == "__main__":
    main()
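
# Usage (a sketch; the filename app.py is an assumption, not part of the source):
#   export HUGGINGFACE_TOKEN=<your token>
#   streamlit run app.py
# Note: the 70B checkpoint is roughly 140 GB of weights in float16, so it needs
# multiple high-memory GPUs; for local testing, a smaller checkpoint such as
# meta-llama/Meta-Llama-3.1-8B-Instruct is a drop-in substitute for model_name.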