rushankg committed
Commit c414752 · verified · 1 Parent(s): 461cede

Created app.py

Files changed (1)
app.py +55 -0
app.py ADDED
@@ -0,0 +1,55 @@
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ # Load the tokenizer and model from Hugging Face (cached across Streamlit reruns)
+ @st.cache_resource
+ def load_model():
+     model_name = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
+     return tokenizer, model
+
+ tokenizer, model = load_model()
+
+ # Supported languages
+ languages = ['English', 'French', 'Spanish', 'Hindi', 'Punjabi']
+
+ # Streamlit app
+ def main():
+     st.title("Language Translator")
+
+     # User input for input language
+     input_language = st.selectbox("Select Input Language", languages)
+
+     # User input for output language
+     output_language = st.selectbox("Select Output Language", languages)
+
+     # Text input box for user to input text
+     input_text = st.text_area("Enter the text to translate")
+
+     if st.button("Translate"):
+         if input_text.strip() == "":
+             st.error("Please enter some text to translate.")
+         elif input_language == output_language:
+             st.warning("Input and output languages are the same. Please select different languages.")
+         else:
+             # Perform translation
+             translation = translate_text(input_text, input_language, output_language)
+             st.success("Translation:")
+             st.write(translation)
+
+ # Function to translate text using the Llama model
+ def translate_text(text, input_language, output_language):
+     prompt = f"Translate the following from {input_language} to {output_language}:\n\n{text}"
+     # Send inputs to the model's device rather than assuming "cuda" is available
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=200)
+     # Decode only the newly generated tokens so the prompt is not echoed back
+     new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
+     translation = tokenizer.decode(new_tokens, skip_special_tokens=True)
+     return translation
+
+ if __name__ == "__main__":
+     main()
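
Meta-Llama-3.1-70B-Instruct is an instruction-tuned chat model, so it generally follows requests more reliably when the prompt is wrapped in its chat template rather than passed as raw text. A minimal sketch of how translate_text could do this with tokenizer.apply_chat_template; the function name and instruction wording here are illustrative, not part of the commit:

# Sketch only: wraps the translation request in the model's chat template.
# The message wording is an assumption, not part of the committed app.
def translate_text_chat(text, input_language, output_language):
    messages = [
        {"role": "user",
         "content": f"Translate the following from {input_language} "
                    f"to {output_language}:\n\n{text}"}
    ]
    # apply_chat_template inserts the special tokens the Instruct model expects
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    with torch.no_grad():
        outputs = model.generate(input_ids, max_new_tokens=200)
    # Decode only the tokens generated after the templated prompt
    return tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)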
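
To try the app locally, something like pip install streamlit torch transformers accelerate followed by streamlit run app.py should work, with two practical caveats: meta-llama/Meta-Llama-3.1-70B-Instruct is a gated repository (access must be requested on the Hub and huggingface-cli login run with an approved token), and at float16 the 70B weights alone take roughly 140 GB of accelerator memory, so device_map="auto" will offload to CPU/disk or fail outright on most single-GPU machines.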