import os

import streamlit as st
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

st.title("LLM Translate for ko->eng")

# Default text shown in the input text box.
text_default = """
그를 중심으로 휘몰아치는 막대한 마나.
허공에서 피어오른 다섯 개의 불꽂이 크기를 부풀리고, 이내 포탄처럼 쏘아졌다.
후우우우웅, 까앙!
수만의 몬스터로 이루어진 검은 파도가 갈라졌다.
초고온의 열기가 살과 뼈를 태우고 지면을 녹였다."""

# attn_implementation = None
# USE_FLASH_ATTENTION = False
# if USE_FLASH_ATTENTION:
#     attn_implementation = "flash_attention_2"

model_id = "r1208/c4ai-command-r-v01-4bit_32r"

# Hugging Face access token; set HF_TOKEN in the environment if the checkpoint
# requires authentication.
access_token = os.environ.get("HF_TOKEN")


@st.cache_resource
def load_model_and_tokenizers():
    """Load the PEFT model and tokenizers once and reuse them across Streamlit reruns."""
    model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
    tokenizer_with_prefix_space = AutoTokenizer.from_pretrained(model_id, add_prefix_space=True)
    return model, tokenizer, tokenizer_with_prefix_space


model, tokenizer, tokenizer_with_prefix_space = load_model_and_tokenizers()


def get_tokens_as_list(word_list):
    """Convert a sequence of words into a list of token-id lists."""
    tokens_list = []
    for word in word_list:
        tokenized_word = tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0]
        tokens_list.append(tokenized_word)
    return tokens_list


# Token sequences the model is not allowed to generate (newlines and backslashes).
bad_words_ids = get_tokens_as_list(
    word_list=["\n", "\n\n", "\\ ", " \\ ", "\\", "'\n'"]
)

max_new_tokens = st.sidebar.slider("Max Length", value=100, min_value=10, max_value=1000)
temperature = st.sidebar.slider("Temperature", value=0.3, min_value=0.0, max_value=1.0, step=0.05)
top_k = st.sidebar.slider("Top-k", min_value=0, max_value=50, value=0)
top_p = st.sidebar.slider("Top-p", min_value=0.75, max_value=1.0, step=0.05, value=0.9)


def translate(text):
    # Build a single-turn chat prompt asking for a Korean -> English translation.
    messages = [{"role": "user", "content": f"Translate from Korean to English: {text}"}]
    input_ids = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    prompt_len = input_ids.shape[1]

    # Generate the translation; sample only when the temperature is non-zero.
    gen_tokens = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=temperature > 0,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        bad_words_ids=bad_words_ids,
    )

    # Keep only the newly generated tokens (drop the prompt) and decode them.
    gen_tokens = [gt[prompt_len:] for gt in gen_tokens]
    translation = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)
    return translation[0]


def main():
    st.subheader("Enter text to translate")
    input_text = st.text_area("Korean text", value=text_default, height=300)
    if st.button("Translate"):
        if input_text:
            translation = translate(input_text)
            st.text_area("Translated Text", value=translation, height=300)
        else:
            st.error("Please enter some text to translate.")


if __name__ == "__main__":
    main()
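
# Usage sketch (assumes this file is saved as app.py; the filename is an assumption):
#     streamlit run app.py
# Streamlit then serves the UI locally, by default at http://localhost:8501.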