Update app.py
app.py
CHANGED
@@ -48,11 +48,14 @@ def translate(text):
     # Prepare the prompt
     messages = f"Translate from Korean to English: {text}"
     input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+    prompt_padded_len = len(input_ids[0])
 
     # Generate the translation
-
-
-
+    gen_tokens = model.generate(input_ids, max_length=max_new_tokens, temperature=temperature, top_k=top_k, top_p=top_p, bad_words_ids = bad_words_ids)
+    gen_tokens = [
+        gt[prompt_padded_len:] for gt in gen_tokens
+    ]
+    translation = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)
 
     return translation
 
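For context, this change slices the prompt tokens off the front of each generated sequence (using `prompt_padded_len`) before decoding, so `translate()` returns only the model's completion rather than the prompt plus the completion. Below is a minimal, self-contained sketch of how the updated function might look as a standalone script. The model id, the generation settings, and the wrapping of the prompt into a chat-message list are placeholders for illustration, not the Space's actual configuration; the sketch also passes `max_new_tokens` (with `do_sample=True`) instead of the commit's `max_length` so the prompt length does not eat into the completion budget.

```python
# Minimal standalone sketch of the updated translate() flow, assuming a
# Hugging Face causal LM with a chat template. The model id and the
# generation settings below are placeholders, not the Space's real config.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "your-org/your-korean-english-model"  # hypothetical placeholder

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# Stand-ins for the globals that app.py presumably defines elsewhere
max_new_tokens = 256
temperature = 0.3
top_k = 50
top_p = 0.95
bad_words_ids = None  # or a list of token-id sequences to suppress


def translate(text: str) -> list[str]:
    # Prepare the prompt; apply_chat_template expects a list of role/content
    # dicts, so the instruction is wrapped into a single user turn here.
    messages = [{"role": "user", "content": f"Translate from Korean to English: {text}"}]
    input_ids = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    )
    prompt_padded_len = len(input_ids[0])

    # Generate the translation. do_sample=True makes temperature/top_k/top_p
    # take effect; max_new_tokens bounds only the completion length.
    gen_tokens = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        bad_words_ids=bad_words_ids,
    )

    # Drop the prompt tokens so only the newly generated text is decoded
    gen_tokens = [gt[prompt_padded_len:] for gt in gen_tokens]
    translation = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)
    return translation


print(translate("안녕하세요, 만나서 반갑습니다."))  # e.g. ["Hello, nice to meet you."]
```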