Update app.py
app.py
CHANGED
@@ -54,7 +54,7 @@ def llm_run(prompt, max_length, top_p, temprature, top_k, messages):
     en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
    messages.append({"role": "user", "content": en_translated})
    # messages.append({"role": "user", "content": prompt})
-
+    print("messages")
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt = True,
@@ -77,7 +77,7 @@ def llm_run(prompt, max_length, top_p, temprature, top_k, messages):

    for text in streamer:
        generated_text.append(text)
-
+        print('generated_text: ', generated_text)
        # yield "".join(generated_text)
        yield GoogleTranslator(source='auto', target=lang).translate("".join(generated_text))

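For context, here is a minimal sketch of how the surrounding llm_run function plausibly fits together, reconstructed only from the lines visible in this diff. The model/tokenizer setup, the model_id, the lang variable, the streamer construction, and the threaded generate call are assumptions not shown in this commit; deep-translator's GoogleTranslator and transformers' TextIteratorStreamer are inferred from the calls that do appear. The misspelled parameter name temprature is kept exactly as it appears in the hunk headers.

# Sketch only: assumes deep-translator and transformers; model_id and lang are placeholders.
from threading import Thread

import torch
from deep_translator import GoogleTranslator
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumed model, not named in the diff
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
lang = "fa"  # assumed target language for translating the model output back

def llm_run(prompt, max_length, top_p, temprature, top_k, messages):
    # Translate the user prompt to English before handing it to the model.
    en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
    messages.append({"role": "user", "content": en_translated})
    print("messages")  # debug print added by this commit

    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)

    # Stream tokens as they are produced so the UI can update incrementally.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_length,
        do_sample=True,
        top_p=top_p,
        temperature=temprature,
        top_k=top_k,
    )
    Thread(target=model.generate, kwargs=generation_kwargs).start()

    generated_text = []
    for text in streamer:
        generated_text.append(text)
        print('generated_text: ', generated_text)  # debug print added by this commit
        # Translate the accumulated English output back to the user's language
        # and yield the partial result on every streamed chunk.
        yield GoogleTranslator(source='auto', target=lang).translate("".join(generated_text))

Under these assumptions, the commit only adds the two print statements for debugging; the translation, chat-template, and streaming logic are unchanged.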