Update app/main.py
app/main.py  (+4, -1)
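The change adds four debug prints tracing the translate → generate → back-translate flow, and removes an unterminated print(f" left in the previous revision.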
@@ -105,11 +105,14 @@ async def ask_gemmaPhysics(
     try:
         print(f'Asking LlamaPhysics with the question "{prompt}", translation is {"enabled" if translate_from_thai else "disabled"}')
         if translate_from_thai:
+            print("Translating content to EN.")
             prompt = e.translate(prompt)
+        print(f"Asking the model with the question {prompt}")
         result = ask_llama(PHllm, prompt, max_new_tokens=max_new_tokens, temperature=temperature, repeat_penalty=repeat_penalty)
-        print(f"
+        print(f"Got Model Response: {result}")
         if translate_from_thai:
             result = t.translate(result)
+            print(f"Translation Result: {result}")
         return QuestionResponse(answer=result, question=prompt, config={"temperature": temperature, "max_new_tokens": max_new_tokens, "repeat_penalty": repeat_penalty})
     except Exception as e:
         return HTTPException(500, QuestionResponse(code=500, answer=str(e), question=prompt))
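For context, here is a minimal, runnable sketch of the patched handler as it might sit in the surrounding FastAPI app. The route path, parameter defaults, QuestionResponse fields, and the stubbed e/t translators, ask_llama, and PHllm are assumptions inferred from the call sites in the diff, not the repository's actual code. Two details deliberately differ from the committed code: FastAPI only turns an HTTPException into an error response when it is raised, so the sketch raises it instead of returning it, and the exception is bound to exc because "except Exception as e:" would shadow the translator e.

from typing import Optional

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

# Stubs standing in for the app's real objects (assumptions, so the sketch runs standalone):
class _EchoTranslator:
    def translate(self, text: str) -> str:
        return text  # the real app translates Thai <-> English here

e = _EchoTranslator()   # Thai -> English translator in the real app
t = _EchoTranslator()   # English -> Thai translator in the real app
PHllm = object()        # placeholder for the loaded physics model

def ask_llama(llm, prompt, max_new_tokens=256, temperature=0.7, repeat_penalty=1.1):
    return f"(stub answer to: {prompt})"  # the real app runs the model here

app = FastAPI()

class QuestionResponse(BaseModel):  # hypothetical shape, inferred from the call sites
    answer: str
    question: str
    code: int = 200
    config: Optional[dict] = None

@app.post("/ask/physics")  # hypothetical route
async def ask_gemmaPhysics(prompt: str, translate_from_thai: bool = False,
                           max_new_tokens: int = 256, temperature: float = 0.7,
                           repeat_penalty: float = 1.1):
    try:
        print(f'Asking LlamaPhysics with the question "{prompt}", translation is '
              f'{"enabled" if translate_from_thai else "disabled"}')
        if translate_from_thai:
            print("Translating content to EN.")
            prompt = e.translate(prompt)
        print(f"Asking the model with the question {prompt}")
        result = ask_llama(PHllm, prompt, max_new_tokens=max_new_tokens,
                           temperature=temperature, repeat_penalty=repeat_penalty)
        print(f"Got Model Response: {result}")
        if translate_from_thai:
            result = t.translate(result)
            print(f"Translation Result: {result}")
        return QuestionResponse(answer=result, question=prompt,
                                config={"temperature": temperature,
                                        "max_new_tokens": max_new_tokens,
                                        "repeat_penalty": repeat_penalty})
    except Exception as exc:
        # The committed code returns HTTPException(500, QuestionResponse(...));
        # FastAPI only produces an error response when HTTPException is raised.
        raise HTTPException(status_code=500, detail=str(exc))

With uvicorn serving the sketch, curl -X POST "http://localhost:8000/ask/physics?prompt=What%20is%20inertia%3F" would exercise the stubbed path and print each of the new debug lines in order.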