Update app.py
app.py CHANGED
@@ -7,6 +7,14 @@ import markdown
 # Initialize the Hugging Face Inference Client
 client = InferenceClient()
 
+
+def render_latex(latex_input):
+    try:
+        rendered_html = katex.render(latex_input, display_mode=True)
+        return f"{rendered_html}"
+    except Exception as e:
+        return f"{latex_input}"
+
 # Function to generate and format AI response
 def generate_response(prompt_template, **kwargs):
     # Simulate processing/loading
@@ -28,8 +36,7 @@ def generate_response(prompt_template, **kwargs):
             "markdown.extensions.attr_list",
         ]
     )
-    return f"""
-    {html}"""
+    return f"""{render_latex(html)}"""
 
 # Gradio app interface
 with gr.Blocks() as app: