Update app.py
app.py
CHANGED
@@ -23,7 +23,7 @@ style = '''
 '''
 st.markdown(style, unsafe_allow_html=True)
 
-st.markdown('<p style="font-family:sans-serif;font-size: 1.9rem;"> HertogAI Q&A using TAPAS and Model Language</p>', unsafe_allow_html=True)
+st.markdown('<p style="font-family:sans-serif;font-size: 1.9rem;"> HertogAI Table Q&A using TAPAS and Model Language</p>', unsafe_allow_html=True)
 st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'>Pre-trained TAPAS model runs on max 64 rows and 32 columns data. Make sure the file data doesn't exceed these dimensions.</p>", unsafe_allow_html=True)
 
 # Initialize TAPAS pipeline
@@ -87,7 +87,7 @@ else:
         # Get the raw answer from TAPAS
         raw_answer = tqa(table=df, query=question, truncation=True)
 
-        st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'> Raw Result: </p>",
+        st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'> Raw Result From TAPAS: </p>",
                     unsafe_allow_html=True)
         st.success(raw_answer)
 
@@ -116,7 +116,7 @@ else:
         generated_text = t5_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
 
         # Display the final generated response
-        st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'> Final Generated Response: </p>", unsafe_allow_html=True)
+        st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'> Final Generated Response with LLM: </p>", unsafe_allow_html=True)
         st.success(generated_text)
 
     except Exception as e:
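Note on the second hunk: the tqa(table=df, query=question, truncation=True) call uses the transformers table-question-answering pipeline. Below is a minimal sketch of how that pipeline is typically initialized and invoked; the checkpoint name and the sample table are assumptions for illustration, not taken from app.py.

# Minimal sketch: TAPAS table QA via the transformers pipeline.
# The checkpoint below is an assumption; app.py may load a different one.
from transformers import pipeline
import pandas as pd

tqa = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")

# TAPAS expects every table cell to be a string.
df = pd.DataFrame({"Product": ["A", "B"], "Sales": [120, 90]}).astype(str)

question = "Which product had the highest sales?"
raw_answer = tqa(table=df, query=question, truncation=True)
print(raw_answer)  # dict with 'answer', 'coordinates', 'cells', 'aggregator'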
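Note on the third hunk: the t5_tokenizer.decode(summary_ids[0], skip_special_tokens=True) line implies a T5 generation step that rewrites the raw TAPAS answer into a natural-language response. A hedged sketch under assumed checkpoint, prompt wording, and generation settings:

# Hedged sketch of the T5 rephrasing step; checkpoint, prompt, and
# generation settings are assumptions, not taken from app.py.
from transformers import T5Tokenizer, T5ForConditionalGeneration

t5_tokenizer = T5Tokenizer.from_pretrained("t5-small")
t5_model = T5ForConditionalGeneration.from_pretrained("t5-small")

question = "Which product had the highest sales?"
raw_answer = {"answer": "A"}  # e.g. the dict returned by the TAPAS sketch above

prompt = f"Question: {question} Answer found in table: {raw_answer['answer']}"
input_ids = t5_tokenizer(prompt, return_tensors="pt").input_ids
summary_ids = t5_model.generate(input_ids, max_new_tokens=64)

generated_text = t5_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
print(generated_text)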