Update app.py
app.py CHANGED
@@ -895,7 +895,6 @@ if st.session_state.framework == "dash":
         section[data-testid="stAppScrollToBottomContainer"]{{
             margin-top:50px !important;
             padding-right:5px !important;
-            margin-right:50px;
         }}
         div[data-testid="stChatMessageAvatarUser"]{{
             display:none;
@@ -1737,12 +1736,20 @@ if st.session_state.framework == "dash":
                 task=task,
                 temperature=temperature,top_p=top_p,max_tokens=max_tokens
             )
-
+            if st.session_state.menu == "gen":
+                temper = temperature
+                topp = top_p
+            else:
+                temper = ""
+                topp = ""
+
             st.session_state.messages.append({
                 "role": "assistant",
                 "content": extract_response_content(response),
                 "image": "",
-                "model": selected_model['model']
+                "model": selected_model['model'],
+                "temp": temper,
+                "top_p": topp,
             })
 
             st.session_state.generate_response = False # Reset the flag
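The gist of this hunk: sampling settings are recorded on each stored assistant message, but only when the generation menu is active. A standalone sketch of the same pattern (the helper name is hypothetical; app.py builds the dict inline):

    def build_assistant_message(content, model, menu, temperature, top_p):
        # Record sampling settings only for the "gen" menu; empty strings
        # otherwise, matching what the rendering code checks with message.get(...).
        gen = menu == "gen"
        return {
            "role": "assistant",
            "content": content,
            "image": "",
            "model": model,
            "temp": temperature if gen else "",
            "top_p": top_p if gen else "",
        }

    print(build_assistant_message("Hello!", "some-org/some-model", "gen", 0.7, 0.95))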
@@ -1755,7 +1762,15 @@ if st.session_state.framework == "dash":
         with st.container(key=container_key):
             with st.chat_message(message["role"]):
                 if message["role"] == "assistant":
-                    st.markdown(
+                    st.markdown(
+                        f"**Model:** `{message['model']}`"
+                        + (
+                            f" **Temperature:** `{message['temp']}` **Top-p:** `{message['top_p']}`"
+                            if message.get('temp') and message.get('top_p') else ""
+                        ),
+                        unsafe_allow_html=True
+                    )
+
                     if message["image"] != "":
                         st.markdown(f"""<img src="data:image/png;base64,{base64.b64encode(open(message["image"],"rb").read()).decode()}" class="user-image" alt="Uploaded Image"> """,unsafe_allow_html=True,)
                     st.markdown(message["content"])
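This hunk reads the new fields back when replaying history. A slightly more defensive version of the caption built above, using .get() for the "model" key as well (an addition of this sketch, not of the commit) so messages saved before this change render without a KeyError:

    message = {"role": "assistant", "content": "...",
               "model": "some-org/some-model", "temp": 0.7, "top_p": 0.95}

    # Same caption as the hunk above, but .get() also guards the "model" key.
    caption = f"**Model:** `{message.get('model', 'unknown')}`"
    if message.get("temp") and message.get("top_p"):
        caption += f" **Temperature:** `{message['temp']}` **Top-p:** `{message['top_p']}`"
    print(caption)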
@@ -1801,11 +1816,21 @@ if st.session_state.framework == "dash":
                         "image": image_path
                     })
                     response = extract_response_content(result)
-                    st.markdown(
+                    st.markdown(
+                        f"**Model:** `{selected_model['model'] if isinstance(selected_model, dict) else selected_model}`"
+                        f"{' **Temperature:** ' + str(temperature) if st.session_state.menu == 'gen' else ''}"
+                        f"{' **Top-p:** ' + str(top_p) if st.session_state.menu == 'gen' else ''}"
+                    )
                     print(response)
                     st.write_stream(generate_stream_response(response)) # This will stream the text one character at a time
                     # Add assistant response to chat history
-                    st.session_state.
+                    if st.session_state.menu == "gen":
+                        temper = temperature
+                        topp = top_p
+                    else:
+                        temper = ""
+                        topp = ""
+                    st.session_state.messages.append({"role": "assistant", "content": response, "image": "", "model": selected_model['model'], "temp": temper, "top_p": topp})
                     save_chat_history()
 
                 else:
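st.write_stream accepts any generator of string chunks. generate_stream_response is defined elsewhere in app.py, so this is an assumption about its shape; a minimal character-by-character version consistent with the comment above would be:

    import time

    # Assumed shape of generate_stream_response (the real definition is not in
    # this diff): yield one character at a time so st.write_stream animates it.
    def generate_stream_response(text, delay=0.01):
        for ch in text:
            yield ch
            time.sleep(delay)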
@@ -1827,8 +1852,19 @@ if st.session_state.framework == "dash":
                 with st.spinner("Model is generating a response..."):
                     st.session_state.messages.append({"role": "user", "content": prompt, "image": ""})
                     result = query_huggingface_model(selected_model, prompt, input_type="text", task=task, temperature=temperature, top_p=top_p, max_tokens=max_tokens)
-                    st.markdown(
+                    st.markdown(
+                        f"**Model:** `{selected_model['model'] if isinstance(selected_model, dict) else selected_model}`"
+                        f"{' **Temperature:** ' + str(temperature) if st.session_state.menu == 'gen' else ''}"
+                        f"{' **Top-p:** ' + str(top_p) if st.session_state.menu == 'gen' else ''}"
+                    )
+
                     response = extract_response_content(result)
                     st.write_stream(generate_stream_response(response)) # Add assistant response to chat history
-                    st.session_state.
+                    if st.session_state.menu == "gen":
+                        temper = temperature
+                        topp = top_p
+                    else:
+                        temper = ""
+                        topp = ""
+                    st.session_state.messages.append({"role": "assistant", "content": response, "image": "", "model": selected_model['model'], "temp": temper, "top_p": topp})
                     save_chat_history()
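Both the image and text branches end by handing the updated st.session_state.messages to save_chat_history(). Assuming that helper serializes the list to JSON (its definition is not in this diff), the new "temp" and "top_p" values should stay JSON-friendly scalars, which a quick round-trip confirms:

    import json

    msg = {"role": "assistant", "content": "ok", "image": "",
           "model": "some-org/some-model", "temp": 0.7, "top_p": 0.95}
    # Plain floats survive the round-trip unchanged; tuples would come back as lists.
    assert json.loads(json.dumps(msg)) == msg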