Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -8,7 +8,7 @@ os.environ["m_token"] = sec_key
|
|
8 |
|
9 |
# Specify the repository IDs of the Hugging Face models you want to use
|
10 |
repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"
|
11 |
-
repo_id_llama3 = "meta-llama/Meta-Llama-3-8B" # Replace with the actual repo ID for Llama3
|
12 |
|
13 |
# Streamlit app layout
|
14 |
st.title("🤖 Mistral-7B-Instruct-v0.3 تجربة نموذج 🧙")
|
@@ -85,32 +85,32 @@ if st.button("🪄 Cast Spell"):
|
|
85 |
)
|
86 |
|
87 |
# Initialize the HuggingFaceEndpoint for Llama3
|
88 |
-
|
89 |
repo_id=repo_id_llama3,
|
90 |
max_length=max_length,
|
91 |
temperature=temperature,
|
92 |
token=sec_key
|
93 |
-
)
|
94 |
|
95 |
# Invoke both models with the user's query
|
96 |
response_mistral = llm_mistral.invoke(user_query)
|
97 |
-
response_llama3 = llm_llama3.invoke(user_query)
|
98 |
|
99 |
# Display the responses side by side
|
100 |
-
col1
|
101 |
|
102 |
with col1:
|
103 |
st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
|
104 |
st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)
|
105 |
-
|
106 |
with col2:
|
107 |
st.markdown("🔮 <span class='response'>Response from Llama3:</span>", unsafe_allow_html=True)
|
108 |
st.markdown(f"<span class='response'>{response_llama3}</span>", unsafe_allow_html=True)
|
109 |
-
|
110 |
# Save query and responses to session state
|
111 |
if 'history' not in st.session_state:
|
112 |
st.session_state.history = []
|
113 |
-
st.session_state.history.append((user_query, response_mistral
|
114 |
else:
|
115 |
st.write("🚨 Please enter a query to cast your spell.")
|
116 |
|
@@ -125,9 +125,9 @@ if 'history' in st.session_state:
|
|
125 |
st.subheader("📜 Scroll of Spells Cast")
|
126 |
for query, response_mistral, response_llama3 in st.session_state.history:
|
127 |
st.write(f"**Query:** {query}")
|
128 |
-
col1
|
129 |
with col1:
|
130 |
st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
|
131 |
-
|
132 |
-
st.markdown(f"<span class='response'>**Response from Llama3:** {response_llama3}</span>", unsafe_allow_html=True)
|
133 |
st.write("---")
|
|
|
8 |
|
9 |
# Specify the repository IDs of the Hugging Face models you want to use
|
10 |
repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"
|
11 |
+
#repo_id_llama3 = "meta-llama/Meta-Llama-3-8B" # Replace with the actual repo ID for Llama3
|
12 |
|
13 |
# Streamlit app layout
|
14 |
st.title("🤖 Mistral-7B-Instruct-v0.3 تجربة نموذج 🧙")
|
|
|
85 |
)
|
86 |
|
87 |
# Initialize the HuggingFaceEndpoint for Llama3
|
88 |
+
''' llm_llama3 = HuggingFaceEndpoint(
|
89 |
repo_id=repo_id_llama3,
|
90 |
max_length=max_length,
|
91 |
temperature=temperature,
|
92 |
token=sec_key
|
93 |
+
)'''
|
94 |
|
95 |
# Invoke both models with the user's query
|
96 |
response_mistral = llm_mistral.invoke(user_query)
|
97 |
+
#response_llama3 = llm_llama3.invoke(user_query)
|
98 |
|
99 |
# Display the responses side by side
|
100 |
+
col1 = st.columns(1)
|
101 |
|
102 |
with col1:
|
103 |
st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
|
104 |
st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)
|
105 |
+
'''
|
106 |
with col2:
|
107 |
st.markdown("🔮 <span class='response'>Response from Llama3:</span>", unsafe_allow_html=True)
|
108 |
st.markdown(f"<span class='response'>{response_llama3}</span>", unsafe_allow_html=True)
|
109 |
+
'''
|
110 |
# Save query and responses to session state
|
111 |
if 'history' not in st.session_state:
|
112 |
st.session_state.history = []
|
113 |
+
st.session_state.history.append((user_query, response_mistral))
|
114 |
else:
|
115 |
st.write("🚨 Please enter a query to cast your spell.")
|
116 |
|
|
|
125 |
st.subheader("📜 Scroll of Spells Cast")
|
126 |
for query, response_mistral, response_llama3 in st.session_state.history:
|
127 |
st.write(f"**Query:** {query}")
|
128 |
+
col1 = st.columns(1)
|
129 |
with col1:
|
130 |
st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
|
131 |
+
''' with col2:
|
132 |
+
st.markdown(f"<span class='response'>**Response from Llama3:** {response_llama3}</span>", unsafe_allow_html=True)'''
|
133 |
st.write("---")
|