# AI_Query_Wizard / app.py
import os
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
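# NOTE (assumption): this app requires the `streamlit` and
# `langchain-huggingface` packages, e.g. `pip install streamlit langchain-huggingface`.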
# Read the Hugging Face API token from the environment (e.g. an HF_TOKEN
# secret) instead of hard-coding it, and expose it under the variable name
# that langchain_huggingface looks for.
sec_key = os.getenv("HF_TOKEN", "")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = sec_key
# Specify the repository IDs of the Hugging Face models you want to use
repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"
#repo_id_llama3 = "meta-llama/Meta-Llama-3-8B" # Replace with the actual repo ID for Llama3
# Streamlit app layout
st.title("🤖 Trying the Mistral-7B-Instruct-v0.3 Model 🧙")
# Custom background and styling
st.markdown(
"""
<style>
.stTextInput, .stButton {
background-color: rgba(255, 255, 255, 0.8);
border-radius: 10px;
padding: 10px;
}
.response {
color: #FFD700; /* Gold color for responses */
font-weight: bold;
}
</style>
""",
unsafe_allow_html=True
)
# Input text area for user query with enhanced instructions
user_query = st.text_area(
"โœจ Enter your magical query:",
height=100,
help="""
**Enhanced Prompting Instructions:**
- Be clear and specific about what you want to know.
- Use natural language to describe your query.
- If asking a question, ensure it is well-formed and unambiguous.
- For best results, provide context or background information if relevant.
"""
)
# Slider for adjusting the temperature
temperature = st.slider(
"Temperature",
min_value=0.1,
max_value=1.0,
value=0.7,
step=0.1,
help="""
**Temperature:**
- Lower values (e.g., 0.1) make the output more deterministic and focused.
- Higher values (e.g., 1.0) make the output more diverse and creative.
"""
)
# Slider for adjusting the max length
max_length = st.slider(
"Max Length",
min_value=32,
max_value=256,
value=128,
step=32,
help="""
**Max Length:**
- Controls the maximum number of tokens in the generated response.
- Adjust based on the desired length of the response.
"""
)
# Button to trigger the query
if st.button("๐Ÿช„ Cast Spell"):
if user_query:
# Initialize the HuggingFaceEndpoint for Mistral
llm_mistral = HuggingFaceEndpoint(
repo_id=repo_id_mistral,
max_length=max_length,
temperature=temperature,
token=sec_key
)
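        # NOTE: HuggingFaceEndpoint sends the prompt to the Hugging Face
        # Inference API, so this assumes a valid token with inference access;
        # without one the invoke call below will raise an authentication error.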
        # Initialize the HuggingFaceEndpoint for Llama3 (disabled)
        # llm_llama3 = HuggingFaceEndpoint(
        #     repo_id=repo_id_llama3,
        #     max_new_tokens=max_length,
        #     temperature=temperature,
        #     huggingfacehub_api_token=sec_key
        # )
        # Invoke the Mistral model with the user's query
        response_mistral = llm_mistral.invoke(user_query)
        # response_llama3 = llm_llama3.invoke(user_query)
        # Display the response in a single column (st.columns returns a list,
        # so unpack it; a second column can be restored when Llama3 is re-enabled)
        (col1,) = st.columns(1)
        with col1:
            st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
            st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)
        # with col2:
        #     st.markdown("🔮 <span class='response'>Response from Llama3:</span>", unsafe_allow_html=True)
        #     st.markdown(f"<span class='response'>{response_llama3}</span>", unsafe_allow_html=True)
# Save query and responses to session state
if 'history' not in st.session_state:
st.session_state.history = []
st.session_state.history.append((user_query, response_mistral))
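        # NOTE: st.session_state persists across script reruns within a single
        # browser session, so the history accumulates on each button click.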
else:
st.write("๐Ÿšจ Please enter a query to cast your spell.")
# Button to clear history
if st.button("๐Ÿ—‘๏ธ Clear History"):
if 'history' in st.session_state:
st.session_state.history = []
st.success("History cleared!")
# Display history of queries and responses
if 'history' in st.session_state:
st.subheader("๐Ÿ“œ Scroll of Spells Cast")
for query, response_mistral, response_llama3 in st.session_state.history:
st.write(f"**Query:** {query}")
col1 = st.columns(1)
with col1:
st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
''' with col2:
st.markdown(f"<span class='response'>**Response from Llama3:** {response_llama3}</span>", unsafe_allow_html=True)'''
st.write("---")