import os
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
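
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py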

# Export the Hugging Face Hub API token so downstream clients can pick it up
sec_key = ""
os.environ["HUGGINGFACEHUB_API_TOKEN"] = sec_key
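
# Alternatively, the key could be read from Streamlit's secrets store instead
# of being hardcoded. A minimal sketch, assuming a .streamlit/secrets.toml
# entry named HF_TOKEN (a hypothetical key name, not part of this app):
# sec_key = st.secrets["HF_TOKEN"]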

# Specify the repository IDs of the Hugging Face models you want to use
repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"
# repo_id_llama3 = "meta-llama/Meta-Llama-3-8B"  # Replace with the actual repo ID for Llama3

# Streamlit app layout
st.title("🤖 Trying Out the Mistral-7B-Instruct-v0.3 Model 🧙")

# Custom styling for the input box, buttons, and responses
st.markdown(
    """
    <style>
    .stTextInput, .stButton {
        background-color: rgba(255, 255, 255, 0.8);
        border-radius: 10px;
        padding: 10px;
    }
    .response {
        color: #FFD700; /* Gold color for responses */
        font-weight: bold;
    }
    </style>
    """,
    unsafe_allow_html=True
)

# Input text area for user query with enhanced instructions
user_query = st.text_area(
    "✨ Enter your magical query:",
    height=100,
    help="""
    **Enhanced Prompting Instructions:**
    - Be clear and specific about what you want to know.
    - Use natural language to describe your query.
    - If asking a question, ensure it is well-formed and unambiguous.
    - For best results, provide context or background information if relevant.
    """
)

# Slider for adjusting the temperature
temperature = st.slider(
    "Temperature",
    min_value=0.1,
    max_value=1.0,
    value=0.7,
    step=0.1,
    help="""
    **Temperature:**
    - Lower values (e.g., 0.1) make the output more deterministic and focused.
    - Higher values (e.g., 1.0) make the output more diverse and creative.
    """
)

# Slider for adjusting the max length
max_length = st.slider(
    "Max Length",
    min_value=32,
    max_value=256,
    value=128,
    step=32,
    help="""
    **Max Length:**
    - Controls the maximum number of tokens in the generated response.
    - Adjust based on the desired length of the response.
    """
)

# Button to trigger the query
if st.button("🪄 Cast Spell"):
    if user_query:
        # Initialize the HuggingFaceEndpoint for Mistral; the endpoint's
        # generation-length parameter is max_new_tokens, and the API token
        # is passed as huggingfacehub_api_token
        llm_mistral = HuggingFaceEndpoint(
            repo_id=repo_id_mistral,
            max_new_tokens=max_length,
            temperature=temperature,
            huggingfacehub_api_token=sec_key
        )

        # Initialize the HuggingFaceEndpoint for Llama3 (disabled for now)
        # llm_llama3 = HuggingFaceEndpoint(
        #     repo_id=repo_id_llama3,
        #     max_new_tokens=max_length,
        #     temperature=temperature,
        #     huggingfacehub_api_token=sec_key
        # )
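
        # Re-creating the endpoint on every click works but is wasteful; one
        # option is a cached factory. A minimal sketch using Streamlit's
        # st.cache_resource (get_mistral is a hypothetical helper, not part
        # of the original app):
        #
        # @st.cache_resource
        # def get_mistral(temp: float, max_tokens: int) -> HuggingFaceEndpoint:
        #     return HuggingFaceEndpoint(
        #         repo_id=repo_id_mistral,
        #         max_new_tokens=max_tokens,
        #         temperature=temp,
        #         huggingfacehub_api_token=sec_key,
        #     )
        # llm_mistral = get_mistral(temperature, max_length)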

        # Invoke the Mistral model with the user's query (the Llama3 call
        # stays disabled alongside its endpoint above)
        response_mistral = llm_mistral.invoke(user_query)
        # response_llama3 = llm_llama3.invoke(user_query)

        # Display the response (st.columns returns a list, so unpack it;
        # a second column can hold the Llama3 response once it is enabled)
        (col1,) = st.columns(1)

        with col1:
            st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
            st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)

        # with col2:
        #     st.markdown("🔮 <span class='response'>Response from Llama3:</span>", unsafe_allow_html=True)
        #     st.markdown(f"<span class='response'>{response_llama3}</span>", unsafe_allow_html=True)
        # Save query and responses to session state
        if 'history' not in st.session_state:
            st.session_state.history = []
        st.session_state.history.append((user_query, response_mistral))
    else:
        st.write("🚨 Please enter a query to cast your spell.")

# Button to clear history
if st.button("🗑️ Clear History"):
    if 'history' in st.session_state:
        st.session_state.history = []
    st.success("History cleared!")

# Display history of queries and responses
if 'history' in st.session_state:
    st.subheader("📜 Scroll of Spells Cast")
    # Each history entry is a (query, response) pair, matching what is
    # appended above
    for query, response_mistral in st.session_state.history:
        st.write(f"**Query:** {query}")
        (col1,) = st.columns(1)
        with col1:
            st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
        # with col2:
        #     st.markdown(f"<span class='response'>**Response from Llama3:** {response_llama3}</span>", unsafe_allow_html=True)
        st.write("---")