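# Streamlit demo: query Mistral-7B-Instruct-v0.3 via langchain_huggingface's
# HuggingFaceEndpoint. To run it (assuming this file is saved as app.py and
# `pip install streamlit langchain-huggingface` has been done):
#
#   streamlit run app.py
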
import os
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint

# Store your Hugging Face API token and expose it through the environment
# variable that langchain_huggingface actually reads (HUGGINGFACEHUB_API_TOKEN)
sec_key = "YOUR_HUGGING_FACE_API_TOKEN_HERE"
os.environ["HUGGINGFACEHUB_API_TOKEN"] = sec_key
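# A safer pattern than hardcoding the token (a sketch, assuming it is already
# exported in the shell or stored in .streamlit/secrets.toml):
#   sec_key = os.getenv("HUGGINGFACEHUB_API_TOKEN", "")
#   # or: sec_key = st.secrets["HUGGINGFACEHUB_API_TOKEN"]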

# Specify the repository ID of the Hugging Face model you want to use
repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"

# Streamlit app layout
st.title("🤖 Trying Out the Mistral-7B-Instruct-v0.3 Model 🧙")

# Input text area for user query with enhanced instructions
user_query = st.text_area(
  "✨ Enter your magical query:",
  height=100,
  help="""
  **Enhanced Prompting Instructions:**
  - Be clear and specific about what you want to know.
  - Use natural language to describe your query.
  - If asking a question, ensure it is well-formed and unambiguous.
  - For best results, provide context or background information if relevant.
  """
)

# Slider for adjusting the temperature
temperature = st.slider(
  "Temperature",
  min_value=0.1,
  max_value=1.0,
  value=0.7,
  step=0.1,
  help="""
  **Temperature:**
  - Lower values (e.g., 0.1) make the output more deterministic and focused.
  - Higher values (e.g., 1.0) make the output more diverse and creative.
  """
)

# Slider for adjusting the max length
max_length = st.slider(
  "Max Length",
  min_value=32,
  max_value=256,
  value=128,
  step=32,
  help="""
  **Max Length:**
  - Controls the maximum number of tokens in the generated response.
  - Adjust based on the desired length of the response.
  """
)

# Button to trigger the query
if st.button("🪄 Cast Spell"):
  if user_query:
      # Initialize the HuggingFaceEndpoint for Mistral
      llm_mistral = HuggingFaceEndpoint(
          repo_id=repo_id_mistral,
          max_new_tokens=max_length,
          temperature=temperature,
          huggingfacehub_api_token=sec_key
      )
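      # Note: the endpoint is rebuilt on every button click; a hypothetical
      # refactor could build it once in a helper decorated with
      # st.cache_resource so the client is reused across Streamlit reruns.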

      # Invoke the model with the user's query
      response_mistral = llm_mistral.invoke(user_query)

      # Display the response (the 'response' span assumes custom CSS injected elsewhere)
      st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
      st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)

      # Save query and response to session state
      if 'history' not in st.session_state:
          st.session_state.history = []
      st.session_state.history.append((user_query, response_mistral))
  else:
      st.write("🚨 Please enter a query to cast your spell.")
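
# Network or auth failures inside invoke() will surface as raw exceptions; a
# hedged sketch of a more defensive call (hypothetical, not in the original):
#
#   try:
#       response_mistral = llm_mistral.invoke(user_query)
#   except Exception as err:
#       st.error(f"Model call failed: {err}")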

# Button to clear history
if st.button("🗑️ Clear History"):
  if 'history' in st.session_state:
      st.session_state.history = []
  st.success("History cleared!")

# Display history of queries and responses
if 'history' in st.session_state:
  st.subheader("📜 Scroll of Spells Cast")
  for query, response_mistral in st.session_state.history:
      st.write(f"**Query:** {query}")
      st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
      st.write("---")
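
# st.session_state lives only for the current browser session; a hypothetical
# sketch for persisting the spell history to disk between runs:
#
#   import json
#   with open("history.json", "w") as f:
#       json.dump(st.session_state.history, f)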