YaserDS-777 committed (verified)
Commit d76ea0c · 1 Parent(s): 74944ca

Update app.py

Files changed (1): app.py (+58 -98)
app.py CHANGED
@@ -3,131 +3,91 @@ import streamlit as st
 from langchain_huggingface import HuggingFaceEndpoint
 
 # Set the environment variable "m_token" to the value of sec_key
-sec_key = ""
+sec_key = "YOUR_HUGGING_FACE_API_TOKEN_HERE"
 os.environ["m_token"] = sec_key
 
-# Specify the repository IDs of the Hugging Face models you want to use
+# Specify the repository ID of the Hugging Face model you want to use
 repo_id_mistral = "mistralai/Mistral-7B-Instruct-v0.3"
-#repo_id_llama3 = "meta-llama/Meta-Llama-3-8B"  # Replace with the actual repo ID for Llama3
 
 # Streamlit app layout
 st.title("🤖 Mistral-7B-Instruct-v0.3 تجربة نموذج 🧙")
 
-# Custom background and styling
-st.markdown(
-    """
-    <style>
-
-    .stTextInput, .stButton {
-        background-color: rgba(255, 255, 255, 0.8);
-        border-radius: 10px;
-        padding: 10px;
-    }
-    .response {
-        color: #FFD700; /* Gold color for responses */
-        font-weight: bold;
-    }
-    </style>
-    """,
-    unsafe_allow_html=True
-)
-
 # Input text area for user query with enhanced instructions
 user_query = st.text_area(
-    "✨ Enter your magical query:",
-    height=100,
-    help="""
-    **Enhanced Prompting Instructions:**
-    - Be clear and specific about what you want to know.
-    - Use natural language to describe your query.
-    - If asking a question, ensure it is well-formed and unambiguous.
-    - For best results, provide context or background information if relevant.
-    """
+    "✨ Enter your magical query:",
+    height=100,
+    help="""
+    **Enhanced Prompting Instructions:**
+    - Be clear and specific about what you want to know.
+    - Use natural language to describe your query.
+    - If asking a question, ensure it is well-formed and unambiguous.
+    - For best results, provide context or background information if relevant.
+    """
 )
 
 # Slider for adjusting the temperature
 temperature = st.slider(
-    "Temperature",
-    min_value=0.1,
-    max_value=1.0,
-    value=0.7,
-    step=0.1,
-    help="""
-    **Temperature:**
-    - Lower values (e.g., 0.1) make the output more deterministic and focused.
-    - Higher values (e.g., 1.0) make the output more diverse and creative.
-    """
+    "Temperature",
+    min_value=0.1,
+    max_value=1.0,
+    value=0.7,
+    step=0.1,
+    help="""
+    **Temperature:**
+    - Lower values (e.g., 0.1) make the output more deterministic and focused.
+    - Higher values (e.g., 1.0) make the output more diverse and creative.
+    """
 )
 
 # Slider for adjusting the max length
 max_length = st.slider(
-    "Max Length",
-    min_value=32,
-    max_value=256,
-    value=128,
-    step=32,
-    help="""
-    **Max Length:**
-    - Controls the maximum number of tokens in the generated response.
-    - Adjust based on the desired length of the response.
-    """
+    "Max Length",
+    min_value=32,
+    max_value=256,
+    value=128,
+    step=32,
+    help="""
+    **Max Length:**
+    - Controls the maximum number of tokens in the generated response.
+    - Adjust based on the desired length of the response.
+    """
 )
 
 # Button to trigger the query
 if st.button("🪄 Cast Spell"):
-    if user_query:
-        # Initialize the HuggingFaceEndpoint for Mistral
-        llm_mistral = HuggingFaceEndpoint(
-            repo_id=repo_id_mistral,
-            max_length=max_length,
-            temperature=temperature,
-            token=sec_key
-        )
-
-        # Initialize the HuggingFaceEndpoint for Llama3
-        ''' llm_llama3 = HuggingFaceEndpoint(
-            repo_id=repo_id_llama3,
-            max_length=max_length,
-            temperature=temperature,
-            token=sec_key
-        )'''
+    if user_query:
+        # Initialize the HuggingFaceEndpoint for Mistral
+        llm_mistral = HuggingFaceEndpoint(
+            repo_id=repo_id_mistral,
+            max_length=max_length,
+            temperature=temperature,
+            token=sec_key
+        )
 
-        # Invoke both models with the user's query
-        response_mistral = llm_mistral.invoke(user_query)
-        #response_llama3 = llm_llama3.invoke(user_query)
+        # Invoke the model with the user's query
+        response_mistral = llm_mistral.invoke(user_query)
 
-        # Display the responses side by side
-        col1 = st.columns(1)
-
-        with col1:
-            st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
-            st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)
-        '''
-        with col2:
-            st.markdown("🔮 <span class='response'>Response from Llama3:</span>", unsafe_allow_html=True)
-            st.markdown(f"<span class='response'>{response_llama3}</span>", unsafe_allow_html=True)
-        '''
-        # Save query and responses to session state
-        if 'history' not in st.session_state:
-            st.session_state.history = []
-        st.session_state.history.append((user_query, response_mistral))
-    else:
-        st.write("🚨 Please enter a query to cast your spell.")
+        # Display the response
+        st.markdown("🔮 <span class='response'>Response from Mistral-7B-Instruct-v0.3:</span>", unsafe_allow_html=True)
+        st.markdown(f"<span class='response'>{response_mistral}</span>", unsafe_allow_html=True)
+
+        # Save query and response to session state
+        if 'history' not in st.session_state:
+            st.session_state.history = []
+        st.session_state.history.append((user_query, response_mistral))
+    else:
+        st.write("🚨 Please enter a query to cast your spell.")
 
 # Button to clear history
 if st.button("🗑️ Clear History"):
-    if 'history' in st.session_state:
-        st.session_state.history = []
-        st.success("History cleared!")
+    if 'history' in st.session_state:
+        st.session_state.history = []
+        st.success("History cleared!")
 
 # Display history of queries and responses
 if 'history' in st.session_state:
-    st.subheader("📜 Scroll of Spells Cast")
-    for query, response_mistral, response_llama3 in st.session_state.history:
-        st.write(f"**Query:** {query}")
-        col1 = st.columns(1)
-        with col1:
-            st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
-        ''' with col2:
-            st.markdown(f"<span class='response'>**Response from Llama3:** {response_llama3}</span>", unsafe_allow_html=True)'''
-        st.write("---")
+    st.subheader("📜 Scroll of Spells Cast")
+    for query, response_mistral in st.session_state.history:
+        st.write(f"**Query:** {query}")
+        st.markdown(f"<span class='response'>**Response from Mistral-7B-Instruct-v0.3:** {response_mistral}</span>", unsafe_allow_html=True)
+        st.write("---")
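
A quick way to sanity-check the updated endpoint call outside Streamlit. This is a minimal sketch, not part of the commit: it reads the token from an HF_TOKEN environment variable (an assumed name) and passes it via huggingfacehub_api_token, the parameter langchain_huggingface documents, whereas the committed code hardcodes sec_key and passes token=sec_key.

import os
from langchain_huggingface import HuggingFaceEndpoint

# Token comes from the environment rather than being hardcoded (assumed variable name).
sec_key = os.environ.get("HF_TOKEN", "")
if not sec_key:
    raise RuntimeError("Set HF_TOKEN to a valid Hugging Face API token")

# Same model as the app; 0.7 and 128 mirror the sliders' default values.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    temperature=0.7,
    max_new_tokens=128,
    huggingfacehub_api_token=sec_key,
)
print(llm.invoke("Say hello in one sentence."))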