Jeasonize it MORE and MORE?
app.py CHANGED
@@ -87,7 +87,7 @@ temp_values = st.sidebar.slider(
 )
 
 custom_instructions = st.sidebar.text_area(
-    "Custom Instructions",
+    "Custom System Instructions",
     value="You are helpful assistant, act like a Human in conversation. Keep answers very short and in English only!",
     help="Customize how the AI should behave"
 )
@@ -132,33 +132,10 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
 
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    formated_text = format_promt(prompt, custom_instructions, temp_values)
-
-
     with st.chat_message("assistant"):
         client = InferenceClient(
             model=model_links[selected_model],)
 
-
-
-
-
-        # Create a placeholder for the streaming response
-        message_placeholder = st.empty()
-        full_response = ""
-
-        # Stream the response and accumulate it
-        for chunk in output:
-            if isinstance(chunk, dict) and "generated_text" in chunk:
-                text_chunk = chunk["generated_text"]
-            elif isinstance(chunk, str):
-                text_chunk = chunk
-            else:
-                continue
-
-            full_response += text_chunk
-            message_placeholder.markdown(full_response + "▌")
-
-        # Display final response and store it
-        message_placeholder.markdown(full_response)
-        st.session_state.messages.append({"role": "assistant", "content": full_response})
+        try:
+            # Combine system prompt and user input
+            full_prompt = f"<|system|>{custom_instructions}</s>
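The + side of the second hunk is cut off in this view: the f-string ends mid-line after the <|system|> segment, and the rest of the new try: block is never shown. A minimal sketch of how a Zephyr-style template like this is usually completed; the <|user|> and <|assistant|> markers are an assumption, not something the truncated diff confirms:

    # Hypothetical completion of the truncated f-string (Zephyr-style markers);
    # the diff shows only the <|system|> segment.
    full_prompt = (
        f"<|system|>{custom_instructions}</s>"
        f"<|user|>{prompt}</s>"
        f"<|assistant|>"
    )

When the tokenizer is available, tokenizer.apply_chat_template from transformers is the more robust way to build such a prompt, since it applies the model's own marker conventions instead of hand-written f-strings.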
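For comparison, the streaming loop deleted in the second hunk iterated an output object created outside the visible lines, so the hunk alone does not run. A self-contained sketch of the same accumulate-and-render pattern, assuming huggingface_hub.InferenceClient.text_generation with stream=True as the chunk source (the actual call site and its parameters are not in this diff):

    from huggingface_hub import InferenceClient
    import streamlit as st

    # These names come from the surrounding app; placeholder values here keep
    # the sketch runnable on its own.
    model_links = {"Zephyr 7B": "HuggingFaceH4/zephyr-7b-beta"}
    selected_model = "Zephyr 7B"
    temp_values = 0.5
    full_prompt = "<|system|>You are a helpful assistant.</s><|user|>Hi</s><|assistant|>"
    st.session_state.setdefault("messages", [])

    client = InferenceClient(model=model_links[selected_model])

    # Placeholder that is rewritten in place as chunks arrive.
    message_placeholder = st.empty()
    full_response = ""

    # stream=True makes text_generation yield text chunks instead of one string.
    for text_chunk in client.text_generation(
        full_prompt,
        temperature=temp_values,
        max_new_tokens=512,  # assumed cap; the diff does not show one
        stream=True,
    ):
        full_response += text_chunk
        message_placeholder.markdown(full_response + "▌")

    # Final render without the cursor, then persist to chat history.
    message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})

Streaming through the client directly removes the need for the deleted isinstance checks: with stream=True and default details, text_generation yields plain strings, so no dict/str branching is required.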