Spaces:
Sleeping
Sleeping
Reformatting
Browse files
app.py
CHANGED
@@ -137,5 +137,29 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
|
|
137 |
model=model_links[selected_model],)
|
138 |
|
139 |
try:
|
140 |
-
#
|
141 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
137 |
model=model_links[selected_model],)
|
138 |
|
139 |
try:
|
140 |
+
# Use the format_promt function to prepare the request
|
141 |
+
formatted_request = format_promt(prompt, custom_instructions, temp_values)
|
142 |
+
|
143 |
+
output = client.post(json=formatted_request)
|
144 |
+
|
145 |
+
# Create a placeholder for the streaming response
|
146 |
+
message_placeholder = st.empty()
|
147 |
+
full_response = ""
|
148 |
+
|
149 |
+
# Stream the response and accumulate it
|
150 |
+
for chunk in output:
|
151 |
+
if isinstance(chunk, dict) and "generated_text" in chunk:
|
152 |
+
text_chunk = chunk["generated_text"]
|
153 |
+
elif isinstance(chunk, str):
|
154 |
+
text_chunk = chunk
|
155 |
+
else:
|
156 |
+
continue
|
157 |
+
|
158 |
+
full_response += text_chunk
|
159 |
+
message_placeholder.markdown(full_response + "▌")
|
160 |
+
|
161 |
+
# Display final response and store it
|
162 |
+
message_placeholder.markdown(full_response)
|
163 |
+
st.session_state.messages.append({"role": "assistant", "content": full_response})
|
164 |
+
except Exception as e:
|
165 |
+
st.error(f"Error: {str(e)}")
|