Jeasonize it MORE ?
app.py CHANGED
@@ -34,12 +34,21 @@ model_info ={
         'url':'https://huggingface.co/microsoft/Phi-3.5-mini-instruct'},
 }
 
-def format_promt(message, custom_instructions=None):
-    messages = []
+def format_promt(message, custom_instructions=None, temperature=0.5):
+    payload = {
+        "inputs": {
+            "messages": []
+        },
+        "parameters": {
+            "temperature": temperature,
+            "max_new_tokens": 1000,
+            "stream": True
+        }
+    }
     if custom_instructions:
-        messages.append({"role": "system", "content": custom_instructions})
-    messages.append({"role": "user", "content": message})
-    return
+        payload["inputs"]["messages"].append({"role": "system", "content": custom_instructions})
+    payload["inputs"]["messages"].append({"role": "user", "content": message})
+    return payload
 
 def reset_conversation():
     '''
@@ -123,7 +132,7 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
 
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    formated_text = format_promt(prompt, custom_instructions)
+    formated_text = format_promt(prompt, custom_instructions, temp_values)
 
 
     with st.chat_message("assistant"):
@@ -131,10 +140,7 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
             model=model_links[selected_model],)
 
         output = client.post(
-            json=format_promt(prompt, custom_instructions)
-            temperature=temp_values,
-            max_new_tokens=1000,
-            stream=True
+            json=format_promt(prompt, custom_instructions, temp_values)
        )
 
         # Create a placeholder for the streaming response
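For context, the commit moves the sampling options out of the client.post(...) call, where they previously followed the json= argument without a separating comma (a syntax error), and into the payload that format_promt builds, so the request is now a single JSON object. Below is a minimal sketch of the resulting behavior, assuming the format_promt defined in the diff above, assuming client is a huggingface_hub.InferenceClient (whose post method accepts a json= payload), and using hypothetical stand-in values for the app's sidebar variables:

from huggingface_hub import InferenceClient

# Hypothetical stand-ins for the values app.py reads from the Streamlit sidebar.
custom_instructions = "You are a concise assistant."
temp_values = 0.7

# format_promt (from the diff above) now bundles the chat messages
# together with the sampling parameters into one request payload.
payload = format_promt("What is Streamlit?", custom_instructions, temp_values)
# payload == {
#     "inputs": {
#         "messages": [
#             {"role": "system", "content": "You are a concise assistant."},
#             {"role": "user", "content": "What is Streamlit?"},
#         ]
#     },
#     "parameters": {"temperature": 0.7, "max_new_tokens": 1000, "stream": True},
# }

client = InferenceClient(model="microsoft/Phi-3.5-mini-instruct")
output = client.post(json=payload)  # a single argument carries the whole request

One loose end the commit leaves behind: format_promt is still called twice, once for formated_text and once inside client.post, and formated_text itself is never used afterwards, so a follow-up cleanup could drop one of the two calls.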