expandme committed on
Commit
ff9d893
·
1 Parent(s): 17c6df5

Unjeasonise

Browse files
Files changed (1) hide show
  1. app.py +8 -17
app.py CHANGED
@@ -34,13 +34,6 @@ model_info ={
34
  'url':'https://huggingface.co/microsoft/Phi-3.5-mini-instruct'},
35
  }
36
 
37
- def format_promt(message, custom_instructions=None, temperature=0.5):
38
- messages = []
39
- if custom_instructions:
40
- messages.append({"role": "system", "content": custom_instructions})
41
- messages.append({"role": "user", "content": message})
42
- return {"inputs": messages}
43
-
44
  def reset_conversation():
45
  '''
46
  Resets Conversation
@@ -121,6 +114,8 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
121
  with st.chat_message("user"):
122
  st.markdown(prompt)
123
 
 
 
124
  st.session_state.messages.append({"role": "user", "content": prompt})
125
 
126
  with st.chat_message("assistant"):
@@ -128,16 +123,12 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
128
  model=model_links[selected_model],)
129
 
130
  try:
131
- # Use the format_promt function to prepare the request
132
- formatted_request = format_promt(prompt, custom_instructions, temp_value)
133
-
134
- output = client.post(
135
- json=formatted_request,
136
- params={
137
- "temperature": temp_value,
138
- "max_new_tokens": 1000,
139
- "stream": True
140
- }
141
  )
142
 
143
  # Create a placeholder for the streaming response
 
34
  'url':'https://huggingface.co/microsoft/Phi-3.5-mini-instruct'},
35
  }
36
 
 
 
 
 
 
 
 
37
  def reset_conversation():
38
  '''
39
  Resets Conversation
 
114
  with st.chat_message("user"):
115
  st.markdown(prompt)
116
 
117
+ st.session_state.messages.append({"role": "system", "content": custom_instructions})
118
+
119
  st.session_state.messages.append({"role": "user", "content": prompt})
120
 
121
  with st.chat_message("assistant"):
 
123
  model=model_links[selected_model],)
124
 
125
  try:
126
+
127
+ output = client.text_generation(
128
+ prompt,
129
+ temperature=temp_value,#0.5
130
+ max_new_tokens=3000,
131
+ stream=True
 
 
 
 
132
  )
133
 
134
  # Create a placeholder for the streaming response