expandme commited on
Commit
17c6df5
·
1 Parent(s): 3685631

Unjeasonise

Browse files
Files changed (1) hide show
  1. app.py +12 -16
app.py CHANGED
@@ -35,17 +35,11 @@ model_info ={
35
  }
36
 
37
def format_promt(message, custom_instructions=None, temperature=0.5):
    """Assemble the chat request payload for the inference endpoint.

    The payload nests a message list under ``inputs.messages``. An optional
    system message (``custom_instructions``) is placed before the user turn.
    NOTE(review): ``temperature`` is accepted but never read here — presumably
    consumed by the caller; confirm before removing.
    """
    conversation = []
    if custom_instructions:
        conversation.append({"role": "system", "content": custom_instructions})
    conversation.append({"role": "user", "content": message})
    return {"inputs": {"messages": conversation}}
47
-
48
-
49
 
50
  def reset_conversation():
51
  '''
@@ -137,12 +131,14 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"
137
  # Use the format_promt function to prepare the request
138
  formatted_request = format_promt(prompt, custom_instructions, temp_value)
139
 
140
- output = text_generation(
141
- formatted_request,
142
- temperature=temp_value,#0.5
143
- max_new_tokens=1000,
144
- stream=True
145
- )
 
 
146
 
147
  # Create a placeholder for the streaming response
148
  message_placeholder = st.empty()
 
35
  }
36
 
37
def format_promt(message, custom_instructions=None, temperature=0.5):
    """Build the flat request payload: ``{"inputs": [<messages>]}``.

    The user turn always goes last; when ``custom_instructions`` is truthy
    a system message is prepended. NOTE(review): ``temperature`` is accepted
    but unused in this function — presumably applied by the caller; verify.
    """
    chat = [{"role": "user", "content": message}]
    if custom_instructions:
        chat.insert(0, {"role": "system", "content": custom_instructions})
    return {"inputs": chat}
 
 
43
 
44
  def reset_conversation():
45
  '''
 
131
  # Use the format_promt function to prepare the request
132
  formatted_request = format_promt(prompt, custom_instructions, temp_value)
133
 
134
+ output = client.post(
135
+ json=formatted_request,
136
+ params={
137
+ "temperature": temp_value,
138
+ "max_new_tokens": 1000,
139
+ "stream": True
140
+ }
141
+ )
142
 
143
  # Create a placeholder for the streaming response
144
  message_placeholder = st.empty()