shukdevdatta123 committed d11e4cc (verified) · 1 parent: 0a1766d

Update app.py

Files changed (1): app.py (+5 −5)
app.py CHANGED

@@ -102,7 +102,7 @@ Please provide a comprehensive analysis with deep reasoning. Think through all i
     model="deepseek-r1-distill-llama-70b",
     messages=[{"role": "user", "content": prompt}],
     temperature=0.6,
-    max_completion_tokens=4096,
+    max_completion_tokens=8192,
     top_p=0.95,
     reasoning_format="raw"
 )
@@ -143,7 +143,7 @@ Be decisive and solution-focused. Provide concrete, actionable recommendations."
     messages=[{"role": "user", "content": prompt}],
     temperature=0.6,
     top_p=0.95,
-    max_completion_tokens=4096
+    max_completion_tokens=8192
 )

 response_content = completion.choices[0].message.content
@@ -184,7 +184,7 @@ Be extremely thorough and leave no stone unturned. Provide detailed evidence and
     messages=[{"role": "user", "content": prompt}],
     temperature=0.7,
     top_p=0.9,
-    max_completion_tokens=4096
+    max_completion_tokens=8192
 )

 response_content = completion.choices[0].message.content
@@ -195,7 +195,7 @@ Be extremely thorough and leave no stone unturned. Provide detailed evidence and
     model="qwen-qwq-32b",
     messages=[{"role": "user", "content": fallback_prompt}],
     temperature=0.5,
-    max_completion_tokens=4096
+    max_completion_tokens=8192
 )
 response_content = fallback_completion.choices[0].message.content

@@ -264,7 +264,7 @@ Format your response as a well-structured final solution that leverages all avai
     model="llama-3.3-70b-versatile",
     messages=[{"role": "user", "content": synthesis_prompt}],
     temperature=0.7,
-    max_completion_tokens=4096,
+    max_completion_tokens=8192,
     top_p=0.9
 )
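
For reference, a minimal sketch of the call shape this commit touches, assuming the groq Python SDK that these chat.completions.create calls appear to use; the client setup and prompt text below are illustrative and not part of the commit, only the create() arguments mirror the patched code:

# Minimal sketch, assuming the groq Python SDK (pip install groq);
# client setup and prompt are hypothetical, not from this commit.
import os
from groq import Groq

client = Groq(api_key=os.environ["GROQ_API_KEY"])  # assumed key handling

completion = client.chat.completions.create(
    model="deepseek-r1-distill-llama-70b",
    messages=[{"role": "user", "content": "Example prompt"}],
    temperature=0.6,
    max_completion_tokens=8192,  # raised from 4096 by this commit
    top_p=0.95,
    reasoning_format="raw",  # return the model's raw reasoning trace
)
print(completion.choices[0].message.content)

Doubling the cap to 8192 gives the reasoning models more room to emit their full reasoning trace plus the answer before truncation. Note the value must be written as 8192: a thousands separator, as in 8,192, would be a Python syntax error inside the keyword argument list.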