yxmnjxzx committed
Commit 050c8b0 · verified · 1 parent: cee4674

Update app.py

Files changed (1)
  1. app.py (+15 -3)
app.py CHANGED

@@ -93,7 +93,7 @@ class PromptRefiner:
         ]
 
         response = self.client.chat.completions.create(
-            model="llama-3.2-90b-text-preview",
+            model=model,
             messages=messages,
             max_tokens=8196, # Increased token limit
             temperature=0.5
@@ -138,7 +138,19 @@ class GradioInterface:
                 outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
             )
             with gr.Row():
-                # apply_model=gr.Dropdown(["gpt-4o",'gpt-4-turbo'], value="gpt-4o", label="Model"),
+                apply_model = gr.Dropdown(
+                    [
+                        "llama-3.1-70b-versatile",
+                        "llama3-groq-70b-8192-tool-use-preview",
+                        "llama-3.2-90b-text-preview",
+                        "llama-3.2-90b-vision-preview",
+                        "llama3-groq-70b-8192-tool-use-preview",
+                        "mixtral-8x7b-32768"
+                    ],
+                    value="llama-3.1-70b-versatile",
+                    label="Choose Model"
+                )
+                # apply_model=gr.Dropdown(["gpt-4o",'gpt-4-turbo'], value="gpt-4o", label="Model"),
                 apply_button = gr.Button("Apply Prompts")
 
             with gr.Row():
@@ -151,7 +163,7 @@ class GradioInterface:
 
             apply_button.click(
                 fn=self.apply_prompts,
-                inputs=[prompt_text, refined_prompt],
+                inputs=[prompt_text, refined_prompt, apply_model],
                 outputs=[original_output, refined_output]
             )
 
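For reference, the following is a minimal, self-contained sketch of the pattern this commit applies: a gr.Dropdown is listed in the click handler's inputs, so Gradio passes the selected model name into the callback, which forwards it to the Groq chat-completions call as the model argument. The names refine, demo, prompt_text, and output are illustrative placeholders, not the components or methods actually defined in this repository's app.py.

```python
import gradio as gr
from groq import Groq

client = Groq()  # expects GROQ_API_KEY in the environment

def refine(prompt: str, model: str) -> str:
    # The dropdown's current value arrives here as a plain string and is
    # forwarded directly to the API call, mirroring the `model=model` change.
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=8196,
        temperature=0.5,
    )
    return response.choices[0].message.content

with gr.Blocks() as demo:
    prompt_text = gr.Textbox(label="Prompt")
    apply_model = gr.Dropdown(
        ["llama-3.1-70b-versatile", "mixtral-8x7b-32768"],
        value="llama-3.1-70b-versatile",
        label="Choose Model",
    )
    output = gr.Textbox(label="Output")
    apply_button = gr.Button("Apply Prompts")
    # Listing the dropdown in `inputs` is what makes the selection reach the
    # callback; this is the counterpart of adding apply_model to the inputs
    # list of apply_button.click in the diff above.
    apply_button.click(fn=refine, inputs=[prompt_text, apply_model], outputs=[output])

demo.launch()
```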