ykl7 committed
Commit 22d49db · Parent(s): 6bfe6f1

gpt4o debug

Files changed (2):
  1. app.py +8 -2
  2. llm_reasoner.py +18 -11
app.py CHANGED
@@ -160,7 +160,7 @@ def reasoner(query: str, documents: list[str], llm_client: Any):
         message = "Using Claude Sonnet to reason and verify the claim..."
     elif selected_reasoner == "GPT-4o":
         message = "Using GPT-4o to analyze and verify the claim in detail..."
-    else:
+    elif selected_reasoner == "o3-mini":
         message = "Using o3-mini to quickly analyze the claim..."

     if not documents or len(documents) == 0:
@@ -215,7 +215,13 @@ if prompt := st.chat_input("Type here"):
         api_key = os.getenv("openai_key")
         options["API_KEY"] = api_key
         options["model_family"] = "OpenAI"
-        options["model_name"] = "gpt-4o-2024-05-13"
+        options["model_name"] = "gpt-4o-2024-11-20"
+
+    elif selected_reasoner == "o3-mini":
+        api_key = os.getenv("openai_key")
+        options["API_KEY"] = api_key
+        options["model_family"] = "OpenAI"
+        options["model_name"] = "o3-mini-2025-01-31"

     llm_client = LLMReasoner(options)
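For orientation, a minimal standalone sketch of how the per-reasoner options plausibly feed LLMReasoner after this hunk. Only the dict keys and pinned model names come from the diff; the build_options helper, the selected_reasoner value, and the openai_key environment variable handling are stand-ins for the surrounding Streamlit app.

    # Hypothetical sketch; only the option keys and model names mirror the diff.
    import os

    def build_options(selected_reasoner: str) -> dict:
        options: dict = {}
        if selected_reasoner == "GPT-4o":
            options["API_KEY"] = os.getenv("openai_key")
            options["model_family"] = "OpenAI"
            options["model_name"] = "gpt-4o-2024-11-20"
        elif selected_reasoner == "o3-mini":
            options["API_KEY"] = os.getenv("openai_key")
            options["model_family"] = "OpenAI"
            options["model_name"] = "o3-mini-2025-01-31"
        else:
            raise ValueError(f"Unknown reasoner: {selected_reasoner}")
        return options

    print(build_options("o3-mini"))
    # {'API_KEY': None or key, 'model_family': 'OpenAI', 'model_name': 'o3-mini-2025-01-31'}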
 
llm_reasoner.py CHANGED
@@ -30,21 +30,28 @@ class LLMReasoner():

     def make_openai_chat_completions_api_call(self, prompt):
         try:
-            response = self.client.chat.completions.create(
-                model=self.model_name,
-                messages=prompt,
-                temperature=self.temp,
-                max_completion_tokens=self.max_tokens,
-                top_p=self.top_p,
-                frequency_penalty=self.frequency_penalty,
-                presence_penalty=self.presence_penalty
-            )
+            if "gpt-4o" in self.model_name:
+                response = self.client.chat.completions.create(
+                    model=self.model_name,
+                    messages=prompt,
+                    temperature=self.temp,
+                    max_completion_tokens=self.max_tokens,
+                    top_p=self.top_p,
+                    frequency_penalty=self.frequency_penalty,
+                    presence_penalty=self.presence_penalty
+                )
+            elif "o3-mini" in self.model_name:
+                response = self.client.chat.completions.create(
+                    model=self.model_name,
+                    messages=prompt,
+                    reasoning_effort="medium"
+                )
             return self.parse_chat_completions_api_response(response)
         except openai.APIConnectionError as e:
             print("The server could not be reached")
             print(e.__cause__)  # an underlying Exception, likely raised within httpx.
             time.sleep(60)
-            return self.make_openai_api_call(prompt)
+            return self.make_openai_chat_completions_api_call(prompt)
         except openai.RateLimitError as e:
             print("Rate limit error hit")
             exit()
@@ -56,7 +63,7 @@ class LLMReasoner():
             print(e.status_code)
             # print(e.response.data)
             time.sleep(60)
-            return self.make_openai_api_call(prompt)
+            return self.make_openai_chat_completions_api_call(prompt)

     def parse_chat_completions_api_response(self, response):
         # print(response.model_dump())
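For reference, a standalone sketch of the same branched call outside the class: in the Chat Completions API, OpenAI reasoning models such as o3-mini do not accept sampling parameters (temperature, top_p, frequency/presence penalties) and instead take reasoning_effort, which is why the gpt-4o and o3-mini paths differ. The sampling values, the example prompt, and the chat_call wrapper below are illustrative placeholders, not taken from the repository; a recent openai SDK (1.x) is assumed.

    import openai

    client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment

    def chat_call(model_name: str, messages: list[dict]):
        if "gpt-4o" in model_name:
            # gpt-4o accepts the usual sampling knobs (placeholder values here).
            return client.chat.completions.create(
                model=model_name,
                messages=messages,
                temperature=0.0,
                top_p=1.0,
                max_completion_tokens=1024,
            )
        elif "o3-mini" in model_name:
            # Reasoning models reject temperature/top_p/penalties;
            # effort is steered with reasoning_effort ("low"/"medium"/"high").
            return client.chat.completions.create(
                model=model_name,
                messages=messages,
                reasoning_effort="medium",
            )
        raise ValueError(f"Unsupported model: {model_name}")

    resp = chat_call("o3-mini-2025-01-31",
                     [{"role": "user", "content": "Is the claim 'water boils at 100 C at sea level' supported?"}])
    print(resp.choices[0].message.content)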