zjXu11 committed on
Commit
821874d
·
verified ·
1 Parent(s): 8ce26e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -46
app.py CHANGED
@@ -163,19 +163,21 @@ class Reviewer:
163
  retrieval_content += f"Relevant Paper {str(cnt)}:\n"
164
  retrieval_content += f"Title: {paper['title']}\n{paper['content']}\n\n"
165
  formatted_citation = format_bibtex(paper, 'unsrt')
166
- retrieved_papers += f"{str(cnt)}. {formatted_citation} ({paper['url']})\n\n"
167
  cnt += 1
168
  text = retrieval_content + content
169
- chat_review_text = self.chat_review(text=text)
170
  else:
171
  text = content
172
- chat_review_text = self.chat_review(text=text)
173
  retrieved_papers = ""
174
  else:
175
  text = content
176
- chat_review_text = self.chat_review(text=text)
177
  retrieved_papers = ""
178
 
 
 
179
  return chat_review_text, retrieved_papers
180
 
181
  def query_gen(self, abstract):
@@ -358,59 +360,81 @@ Organize the result in JSON format as follows:
358
  {"role": "system", "content": f"Read the following scientific paper and generate {str(self.limit_num)} major limitations in this paper about its {self.aspect}. Do not include any limitation explicitly mentioned in the paper itself. Return only the limitations in the following JSON format: {{\"limitations\": <a list of limitations>"} ,
359
  {"role": "user", "content": text},
360
  ]
 
 
 
 
 
 
 
 
 
 
 
361
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
362
  responses = asyncio.run(
363
  generate_from_openai_chat_completion(
364
  client,
365
  messages=[messages],
366
- engine_name=self.model_name, # gpt-3.5-turbo
367
  max_tokens=1000, # 32
368
  requests_per_minute = 20,
369
- # response_format={"type":"json_object"},
370
  )
371
- )
372
- try:
373
- limitations = json.loads(responses[0])["limitations"][:self.limit_num]
374
- result = ""
375
- limit_cnt = 1
376
- for limitation in limitations:
377
- result += f"{str(limit_cnt)}. {limitation}\n"
378
- limit_cnt += 1
379
- except:
380
- SYSTEM_INPUT = f"Below is an output from an LLM about several limitations of a scientific paper. Please extract the list of limitations and DO NOT make any modification to the original limitations. Return the limitations in the following JSON format: {{\"limitations\": <a list of limitations>}}. If there is no valid response inthe output, return {{\"limitations\": {{}}}}"
381
- messages=[
382
- {"role": "system", "content": SYSTEM_INPUT},
383
- {"role": "user", "content": responses[0]},
 
 
 
384
  ]
385
- os.environ["OPENAI_BASE_URL"] = PRIVATE_API_BASE
386
- os.environ["OPENAI_API_KEY"] = PRIVATE_API_KEY
387
- client = AsyncOpenAI()
388
- responses = asyncio.run(
389
- generate_from_openai_chat_completion(
390
- client,
391
- messages=[messages],
392
- engine_name="gpt-4o-mini", # gpt-3.5-turbo
393
- max_tokens=1000, # 32
394
- requests_per_minute = 20,
395
- response_format={"type":"json_object"},
396
- )
397
- )
398
- limitations = json.loads(responses[0])["limitations"][:self.limit_num]
399
- result = ""
400
- limit_cnt = 1
401
- for limitation in limitations:
402
- result += f"{str(limit_cnt)}. {limitation}\n\n"
403
- limit_cnt += 1
404
- # for choice in response.choices:
405
- # result += choice.message.content
406
- # result = insert_sentence(result, '**Generated by ChatGPT, no copying allowed!**', 50)
407
- except Exception as e:
408
- result = "Error: "+ str(e)
409
- # usage = 'xxxxx'
410
  print("********"*10)
411
  print(result)
412
- print("********"*10)
413
- return result
414
 
415
 
416
  def retrieve_papers(self, title, abstract):
 
163
  retrieval_content += f"Relevant Paper {str(cnt)}:\n"
164
  retrieval_content += f"Title: {paper['title']}\n{paper['content']}\n\n"
165
  formatted_citation = format_bibtex(paper, 'unsrt')
166
+ retrieved_papers += f"{str(cnt)}. {formatted_citation}\n({paper['url']})\n\n"
167
  cnt += 1
168
  text = retrieval_content + content
169
+ chat_review_limitations = self.chat_review(text=text)
170
  else:
171
  text = content
172
+ chat_review_limitations = self.chat_review(text=text)
173
  retrieved_papers = ""
174
  else:
175
  text = content
176
+ chat_review_limitations = self.chat_review(text=text)
177
  retrieved_papers = ""
178
 
179
+ text = f"Paper:\n{paper['content']}\n\n"
180
+ chat_review_text = self.chat_refine(text=text, limitations=chat_review_limitations)
181
  return chat_review_text, retrieved_papers
182
 
183
  def query_gen(self, abstract):
 
360
  {"role": "system", "content": f"Read the following scientific paper and generate {str(self.limit_num)} major limitations in this paper about its {self.aspect}. Do not include any limitation explicitly mentioned in the paper itself. Return only the limitations in the following JSON format: {{\"limitations\": <a list of limitations>"} ,
361
  {"role": "user", "content": text},
362
  ]
363
+
364
+ responses = asyncio.run(
365
+ generate_from_openai_chat_completion(
366
+ client,
367
+ messages=[messages],
368
+ engine_name=self.model_name, # gpt-3.5-turbo
369
+ max_tokens=1000, # 32
370
+ requests_per_minute = 20,
371
+ # response_format={"type":"json_object"},
372
+ )
373
+ )
374
  try:
375
+ limitations = json.loads(responses[0])["limitations"][:self.limit_num]
376
+ result = ""
377
+ limit_cnt = 1
378
+ for limitation in limitations:
379
+ result += f"{str(limit_cnt)}. {limitation}\n"
380
+ limit_cnt += 1
381
+ except:
382
+ SYSTEM_INPUT = f"Below is an output from an LLM about several limitations of a scientific paper. Please extract the list of limitations and DO NOT make any modification to the original limitations. Return the limitations in the following JSON format: {{\"limitations\": <a list of limitations>}}. If there is no valid response inthe output, return {{\"limitations\": {{}}}}"
383
+ messages=[
384
+ {"role": "system", "content": SYSTEM_INPUT},
385
+ {"role": "user", "content": responses[0]},
386
+ ]
387
+ os.environ["OPENAI_BASE_URL"] = PRIVATE_API_BASE
388
+ os.environ["OPENAI_API_KEY"] = PRIVATE_API_KEY
389
+ client = AsyncOpenAI()
390
  responses = asyncio.run(
391
  generate_from_openai_chat_completion(
392
  client,
393
  messages=[messages],
394
+ engine_name="gpt-4o-mini", # gpt-3.5-turbo
395
  max_tokens=1000, # 32
396
  requests_per_minute = 20,
397
+ response_format={"type":"json_object"},
398
  )
399
+ )
400
+ limitations = json.loads(responses[0])["limitations"][:self.limit_num]
401
+
402
+
403
+ return limitations
404
+
405
+ def chat_refine(self, text, limitations):
406
+ os.environ["OPENAI_BASE_URL"] = self.api_base
407
+ os.environ["OPENAI_API_KEY"] = self.api
408
+ client = AsyncOpenAI()
409
+
410
+ messages = []
411
+ for limitation in limitations:
412
+ message=[
413
+ {"role": "system", "content": f"Read the following scientific paper and a limitation of the paper. Provide a specific, actionable suggestion to address the limitation in no more than 50 words."} ,
414
+ {"role": "user", "content": f"{text}\nLimitation: {limitation}"},
415
  ]
416
+ messages.append(message)
417
+
418
+ responses = asyncio.run(
419
+ generate_from_openai_chat_completion(
420
+ client,
421
+ messages=messages,
422
+ engine_name=self.model_name, # gpt-3.5-turbo
423
+ max_tokens=1000, # 32
424
+ requests_per_minute = 20,
425
+ # response_format={"type":"json_object"},
426
+ )
427
+ )
428
+
429
+ result = ""
430
+ limit_cnt = 1
431
+ for limitation, response in zip(limitations, responses):
432
+ result += f"{str(limit_cnt)}. {limitation} {response}\n\n"
433
+ limit_cnt += 1
 
 
 
 
 
 
 
434
  print("********"*10)
435
  print(result)
436
+ print("********"*10)
437
+ return result
438
 
439
 
440
  def retrieve_papers(self, title, abstract):