dlflannery committed on
Commit
8807f4f
·
verified ·
1 Parent(s): 6655844

Update app.py

Browse files

Added LaTeX math handling to DeepSeek models

Files changed (1) hide show
  1. app.py +33 -2
app.py CHANGED
@@ -24,6 +24,7 @@ import pytz
24
  import math
25
  import numpy as np
26
  # import matplotlib.pyplot as plt
 
27
 
28
 
29
  load_dotenv(override=True)
@@ -90,6 +91,31 @@ def date_from_utime(utime):
90
  eastern = pytz.timezone('US/Eastern')
91
  return dt.astimezone(eastern).strftime('%Y-%m-%d')
92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  def stock_list():
94
  rv = ''
95
  with open(stock_data_path, 'rt') as fp:
@@ -698,6 +724,8 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
698
  elif prompt.lower().startswith('puzzle'):
699
  chatType = 'logic'
700
  prompt = prompt[6:]
 
 
701
  past.append({"role":"user", "content":prompt})
702
  gen_image = (uploaded_image_file != '')
703
  if chatType in special_chat_types:
@@ -732,6 +760,9 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
732
  return [past, msg, None, gptModel, uploaded_image_file, plot]
733
  if not chatType in special_chat_types:
734
  reply = completion.choices[0].message.content
 
 
 
735
  final_text = reply
736
  if deepseek:
737
  loc1 = reply.find('<think>')
@@ -739,11 +770,11 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel, uploaded_ima
739
  loc2 = reply.find('</think>')
740
  if loc2 > loc1:
741
  final_text = reply[loc2 + 8:]
742
- reply = reply.replace('<think>','Thinking:\n').replace('</think>','Done thinking:\n')
743
  tokens_in = completion.usage.prompt_tokens
744
  tokens_out = completion.usage.completion_tokens
745
  tokens = completion.usage.total_tokens
746
- response += md("\n\nYOU: " + prompt + "\nGPT: " + reply)
747
  if isBoss:
748
  response += md(f"\n{reporting_model}: tokens in/out = {tokens_in}/{tokens_out}")
749
  if tokens > 40000:
 
24
  import math
25
  import numpy as np
26
  # import matplotlib.pyplot as plt
27
+ from pylatexenc.latex2text import LatexNodes2Text
28
 
29
 
30
  load_dotenv(override=True)
 
91
  eastern = pytz.timezone('US/Eastern')
92
  return dt.astimezone(eastern).strftime('%Y-%m-%d')
93
 
94
def convert_latex_math(text):
    """Convert LaTeX display-math blocks (``\\[ ... \\]``) in *text* to plain text.

    Two layouts produced by the models are handled:

    * multi-line blocks where ``\\[`` and ``\\]`` each sit alone on their own
      line -- the line between the delimiters is converted and the delimiter
      lines are dropped;
    * single-line blocks of the form ``\\[<latex>\\]<tail>`` -- the bracketed
      span is converted in place and any trailing text is preserved.

    Non-math lines pass through unchanged.  Every emitted line (including
    blank ones) is terminated with ``'\\n'``, and a converted block is
    preceded by a blank line for readability.

    Args:
        text: model reply possibly containing LaTeX display math.

    Returns:
        The text with display-math spans rendered via
        ``LatexNodes2Text().latex_to_text`` (imported at module level from
        ``pylatexenc.latex2text``).
    """
    out_lines = []
    in_display_block = False  # True right after a bare '\[' line
    for line in text.split('\n'):
        if not line:
            out_lines.append('')
            continue
        if line == r'\]':
            # Closing delimiter of a multi-line block: drop it.
            continue
        if line == r'\[':
            # Opening delimiter: the next line holds the LaTeX source.
            in_display_block = True
            continue
        if in_display_block:
            line = '\n' + LatexNodes2Text().latex_to_text(line.strip())
            in_display_block = False
        elif line.startswith(r'\['):
            loc = line.find(r'\]')
            if loc > 0:
                latex_code = line[2:loc]
                # BUG FIX: the original discarded everything after the
                # closing '\]' on the same line; keep that tail text.
                tail = line[loc + 2:]
                line = '\n' + LatexNodes2Text().latex_to_text(latex_code) + tail
    # Re-joining with '\n' and appending a final '\n' reproduces the
    # original per-line "out_txt += line + '\n'" accumulation exactly.
        out_lines.append(line)
    return '\n'.join(out_lines) + '\n'
118
+
119
  def stock_list():
120
  rv = ''
121
  with open(stock_data_path, 'rt') as fp:
 
724
  elif prompt.lower().startswith('puzzle'):
725
  chatType = 'logic'
726
  prompt = prompt[6:]
727
+ if deepseek:
728
+ prompt = prompt + '. Do not use Latex for math expressions.'
729
  past.append({"role":"user", "content":prompt})
730
  gen_image = (uploaded_image_file != '')
731
  if chatType in special_chat_types:
 
760
  return [past, msg, None, gptModel, uploaded_image_file, plot]
761
  if not chatType in special_chat_types:
762
  reply = completion.choices[0].message.content
763
+ # if 'groq' in reporting_model:
764
+ if deepseek:
765
+ reply = convert_latex_math(reply)
766
  final_text = reply
767
  if deepseek:
768
  loc1 = reply.find('<think>')
 
770
  loc2 = reply.find('</think>')
771
  if loc2 > loc1:
772
  final_text = reply[loc2 + 8:]
773
+ reply = reply.replace('<think>','\n***Thinking***\n').replace('</think>','\n***Done thinking***\n')
774
  tokens_in = completion.usage.prompt_tokens
775
  tokens_out = completion.usage.completion_tokens
776
  tokens = completion.usage.total_tokens
777
+ response += md("\n\n***YOU***: " + prompt + "\n***GPT***: " + reply)
778
  if isBoss:
779
  response += md(f"\n{reporting_model}: tokens in/out = {tokens_in}/{tokens_out}")
780
  if tokens > 40000: