dlflannery committed on
Commit
d2c1683
·
verified ·
1 Parent(s): a42a483

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -14
app.py CHANGED
@@ -27,11 +27,15 @@ client = OpenAI(api_key = key)
27
 
28
  def genUsageStats(do_reset=False):
29
  result = []
30
- ttotal4o = 0
31
- ttotal4mini = 0
 
 
32
  for user in unames:
33
- tokens4o = 0
34
- tokens4mini = 0
 
 
35
  fp = dataDir + user + '_log.txt'
36
  if os.path.exists(fp):
37
  accessOk = False
@@ -45,22 +49,27 @@ def genUsageStats(do_reset=False):
45
  for line in dataList:
46
  (u, t) = line.split(':')
47
  (t, m) = t.split('-')
48
- tcount = int(t)
 
 
49
  if 'mini' in m:
50
- tokens4mini += tcount
51
- ttotal4mini += tcount
 
 
52
  else:
53
- tokens4o += tcount
54
- ttotal4o += tcount
 
 
55
  accessOk = True
56
  break
57
  except:
58
  sleep(3)
59
  if not accessOk:
60
  return f'File access failed reading stats for user: {user}'
61
- result.append([user, str(tokens4mini), str(tokens4o)])
62
- gtotal = ttotal4mini + ttotal4o
63
- result.append(['totals', str(ttotal4mini), str(ttotal4o), str(gtotal)])
64
  return result
65
 
66
  def clear():
@@ -91,10 +100,12 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel):
91
  completion = client.chat.completions.create(model=gptModel,
92
  messages=past)
93
  reply = completion.choices[0].message.content
 
 
94
  tokens = completion.usage.total_tokens
95
  response += "\n\nYOU: " + prompt + "\nGPT: " + reply
96
  if isBoss:
97
- response += f"\n{gptModel}: {tokens} tokens"
98
  if tokens > 40000:
99
  response += "\n\nTHIS DIALOG IS GETTING TOO LONG. PLEASE RESTART CONVERSATION SOON."
100
  past.append({"role":"assistant", "content": reply})
@@ -106,7 +117,7 @@ def chat(prompt, user_window, pwd_window, past, response, gptModel):
106
  m = '4o'
107
  if 'mini' in gptModel:
108
  m = '4omini'
109
- f.write(f'{user_window}: {tokens}-{m}\n')
110
  accessOk = True
111
  break
112
  except Exception as e:
 
27
 
28
  def genUsageStats(do_reset=False):
29
  result = []
30
+ ttotal4o_in = 0
31
+ ttotal4o_out = 0
32
+ ttotal4mini_in = 0
33
+ ttotal4mini_out = 0
34
  for user in unames:
35
+ tokens4o_in = 0
36
+ tokens4o_out = 0
37
+ tokens4mini_in = 0
38
+ tokens4mini_out = 0
39
  fp = dataDir + user + '_log.txt'
40
  if os.path.exists(fp):
41
  accessOk = False
 
49
  for line in dataList:
50
  (u, t) = line.split(':')
51
  (t, m) = t.split('-')
52
+ (tin, tout) = t.split('/')
53
+ incount = int(tin)
54
+ outcount = int(tout)
55
  if 'mini' in m:
56
+ tokens4mini_in += incount
57
+ tokens4mini_out += outcount
58
+ ttotal4mini_in += incount
59
+ ttotal4mini_out += outcount
60
  else:
61
+ tokens4o_in += incount
62
+ tokens4o_out += outcount
63
+ ttotal4o_in += incount
64
+ ttotal4o_out += outcount
65
  accessOk = True
66
  break
67
  except:
68
  sleep(3)
69
  if not accessOk:
70
  return f'File access failed reading stats for user: {user}'
71
+ result.append([user, f'{tokens4mini_in}/{tokens4mini_out}', f'{tokens4o_in}/{tokens4o_out}'])
72
+ result.append(['totals', f'{ttotal4mini_in}/{ttotal4mini_out}', f'{ttotal4o_in}/{ttotal4o_out}'])
 
73
  return result
74
 
75
  def clear():
 
100
  completion = client.chat.completions.create(model=gptModel,
101
  messages=past)
102
  reply = completion.choices[0].message.content
103
+ tokens_in = completion.usage.prompt_tokens
104
+ tokens_out = completion.usage.completion_tokens
105
  tokens = completion.usage.total_tokens
106
  response += "\n\nYOU: " + prompt + "\nGPT: " + reply
107
  if isBoss:
108
+ response += f"\n{gptModel}: tokens in/out = {tokens_in}/{tokens_out}"
109
  if tokens > 40000:
110
  response += "\n\nTHIS DIALOG IS GETTING TOO LONG. PLEASE RESTART CONVERSATION SOON."
111
  past.append({"role":"assistant", "content": reply})
 
117
  m = '4o'
118
  if 'mini' in gptModel:
119
  m = '4omini'
120
+ f.write(f'{user_window}:{tokens_in}/{tokens_out}-{m}\n')
121
  accessOk = True
122
  break
123
  except Exception as e: