Omnibus committed on
Commit
1850cee
·
verified ·
1 Parent(s): bbbbe65

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -23
app.py CHANGED
@@ -71,29 +71,30 @@ def chat_inf(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,r
71
  print("\n######### HIST "+str(in_len))
72
  print("\n######### TOKENS "+str(tokens))
73
  if (in_len+tokens) > 8000:
74
- yield [(prompt,"Wait. I need to compress our Chat history...")]
75
- hist=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem)
76
- yield [(prompt,"History has been compressed, processing request...")]
77
- history = [(prompt,hist)]
78
- generate_kwargs = dict(
79
- temperature=temp,
80
- max_new_tokens=tokens,
81
- top_p=top_p,
82
- repetition_penalty=rep_p,
83
- do_sample=True,
84
- seed=seed,
85
- )
86
- #formatted_prompt=prompt
87
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history[0-chat_mem:])
88
- print("\n######### PROMPT "+str(len(formatted_prompt)))
89
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
90
- output = ""
91
- for response in stream:
92
- output += response.token.text
93
- yield [(prompt,output)]
94
- history.append((prompt,output))
95
- memory=history
96
- yield history
 
97
 
98
  def get_screenshot(chat: list,height=5000,width=600,chatblock=[],theme="light",wait=3000,header=True):
99
  print(chatblock)
 
71
  print("\n######### HIST "+str(in_len))
72
  print("\n######### TOKENS "+str(tokens))
73
  if (in_len+tokens) > 8000:
74
+ yield [(prompt,"Wait, that's too many tokens, please reduce the Chat Memory value")]
75
+ #hist=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem)
76
+ #yield [(prompt,"History has been compressed, processing request...")]
77
+ #history.append((prompt,hist))
78
+ else:
79
+ generate_kwargs = dict(
80
+ temperature=temp,
81
+ max_new_tokens=tokens,
82
+ top_p=top_p,
83
+ repetition_penalty=rep_p,
84
+ do_sample=True,
85
+ seed=seed,
86
+ )
87
+ #formatted_prompt=prompt
88
+ formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history[0-chat_mem:])
89
+ print("\n######### PROMPT "+str(len(formatted_prompt)))
90
+ stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
91
+ output = ""
92
+ for response in stream:
93
+ output += response.token.text
94
+ yield [(prompt,output)]
95
+ history.append((prompt,output))
96
+ memory=history
97
+ yield history
98
 
99
  def get_screenshot(chat: list,height=5000,width=600,chatblock=[],theme="light",wait=3000,header=True):
100
  print(chatblock)