Omnibus committed
Commit cb5b8fa · verified · 1 Parent(s): 37597ca

Update app.py

Files changed (1):
  1. app.py +16 -8
app.py CHANGED
@@ -17,9 +17,9 @@ InferenceClient(models[2]),
     InferenceClient(models[3]),
 ]
 
-VERBOSE=True
+VERBOSE=False
 
-def format_prompt(message, history):
+def format_prompt(message, history, cust_p):
     prompt = ""
     if history:
         #<start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
@@ -28,10 +28,15 @@ def format_prompt(message, history):
         #print(prompt)
         prompt += f"{bot_response}\n"
         #print(prompt)
-    prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model"
+    #prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
+    prompt += cust_p.format(USER_INPUT=message)
     return prompt
 
-def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem):
+def custom_prompt(prompt):
+    return prompt
+
+
+def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
     #token max=8192
     hist_len=0
     client=clients[int(client_choice)-1]
@@ -58,7 +63,7 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
         do_sample=True,
         seed=seed,
     )
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:])
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:], cust_p)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
@@ -70,7 +75,7 @@ def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,
     if VERBOSE==True:
         print("\n######### HIST "+str(in_len))
         print("\n######### TOKENS "+str(tokens))
-        print("\n######### PROMPT "+str(len(formatted_prompt)))
+        #print("\n######### PROMPT "+str(len(formatted_prompt)))
 
 def get_screenshot(chat: list,height=5000,width=600,chatblock=[],theme="light",wait=3000,header=True):
     print(chatblock)
@@ -109,6 +114,7 @@ with gr.Blocks() as app:
             stop_btn=gr.Button("Stop")
             clear_btn=gr.Button("Clear")
             client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],value=models[0],interactive=True)
+            custom_prompt=gr.Textbox(lines=5,value="<start_of_turn>user\nUSER_INPUT<end_of_turn>\n<start_of_turn>model\n")
         with gr.Column(scale=1):
             with gr.Group():
                 rand = gr.Checkbox(label="Random Seed", value=True)
@@ -132,8 +138,10 @@ with gr.Blocks() as app:
         chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=[c for c in range(1,40)],multiselect=True)
 
     im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem],[chat_b,memory])
-    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem],[chat_b,memory])
+
+    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+
     stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
     clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
 app.queue(default_concurrency_limit=10).launch()
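
The substance of this commit is the configurable prompt template: the hard-coded Gemma-style turn markers in format_prompt are replaced by cust_p.format(USER_INPUT=message), and the template is editable in the UI through the new custom_prompt textbox, which is passed into both chat_inf event chains. A minimal standalone sketch of that templating path follows (apply_template is a hypothetical helper, and the template and message values are illustrative, not taken from the app). One caveat worth knowing: str.format substitutes only brace-delimited fields, so a template must spell the placeholder as {USER_INPUT} for the message to be injected; a bare USER_INPUT token passes through unchanged.

# Minimal sketch of the cust_p templating used by the new format_prompt.
# The placeholder must be brace-delimited ({USER_INPUT}) for str.format
# to substitute it; this template value is illustrative.
template = "<start_of_turn>user\n{USER_INPUT}<end_of_turn>\n<start_of_turn>model\n"

def apply_template(message: str, cust_p: str) -> str:
    # Mirrors the final-turn line of format_prompt:
    # prompt += cust_p.format(USER_INPUT=message)
    return cust_p.format(USER_INPUT=message)

print(apply_template("How does the brain work?", template))
# <start_of_turn>user
# How does the brain work?
# <end_of_turn>
# <start_of_turn>model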