AFischer1985 committed on
Commit
0eb163e
·
1 Parent(s): 0b07f09

Update run.py

Browse files
Files changed (1) hide show
  1. run.py +13 -12
run.py CHANGED
@@ -8,10 +8,10 @@ def response(message, history, model, prompt_type):
8
  print(model)
9
  if(model=="WizardLM-13B"):
10
  url="https://wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
11
- if(prompt_type=="Default"): prompt_type="Alpaca"
12
  if(model=="SauerkrautLM-7B"):
13
  url="https://SauerkrautLM-GGUF-API.hf.space/v1/completions"
14
- if(prompt_type=="Default"): prompt_type="Alpaca"
15
  if(model=="OpenHermes2-7B"):
16
  url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
17
  if(prompt_type=="Default"): prompt_type="ChatML"
@@ -19,15 +19,16 @@ def response(message, history, model, prompt_type):
19
  url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
20
  if(prompt_type=="Default"): prompt_type="ChatML"
21
  print(prompt_type)
22
- if(prompt_type=="Alpaca"):
23
- body={"prompt":"###Frage: "+message+" ###Antwort:","max_tokens":1000,"stop":"###","stream":True}
24
  if(prompt_type=="ChatML"):
25
- body={"prompt":"""<|im_start|>system
26
- You are a helpful AI-Assistant.<|im_end|>
27
- <|im_start|>user
28
- """+message+"""<|im_end|>
29
- <|im_start|>assistant
30
- ""","max_tokens":1000,"stop":"<|im_end|>","stream":True}
 
 
 
31
  response=""
32
  buffer=""
33
  print(str(body))
@@ -36,7 +37,7 @@ You are a helpful AI-Assistant.<|im_end|>
36
  #print("*** Raw String: "+str(text)+"\n***\n")
37
  text=text.decode('utf-8')
38
  if(text.startswith(": ping -")==False):buffer=str(buffer)+str(text)
39
- #if(text.startswith(": ping -")): print("\n*** PING!\n***\n")
40
  #print("\n*** Buffer: "+str(buffer)+"\n***\n")
41
  buffer=buffer.split('"finish_reason"')
42
  if(len(buffer)==1):
@@ -54,4 +55,4 @@ You are a helpful AI-Assistant.<|im_end|>
54
  pass
55
  yield response
56
 
57
- gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B"],value="OpenHermes2-7B",label="Model"),gr.Dropdown(["Default", "Alpaca","ChatML"],value="Default",label="Prompt Type")]).queue().launch(share=True)
 
8
  print(model)
9
  if(model=="WizardLM-13B"):
10
  url="https://wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
11
+ if(prompt_type=="Default"): prompt_type="Vicuna"
12
  if(model=="SauerkrautLM-7B"):
13
  url="https://SauerkrautLM-GGUF-API.hf.space/v1/completions"
14
+ if(prompt_type=="Default"): prompt_type="Vicuna (German)"
15
  if(model=="OpenHermes2-7B"):
16
  url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
17
  if(prompt_type=="Default"): prompt_type="ChatML"
 
19
  url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
20
  if(prompt_type=="Default"): prompt_type="ChatML"
21
  print(prompt_type)
 
 
22
  if(prompt_type=="ChatML"):
23
+ body={"prompt":"<|im_start|>system\nYou are a helpful AI-Assistant.<|im_end|>\n<|im_start|>user\n"+message+"<|im_end|>\n<|im_start|>assistant","max_tokens":1000,"stop":"<|im_end|>","stream":True}
24
+ if(prompt_type=="ChatML (German)"):
25
+ body={"prompt":"<|im_start|>system\nDu bist ein KI-basiertes deutschsprachiges Assistenzsystem.<|im_end|>\n<|im_start|>user"+message+"<|im_end|>\n<|im_start|>assistant","max_tokens":1000,"stop":"<|im_end|>","stream":True}
26
+ if(prompt_type=="Alpaca"):
27
+ body={"prompt":"###Instruction: "+message+" ###Resonse:","max_tokens":1000,"stop":"###","stream":True}
28
+ if(prompt_type=="Vicuna"):
29
+ body={"prompt":"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: "+message+" ASSISTANT:","max_tokens":1000,"stop":"USER:","stream":True}
30
+ if(prompt_type=="Vicuna (German)"):
31
+ body={"prompt":"Ein Chat zwischen einem Benutzer und einem KI-Assistenten. Der KI-Assistent gibt hilfreiche, detaillierte und höfliche Antworten.\nUser: "+message+"\nAssistant: ","max_tokens":1000,"stop":"User:","stream":True}
32
  response=""
33
  buffer=""
34
  print(str(body))
 
37
  #print("*** Raw String: "+str(text)+"\n***\n")
38
  text=text.decode('utf-8')
39
  if(text.startswith(": ping -")==False):buffer=str(buffer)+str(text)
40
+ #if(text.startswith(": ping -")): print("\n*** PIacNG!\n***\n")
41
  #print("\n*** Buffer: "+str(buffer)+"\n***\n")
42
  buffer=buffer.split('"finish_reason"')
43
  if(len(buffer)==1):
 
55
  pass
56
  yield response
57
 
58
+ gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B"],value="OpenHermes2-7B",label="Model"),gr.Dropdown(["Default", "ChatML","ChatML (German)","Vicuna","Vicuna (German)","Alpaca"],value="Default",label="Prompt Type")]).queue().launch(share=True)