AFischer1985 committed on
Commit
57817e9
·
1 Parent(s): b91d77f

Update run.py

Browse files
Files changed (1) hide show
  1. run.py +21 -5
run.py CHANGED
@@ -2,20 +2,36 @@ import gradio as gr
2
  import requests
3
  import random
4
  import json
5
- def response(message, history,prompt_type):
6
  url="https://afischer1985-OpenHermes-2-GGUF-API.hf.space/v1/completions"
7
- #prompt_type="ChatML"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  if(prompt_type=="Alpaca"):
9
- body={"prompt":"###Frage: "+message+" ###Antwort:","max_tokens":1000,"stop":"###","stream":True} #128
10
  if(prompt_type!="Alpaca"):
11
  body={"prompt":"""<|im_start|>system
12
- Du bist OpenHermes2 ein deutschsprachiger KI-basierter Assistent.<|im_end|>
13
  <|im_start|>user
14
  """+message+"""<|im_end|>
15
  <|im_start|>assistant
16
  ""","max_tokens":1000,"stop":"<|im_end|>","stream":True}
17
  response=""
18
  buffer=""
 
19
  print("User: "+message+"\nAI: ")
20
  for text in requests.post(url, json=body, stream=True):
21
  #print("*** Raw String: "+str(text)+"\n***\n")
@@ -39,4 +55,4 @@ Du bist OpenHermes2 ein deutschsprachiger KI-basierter Assistent.<|im_end|>
39
  pass
40
  yield response
41
 
42
- gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["Alpaca", "ChatML"],value="ChatML")]).queue().launch(share=True)
 
2
  import requests
3
  import random
4
  import json
5
+ def response(message, history, model):
6
  url="https://afischer1985-OpenHermes-2-GGUF-API.hf.space/v1/completions"
7
+ prompt_type="ChatML"
8
+ endstr="<|im_end|>"
9
+ print(model)
10
+ if(model=="WizardLM-13B"):
11
+ url="https://wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
12
+ prompt_type="Alpaca"
13
+ if(model=="SauerkrautLM-7B"):
14
+ url="https://SauerkrautLM-GGUF-API.hf.space/v1/completions"
15
+ prompt_type="Alpaca"
16
+ if(model=="OpenHermes2-7B"):
17
+ url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
18
+ prompt_type="ChatML"
19
+ if(model=="CollectiveCognition-7B"):
20
+ url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
21
+ prompt_type="ChatML"
22
+ print(prompt_type)
23
  if(prompt_type=="Alpaca"):
24
+ body={"prompt":"###Frage: "+message+" ###Antwort:","max_tokens":1000,"stop":"###","stream":True}
25
  if(prompt_type!="Alpaca"):
26
  body={"prompt":"""<|im_start|>system
27
+ Du bist ein deutschsprachiger KI-basierter Assistent.<|im_end|>
28
  <|im_start|>user
29
  """+message+"""<|im_end|>
30
  <|im_start|>assistant
31
  ""","max_tokens":1000,"stop":"<|im_end|>","stream":True}
32
  response=""
33
  buffer=""
34
+ print(str(body))
35
  print("User: "+message+"\nAI: ")
36
  for text in requests.post(url, json=body, stream=True):
37
  #print("*** Raw String: "+str(text)+"\n***\n")
 
55
  pass
56
  yield response
57
 
58
+ gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B"],value="OpenHermes2-7B",label="Model")]).queue().launch(share=True)