Spaces: Runtime error
Commit 0b07f09 · 1 Parent(s): 00d1f95
Update run.py
run.py CHANGED
@@ -2,27 +2,26 @@ import gradio as gr
 import requests
 import random
 import json
-def response(message, history, model):
+def response(message, history, model, prompt_type):
   url="https://afischer1985-OpenHermes-2-GGUF-API.hf.space/v1/completions"
-  prompt_type="ChatML"
   endstr="<|im_end|>"
   print(model)
   if(model=="WizardLM-13B"):
     url="https://wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
-    prompt_type="Alpaca"
+    if(prompt_type=="Default"): prompt_type="Alpaca"
   if(model=="SauerkrautLM-7B"):
     url="https://SauerkrautLM-GGUF-API.hf.space/v1/completions"
-    prompt_type="Alpaca"
+    if(prompt_type=="Default"): prompt_type="Alpaca"
   if(model=="OpenHermes2-7B"):
     url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
-    prompt_type="ChatML"
+    if(prompt_type=="Default"): prompt_type="ChatML"
   if(model=="CollectiveCognition-7B"):
     url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
-    prompt_type="ChatML"
+    if(prompt_type=="Default"): prompt_type="ChatML"
   print(prompt_type)
   if(prompt_type=="Alpaca"):
     body={"prompt":"###Frage: "+message+" ###Antwort:","max_tokens":1000,"stop":"###","stream":True}
-  if(prompt_type
+  if(prompt_type=="ChatML"):
     body={"prompt":"""<|im_start|>system
 You are a helpful AI-Assistant.<|im_end|>
 <|im_start|>user
@@ -55,4 +54,4 @@ You are a helpful AI-Assistant.<|im_end|>
       pass
     yield response
 
-gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B"],value="OpenHermes2-7B",label="Model")]).queue().launch(share=True)
+gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B"],value="OpenHermes2-7B",label="Model"),gr.Dropdown(["Default", "Alpaca","ChatML"],value="Default",label="Prompt Type")]).queue().launch(share=True)
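The signature change from response(message, history, model) to response(message, history, model, prompt_type) matches the new dropdown: gr.ChatInterface passes the current value of each component in additional_inputs as an extra positional argument after message and history, in the order the components are listed. A minimal standalone sketch of that wiring (the echo function here is hypothetical, not part of the repository):

import gradio as gr

def echo(message, history, model, prompt_type):
    # The two Dropdown values arrive as the third and fourth positional arguments.
    return "model=" + model + ", prompt_type=" + prompt_type + ", you said: " + message

gr.ChatInterface(
    echo,
    additional_inputs=[
        gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B"], value="OpenHermes2-7B", label="Model"),
        gr.Dropdown(["Default", "Alpaca", "ChatML"], value="Default", label="Prompt Type"),
    ],
).queue().launch()

Selecting "Default" keeps the per-model template chosen inside response, while "Alpaca" or "ChatML" overrides it.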
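The diff omits the middle of response (lines 28-53 of run.py), where the body built above is actually sent to the selected endpoint. As a point of reference only, the following is a minimal, hypothetical sketch of how such a request is commonly consumed, assuming the Space exposes a llama-cpp-python-style OpenAI-compatible /v1/completions route that emits "data: {...}" server-sent events when "stream": True is set; the helper name stream_completion is illustrative and not from the repository.

import json
import requests

def stream_completion(url, body, endstr="<|im_end|>"):
    # Illustrative helper (not from run.py): POST the completion request and
    # yield the accumulated text as streamed "data: {...}" chunks arrive.
    text = ""
    with requests.post(url, json=body, stream=True) as r:
        for raw in r.iter_lines():
            if not raw:
                continue
            line = raw.decode("utf-8")
            if line.startswith("data: "):
                line = line[len("data: "):]
            if line.strip() == "[DONE]":
                break
            try:
                chunk = json.loads(line)
            except json.JSONDecodeError:
                continue
            text += chunk["choices"][0].get("text", "")
            # Drop the ChatML end-of-turn marker once the model emits it.
            yield text.replace(endstr, "")

Inside response, such a helper would be used roughly as "for partial in stream_completion(url, body, endstr): yield partial", which is what lets the chat UI render the reply incrementally.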