AFischer1985 committed
Commit e228074 · 1 Parent(s): d353085

Update run.py

Files changed (1)
  1. run.py +37 -69
run.py CHANGED
@@ -1,82 +1,50 @@
  #############################################################################
- # Title: Gradio Interface to AI hosted on Huggingface-Space
  # Author: Andreas Fischer
  # Date: October 7th, 2023
- # Last update: December 8th, 2023
  #############################################################################

  import gradio as gr
  import requests
- import random
  import json

- def specifications(message, model, prompt_type, verbose=False):
-     url="http://0.0.0.0:2600/v1/completions"
-     body=""
-
-     if(model=="Local"):
-         url="http://0.0.0.0:2600/v1/completions"
-         if(prompt_type=="Default"): prompt_type="ChatML (German)"
-     if(model=="SauerkrautLM-7B"):
-         url="https://SauerkrautLM-GGUF-API.hf.space/v1/completions"
-         if(prompt_type=="Default"): prompt_type="Vicuna (German)"
-     if(model=="WizardLM-13B"):
-         url="https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
-         if(prompt_type=="Default"): prompt_type="Vicuna"
-     if(model=="OpenHermes2-7B"):
-         url="https://AFischer1985-OpenHermes-2-GGUF-API.hf.space/v1/completions"
-         if(prompt_type=="Default"): prompt_type="ChatML"
-     if(model=="CollectiveCognition-7B"):
-         url="https://AFischer1985-CollectiveCognition-GGUF-API.hf.space/v1/completions"
-         if(prompt_type=="Default"): prompt_type="ChatML"
-
-     if(prompt_type=="ChatML"):
-         body={"prompt":"<|im_start|>system\nYou are a helpful AI-Assistant.<|im_end|>\n<|im_start|>user\n"+message+"<|im_end|>\n<|im_start|>assistant\n","max_tokens":1000,"stop":"<|im_end|>","echo":"False","stream":True}
-     if(prompt_type=="ChatML (German)"):
-         body={"prompt":"<|im_start|>system\nu bist ein großes Sprachmodell, das höflich und kompetent antwortet. Schreibe deine Gedanken Schritt für Schritt auf, um Probleme sinnvoll zu lösen.<|im_end|>\n<|im_start|>user\n"+message+"<|im_end|>\n<|im_start|>assistant\n","max_tokens":1000,"stop":"User:","echo":"False","stream":True}
-     if(prompt_type=="Alpaca"):
-         body={"prompt":"###Instruction:\n"+message+"\n\n###Response:\n","max_tokens":1000,"stop":"###","echo":"False","stream":True}
-     if(prompt_type=="Vicuna"):
-         body={"prompt":"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: "+message+" ASSISTANT:","max_tokens":1000,"stop":"USER:","echo":"False","stream":"True"}
-     if(prompt_type=="Vicuna (German)"):
-         body={"prompt":"Ein Chat zwischen einem Benutzer und einem KI-Assistenten. Der KI-Assistent gibt hilfreiche, detaillierte und höfliche Antworten.\nUser: "+message+"\nAssistant: ","max_tokens":1000,"stop":"User:","echo":"False","stream":True}
-     if(verbose==True):
-         print("model: "+model+"\n"+"URL: "+url+"\n"+"prompt_type: "+prompt_type+"\n"+"message: "+message+"\n"+"body: "+str(body)+"\n")
-     return([url,body,model,prompt_type])
-
- def response(message, history, model, prompt_type):
-     print(model)
-     [url,body,model,prompt_type]=specifications(message,model,prompt_type,verbose=True)
-     response=""
-     buffer=""
      print("URL: "+url)
      print("User: "+message+"\nAI: ")
-     for text in requests.post(url, json=body, stream=True):
-         text=text.decode('utf-8')
-         if(text.startswith(": ping -")==False): buffer=str(buffer)+str(text)
-         buffer=buffer.split('"finish_reason": null}]}')
-         if(len(buffer)==1):
-             buffer="".join(buffer)
-             pass
-         if(len(buffer)==2):
-             part=buffer[0]+'"finish_reason": null}]}'
-             if(part.lstrip('\n\r').startswith("data: ")): part=part.lstrip('\n\r').replace("data: ", "")
-             try:
-                 part = str(json.loads(part)["choices"][0]["text"])
-                 print(part, end="", flush=True)
-                 response=response+part
-                 buffer=""
-             except:
-                 pass
-         yield response
-
-
- gr.ChatInterface(response,additional_inputs=[
-     gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B","WizardLM-13B"],value="WizardLM-13B",label="Model"),
-     gr.Dropdown(["Default", "ChatML","ChatML (German)","Vicuna","Vicuna (German)","Alpaca"],value="Default",label="Prompt Type")
- ]).queue().launch() #share=False, server_name="0.0.0.0", server_port=7864)
-
- #import os
- #os.system('python3 -m llama_cpp.server --model "/home/af/gguf/models/SauerkrautLM-7b-HerO-q8_0.gguf" --host 0.0.0.0 --port 2600')

 
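For reference, the removed response generator reassembles the completion stream by hand: it buffers raw chunks and splits the buffer on the literal marker '"finish_reason": null}]}'. A more conventional line-based parse of the same OpenAI-style SSE stream is sketched below; the stream_completion name and the "[DONE]" end-of-stream sentinel are assumptions, not part of the original code.

import json
import requests

def stream_completion(url, body):
    # Hedged sketch: read the SSE stream line by line instead of splitting
    # the raw buffer on '"finish_reason": null}]}'.
    with requests.post(url, json=body, stream=True) as r:
        for line in r.iter_lines():
            if not line:
                continue  # keep-alive blank lines
            line = line.decode("utf-8")
            if not line.startswith("data: "):
                continue  # skips SSE comments such as ": ping - ..."
            payload = line[len("data: "):]
            if payload.strip() == "[DONE]":  # assumed end-of-stream sentinel
                break
            yield json.loads(payload)["choices"][0]["text"]
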
  #############################################################################
+ # Title: Gradio Interface to AI hosted by Huggingface
  # Author: Andreas Fischer
  # Date: October 7th, 2023
+ # Last update: December 19th, 2023
  #############################################################################

  import gradio as gr
  import requests
+ import time
  import json

+ def response(message, history, model):
+     if(model=="Default"): model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+     model_id = model
+     params={"max_length":500, "return_full_text":False} #, "stream":True
+     url = f"https://api-inference.huggingface.co/models/{model_id}"
+     correction=1
+     prompt=f"[INST] {message} [/INST]"
      print("URL: "+url)
+     print(params)
      print("User: "+message+"\nAI: ")
+     response=""
+     for text in requests.post(url, json={"inputs":prompt, "parameters":params}, stream=True):
+         text=text.decode('UTF-8')
+         print(text)
+         if(correction==3):
+             text='"}]'+text
+             correction=2
+         if(correction==1):
+             text=text.lstrip('[{"generated_text":"')
+             correction=2
+         if(text.endswith('"}]')):
+             text=text.rstrip('"}]')
+             correction=3
+         response=response+text
+         print(response)
+         time.sleep(0.2)
+         yield response
+
+ x=requests.get(f"https://api-inference.huggingface.co/framework/text-generation-inference")
+ x=[i["model_id"] for i in x.json()]
+ x.insert(0,"Default")
+ print(x)
+
+ gr.ChatInterface(
+     response,
+     additional_inputs=[gr.Dropdown(x,value="Default",label="Model")]).queue().launch(share=True) #False, server_name="0.0.0.0", server_port=7864)
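The correction state machine in the new code exists because the Inference API answer arrives as the raw bytes of one JSON document shaped like [{"generated_text": "..."}]: the first chunk carries the opening [{"generated_text":" framing and the last chunk the closing "}]. Note that str.lstrip and str.rstrip strip a character set rather than a prefix or suffix, so they can also remove leading or trailing characters that belong to the generated text itself. When token-by-token updates are not needed, a one-shot request sidesteps the framing entirely; a minimal sketch, where the complete helper name and the timeout value are assumptions:

import requests

def complete(message, model_id="mistralai/Mixtral-8x7B-Instruct-v0.1"):
    # Hedged sketch: non-streaming Inference API call; the response body is
    # a JSON list like [{"generated_text": "..."}].
    url = f"https://api-inference.huggingface.co/models/{model_id}"
    body = {"inputs": f"[INST] {message} [/INST]",
            "parameters": {"max_length": 500, "return_full_text": False}}
    r = requests.post(url, json=body, timeout=120)  # timeout is an assumption
    r.raise_for_status()
    return r.json()[0]["generated_text"]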
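The model dropdown is filled from the Inference API's listing of models served via text-generation-inference; the comprehension [i["model_id"] for i in x.json()] shows the endpoint returns a JSON list of objects with a model_id field. A guarded variant of that lookup, where the list_tgi_models name, the timeout, and the empty-list fallback are assumptions:

import requests

def list_tgi_models(default="Default"):
    # Hedged sketch: fetch the TGI model list; fall back to the default
    # entry alone if the request or the parse fails.
    url = "https://api-inference.huggingface.co/framework/text-generation-inference"
    try:
        entries = requests.get(url, timeout=10).json()
        models = sorted(e["model_id"] for e in entries)
    except (requests.RequestException, ValueError, KeyError, TypeError):
        models = []
    return [default] + models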