AFischer1985 committed on
Commit
1073cd9
·
1 Parent(s): caaa800

Update run.py

Browse files
Files changed (1) hide show
  1. run.py +2 -2
run.py CHANGED
@@ -10,7 +10,7 @@ import requests
10
  import random
11
  import json
12
 
13
- def specifications(message, model,prompt_type,verbose=False):
14
  url="http://0.0.0.0:2600/v1/completions"
15
  body=""
16
 
@@ -71,7 +71,7 @@ def response(message, history, model, prompt_type):
71
  yield response
72
 
73
 
74
- gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["Local","CollectiveCognition-7B", "OpenHermes2-7B","WizardLM-13B"],value="WizardLM-13B",label="Model"),gr.Dropdown(["Default", "ChatML","ChatML (German)","Vicuna","Vicuna (German)","Alpaca"],value="Default",label="Prompt Type")]).queue().launch(share=False, server_name="0.0.0.0", server_port=7864)
75
 
76
  #import os
77
  #os.system('python3 -m llama_cpp.server --model "/home/af/gguf/models/SauerkrautLM-7b-HerO-q8_0.gguf" --host 0.0.0.0 --port 2600')
 
10
  import random
11
  import json
12
 
13
+ def specifications(message, model, prompt_type, verbose=False):
14
  url="http://0.0.0.0:2600/v1/completions"
15
  body=""
16
 
 
71
  yield response
72
 
73
 
74
+ gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B","WizardLM-13B"],value="WizardLM-13B",label="Model"),gr.Dropdown(["Default", "ChatML","ChatML (German)","Vicuna","Vicuna (German)","Alpaca"],value="Default",label="Prompt Type")]).queue().launch(share=False, server_name="0.0.0.0", server_port=7864)
75
 
76
  #import os
77
  #os.system('python3 -m llama_cpp.server --model "/home/af/gguf/models/SauerkrautLM-7b-HerO-q8_0.gguf" --host 0.0.0.0 --port 2600')