AFischer1985 committed
Commit d353085 · 1 Parent(s): 1073cd9

Update run.py

Files changed (1): run.py +4 -1
run.py CHANGED
@@ -71,7 +71,10 @@ def response(message, history, model, prompt_type):
     yield response
 
 
-gr.ChatInterface(response,additional_inputs=[gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B","WizardLM-13B"],value="WizardLM-13B",label="Model"),gr.Dropdown(["Default", "ChatML","ChatML (German)","Vicuna","Vicuna (German)","Alpaca"],value="Default",label="Prompt Type")]).queue().launch(share=False, server_name="0.0.0.0", server_port=7864)
+gr.ChatInterface(response,additional_inputs=[
+  gr.Dropdown(["CollectiveCognition-7B", "OpenHermes2-7B","WizardLM-13B"],value="WizardLM-13B",label="Model"),
+  gr.Dropdown(["Default", "ChatML","ChatML (German)","Vicuna","Vicuna (German)","Alpaca"],value="Default",label="Prompt Type")
+]).queue().launch() #share=False, server_name="0.0.0.0", server_port=7864)
 
 #import os
 #os.system('python3 -m llama_cpp.server --model "/home/af/gguf/models/SauerkrautLM-7b-HerO-q8_0.gguf" --host 0.0.0.0 --port 2600')
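For readers without the full file: the hunk only shows the Gradio wiring at the tail of run.py. Below is a minimal sketch of what the `response` generator named in the hunk header might look like, assuming the commented-out llama_cpp.server command is serving an OpenAI-compatible completions endpoint on port 2600; the URL, request body, and prompt template are illustrative assumptions, not taken from the actual file.

```python
# Sketch only: endpoint, payload, and templates are assumptions, not from the original run.py.
import json
import requests

LLM_URL = "http://0.0.0.0:2600/v1/completions"  # assumed llama_cpp.server endpoint (see commented-out command)

def response(message, history, model, prompt_type):
    # Wrap the user message in the selected prompt template (simplified to one case;
    # the 'model' dropdown value is ignored in this sketch).
    if prompt_type.startswith("ChatML"):
        prompt = f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
    else:
        prompt = message
    payload = {"prompt": prompt, "max_tokens": 500, "stream": True}
    text = ""
    # Stream server-sent events and yield the growing answer so Gradio updates the chat live.
    with requests.post(LLM_URL, json=payload, stream=True) as r:
        for line in r.iter_lines():
            if not line or not line.startswith(b"data: ") or line == b"data: [DONE]":
                continue
            chunk = json.loads(line[len(b"data: "):])
            text += chunk["choices"][0].get("text", "")
            yield text
```

Gradio's ChatInterface passes each additional_inputs value as an extra positional argument to the callback, which is why `model` and `prompt_type` follow `message` and `history`. Dropping share/server_name/server_port from launch(), as this commit does, is a common choice on Hugging Face Spaces, where the platform supplies its own host and port; the commented-out arguments can be restored for a self-hosted run.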