imperialwool committed
Commit 7c1334f · verified · 1 Parent(s): 14f87da

Update gradio_app.py

Files changed (1)
  1. gradio_app.py +2 -2
gradio_app.py CHANGED
@@ -7,13 +7,13 @@ import psutil
 # Initing things
 print("! INITING LLAMA MODEL !")
 llm = Llama(model_path="./model.bin") # LLaMa model
-llama_model_name = "TheBloke/openchat_3.5-GGUF"
+llama_model_name = "NousResearch/Hermes-2-Pro-Mistral-7B-GGUF" # This is just for indication in "three dots menu"
 print("! INITING DONE !")
 
 # Preparing things to work
 title = "llama.cpp API"
 desc = '''<h1>Hello, world!</h1>
-This is showcase how to make own server with Llama2 model.<br>
+This is showcase how to make own server with Llama2 model using llama_cpp.<br>
 I'm using here 7b model just for example. Also here's only CPU power.<br>
 But you can use GPU power as well!<br><br>
 <h1>How to GPU?</h1>
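
The description in the diff points at GPU offloading ("How to GPU?") without showing it. A minimal sketch of the same initialization with GPU offload enabled, assuming llama-cpp-python; the `n_gpu_layers` value and the prompt are illustrative and not part of this commit:

```python
from llama_cpp import Llama

# Same local GGUF file as in the commit; n_gpu_layers=-1 offloads all layers
# to the GPU (0 keeps inference fully on the CPU, as in the Space).
llm = Llama(
    model_path="./model.bin",
    n_gpu_layers=-1,
)

# Hypothetical usage example, just to show the completion call shape.
out = llm("Q: What does llama.cpp do? A:", max_tokens=64)
print(out["choices"][0]["text"])
```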