toaster61 committed on
Commit f8dd638 · 1 Parent(s): 9c01d30
abort gradio
Browse files
- app_gradio.py +0 -45
- requirements.txt +1 -2
app_gradio.py DELETED
@@ -1,45 +0,0 @@
-import gradio as gr
-from llama_cpp import Llama
-
-llm = Llama(model_path="./model.bin")
-
-with open('system.prompt', 'r', encoding='utf-8') as f:
-    prompt = f.read()
-
-title = "Openbuddy LLama Api"
-desc = '''<h1>Hello, world!</h1>
-This is showcase how to make own server with OpenBuddy's model.<br>
-I'm using here 3b model just for example. Also here's only CPU power.<br>
-But you can use GPU power as well!<br><br>
-<h1>How to GPU?</h1>
-Change <code>`CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS`</code> in Dockerfile on <code>`CMAKE_ARGS="-DLLAMA_CUBLAS=on"`</code>. Also you can try <code>`DLLAMA_CLBLAST`</code>, <code>`DLLAMA_METAL`</code> or <code>`DLLAMA_METAL`</code>.<br>
-Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a> and <a href="https://www.gradio.app/">Gradio</a>.<br><br>
-<h1>How to test it on own machine?</h1>
-You can install Docker, build image and run it. I made <code>`run-docker.sh`</code> for ya. To stop container run <code>`docker ps`</code>, find name of container and run <code>`docker stop _dockerContainerName_`</code><br>
-Or you can once follow steps in Dockerfile and try it on your machine, not in Docker.<br><br>
-Also it can run with quart+uvicorn! Check the repo!'''
-
-def greet(request: str, max_tokens: int = 64, override_system_prompt: str = ""):
-    try:
-        system_prompt = override_system_prompt if override_system_prompt != "" else prompt
-        max_tokens = max_tokens if max_tokens > 0 and max_tokens < 256 else 64
-        userPrompt = system_prompt + "\n\nUser: " + request + "\nAssistant: "
-    except: return "ERROR 400: Not enough data"
-    try:
-        output = llm(userPrompt, max_tokens=max_tokens, stop=["User:", "\n"], echo=False)
-        print(output)
-        return output["choices"][0]["text"]
-    except Exception as e:
-        print(e)
-        return "ERROR 500: Server error. Check logs!!"
-
-demo = gr.Interface(
-    fn=greet,
-    inputs=[gr.Text("Hello, how are you?"), gr.Number(64), gr.Textbox()],
-    outputs=["text"],
-    description=desc,
-    title=title,
-    allow_flagging="never"
-).queue()
-if __name__ == "__main__":
-    demo.launch()
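This commit drops the Gradio front end while requirements.txt keeps quart and uvicorn, and the removed description itself notes the app "can run with quart+uvicorn". A minimal sketch of what that path could look like, assuming the same model.bin and system.prompt files; the file name (app.py), route name, and JSON field names are illustrative, not taken from the repo:

from quart import Quart, jsonify, request
from llama_cpp import Llama

app = Quart(__name__)
llm = Llama(model_path="./model.bin")  # same model file the deleted Gradio app loaded

with open("system.prompt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

@app.route("/completion", methods=["POST"])  # route name is an assumption
async def completion():
    data = await request.get_json()
    if not data or "request" not in data:
        return jsonify({"error": "Not enough data"}), 400
    max_tokens = int(data.get("max_tokens", 64))
    max_tokens = max_tokens if 0 < max_tokens < 256 else 64
    user_prompt = system_prompt + "\n\nUser: " + data["request"] + "\nAssistant: "
    # llama-cpp inference is blocking; fine for a sketch, offload it in a real server
    output = llm(user_prompt, max_tokens=max_tokens, stop=["User:", "\n"], echo=False)
    return jsonify({"text": output["choices"][0]["text"]})

Since Quart apps speak ASGI, this can be served with the uvicorn dependency that remains in requirements.txt, e.g. uvicorn app:app --host 0.0.0.0 --port 7860.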
requirements.txt CHANGED
@@ -1,3 +1,2 @@
 quart
-uvicorn
-gradio
+uvicorn
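The GPU advice in the removed description still applies independently of Gradio: llama-cpp-python is built for CPU by default and switches backends via CMake flags at install time. A hedged example of the corresponding build step, with the flag taken verbatim from the removed text (newer llama-cpp-python releases may expect different flag names):

CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python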