# --- Extraction residue (HuggingFace Spaces file-page chrome, not source code) ---
# Space status: Sleeping · File size: 796 Bytes
# Commit hashes: 559ea97 2a813c3 536efdb b995a3b 6522af3
from quart import Quart, request
from llama_cpp import Llama
# Application instance for this single-module server.
app = Quart(__name__)

# The system prompt is loaded once at import time and reused for every request.
with open('system.prompt', 'r', encoding='utf-8') as prompt_file:
    prompt = prompt_file.read()
@app.post("/request")
async def echo():
try:
data = await request.get_json()
if data.get("max_tokens") != None and data.get("max_tokens") > 500: data['max_tokens'] = 500
userPrompt = prompt + "\n\nUser: " + data['request'] + "\nAssistant: "
except: return {"error": "Not enough data"}, 400
return {"output": output}
@app.get("/")
async def get():
return '''<h1>Hello, world!</h1>
This is showcase how to make own server with OpenBuddy's model.<br>
I'm using here 3b model just for example. Also here's only CPU power.<br>
But you can use GPU power as well!<br>
<br>
<h1>How to GPU?</h1>
''' |