from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from modules.pmbl import PMBL
import torch

print(f"CUDA available: {torch.cuda.is_available()}")
print(f"CUDA device count: {torch.cuda.device_count()}")
if torch.cuda.is_available():
    print(f"CUDA device name: {torch.cuda.get_device_name(0)}")

# Disable the interactive API docs; the app serves only its own UI and endpoints.
app = FastAPI(docs_url=None, redoc_url=None)

app.mount("/static", StaticFiles(directory="static"), name="static")
app.mount("/templates", StaticFiles(directory="templates"), name="templates")

# Load the quantized PMB-7b model, offloading 50 layers to the GPU.
pmbl = PMBL("./PMB-7b.Q6_K.gguf", gpu_layers=50)

@app.head("/")
@app.get("/")
def index() -> HTMLResponse:
    """Serve the chat UI."""
    with open("templates/index.html") as f:
        return HTMLResponse(content=f.read())

@app.post("/chat")
async def chat(request: Request):
    try:
        data = await request.json()
        user_input = data["user_input"]
        mode = data["mode"]
        history = pmbl.get_chat_history(mode, user_input)
        response_generator = pmbl.generate_response(user_input, history, mode)
        return StreamingResponse(response_generator, media_type="text/plain")
    except Exception as e:
        print(f"[SYSTEM] Error: {str(e)}")
        return {"error": str(e)}

@app.post("/sleep")
async def sleep():
    try:
        pmbl.sleep_mode()
        return {"message": "Sleep mode completed successfully"}
    except Exception as e:
        print(f"[SYSTEM] Error: {str(e)}")
        return {"error": str(e)}