from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

# Load the GGUF model once at startup so every request reuses the same instance
llm = Llama(
    model_path="../Llama-3.2-3B-Instruct-Q8_0.gguf",
    n_ctx=4096,    # context window size in tokens
    n_threads=2,   # CPU threads used for inference
)


# Request body schema: the prompt plus optional generation parameters
class Validation(BaseModel):
    user_prompt: str
    max_tokens: int = 1024
    temperature: float = 0.01


app = FastAPI()


@app.post("/generate_response")
async def generate_response(item: Validation):
    # Run the completion; echo=False keeps the prompt out of the generated text
    output = llm(
        item.user_prompt,
        max_tokens=item.max_tokens,
        temperature=item.temperature,
        echo=False,
    )
    return output["choices"][0]["text"]
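
To try the endpoint, the app can be served with uvicorn (for example `uvicorn main:app --host 0.0.0.0 --port 8000`, assuming the file above is saved as main.py) and called with a small client. The sketch below uses the requests library and assumes the server is reachable at the default localhost:8000 address; only `user_prompt` is required, since `max_tokens` and `temperature` have server-side defaults.

import requests

# Minimal client for the /generate_response endpoint.
# Assumes the API is running locally on uvicorn's default port (8000).
payload = {
    "user_prompt": "Explain what a context window is in one sentence.",
    "max_tokens": 256,      # optional, server default is 1024
    "temperature": 0.01,    # optional, server default is 0.01
}

response = requests.post("http://localhost:8000/generate_response", json=payload)
response.raise_for_status()

# The endpoint returns the generated text as a JSON-encoded string
print(response.json())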