from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

# Load the quantized GGUF model once at startup; reloading it per
# request would be prohibitively slow.
llm = Llama(
    model_path="phi-3-mini-4k-instruct-text-to-sql.Q4_K.gguf",
    n_ctx=4096,   # context window; matches the model's 4k context length
    n_threads=2,  # CPU threads used for inference
)

class Validation(BaseModel):
    # Request body for /generate_response.
    user_prompt: str
    max_tokens: int = 1024
    temperature: float = 0.01  # near-greedy sampling for deterministic SQL output

app = FastAPI()

@app.post("/generate_response")
async def generate_response(item: Validation):
    # llama.cpp inference is blocking, so concurrent requests are
    # effectively served one at a time.
    output = llm(
        item.user_prompt,
        max_tokens=item.max_tokens,
        temperature=item.temperature,
        echo=False,  # return only the completion, not the echoed prompt
    )
    # llama-cpp-python returns an OpenAI-style completion dict.
    return output['choices'][0]['text']
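
# A minimal way to run and exercise the service, as a sketch: the module
# name (main.py), host, and port below are assumptions, not part of the
# original listing.
if __name__ == "__main__":
    import uvicorn

    # Equivalent to: uvicorn main:app --host 0.0.0.0 --port 8000
    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request once the server is running (the prompt is a placeholder;
# max_tokens and temperature fall back to the defaults in Validation):
#   curl -X POST http://localhost:8000/generate_response \
#        -H "Content-Type: application/json" \
#        -d '{"user_prompt": "Translate to SQL: count users created last week."}'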