from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

# Load the GGUF weights. Llama(model_path=...) expects a local file, so the
# bare Hub repo id would fail; Llama.from_pretrained downloads the file
# first (this requires huggingface_hub to be installed). The "*.gguf" glob
# assumes the repo ships a single GGUF file.
llm = Llama.from_pretrained(
    repo_id="Anoop03031988/Phi-3-mini-4k-instruct-text-to-sql-GGUF",
    filename="*.gguf",
    n_ctx=4096,    # matches the model's 4k context window
    n_threads=2,   # CPU threads; tune to the host machine
)


class Validation(BaseModel):
    """Request body schema for /generate_response."""

    user_prompt: str
    system_prompt: str
    max_tokens: int = 1024
    temperature: float = 0.01


app = FastAPI()


@app.post("/generate_response")
async def generate_response(item: Validation):
    # Build the prompt in the Phi-3 instruct chat format; the Llama-3 header
    # tokens used originally (<|begin_of_text|>, <|start_header_id|>, ...)
    # are not special tokens for this Phi-3-based model.
    prompt = (
        f"<|system|>\n{item.system_prompt}<|end|>\n"
        f"<|user|>\n{item.user_prompt}<|end|>\n"
        f"<|assistant|>\n"
    )
    # echo=False returns only the completion (not the prompt back to the
    # caller); stop halts generation at Phi-3's end-of-turn token.
    output = llm(prompt, max_tokens=item.max_tokens,
                 temperature=item.temperature, stop=["<|end|>"], echo=False)
    return output["choices"][0]["text"]
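

# Alternative sketch: llama-cpp-python can apply the chat template stored in
# the GGUF metadata via create_chat_completion, instead of a hand-built
# prompt string. The /chat route below is illustrative, not part of the
# original app.
@app.post("/chat")
async def chat(item: Validation):
    output = llm.create_chat_completion(
        messages=[
            {"role": "system", "content": item.system_prompt},
            {"role": "user", "content": item.user_prompt},
        ],
        max_tokens=item.max_tokens,
        temperature=item.temperature,
    )
    return output["choices"][0]["message"]["content"]


# Minimal local entry point; port 7860 is an assumption here (the default
# exposed port on Hugging Face Spaces). Adjust to your deployment.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request once the server is up (illustrative payload):
#   curl -X POST http://localhost:7860/generate_response \
#        -H "Content-Type: application/json" \
#        -d '{"system_prompt": "Convert the question to SQL.", "user_prompt": "List all customers."}'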