from fastapi import FastAPI, HTTPException
from llama_cpp import Llama

app = FastAPI()

# Load the TinyLlama chat model once at startup so every request reuses the same instance.
llm = Llama(model_path="./tinyllama-1.1b-chat.gguf")

@app.post("/llm")
async def stream(item: dict):
    if "prompt" not in item:
        # Return a proper HTTP 400 instead of letting an unhandled ValueError bubble up.
        raise HTTPException(status_code=400, detail="prompt is required")

    # Wrap the user prompt in the TinyLlama chat template.
    prompt = (
        "<|system|>You are a helpful assistant</s>"
        "<|user|>" + item["prompt"] + "</s>"
        "<|assistant|>"
    )
    temperatura = item.get("temperatura", 0.2)
    max_tokens = item.get("max_tokens", 512)

    return llm(prompt, max_tokens=max_tokens, temperature=temperatura)
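
# Example request, a minimal sketch: it assumes the app is served locally with
# `uvicorn main:app --port 8000` (the module name `main` and the port are
# hypothetical), and that the response follows llama-cpp-python's completion
# format, a dict with a "choices" list:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:8000/llm",
#       json={
#           "prompt": "Explain FastAPI in one sentence.",
#           "temperatura": 0.7,   # optional, defaults to 0.2
#           "max_tokens": 128,    # optional, defaults to 512
#       },
#   )
#   print(resp.json()["choices"][0]["text"])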