from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

# Load the model from the specified GGUF file with basic configuration
llm = Llama(
    model_path="phi-3-mini-4k-instruct.Q4_K.gguf",  # Update the path as necessary
    n_ctx=4096,     # Context window size (Phi-3 mini supports 4k tokens)
    n_threads=2,    # Number of CPU threads to use for inference
)

# Pydantic model for request validation
class Validation(BaseModel):
    user_prompt: str
    system_prompt: str
    max_tokens: int = 1024      # Maximum number of tokens to generate
    temperature: float = 0.01   # Near-deterministic sampling by default

# FastAPI application initialization
app = FastAPI()

# Endpoint for generating responses
@app.post("/generate_response")
async def generate_response(item: Validation):
    # Build the complete prompt in the Phi-3 chat format:
    # system turn, user turn, then the assistant turn to be completed
    prompt = f"<|system|>\n{item.system_prompt}\n<|end|>\n<|user|>\n{item.user_prompt}\n<|end|>\n<|assistant|>"

    # Call the Llama model to generate a response
    output = llm(
        prompt,
        max_tokens=item.max_tokens,
        temperature=item.temperature,
        stop=["<|end|>"],  # Stop generating at Phi-3's end-of-turn tag
        echo=False,        # Return only the completion, not the echoed prompt
    )

    # Extract and return the generated text from the response
    return output['choices'][0]['text']
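
# --- Usage sketch ---
# A minimal, illustrative example of calling this endpoint; the file name
# app.py, host, and port below are assumptions, not part of the original.
# Start the server:
#   uvicorn app:app --host 0.0.0.0 --port 8000
# Then post a request, e.g. with the requests library:
#   import requests
#   response = requests.post(
#       "http://localhost:8000/generate_response",
#       json={
#           "user_prompt": "Explain what FastAPI is in one sentence.",
#           "system_prompt": "You are a helpful assistant.",
#       },
#   )
#   print(response.json())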