from fastapi import FastAPI
from pydantic import BaseModel

# llama-cpp-python provides the Llama class used to load and run GGUF models locally
from llama_cpp import Llama

# Model loading with specified path and configuration
llm = Llama(
    model_path="Meta-Llama-3-8B-Instruct.Q4_K_M.gguf",  # Update the path as necessary
    n_ctx=4096,       # Maximum number of tokens for context (input + output)
    n_threads=4,      # Number of CPU threads to use for inference
)

# Pydantic model describing the request body
class Validation(BaseModel):
    user_prompt: str    # The user's question or instruction
    system_prompt: str  # The system prompt that steers the model's behaviour
    max_tokens: int     # Maximum number of tokens to generate

# FastAPI application initialization
app = FastAPI()

# Endpoint for generating responses
@app.post("/generate_response")
async def generate_response(item: Validation):
    # Assemble the prompt following the Llama 3 Instruct chat template
    prompt = (
        "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
        f"{item.system_prompt}<|eot_id|>"
        "<|start_header_id|>user<|end_header_id|>\n\n"
        f"{item.user_prompt}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>\n\n"
    )
    
    # Run inference; stop at the end-of-turn token and return only the newly generated text
    output = llm(prompt, max_tokens=item.max_tokens, stop=["<|eot_id|>"], echo=False)
    
    # Extract and return the text from the response
    return output['choices'][0]['text']
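
# ---------------------------------------------------------------------------
# Usage sketch (assumptions: this module is saved as app.py, the GGUF file sits
# next to it, and the host/port below are only examples; adjust as needed).
#
# Start the API:
#   uvicorn app:app --host 0.0.0.0 --port 8000
#
# Query it with any HTTP client, e.g. curl:
#   curl -X POST http://localhost:8000/generate_response \
#        -H "Content-Type: application/json" \
#        -d '{"system_prompt": "You are a helpful assistant.",
#             "user_prompt": "Explain what a context window is.",
#             "max_tokens": 128}'
# ---------------------------------------------------------------------------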