Update main.py
main.py CHANGED
@@ -1,28 +1,38 @@
 from fastapi import FastAPI
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
+from typing import List, Dict
 from llama_cpp import Llama
 
-#
-llm = Llama(
-
-
-
+# Load the Llama model with the specified path and configuration
+llm = Llama.from_pretrained(
+    repo_id="bartowski/Llama-3.2-3B-Instruct-GGUF",  # Replace with the actual model repository ID
+    filename="Llama-3.2-3B-Instruct-Q8_0.gguf",  # Replace with your actual model filename if necessary
+    n_ctx=4096,
+    n_threads=2,
 )
 
-# Pydantic
+# Define a Pydantic model for request validation
+class Message(BaseModel):
+    role: str  # "user" or "assistant"
+    content: str  # The actual message content
+
 class Validation(BaseModel):
-
-    max_tokens: int = 1024
-    temperature: float = 0.01
+    messages: List[Message] = Field(default_factory=list)  # List of previous messages in the conversation
+    max_tokens: int = 1024  # Maximum tokens for the response
+    temperature: float = 0.01  # Model response temperature for creativity
 
-# FastAPI application
+# Initialize the FastAPI application
 app = FastAPI()
 
-#
+# Define the endpoint for generating responses
 @app.post("/generate_response")
 async def generate_response(item: Validation):
-    #
-
-
-
-
+    # Generate a response using the Llama model with the chat history
+    response = llm.create_chat_completion(
+        messages=[{"role": msg.role, "content": msg.content} for msg in item.messages],
+        max_tokens=item.max_tokens,
+        temperature=item.temperature
+    )
+
+    # Extract and return the response text
+    return {"response": response['choices'][0]['message']['content']}
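A minimal client sketch for exercising the updated endpoint, assuming the file above is saved as main.py, served locally with "uvicorn main:app --port 8000", and that the requests package is installed; the host, port, and example prompt are placeholders, not part of the diff:

# Hypothetical client for the /generate_response endpoint defined in main.py
import requests

payload = {
    "messages": [
        {"role": "user", "content": "Give me one sentence about llamas."}
    ],
    "max_tokens": 256,
    "temperature": 0.01,
}

# POST the chat history; the server replies with the generated text
resp = requests.post("http://localhost:8000/generate_response", json=payload)
resp.raise_for_status()
print(resp.json()["response"])

For a multi-turn conversation, the client is expected to append each assistant reply to the messages list before sending the next request, since the endpoint itself keeps no state.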