from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama  # llama-cpp-python binding; swap this import if you use a different LLaMA library

app = FastAPI()

# Initialize the LLaMA model once at startup; llama.cpp expects a GGUF model file
model = Llama(model_path="path/to/llama-3.2-model.gguf")  # Set this to your local model path

class LlamaRequest(BaseModel):
    prompt: str

@app.post("/llama/")
def get_llama_response(request: LlamaRequest):
    # Plain 'def' (not 'async def') lets FastAPI run this blocking call in a worker thread
    try:
        # Llama objects are callable; the call returns a completion dict
        output = model(request.prompt, max_tokens=256)
        return {"response": output["choices"][0]["text"]}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# To run the API: uvicorn api:app --reload  (assumes this file is saved as api.py)
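
# Example client call, as a minimal sketch (assumes the server is running on the
# default http://127.0.0.1:8000 and that the 'requests' package is installed):
#
#   import requests
#   r = requests.post("http://127.0.0.1:8000/llama/", json={"prompt": "Hello"})
#   print(r.json()["response"])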