from llamaLLM import get_response  # local helper module wrapping the Llama model (assumed to sit next to this file)
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel  # data validation

app = FastAPI()


@app.get("/")  # route path assumed; the decorator was missing from the listing
async def read_main():
    return {"msg": "Hello from Llama this side !!!!"}


class Message(BaseModel):
    message: str


system_instruction = "you are a good chat model who has to act as a friend to the user."
# conversation history, shared across requests as module-level state
convers = [{"role": "system", "content": system_instruction}]


@app.post("/predict")  # route path assumed; the decorator was missing from the listing
async def predict(message: Message):
    global convers  # the handler replaces the module-level history below
    print(message)  # debug: log the incoming payload
    user_input = message.message
    if user_input.lower() in ["exit", "quit"]:
        return {"response": "Exiting the chatbot. Goodbye!"}
    print(len(convers))  # debug: current length of the conversation history
    # get_response is expected to return the model's reply and the updated history
    response, convers = get_response(user_input, convers)
    return {"response": response}