Guhanselvam committed
Commit 1a1b05e · verified · 1 Parent(s): a4420b8

Update chatbot.py

Files changed (1)
  1. chatbot.py +25 -16
chatbot.py CHANGED
@@ -1,19 +1,28 @@
- # chatbot.py
- import requests
-
- class Chatbot:
-     def __init__(self, api_url: str):
-         self.api_url = api_url
-
-     def generate_response(self, user_message: str) -> str:
-         try:
-             response = requests.post(self.api_url, json={"prompt": user_message})
-             return response.json().get("response", "Error: Unable to get response.")
-         except Exception as e:
-             return f"Error: {str(e)}"
-
-     def auto_generate_response(self):
-         # This is where you can set up any logic or triggers for automatic responses
-         # For example, using a fixed prompt
-         default_prompt = "Hello, how can I assist you today?"
-         return self.generate_response(default_prompt)
+ # app.py
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from chatbot import Chatbot
+
+ # Initialize FastAPI
+ app = FastAPI()
+
+ # Initialize the chatbot connected to the LLaMA API
+ chatbot = Chatbot(api_url="http://localhost:8001/llama/")  # Adjust this if you change the port of api.py
+
+ class ChatRequest(BaseModel):
+     message: str
+
+ @app.post("/chat/")
+ async def chat(request: ChatRequest):
+     """
+     This endpoint could be called from another service if needed.
+     Automatically generates a response.
+     """
+     try:
+         # Automatically generate a response based on a default message or logic
+         bot_response = chatbot.auto_generate_response()
+         return {"response": bot_response}
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+ # To run the backend, use 'uvicorn app:app --reload' in the terminal.