VishnuRamDebyez committed
Commit f3b0c13 · verified · 1 Parent(s): f77ca9b

Update app.py

Files changed (1):
  app.py  +6 -9
app.py CHANGED
@@ -1,20 +1,17 @@
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 import os
-from openai import OpenAI
+from groq import Groq
 from dotenv import load_dotenv
 from typing import List
 
-
+# Load environment variables
 load_dotenv()
 
 app = FastAPI(title="Debyez Chatbot API")
 
-# Initialize OpenAI client
-client = OpenAI(
-    base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.getenv("TOKEN")
-)
+# Initialize Groq client (Replace with your actual API key)
+client = Groq(api_key=os.getenv("GROQ_API_KEY"))
 
 class Message(BaseModel):
     role: str
@@ -45,7 +42,7 @@ async def chat_endpoint(request: ChatRequest):
     ]
 
     completion = client.chat.completions.create(
-        model="meta-llama/Meta-Llama-3-8B-Instruct",
+        model="llama3-8b-8192",  # Adjust model if needed
         messages=formatted_messages,
         temperature=0.5,
         max_tokens=3000,
@@ -61,4 +58,4 @@ async def root():
 
 @app.get("/health")
 async def health_check():
-    return {"status": "healthy"}
+    return {"status": "healthy"}
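For a quick sanity check of the new configuration, the same Groq call path can be exercised outside FastAPI. The sketch below is illustrative and not part of the commit: it reuses the model name and sampling parameters visible in the diff, assumes GROQ_API_KEY is available from the environment or a local .env file, and uses an arbitrary prompt and file name.

# sanity_check.py - standalone sketch of the Groq call used by the updated app.py
import os

from dotenv import load_dotenv
from groq import Groq

load_dotenv()  # pick up GROQ_API_KEY from a local .env file, if present

client = Groq(api_key=os.getenv("GROQ_API_KEY"))

completion = client.chat.completions.create(
    model="llama3-8b-8192",  # same model name as in the updated app.py
    messages=[{"role": "user", "content": "Reply with a one-line greeting."}],
    temperature=0.5,         # same sampling settings as app.py
    max_tokens=3000,
)

print(completion.choices[0].message.content)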