subhrajit mohanty committed
Commit · cfc1102
1 Parent(s): fc97dd8
Update app.py

app.py CHANGED
@@ -1,5 +1,6 @@
 import openai
 from fastapi import FastAPI, HTTPException
+from fastapi.responses import StreamingResponse
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel
 from fastapi import FastAPI, HTTPException, Depends, Request, Response
@@ -52,12 +53,24 @@ class RefToken(BaseModel):
     expiry_date: str
     ref_key: str
 
+def get_openai_generator(prompt: str):
+    openai_stream = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": prompt}],
+        temperature=0.0,
+        stream=True,
+    )
+    for event in openai_stream:
+        if "content" in event["choices"][0].delta:
+            current_response = event["choices"][0].delta.content
+            yield "data: " + current_response + "\n\n"
+
 @app.get("/")
 async def base_url():
     try:
         return {
-            "Please Check the documentation here": "https://huggingface.co/spaces/
-            "Swagger UI" : "https://
+            "Please Check the documentation here": "https://huggingface.co/spaces/subhrajit-katonic/stream-chatapi/blob/main/README.md",
+            "Swagger UI" : "https://subhrajit-katonic-stream-chatapi.hf.space/docs"
         }
     except Exception as e:
         raise HTTPException(status_code=500, detail="An error occurred while processing the request." + str(e))
@@ -86,35 +99,6 @@ async def chat(chat_input: ChatInput, token: str = Depends(verify_token)):
     prompt = f"User: {chat_input.message}\nAI:"
     model_name = chat_input.model_name
     try:
-
-        response = openai.Completion.create(
-            engine="text-davinci-002",
-            prompt=prompt,
-            max_tokens=50,
-            n=1,
-            stop=None,
-            temperature=0.7,
-        )
-        message = response.choices[0].text.strip()
-        usages = response["usage"]
-
-        if model_name == "gpt-3.5-turbo":
-            response = openai.ChatCompletion.create(
-                model="gpt-3.5-turbo",
-                messages=[
-                    {"role": "system", "content": prompt}
-                ])
-
-            message = response.choices[0]["message"]["content"]
-            usages = response["usage"]
-
-        if model_name == "":
-            message = "Plase select the model"
-            usages = ""
-
-        return {
-            "message": message,
-            "usages" : usages
-        }
+        return StreamingResponse(get_openai_generator(prompt), media_type='text/event-stream')
     except Exception as e:
         raise HTTPException(status_code=500, detail="An error occurred while processing the request." + str(e))
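
With this change the chat endpoint replies with an event-stream body instead of a single JSON object, so callers have to read it incrementally. Below is a minimal client sketch, not part of this repository: the /chat path, the Space host, and the "token" header name are assumptions (the diff shows only the handler body and its verify_token dependency), while the message and model_name fields mirror the ChatInput usage above.

# Client-side sketch (illustrative only): stream tokens from the updated endpoint.
# Assumed: the handler is mounted at POST /chat on the Space host, and
# verify_token reads a "token" header; adjust these to match the real app.
import requests

payload = {"message": "Hello, who are you?", "model_name": "gpt-3.5-turbo"}

with requests.post(
    "https://subhrajit-katonic-stream-chatapi.hf.space/chat",  # assumed route
    json=payload,
    headers={"token": "YOUR_TOKEN"},  # assumed header consumed by verify_token
    stream=True,  # keep the connection open and read the body as it arrives
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        # get_openai_generator frames each chunk as "data: <text>" plus a blank line
        if line and line.startswith("data: "):
            print(line[len("data: "):], end="", flush=True)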
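Two side effects of the switch are worth noting. The per-request usage figures that the old non-streaming code returned are no longer available, since the streamed chunks from this version of the API do not include a usage field. And because the body is produced by the generator after the response has started, an exception raised inside get_openai_generator reaches the client as a truncated stream rather than as the 500 error constructed in the handler's except block.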