Upload 2 files
main.py
CHANGED
@@ -628,16 +628,24 @@ def chatgpt_model(item: ChatgptModel):
 @app.post("/ryuzaki/chatgpt3-turbo", response_model=SuccessResponse, responses={422: {"model": ErrorStatus}})
 def chatgpt3_turbo(item: Chatgpt3Texts):
     if item.is_openai_original:
-
-
-
-
-
-
-
-
-
-
+        try:
+            GPTbase = SOURCE_OPENAI_ACCESS_URL
+            response = OpenAiToken(api_key=item.api_key, api_base=GPTbase).chat_message_turbo(
+                query=item.query,
+                model="gpt-3.5-turbo",
+                is_stream=item.is_stream
+            )
+            answer = response[0]
+            continue_chat = response[1]
+            return SuccessResponse(
+                status="True",
+                randydev={
+                    "message": answer,
+                    "chat_history": continue_chat
+                }
+            )
+        except Exception as e:
+            return SuccessResponse(status="False", randydev={"message": f"Error responding: {e}"})
     else:
         url = "https://lexica.qewertyy.me/models"
         params = {"model_id": 5, "prompt": item.query}
@@ -656,21 +664,24 @@ def chatgpt4_turbo(
     item: OpenaiTexts,
     api_key: None = Depends(validate_api_key)
 ):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        GPTbase = SOURCE_OPENAI_ACCESS_URL
+        response = OpenAiToken(api_key=item.api_key, api_base=GPTbase).chat_message_turbo(
+            query=item.query,
+            model="gpt-4",
+            is_stream=item.is_stream
+        )
+        answer = response[0]
+        continue_chat = response[1]
+        return SuccessResponse(
+            status="True",
+            randydev={
+                "message": answer,
+                "chat_history": continue_chat
+            }
+        )
+    except Exception as e:
+        return SuccessResponse(status="False", randydev={"message": f"Error responding: {e}"})
 
 @app.post("/ryuzaki/google-ai", response_model=SuccessResponse, responses={422: {"model": ErrorStatus}})
 def v1beta3_google_ai(
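For reference, both updated handlers wrap the OpenAiToken call in a try/except and return the answer plus chat history under the randydev key of SuccessResponse. A minimal client sketch against the new /ryuzaki/chatgpt3-turbo route, assuming a locally running Space (BASE_URL is a placeholder) and the request fields defined in the models.py change below:

import requests

BASE_URL = "http://localhost:7860"  # placeholder; replace with the deployed Space URL

payload = {
    "query": "Hello, who are you?",
    "is_openai_original": True,   # take the OpenAiToken branch instead of the lexica fallback
    "api_key": "",                # now optional (defaults to "" in Chatgpt3Texts)
    "is_stream": False,
}

resp = requests.post(f"{BASE_URL}/ryuzaki/chatgpt3-turbo", json=payload, timeout=60)
data = resp.json()

# per the diff, the answer and chat history are returned inside "randydev"
print(data["randydev"]["message"])
print(data["randydev"]["chat_history"])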
models.py
CHANGED
@@ -134,12 +134,13 @@ class AnimeStyled(BaseModel):
 
 class OpenaiTexts(BaseModel):
     query: str
-    api_key: str
+    api_key: Optional[str] = ""
     is_stream: Optional[bool] = False
 
 class Chatgpt3Texts(BaseModel):
     query: str
-    api_key: str
+    api_key: Optional[str] = ""
+    is_stream: Optional[bool] = False
     is_openai_original: Optional[bool] = False
 
 class TextCustom(BaseModel):
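With these defaults, both request models now validate without an explicit api_key. A quick standalone check of the new Chatgpt3Texts behavior, written as a sketch that mirrors the changed fields above:

from typing import Optional
from pydantic import BaseModel

class Chatgpt3Texts(BaseModel):
    query: str
    api_key: Optional[str] = ""
    is_stream: Optional[bool] = False
    is_openai_original: Optional[bool] = False

# api_key, is_stream, and is_openai_original can all be omitted by the client
item = Chatgpt3Texts(query="Hello")
print(item.api_key)              # ""
print(item.is_stream)            # False
print(item.is_openai_original)   # False -> requests fall through to the lexica branch by default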