Upload 2 files
main.py
CHANGED
@@ -27,6 +27,7 @@ import shutil
 import random
 import tempfile
 import io
+import openai
 from io import BytesIO
 from datetime import datetime as dt
 from dotenv import load_dotenv
@@ -96,6 +97,7 @@ SOURCE_TIKTOK_TECH_URL = os.environ["SOURCE_TIKTOK_TECH_URL"]
 SOURCE_WHAT_GAY_URL = os.environ["SOURCE_WHAT_GAY_URL"]
 SOURCE_ASSISTANT_GOOGLE_AI = os.environ["SOURCE_ASSISTANT_GOOGLE_AI"]
 SOURCE_MONITOR_URL = os.environ["SOURCE_MONITOR_URL"]
+SOURCE_OPENAI_ACCESS_URL = os.environ["SOURCE_OPENAI_ACCESS_URL"]
 
 # api keys
 REVERSE_IMAGE_API = os.environ["REVERSE_IMAGE_API"]
@@ -572,7 +574,6 @@ def Anime_Styled(
         return SuccessResponse(status="True", randydev={"data": encoded_string})
     else:
         return SuccessResponse(status="False", randydev={"data": "Not found image data"})
-
 
 @app.post("/ryuzaki/unsplash")
 def image_unsplash(item: GetImageUnsplash):
@@ -610,9 +611,7 @@ def chatgpt_model(item: ChatgptModel):
         answer = check_response.get("content")
         return SuccessResponse(
             status="True",
-            randydev={
-                "message": answer
-            }
+            randydev={"message": answer}
         )
     else:
         params = {"model_id": 5, "prompt": item.query}
@@ -623,27 +622,64 @@ def chatgpt_model(item: ChatgptModel):
         answer = check_response.get("content")
         return SuccessResponse(
             status="True",
-            randydev={
-                "message": answer
-            }
+            randydev={"message": answer}
         )
 
-
-@app.post("/ryuzaki/chatgpt3-turbo", response_model=SuccessResponse, responses={422: {"model": ErrorStatus}})
-def chatgpt3_turbo(item:
-)
+@app.post("/ryuzaki/chatgpt3-turbo", response_model=SuccessResponse, responses={422: {"model": ErrorStatus}})
+def chatgpt3_turbo(item: Chatgpt3Texts):
+    if item.is_openai_original:
+        openai.api_key = item.api_key
+        openai.api_base = SOURCE_OPENAI_ACCESS_URL
+        chat_completion = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[{"role": "user", "content": item.query}],
+            stream=True
+        )
+        if isinstance(chat_completion, dict):
+            print(chat_completion.choices[0].message.content)
+        else:
+            for token in chat_completion:
+                content = token["choices"][0]["delta"].get("content")
+                if content is not None:
+                    return SuccessResponse(
+                        status="True",
+                        randydev={"message": content}
+                    )
+    else:
+        url = "https://lexica.qewertyy.me/models"
+        params = {"model_id": 5, "prompt": item.query}
+        response = requests.post(url, params=params)
+        if response.status_code != 200:
+            return f"Error status: {response.status_code}"
+        check_response = response.json()
+        answer = check_response.get("content")
+        return SuccessResponse(
+            status="True",
+            randydev={"message": answer}
+        )
+
+@app.post("/ryuzaki/chatgpt4-turbo", response_model=SuccessResponse, responses={422: {"model": ErrorStatus}})
+def chatgpt4_turbo(
+    item: OpenaiTexts,
+    api_key: None = Depends(validate_api_key)
+):
+    openai.api_key = item.api_key
+    openai.api_base = SOURCE_OPENAI_ACCESS_URL
+    chat_completion = openai.ChatCompletion.create(
+        model="gpt-4-turbo",
+        messages=[{"role": "user", "content": item.query}],
+        stream=True
+    )
+    if isinstance(chat_completion, dict):
+        print(chat_completion.choices[0].message.content)
+    else:
+        for token in chat_completion:
+            content = token["choices"][0]["delta"].get("content")
+            if content is not None:
+                return SuccessResponse(
+                    status="True",
+                    randydev={"message": content}
+                )
 
 @app.post("/ryuzaki/google-ai", response_model=SuccessResponse, responses={422: {"model": ErrorStatus}})
 def v1beta3_google_ai(
models.py
CHANGED
@@ -132,5 +132,14 @@ class TranslateCustom(BaseModel):
 class AnimeStyled(BaseModel):
     query: str
 
+class OpenaiTexts(BaseModel):
+    query: str
+    api_key: str
+
+class Chatgpt3Texts(BaseModel):
+    query: str
+    api_key: Optional[str] = None
+    is_openai_original: Optional[bool] = False
+
 class TextCustom(BaseModel):
     query: str