Create main.py
main.py
ADDED
@@ -0,0 +1,816 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2023 (c) Randy W @xtdevs, @xtsea
#
# from : https://github.com/TeamKillerX
# Channel : @RendyProjects
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import requests
import json
import base64
import re
import uvicorn
import os
import shutil
import random
import g4f
import tempfile
import io
from io import BytesIO
from datetime import datetime as dt
from dotenv import load_dotenv
from bs4 import BeautifulSoup

from typing import Annotated, Union

from pydantic import BaseModel
from base64 import b64decode, b64decode as kc
from random import choice
from gpytranslate import SyncTranslator
from httpx import AsyncClient
from telegraph import Telegraph, upload_file
from pathlib import Path
from serpapi import GoogleSearch

from fastapi import (
    Body,
    Depends,
    FastAPI,
    File,
    Header,
    HTTPException,
    Query,
    Request,
    UploadFile,
    status,
)
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi.responses import FileResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates

from RyuzakiLib.hackertools.chatgpt import RendyDevChat
from RyuzakiLib.hackertools.openai import OpenAiToken
from RyuzakiLib.mental import BadWordsList

import logging
import database as db
import functions as code

logging.basicConfig(level=logging.ERROR)

# I DON'T KNOW LIKE THIS HACKER
load_dotenv()
REVERSE_IMAGE_API = os.environ["REVERSE_IMAGE_API"]
OCR_API_KEY = os.environ["OCR_API_KEY"]
ONLY_DEVELOPER_API_KEYS = os.environ["ONLY_DEVELOPER_API_KEYS"]
HUGGING_TOKEN = os.environ["HUGGING_TOKEN"]
SOURCE_UNSPLASH_URL = os.environ["SOURCE_UNSPLASH_URL"]
SOURCE_OCR_URL = os.environ["SOURCE_OCR_URL"]
SOURCE_ALPHA_URL = os.environ["SOURCE_ALPHA_URL"]
SOURCR_WAIFU_URL = os.environ["SOURCE_ALPHA_URL"]
SOURCR_TIKTOK_WTF_URL = os.environ["SOURCR_TIKTOK_WTF_URL"]
SOURCR_TIKTOK_TECH_URL = os.environ["SOURCR_TIKTOK_TECH_URL"]
DEVELOPER_ID = os.environ["DEVELOPER_ID"]

description = """
- Ryuzaki Library: [Library Here](https://github.com/TeamKillerX/RyuzakiLib)

• Developed by [@xtdevs](https://t.me/xtdevs)
"""

app = FastAPI(
    title="RyuzakiLib API",
    description=description,
    version="1.3.1",
    terms_of_service="Use It Only For Personal Project Else I Need To Delete The Api",
    contact={
        "name": "RyuzakiLib",
        "url": "https://t.me/xtdevs",
        "email": "[email protected]",
    },
    docs_url="/"
)

def validate_api_key(api_key: str = Header(...)):
    USERS_API_KEYS = db.get_all_api_keys()
    if api_key not in USERS_API_KEYS:
        raise HTTPException(status_code=401, detail="Invalid API key")

def validate_api_key_only_devs(api_key: str = Header(...)):
    if api_key not in ONLY_DEVELOPER_API_KEYS:
        raise HTTPException(status_code=401, detail="Invalid API key")
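
# Example client call (a minimal sketch, not part of the service itself): FastAPI
# exposes the `api_key: str = Header(...)` parameter as an "api-key" HTTP header,
# so a key stored via db.get_all_api_keys() could be used roughly like this
# (host, port and the key value are assumptions):
#
#     import requests
#
#     resp = requests.get(
#         "http://localhost:8000/ryuzaki/sibyl",
#         params={"user_id": 123456789},
#         headers={"api-key": "<your-api-key>"},
#     )
#     print(resp.json())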

@app.get("/ryuzaki/getbanlist")
def sibyl_get_all_banlist():
    banned_users = db.get_all_banned()
    return {
        "status": "True",
        "randydev": {
            "results": banned_users
        }
    }

@app.get("/ryuzaki/blacklist-words")
def blacklist_words():
    try:
        BLACKLIST_WORDS = BadWordsList()
        results_all = BLACKLIST_WORDS.banned_by_google(file_txt="banned_by_google.txt", storage=True)
        return {"status": "true", "results": results_all}
    except Exception as e:
        return {"status": "false", "message": f"Internal server error: {str(e)}"}

@app.delete("/ryuzaki/sibyldel")
def sibyl_system_delete(
    user_id: int = Query(..., description="User ID in query parameter only developer"),
    api_key: None = Depends(validate_api_key_only_devs)
):
    try:
        _, _, _, _, sibyl_user_id = db.get_sibyl_system_banned(user_id)

        if sibyl_user_id:
            db.remove_sibyl_system_banned(user_id)
            return {"status": "true", "message": f"Successfully removed {user_id} from the Sibyl ban list."}
        else:
            return {"status": "false", "message": "Not found user"}
    except Exception as e:
        return {"status": "false", "message": f"Internal server error: {str(e)}"}

@app.post("/ryuzaki/sibylban")
def sibyl_system_ban(
    user_id: int = Query(..., description="User ID in query parameter"),
    reason: str = Query(..., description="Reason in query parameter"),
    api_key: None = Depends(validate_api_key)
):
    if user_id != int(DEVELOPER_ID):
        return {"status": "false", "message": "Only Developer"}

    try:
        date_joined = str(dt.now())
        sibyl_ban = random.choice(db.RAMDOM_STATUS)
        _, _, is_banned, _, sibyl_user_id = db.get_sibyl_system_banned(user_id)

        if sibyl_user_id and is_banned:
            return {"status": "false", "message": "User is already banned"}

        db.new_sibyl_system_banned(user_id, sibyl_ban, reason, date_joined)

        return {
            "status": "true",
            "randydev": {
                "user_id": user_id,
                "sibyl_name": sibyl_ban,
                "reason": reason,
                "date_joined": date_joined,
                "message": f"Successfully banned {user_id} from the Sibyl ban list."
            }
        }
    except Exception as e:
        logging.error(f"Error in sibyl_system_ban: {e}")
        return {"status": "false", "message": "Internal server error"}

@app.get("/ryuzaki/sibyl")
def sibyl_system(
    user_id: int = Query(..., description="User ID in query parameter"),
    api_key: None = Depends(validate_api_key)
):
    sibyl_name, reason, is_banned, date_joined, sibyl_user_id = db.get_sibyl_system_banned(user_id)
    if sibyl_name and reason and is_banned and date_joined and sibyl_user_id:
        return {
            "status": "true",
            "randydev": {
                "sibyl_name": sibyl_name,
                "reason": reason,
                "is_banned": is_banned,
                "date_joined": date_joined,
                "sibyl_user_id": sibyl_user_id
            }
        }
    else:
        return {"status": "false", "message": "Not Found User"}

@app.get("/ryuzaki/ai")
def ryuzaki_ai(
    text: str = Query(..., description="text in query parameter"),
    api_key: None = Depends(validate_api_key)
):
    try:
        response_data = code.ryuzaki_ai_text(text)

        if isinstance(response_data, list) and len(response_data) > 0:
            first_result = response_data[0]
            if "generated_text" in first_result:
                message = first_result["generated_text"]
                return {
                    "status": "true",
                    "randydev": {
                        "ryuzaki_text": message
                    }
                }

        return {"status": "false", "message": "Invalid response format"}

    except Exception as e:
        return {"status": "false", "message": f"error: {e}"}

@app.get("/ryuzaki/unsplash")
async def get_image_unsplash(query: str, size: str="500x500"):
    url = SOURCE_UNSPLASH_URL
    image_url = f"{url}/?{query}/{size}"

    try:
        response = requests.get(image_url)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=500, detail=f"Error fetching image: {e}")

    return StreamingResponse(BytesIO(response.content), media_type="image/jpeg")
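
# Usage sketch (assumes a local instance on port 8000): this route streams raw JPEG
# bytes rather than JSON, so a client saves `response.content` directly:
#
#     import requests
#
#     r = requests.get(
#         "http://localhost:8000/ryuzaki/unsplash",
#         params={"query": "mountains", "size": "500x500"},
#     )
#     with open("unsplash.jpg", "wb") as f:
#         f.write(r.content)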

@app.get("/ryuzaki/reverse")
def google_reverse(
    engine: str="google_reverse_image",
    image_url: str=None,
    language: str="en",
    google_lang: str="us",
    api_key: None = Depends(validate_api_key)
):
    params = {
        "api_key": REVERSE_IMAGE_API,
        "engine": engine,
        "image_url": image_url,
        "hl": language,
        "gl": google_lang
    }
    try:
        search = GoogleSearch(params)
        results = search.get_dict()
        link = results["search_metadata"]["google_reverse_image_url"]
        total_time_taken = results["search_metadata"]["total_time_taken"]
        create_at = results["search_metadata"]["created_at"]
        processed_at = results["search_metadata"]["processed_at"]
        return {
            "status": "true",
            "randydev": {
                "link": link,
                "total_time_taken": total_time_taken,
                "create_at": create_at,
                "processed_at": processed_at
            }
        }
    except Exception as e:
        return {"status": "false", "message": f"Error {e}"}

@app.get("/ryuzaki/ocr")
def ocr_space_url(
    url: str = Query(..., description="URL in query parameter"),
    overlay: bool=False,
    language: str = Query("eng", description="Language in query parameter"),
    api_key: None = Depends(validate_api_key)
):
    payload = {
        "url": url,
        "isOverlayRequired": overlay,
        "apikey": OCR_API_KEY,
        "language": language
    }
    try:
        response = requests.post(SOURCE_OCR_URL, data=payload)
        response.raise_for_status()
        test_url = response.content.decode()
    except requests.exceptions.RequestException as e:
        return f"Error: {str(e)}"
    try:
        parsed_response = json.loads(test_url)
        if "ParsedResults" in parsed_response and len(parsed_response["ParsedResults"]) > 0:
            return {
                "status": "true",
                "randydev": {
                    "text": parsed_response["ParsedResults"][0]["ParsedText"]
                }
            }
        else:
            return {"status": "false", "message": "Error response."}
    except (json.JSONDecodeError, KeyError):
        return "Error parsing the OCR response."
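
# Usage sketch (host, port and key are placeholders): the OCR route takes the image
# URL and language as query parameters and requires the "api-key" header:
#
#     import requests
#
#     r = requests.get(
#         "http://localhost:8000/ryuzaki/ocr",
#         params={"url": "https://example.com/sample.png", "language": "eng"},
#         headers={"api-key": "<your-api-key>"},
#     )
#     print(r.json())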

@app.get("/ryuzaki/chatgpt4")
def chatgpt4_support(
    query: str=None,
    api_key: None = Depends(validate_api_key)
):
    try:
        response = g4f.ChatCompletion.create(
            model=g4f.models.gpt_4,
            messages=[{"role": "user", "content": query}],
        )
        return {
            "status": "true",
            "randydev": {
                "message": response
            }
        }
    except:
        return {"status": "false", "message": "Error response."}

@app.post("/ryuzaki/chatgpt-model")
def chatgpt_model(
    query: str=None,
    model_id: int=1,
    is_models: bool=True
):
    try:
        response = RendyDevChat(query).get_response_model(model_id=model_id, is_models=is_models)
        return {
            "status": "true",
            "randydev": {
                "message": response
            }
        }
    except:
        return {"status": "false", "message": "Error response."}

async def get_data(username):
    base_msg = ""
    async with AsyncClient() as gpx:
        req = (await gpx.get(f"https://api.github.com/users/{username}")).json()
        try:
            avatar = req["avatar_url"]
            twitter = req['twitter_username']
            base_msg += "**❆ GitHub Information ❆** \n\n"
            base_msg += f"**Profile Url:** {req['html_url']} \n"
            base_msg += f"**Name:** `{req['name']}` \n"
            base_msg += f"**Username:** `{req['login']}` \n"
            base_msg += f"**User ID:** `{req['id']}` \n"
            base_msg += f"**Location:** `{req['location']}` \n"
            base_msg += f"**Company:** `{req['company']}` \n"
            base_msg += f"**Blog:** `{req['blog']}` \n"
            base_msg += f"**Twitter:** `{f'https://twitter.com/{twitter}' if twitter else 'None'}` \n"
            base_msg += f"**Bio:** `{req['bio']}` \n"
            base_msg += f"**Public Repos:** `{req['public_repos']}` \n"
            base_msg += f"**Public Gists:** `{req['public_gists']}` \n"
            base_msg += f"**Followers:** `{req['followers']}` \n"
            base_msg += f"**Following:** `{req['following']}` \n"
            base_msg += f"**Created At:** `{req['created_at']}` \n"
            base_msg += f"**Updated At:** `{req['updated_at']}` \n"
            return [base_msg, avatar]
        except Exception as e:
            base_msg += f"**An error occurred while parsing the data!** \n\n**Traceback:** \n `{e}` \n\n`Make sure that you've sent the command with the correct username!`"
            return [base_msg, "https://telegra.ph//file/32f69c18190666ea96553.jpg"]

@app.get("/ryuzaki/github")
async def github(username: str=None):
    try:
        details = await get_data(username)
        return {
            "status": "true",
            "randydev": {
                "avatar": details[1],
                "results": details[0]
            }
        }
    except:
        return {"status": "false", "message": "Error response."}
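
# Usage sketch (assumes a local instance): /ryuzaki/github takes the username as a
# query parameter, needs no API key, and returns the Markdown summary built by
# get_data() together with the avatar URL:
#
#     import requests
#
#     r = requests.get(
#         "http://localhost:8000/ryuzaki/github",
#         params={"username": "TeamKillerX"},
#     )
#     data = r.json()["randydev"]
#     print(data["avatar"])
#     print(data["results"])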

@app.get("/ryuzaki/webshot")
def webshot(
    url: str=None,
    quality: str="1920x1080",
    type_mine: str="JPEG",
    pixels: str="1024",
    cast: str="Z100"
):
    try:
        required_url = f"https://mini.s-shot.ru/{quality}/{type_mine}/{pixels}/{cast}/?{url}"
        return {
            "status": "true",
            "randydev": {
                "image_url": required_url
            }
        }
    except:
        return {"status": "false", "message": "Error response."}

@app.get("/ryuzaki/chatbot")
def chatbot(
    query: str=None,
    user_id: int=None,
    bot_name: str=None,
    bot_username: str=None
):
    api_url = b64decode("aHR0cHM6Ly9hcGkuc2Fmb25lLmRldi9jaGF0Ym90").decode("utf-8")
    params = {
        "query": query,
        "user_id": user_id,
        "bot_name": bot_name,
        "bot_master": bot_username
    }
    x = requests.get(f"{api_url}", params=params)
    if x.status_code != 200:
        return "Error api request"
    try:
        y = x.json()
        response = y["response"]
        return {
            "status": "true",
            "randydev": {
                "message": response
            }
        }
    except:
        return {"status": "false", "message": "Error response."}
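
# Usage sketch (local host/port assumed): the chatbot route forwards its query
# parameters to the decoded upstream API and needs no API key:
#
#     import requests
#
#     r = requests.get(
#         "http://localhost:8000/ryuzaki/chatbot",
#         params={
#             "query": "Hello!",
#             "user_id": 123456789,
#             "bot_name": "Ryuzaki",
#             "bot_username": "xtdevs",
#         },
#     )
#     print(r.json())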

@app.get("/ryuzaki/waifu")
def waifu_pics(
    types: str="sfw",
    category: str="neko"
):
    waifu_api = f"{SOURCR_WAIFU_URL}/{types}"
    waifu_param = f"{waifu_api}/{category}"

    response = requests.get(waifu_param)

    if response.status_code != 200:
        return "Sorry, there was an error processing your request. Please try again later"
    data_waifu = response.json()
    try:
        waifu_image_url = data_waifu["url"]
    except Exception as e:
        return f"Error request {e}"
    if waifu_image_url:
        try:
            return {
                "status": "true",
                "randydev": {
                    "image_url": waifu_image_url
                }
            }
        except:
            return {"status": "false", "message": "Error response"}
    else:
        return {"status": "false", "message": "Error response."}

@app.get("/ryuzaki/rayso")
def make_rayso(
    code=None,
    title: str="Ryuzaki Dev",
    theme: str=None,
    setlang: str="en",
    auto_translate: bool=None,
    ryuzaki_dark: bool=None
):
    trans = SyncTranslator()
    api_url = b64decode("aHR0cHM6Ly9hcGkuc2Fmb25lLm1lL3JheXNv").decode("utf-8")
    if auto_translate:
        source = trans.detect(code)
        translation = trans(code, sourcelang=source, targetlang=setlang)
        code = translation.text
    x = requests.post(
        f"{api_url}",
        json={
            "code": code,
            "title": title,
            "theme": theme,
            "darkMode": bool(ryuzaki_dark)
        }
    )
    if x.status_code != 200:
        return "Error api request"
    data = x.json()
    try:
        image_data = base64.b64decode(data["image"])
        return {
            "status": "true",
            "data": {
                "image": image_data
            }
        }
    except:
        return {"status": "false", "message": "Error response"}

@app.get("/ryuzaki/ipcheck")
def whois_ip_address(ip_address: str=None):
    apikey = kc("M0QwN0UyRUFBRjU1OTQwQUY0NDczNEMzRjJBQzdDMUE=").decode("utf-8")
    location_link = "https"
    location_api = "api.ip2location.io"
    location_key = f"key={apikey}"
    location_search = f"ip={ip_address}"
    location_param = (
        f"{location_link}://{location_api}/?{location_key}&{location_search}"
    )
    response = requests.get(location_param)
    if response.status_code != 200:
        return "Sorry, there was an error processing your request. Please try again later"
    data_location = response.json()
    try:
        location_ip = data_location["ip"]
        location_code = data_location["country_code"]
        location_name = data_location["country_name"]
        location_region = data_location["region_name"]
        location_city = data_location["city_name"]
        location_zip = data_location["zip_code"]
        location_zone = data_location["time_zone"]
        location_card = data_location["as"]
    except Exception as e:
        return f"error {e}"
    if (
        location_ip
        and location_code
        and location_name
        and location_region
        and location_city
        and location_zip
        and location_zone
        and location_card
    ):
        return {
            "ip_address": location_ip,
            "country_code": location_code,
            "region_name": location_region,
            "city_name": location_city,
            "zip_code": location_zip,
            "time_zone": location_zone,
            "as": location_card
        }
    else:
        return {"status": "false", "message": "Invalid ip address"}

@app.get("/ryuzaki/tiktok_douyin")
def tiktok_douyin(tiktok_url: str=None):
    response = requests.get(f"{SOURCR_TIKTOK_WTF_URL}={tiktok_url}")
    if response.status_code != 200:
        return "Error request"
    try:
        data = response.json()["aweme_list"][0]
        download_video = data["video"]["play_addr"]["url_list"][0]
        download_audio = data["music"]["play_url"]["url_list"][0]
        description = data["desc"]
        author = data["author"]["nickname"]
        request = data["author"]["signature"]
        return {
            "status": "true",
            "randydev": {
                "video_url": download_video,
                "music_url": download_audio,
                "description": description,
                "author": author,
                "request": request
            }
        }
    except:
        return {"status": "false", "message": "Error request"}

@app.get("/ryuzaki/tiktok")
def tiktok_downloader(tiktok_url: Union[str, None] = None, only_video: bool=None):
    api_devs = SOURCR_TIKTOK_TECH_URL
    parameter = f"tiktok?url={tiktok_url}"
    api_url = f"{api_devs}/{parameter}"
    response = requests.get(api_url)

    if response.status_code != 200:
        return "Error: Unable to fetch data from the TikTok API"
    try:
        results = response.json()
        caption = results.get("result", {}).get("desc", "")
        if only_video:
            video_url = results.get("result", {}).get("withoutWaterMarkVideo", "")
            if video_url:
                return {
                    "download_url": video_url,
                    "caption": caption
                }
        else:
            music_mp3 = results.get("result", {}).get("music", "")
            if music_mp3:
                return {
                    "music_url": music_mp3,
                    "caption": caption
                }
        return "Error: TikTok data not found or unsupported format"
    except:
        return {"status": "false", "message": "Invalid Link"}
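
# Usage sketch (assumed local instance): pass only_video=true for the no-watermark
# video URL, or leave it unset to receive the soundtrack URL instead:
#
#     import requests
#
#     r = requests.get(
#         "http://localhost:8000/ryuzaki/tiktok",
#         params={"tiktok_url": "<tiktok-link>", "only_video": "true"},
#     )
#     print(r.json())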

@app.get("/ryuzaki/mediafire")
def mediafire(link: Union[str, None] = None):
    try:
        down_link = str(link)
        mid = down_link.split('/', 5)
        if mid[3] == "view":
            mid[3] = "file"
        down_link = '/'.join(mid)
        print(down_link)
        r = requests.get(down_link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"class": "input popsok"}).get("href")
        a = str(a_href)
        id = link.split('/', 5)[4]
        a_byte = soup.find("a", {"class": "input popsok"}).get_text()
        a_name = soup.find("div", {"class": "dl-btn-label"}).get_text()
        details = soup.find("ul", {"class": "details"})
        li_items = details.find_all('li')[1]
        some = li_items.find_all("span")[0].get_text().split()
        dat = list(some)
        down = a_byte.replace(" ", "").strip()
        time = dat[1]
        date = dat[0]
        byte = down.split("(", 1)[1].split(")", 1)[0]
        name = a_name.replace(" ", "").strip()
        return {
            "status": "true",
            "data": {
                "file": {
                    "url": {
                        "directDownload": a,
                        "original": link,
                    },
                    "metadata": {
                        "id": id,
                        "name": name,
                        "size": {
                            "readable": byte
                        },
                        "DateAndTime": {
                            "time": time,
                            "date": date
                        }
                    }
                }
            }
        }

    except:
        return {"status": "false", "message": "Invalid Link"}

@app.get("/ryuzaki/gdrive")
def gdrive(link: Union[str, None] = None):
    try:
        down = link.split('/', 6)
        url = f'https://drive.google.com/uc?export=download&id={down[5]}'
        session = requests.Session()

        response = session.get(url, stream=True)
        headers = response.headers
        content_disp = headers.get('content-disposition')
        filename = None
        if content_disp:
            match = re.search(r'filename="(.+)"', content_disp)
            if match:
                filename = match.group(1)

        content_length = headers.get('content-length')
        last_modified = headers.get('last-modified')
        content_type = headers.get('content-type')

        return {
            "status": "true",
            "data": {
                "file": {
                    "url": {
                        "directDownload": url,
                        "original": link,
                    },
                    "metadata": {
                        "id": down[5],
                        "name": filename if filename else 'No filename provided by the server.',
                        "size": {
                            "readable": f'{round(int(content_length) / (1024 * 1024), 2)} MB'
                            if content_length else 'No content length provided by the server.',
                            "type": content_type
                            if content_type else 'No content type provided by the server.'
                        },
                        "DateAndTime": last_modified if last_modified else
                        'No last modified date provided by the server.',
                    }
                }
            }
        }

    except:
        return {"status": "false", "message": "Invalid Link"}

@app.get("/ryuzaki/anonfiles")
def anonfiles(link: Union[str, None] = None):
    try:
        r = requests.get(link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        id = link.split('/', 4)[3]
        jsondata = requests.get(
            f'https://api.anonfiles.com/v2/file/{id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']

        return jsondata
    except:
        return {"status": "false", "message": "Invalid Link"}

@app.get("/ryuzaki/filechan")
def filechan(link: Union[str, None] = None):
    try:
        r = requests.get(link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        id = link.split('/', 4)[3]
        jsondata = requests.get(
            f'https://api.filechan.org/v2/file/{id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']

        return jsondata
    except:
        return {"status": "false", "message": "Invalid Link"}

@app.get("/ryuzaki/letsupload")
def letsupload(link: Union[str, None] = None):
    try:
        r = requests.get(link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        id = link.split('/', 4)[3]
        jsondata = requests.get(
            f'https://api.letsupload.cc/v2/file/{id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']

        return jsondata
    except:
        return {"status": "false", "message": "Invalid Link"}

@app.get("/ryuzaki/megaupload")
def megaupload(link: Union[str, None] = None):
    try:
        r = requests.get(link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        id = link.split('/', 4)[3]
        jsondata = requests.get(
            f'https://api.megaupload.nz/v2/file/{id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']

        return jsondata
    except:
        return {"status": "false", "message": "Invalid Link"}

@app.get("/ryuzaki/myfile")
def myfile(link: Union[str, None] = None):
    try:
        r = requests.get(link)
        soup = BeautifulSoup(r.content, "html.parser")
        a_href = soup.find("a", {"id": "download-url"}).get("href")
        a = str(a_href)
        id = link.split('/', 4)[3]
        jsondata = requests.get(
            f'https://api.myfile.is/v2/file/{id}/info').json()
        jsondata['data']['file']['url']['directDownload'] = a
        del jsondata['data']['file']['url']['full']

        return jsondata
    except:
        return {"status": "false", "message": "Invalid Link"}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0")
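
# Running locally (a sketch; values are uvicorn defaults, not taken from this repo):
# uvicorn serves on port 8000 unless told otherwise, so the interactive docs mounted
# at docs_url="/" appear at http://localhost:8000/. An equivalent CLI invocation:
#
#     uvicorn main:app --host 0.0.0.0 --port 8000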