Spaces:
Running
Running
Upload 3 files
Browse files- Dockerfile +47 -0
- api.py +80 -0
- model_process.py +72 -0
Dockerfile
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# --- Stage 1: build dummy .deb packages --------------------------------------
# equivs builds empty stand-in packages so apt treats heavy dependencies
# (DRI drivers, icon themes) as satisfied without actually installing them.
# "AS" is uppercased to satisfy the FromAsCasing lint rule.
FROM python:3.11-slim-bullseye AS builder

# Build dummy packages to skip installing them and their dependencies
RUN apt-get update \
    && apt-get install -y --no-install-recommends equivs \
    && equivs-control libgl1-mesa-dri \
    && printf 'Section: misc\nPriority: optional\nStandards-Version: 3.9.2\nPackage: libgl1-mesa-dri\nVersion: 99.0.0\nDescription: Dummy package for libgl1-mesa-dri\n' >> libgl1-mesa-dri \
    && equivs-build libgl1-mesa-dri \
    && mv libgl1-mesa-dri_*.deb /libgl1-mesa-dri.deb \
    && equivs-control adwaita-icon-theme \
    && printf 'Section: misc\nPriority: optional\nStandards-Version: 3.9.2\nPackage: adwaita-icon-theme\nVersion: 99.0.0\nDescription: Dummy package for adwaita-icon-theme\n' >> adwaita-icon-theme \
    && equivs-build adwaita-icon-theme \
    && mv adwaita-icon-theme_*.deb /adwaita-icon-theme.deb

# --- Stage 2: runtime image ---------------------------------------------------
FROM python:3.11-slim-bullseye
COPY --from=builder /*.deb /
WORKDIR /app
RUN echo "deb http://deb.debian.org/debian/ unstable main contrib non-free" >> /etc/apt/sources.list
# apt-get instead of apt: apt's CLI is not stable for scripted use and warns.
RUN apt-get update
RUN apt-get install -y python3 python3-pip libgl1-mesa-glx wget libglib2.0-dev
RUN pip install playwright hcaptcha_challenger
RUN playwright install firefox --with-deps

# Installing the dummy packages first prevents apt from pulling the real,
# much larger libgl1-mesa-dri / adwaita-icon-theme as dependencies of xvfb.
RUN dpkg -i /libgl1-mesa-dri.deb \
    && dpkg -i /adwaita-icon-theme.deb \
    # Install dependencies
    && apt-get update \
    && apt-get install -y --no-install-recommends xvfb dumb-init \
        procps curl vim xauth \
    # Remove temporary files and hardware decoding libraries
    && rm -rf /var/lib/apt/lists/* \
    && rm -f /usr/lib/x86_64-linux-gnu/libmfxhw* \
    && rm -f /usr/lib/x86_64-linux-gnu/mfx/* \
    # Run the app as an unprivileged user
    && useradd --home-dir /app --shell /bin/sh foxer \
    && chown -R foxer:foxer .

RUN rm -rf /root/.cache
USER foxer

# Ultralytics (pulled in by hcaptcha_challenger) needs a writable config dir.
RUN mkdir -p "/app/.config/Ultralytics"

# NOTE(review): this copies main.py, but the files uploaded alongside this
# Dockerfile are api.py and model_process.py — confirm the entry-point name.
COPY main.py .
EXPOSE 7860

# dumb-init reaps zombie browser processes and forwards signals.
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/usr/local/bin/python","-u", "/app/main.py"]
api.py
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import model_process
|
3 |
+
from flask import Flask, jsonify, request, logging as flog
|
4 |
+
from flask_limiter.util import get_remote_address
|
5 |
+
|
6 |
+
# Single Flask application object; the route handlers below register against
# it and app.run() at the bottom of the file serves it.
app = Flask(__name__)
|
7 |
+
|
8 |
+
|
9 |
+
def get_ipaddr():
    """Return the requesting client's IP address.

    Prefers the first hop of the X-Forwarded-For chain (``access_route``) so
    the original client is reported when running behind a reverse proxy;
    otherwise falls back to the socket peer address, defaulting to loopback.
    """
    route = request.access_route
    if route:
        print(route[0])
        return route[0]
    return request.remote_addr or '127.0.0.1'
15 |
+
|
16 |
+
|
17 |
+
# Flask's default stderr log handler, captured for reuse.
# NOTE(review): never referenced again in this file — possibly dead code.
handler = flog.default_handler
|
18 |
+
|
19 |
+
|
20 |
+
def get_token():
    """Return the API token.

    Reads the ``token`` file in the working directory if present (stripped of
    surrounding whitespace); otherwise returns the bootstrap default.
    """
    default_token = "init_token"
    if os.path.exists("token"):
        # Context manager closes the handle deterministically; the original
        # `open("token").read()` leaked the file object.
        with open("token", "r") as fh:
            return fh.read().strip()
    return default_token
|
25 |
+
|
26 |
+
|
27 |
+
def check_request(required_data, data):
    """Validate a JSON request body.

    required_data: keys that must all be present in ``data``.
    data: parsed JSON payload (may be None when parsing failed).
    Returns True only when every required key exists and the supplied
    ``token`` matches the current token on disk.
    """
    token = get_token()
    if not data or any(key not in data for key in required_data):
        print("Error:Invalid Request Data\n" + str(data))
        return False
    # .get() instead of data["token"]: if a caller ever omits "token" from
    # required_data, a missing key is an auth failure (403), not a KeyError
    # that would surface as a 500.
    if data.get("token") != token:
        print("Error:Invalid Token\n" + str(data))
        return False
    return True
|
36 |
+
|
37 |
+
|
38 |
+
@app.errorhandler(429)
def rate_limit_exceeded(e):
    """Handle rate-limit violations (HTTP 429) with a JSON payload."""
    offender = get_remote_address()
    print(offender)
    return jsonify(msg="Too many request"), 429
|
42 |
+
|
43 |
+
|
44 |
+
@app.errorhandler(405)
def method_not_allowed(e):
    """Reject requests that use an unsupported HTTP method (HTTP 405)."""
    offender = get_remote_address()
    print(offender)
    return jsonify(msg="Unauthorized Request"), 405
|
48 |
+
|
49 |
+
|
50 |
+
@app.route("/", methods=["GET"])
def index():
    """Liveness endpoint: report OK plus the caller's IP address."""
    client_ip = get_ipaddr()
    return jsonify(status_code=200, ip=client_ip)
|
53 |
+
|
54 |
+
|
55 |
+
@app.route("/update/token", methods=["POST"])
def update_token():
    """Rotate the API token.

    Body: {"token": <current token>, "new_token": <replacement>}.
    Returns 403 on a bad/missing token, JSON success message otherwise.
    """
    require_data = ["token", "new_token"]
    data = request.get_json(force=True, silent=True)
    if not check_request(require_data, data):
        return jsonify(msg="Unauthorized Request"), 403
    # "w" truncates/creates as needed; the context manager guarantees the
    # write is flushed and the handle closed (the original leaked an open
    # "w+" handle, and "+" read access was never used).
    with open("token", "w") as fh:
        fh.write(data["new_token"])
    return jsonify(msg="Token updated successfully", success=True)
|
65 |
+
|
66 |
+
|
67 |
+
@app.route("/api/solve", methods=["POST"])
def solver_captcha():
    """Solve a captcha challenge.

    Body: {"token": ..., "question": <prompt>, "data": [image URLs],
    "example": <optional reference-image URL>}.
    Returns the model's per-image result list, 400 when the question needs an
    example that was not supplied, 403 on auth failure, 500 on model errors.
    """
    require_data = ["token", "data", "question"]
    data = request.get_json(force=True, silent=True)
    if not check_request(require_data, data):
        return jsonify(msg="Unauthorized Request"), 403
    try:
        result = model_process.get_result(data["question"], data["data"], data.get("example"))
        if result is None:
            # get_result returns None when the question requires an example
            # image and none was given; returning None from a Flask view
            # would raise instead of producing a response.
            return jsonify(msg="Example image required"), 400
        return result
    except Exception as e:  # boundary handler: report and degrade to a 500
        print(e)
        return "error", 500
|
78 |
+
|
79 |
+
|
80 |
+
if __name__ == "__main__":
    # Guarding under __main__ keeps an import of this module (tests, a WSGI
    # runner) from starting the dev server as a side effect.
    # NOTE(review): the Dockerfile EXPOSEs 7860 but this listens on 8081 —
    # confirm which port is intended.
    app.run(host="0.0.0.0", port=8081)
|
model_process.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import hashlib
|
2 |
+
import os
|
3 |
+
import random
|
4 |
+
import string
|
5 |
+
import open_clip
|
6 |
+
import requests
|
7 |
+
import torch
|
8 |
+
import shutil
|
9 |
+
from PIL import Image
|
10 |
+
|
11 |
+
# Load the CLIP ViT-B/32 model (LAION-2B s34b_b79k weights) once at import
# time so every request reuses the same weights; `preprocess` is the matching
# image transform and `tokenizer` the matching text tokenizer.
model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
tokenizer = open_clip.get_tokenizer('ViT-B-32')
|
13 |
+
|
14 |
+
|
15 |
+
def generate_random_string_and_hash(length=8):
    """Return the SHA-256 hex digest of a random ASCII-letter string.

    Used to generate collision-resistant temp file/directory names.
    ``length`` is the length of the random source string; the returned
    digest is always 64 lowercase hex characters.

    NOTE(review): `random` is adequate for scratch names; switch to the
    `secrets` module if these values ever become security-relevant.
    """
    # random.choices replaces the original per-character generator loop
    # (which also left an unused loop variable `i`).
    random_string = ''.join(random.choices(string.ascii_letters, k=length))
    return hashlib.sha256(random_string.encode()).hexdigest()
|
24 |
+
|
25 |
+
|
26 |
+
def process_img(image_input, text_inputs, classes):
    """Classify one image against the candidate labels with CLIP.

    image_input: preprocessed image tensor with a batch dimension —
        presumably `preprocess(Image...).unsqueeze(0)`; confirm with callers.
    text_inputs: tokenized prompts, one row per candidate class.
    classes: label strings aligned index-for-index with text_inputs.
    Returns the single best-matching class name.
    """
    # no_grad: inference only, skip autograd bookkeeping.
    with torch.no_grad():
        image_features = model.encode_image(image_input)
        text_features = model.encode_text(text_inputs)

    # L2-normalize both embeddings so the dot product below is cosine
    # similarity, then keep the single best label (topk(1), despite the
    # stock "top 5" comment this snippet originated from).
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
    value, index = similarity[0].topk(1)
    class_name = classes[index]
    return class_name
|
38 |
+
|
39 |
+
|
40 |
+
def get_result(question, data, example=None):
    """Answer a challenge: classify each image URL in `data` for `question`.

    question: challenge prompt, used as a key into the remote data.json.
    data: iterable of image URLs to download and classify.
    example: optional URL of a reference image, used when the question's
        answer set must be derived from an example instead of a static list.

    Returns a list of booleans (True where the image's predicted class is in
    the answer set; empty when the question is unknown), or None when an
    example is required but was not supplied.
    """
    # The module already builds model/preprocess/tokenizer at import time;
    # the original rebuilt all three here on every request, re-loading the
    # full ViT-B-32 weights per call only to produce identical objects.
    sess = requests.session()
    result = []
    # Per-request scratch directory with a collision-resistant name.
    dir_path = generate_random_string_and_hash()
    os.makedirs(f"temp/{dir_path}", exist_ok=True)
    try:
        raw_answer = sess.get("https://yundisk.de/d/OneDrive_5G/Pic/data.json").json()
        if question in raw_answer:
            # NOTE(review): classes/need_example/answer are read from the top
            # level of data.json although membership is tested per question —
            # confirm the JSON schema (raw_answer[question][...] ?).
            classes = raw_answer["classes"]
            text_inputs = torch.cat([tokenizer(f"a photo of {c}") for c in classes])
            if raw_answer["need_example"]:
                if not example:
                    print(question)
                    return None
                # Derive the answer set by classifying the reference image.
                example_file_path = f"{generate_random_string_and_hash()}.png"
                with open(f"temp/{dir_path}/{example_file_path}", "wb+") as f:
                    f.write(sess.get(example).content)
                example = preprocess(Image.open(f"temp/{dir_path}/{example_file_path}")).unsqueeze(0)
                answer = [process_img(example, text_inputs, classes)]
            else:
                answer = raw_answer["answer"]
            for img in data:
                img_path = f"{generate_random_string_and_hash()}.png"
                with open(f"temp/{dir_path}/{img_path}", "wb+") as f:
                    f.write(sess.get(img).content)
                img = preprocess(Image.open(f"temp/{dir_path}/{img_path}")).unsqueeze(0)
                class_name = process_img(img, text_inputs, classes)
                result.append(class_name in answer)
        return result
    finally:
        # Always remove the scratch directory — the original leaked it on the
        # early `return None` path and whenever a download/model call raised.
        shutil.rmtree(f"temp/{dir_path}", ignore_errors=True)
|