acharyaaditya26 committed • Commit b0095ef • Parent(s): bc4ea40

changes

Files changed:
- Dockerfile +14 -0
- app.py +61 -0
- requirements.txt +9 -0
Dockerfile
ADDED
@@ -0,0 +1,14 @@
FROM python:3.9

RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
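The container starts uvicorn on port 7860, the port a Docker Space exposes. As a rough smoke test (not part of this commit), one could hit FastAPI's auto-generated /openapi.json once the image is built and running; the base URL below is an assumption about how the container port is published locally.

# Hypothetical smoke test (not part of this commit): checks that the FastAPI
# app inside the container responds. Assumes the image was built and run with
# port 7860 published on localhost.
import requests

BASE_URL = "http://localhost:7860"  # assumed local mapping of the container port

resp = requests.get(f"{BASE_URL}/openapi.json", timeout=10)
resp.raise_for_status()
print("Available paths:", list(resp.json().get("paths", {}).keys()))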
app.py
ADDED
@@ -0,0 +1,61 @@
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from typing import List
from pydantic import BaseModel
from PIL import Image
import io
from transformers import AutoModel, AutoTokenizer
import torch

app = FastAPI()

# Load model and tokenizer
model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True,
                                  attn_implementation='sdpa', torch_dtype=torch.bfloat16)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True)

class FewshotExample(BaseModel):
    image: bytes
    question: str
    answer: str

class PredictRequest(BaseModel):
    fewshot_examples: List[FewshotExample]
    test_image: bytes
    test_question: str

@app.post("/predict_with_fewshot")
async def predict_with_fewshot(
    fewshot_images: List[UploadFile] = File(...),
    fewshot_questions: List[str] = Form(...),
    fewshot_answers: List[str] = Form(...),
    test_image: UploadFile = File(...),
    test_question: str = Form(...)
):
    # Validate input lengths
    if len(fewshot_images) != len(fewshot_questions) or len(fewshot_questions) != len(fewshot_answers):
        raise HTTPException(status_code=400, detail="Number of few-shot images, questions, and answers must match.")

    msgs = []
    try:
        # Few-shot examples: each contributes a user turn (image + question) and an assistant turn (answer)
        for fs_img, fs_q, fs_a in zip(fewshot_images, fewshot_questions, fewshot_answers):
            img_content = await fs_img.read()
            img = Image.open(io.BytesIO(img_content)).convert('RGB')
            msgs.append({'role': 'user', 'content': [img, fs_q]})
            msgs.append({'role': 'assistant', 'content': [fs_a]})

        # Test example
        test_img_content = await test_image.read()
        test_img = Image.open(io.BytesIO(test_img_content)).convert('RGB')
        msgs.append({'role': 'user', 'content': [test_img, test_question]})

        # Get answer
        answer = model.chat(
            image=None,
            msgs=msgs,
            tokenizer=tokenizer
        )

        return {"answer": answer}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing request: {str(e)}")
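A rough client-side sketch (not part of this commit) of how the /predict_with_fewshot endpoint could be called with the requests library. The multipart field names mirror the FastAPI parameters above; the base URL, file paths, questions, and answers are placeholder assumptions.

# Hypothetical client for /predict_with_fewshot (not part of this commit).
# Field names match the FastAPI parameters; paths and prompts are placeholders.
import requests

BASE_URL = "http://localhost:7860"  # assumed address of the running container/Space

files = [
    ("fewshot_images", open("example1.jpg", "rb")),  # one entry per few-shot example
    ("fewshot_images", open("example2.jpg", "rb")),
    ("test_image", open("test.jpg", "rb")),
]
data = {
    "fewshot_questions": ["What is shown in this image?", "What is shown in this image?"],
    "fewshot_answers": ["A cat on a sofa.", "A dog in a park."],
    "test_question": "What is shown in this image?",
}

resp = requests.post(f"{BASE_URL}/predict_with_fewshot", files=files, data=data, timeout=300)
resp.raise_for_status()
print(resp.json()["answer"])

Passing a list of tuples in files and list values in data sends repeated multipart fields, which is what the List[UploadFile] and List[str] parameters expect.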
requirements.txt
ADDED
@@ -0,0 +1,9 @@
fastapi
uvicorn[standard]
Pillow==10.1.0
torch==2.1.2
torchvision==0.16.2
transformers==4.40.0
sentencepiece==0.1.99
decord
python-multipart