import argparse
import base64
import io
import os
import re
import sys
import traceback
import uuid
from typing import List, Optional

import cv2
import numpy as np
import pandas as pd
import pinecone
import pyiqa
import timm
import torch
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, File, Form, HTTPException, UploadFile
from PIL import Image, ImageEnhance
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer, util
from transformers import (
    AutoFeatureExtractor,
    AutoModel,
    DonutProcessor,
    VisionEncoderDecoderModel,
)
load_dotenv()
pinecone.init(api_key=os.getenv("PINECONE_KEY"), environment=os.getenv("PINECONE_ENV"))
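# The Pinecone credentials are read from a local .env file via load_dotenv().
# A minimal sketch of the expected entries (values are placeholders):
#   PINECONE_KEY=<your-pinecone-api-key>
#   PINECONE_ENV=<your-pinecone-environment>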
DETECTION_URL = "/object-detection/"
CLASSIFICATION_URL = "/object-classification/"
QUALITY_ASSESSMENT_URL = "/quality-assessment/"
FACE_URL = "/face-anonymization/"
LICENCE_URL = "/licenceplate-anonymization/"
DOCUMENT_QA = "/document-qa/"
IMAGE_SIMILARITY_DEMO = "/find-similar-image/"
IMAGE_SIMILARITY_PINECONE_DEMO = "/find-similar-image-pinecone/"
INDEX_NAME = "imagesearch-demo"
INDEX_DIMENSION = 512
TMP_DIR = "tmp"
def enhance_image(pil_image):
# Convert PIL Image to OpenCV format
open_cv_image = np.array(pil_image)
# Convert RGB to BGR
open_cv_image = open_cv_image[:, :, ::-1].copy()
# Convert to grayscale
gray = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2GRAY)
    # Histogram equalization (computed for reference; unused in the final output)
    equ = cv2.equalizeHist(gray)
    # Adaptive Histogram Equalization
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    adaptive_hist_eq = clahe.apply(gray)
    # Gaussian Blurring and median-filter noise reduction (also unused downstream;
    # only the LAB-space CLAHE result below feeds the returned image)
    gaussian_blurred = cv2.GaussianBlur(adaptive_hist_eq, (5, 5), 0)
    denoised = cv2.medianBlur(gaussian_blurred, 3)
# Brightness & Contrast adjustment
lab = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2Lab)
l, a, b = cv2.split(lab)
cl = clahe.apply(l)
limg = cv2.merge((cl, a, b))
enhanced_image = cv2.cvtColor(limg, cv2.COLOR_Lab2BGR)
# Convert back to PIL Image
enhanced_pil_image = Image.fromarray(cv2.cvtColor(enhanced_image, cv2.COLOR_BGR2RGB))
# IMAGE AUGMENTATION
# For demonstration purposes, let's do a simple brightness adjustment.
# In practice, choose the augmentations that suit your task.
enhancer = ImageEnhance.Brightness(enhanced_pil_image)
enhanced_pil_image = enhancer.enhance(1.2) # Brighten the image by 20%
return enhanced_pil_image
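# Illustrative usage of enhance_image (the file path is a placeholder):
#   enhanced = enhance_image(Image.open("query.jpg").convert("RGB"))
# Converting to RGB first matters: the RGB->BGR slice inside enhance_image
# assumes a 3-channel image, so RGBA or grayscale inputs can fail.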
if INDEX_NAME not in pinecone.list_indexes():
    pinecone.create_index(INDEX_NAME, dimension=INDEX_DIMENSION, metric="cosine")
print("Connecting to Pinecone Index")
index = pinecone.Index(INDEX_NAME)
device = "cuda" if torch.cuda.is_available() else "cpu"
os.makedirs(TMP_DIR, exist_ok=True)
licence_model = torch.hub.load(
"ultralytics/yolov5", "custom", path="Licenseplate_model.pt", device="cpu", force_reload=True
)
licence_model.cpu()
detector = cv2.dnn.DetectionModel(
"res10_300x300_ssd_iter_140000_fp16.caffemodel", "deploy.prototxt"
)
processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
doc_qa_model = VisionEncoderDecoderModel.from_pretrained(
"naver-clova-ix/donut-base-finetuned-docvqa"
)
doc_qa_model.to(device)
model = torch.hub.load(
"ultralytics/yolov5", "custom", path="best.pt", device="cpu", force_reload=True
)
model.cpu()
classes = [
"gas-distribution-meter",
"gas-distribution-piping",
"gas-distribution-regulator",
"gas-distribution-valve",
]
class_to_idx = {
"gas-distribution-meter": 0,
"gas-distribution-piping": 1,
"gas-distribution-regulator": 2,
"gas-distribution-valve": 3,
}
idx_to_classes = {v: k for k, v in class_to_idx.items()}
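# Round-trip example of the two mappings:
#   class_to_idx["gas-distribution-meter"] -> 0
#   idx_to_classes[0] -> "gas-distribution-meter"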
modelname = "resnet50d"
model_weights = "best_classifer_model.pt"
num_classes = len(classes)
classifier_model = timm.create_model(
    modelname, pretrained=True, num_classes=num_classes, drop_path_rate=0.05
)
classifier_model.load_state_dict(
torch.load(model_weights, map_location=torch.device("cpu"))["model_state_dict"]
)
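# The checkpoint is assumed to be a dict holding the weights under the
# "model_state_dict" key, i.e. one saved roughly as:
#   torch.save({"model_state_dict": classifier_model.state_dict()}, model_weights)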
musiq_metric = pyiqa.create_metric("musiq-koniq", device=torch.device("cpu"))
image_sim_model = SentenceTransformer("clip-ViT-B-32")
# model_ckpt = "nateraw/vit-base-beans"
# extractor = AutoFeatureExtractor.from_pretrained(model_ckpt)
# image_sim_model = AutoModel.from_pretrained(model_ckpt)
app = FastAPI(title="CV Demos")
# Define the Response
class Prediction(BaseModel):
filename: str
contenttype: str
prediction: List[float] = []
# define the root response
@app.get("/")
def root_route():
    return {
        "error": f"Use POST {DETECTION_URL}, {FACE_URL}, or {LICENCE_URL} instead of the root route!"
    }
@app.post(
DETECTION_URL,
)
async def predict(file: UploadFile = File(...), quality_check: bool = False):
try:
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension:
return "Image must be jpg or png format!"
        # read the image contents
contents = await file.read()
pil_image = Image.open(io.BytesIO(contents))
if quality_check:
            print("RUNNING QUALITY CHECK BEFORE OBJECT DETECTION!!!")
            # NOTE: a fixed temp filename is not safe under concurrent requests
            tmp_file = f"{TMP_DIR}/tmp.png"
pil_image.save(tmp_file)
score = musiq_metric(tmp_file)
if score < 50:
return {
"Error": "Image quality is not sufficient enough to be considered for object detection"
}
results = model(pil_image, size=640) # reduce size=320 for faster inference
return results.pandas().xyxy[0].to_json(orient="records")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
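# A sketch of how this endpoint could be called (host and port are assumptions
# based on the default --port below):
#   curl -X POST "http://localhost:8000/object-detection/?quality_check=true" \
#        -F "file=@sample.jpg"
# The same multipart "file" field applies to the classification and
# quality-assessment endpoints that follow.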
@app.post(CLASSIFICATION_URL)
async def classify(file: UploadFile = File(...)):
try:
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension:
return "Image must be jpg or png format!"
        # read the image contents
contents = await file.read()
pil_image = Image.open(io.BytesIO(contents))
data_mean = (0.485, 0.456, 0.406)
data_std = (0.229, 0.224, 0.225)
image_size = (224, 224)
eval_transforms = timm.data.create_transform(
input_size=image_size, mean=data_mean, std=data_std
)
        classifier_model.eval()
        print("RUNNING Image Classification!!!")
        max_class_idx = np.argmax(
            classifier_model(eval_transforms(pil_image).unsqueeze(dim=0)).detach().numpy()
        )
predicted_class = idx_to_classes[max_class_idx]
print(f"Predicted Class idx: {max_class_idx} with name : {predicted_class}")
return {"object": predicted_class}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@app.post(QUALITY_ASSESSMENT_URL)
async def quality_check(file: UploadFile = File(...)):
try:
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension:
return "Image must be jpg or png format!"
        # read the image contents
contents = await file.read()
pil_image = Image.open(io.BytesIO(contents))
tmp_file = f"{TMP_DIR}/tmp.png"
pil_image.save(tmp_file)
score = musiq_metric(tmp_file).detach().numpy().tolist()
return {"score": score}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
def anonymize_simple(image, factor=3.0):
# automatically determine the size of the blurring kernel based
# on the spatial dimensions of the input image
(h, w) = image.shape[:2]
kW = int(w / factor)
kH = int(h / factor)
# ensure the width of the kernel is odd
if kW % 2 == 0:
kW -= 1
# ensure the height of the kernel is odd
if kH % 2 == 0:
kH -= 1
# apply a Gaussian blur to the input image using our computed
# kernel size
return cv2.GaussianBlur(image, (kW, kH), 0)
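# Worked example: a 600x400 (w x h) image with factor=3.0 gives kW = 200 and
# kH = 133; 200 is even, so kW drops to 199, yielding a (199, 133) Gaussian kernel.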
def anonymize_pixelate(image, blocks=3):
# divide the input image into NxN blocks
(h, w) = image.shape[:2]
xSteps = np.linspace(0, w, blocks + 1, dtype="int")
ySteps = np.linspace(0, h, blocks + 1, dtype="int")
# loop over the blocks in both the x and y direction
for i in range(1, len(ySteps)):
for j in range(1, len(xSteps)):
# compute the starting and ending (x, y)-coordinates
# for the current block
startX = xSteps[j - 1]
startY = ySteps[i - 1]
endX = xSteps[j]
endY = ySteps[i]
# extract the ROI using NumPy array slicing, compute the
# mean of the ROI, and then draw a rectangle with the
# mean RGB values over the ROI in the original image
roi = image[startY:endY, startX:endX]
(B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]
cv2.rectangle(image, (startX, startY), (endX, endY), (B, G, R), -1)
# return the pixelated blurred image
return image
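# With blocks=3 the ROI is covered by a 3x3 grid and each cell is filled with its
# mean BGR colour, so a smaller `blocks` value hides more detail.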
@app.post(
FACE_URL,
)
async def face_anonymize(
file: UploadFile = File(...), blur_type="simple", quality_check: bool = False
):
"""
https://pyimagesearch.com/2020/04/06/blur-and-anonymize-faces-with-opencv-and-python/
"""
try:
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension:
return "Image must be jpg or png format!"
        # read the image contents
        contents = await file.read()
        pil_image = Image.open(io.BytesIO(contents)).convert("RGB")
        # reuse the face detector created once at module load
        open_cv_image = np.array(pil_image)
        # Convert RGB to BGR
        open_cv_image = open_cv_image[:, :, ::-1].copy()
# Getting the detections
detections = detector.detect(open_cv_image)
if len(detections[2]) > 0:
for face in detections[2]:
(x, y, w, h) = face.astype("int")
# extract the face ROI
face = open_cv_image[y : y + h, x : x + w]
if blur_type == "simple":
face = anonymize_simple(face)
else:
face = anonymize_pixelate(face)
open_cv_image[y : y + h, x : x + w] = face
        _, encoded_img = cv2.imencode(".png", open_cv_image)
        encoded_img = base64.b64encode(encoded_img).decode("utf-8")
return {
"filename": file.filename,
"dimensions": str(open_cv_image.shape),
"encoded_img": encoded_img,
}
    except Exception as e:
        print(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))
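# A sketch of a face-anonymization call (host/port are assumptions). blur_type is
# an untyped default, so FastAPI treats it as a query parameter; the response
# returns the blurred image as a base64-encoded PNG string:
#   curl -X POST "http://localhost:8000/face-anonymization/?blur_type=pixelate" \
#        -F "file=@portrait.jpg"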
@app.post(LICENCE_URL)
async def licence_anonymize(file: UploadFile = File(...), blur_type="simple"):
"""https://www.kaggle.com/code/gowrishankarp/license-plate-detection-yolov5-pytesseract/notebook#Visualize"""
try:
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension:
return "Image must be jpg or png format!"
        # read the image contents
contents = await file.read()
pil_image = Image.open(io.BytesIO(contents))
results = licence_model(pil_image, size=640) # reduce size=320 for faster inference
pil_image = pil_image.convert("RGB")
open_cv_image = np.array(pil_image)
open_cv_image = open_cv_image[:, :, ::-1].copy()
df = results.pandas().xyxy[0]
for i, row in df.iterrows():
xmin = int(row["xmin"])
ymin = int(row["ymin"])
xmax = int(row["xmax"])
ymax = int(row["ymax"])
licence = open_cv_image[ymin:ymax, xmin:xmax]
if blur_type == "simple":
licence = anonymize_simple(licence)
else:
licence = anonymize_pixelate(licence)
open_cv_image[ymin:ymax, xmin:xmax] = licence
        _, encoded_img = cv2.imencode(".png", open_cv_image)
        encoded_img = base64.b64encode(encoded_img).decode("utf-8")
return {
"filename": file.filename,
"dimensions": str(open_cv_image.shape),
"encoded_img": encoded_img,
}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
def process_document(image, question):
# prepare encoder inputs
pixel_values = processor(image, return_tensors="pt").pixel_values
# prepare decoder inputs
task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
prompt = task_prompt.replace("{user_input}", question)
decoder_input_ids = processor.tokenizer(
prompt, add_special_tokens=False, return_tensors="pt"
).input_ids
# generate answer
outputs = doc_qa_model.generate(
pixel_values.to(device),
decoder_input_ids=decoder_input_ids.to(device),
max_length=doc_qa_model.decoder.config.max_position_embeddings,
early_stopping=True,
pad_token_id=processor.tokenizer.pad_token_id,
eos_token_id=processor.tokenizer.eos_token_id,
use_cache=True,
num_beams=1,
bad_words_ids=[[processor.tokenizer.unk_token_id]],
return_dict_in_generate=True,
)
# postprocess
sequence = processor.batch_decode(outputs.sequences)[0]
sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(
processor.tokenizer.pad_token, ""
)
sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token
return processor.token2json(sequence)
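# Example of the instantiated Donut prompt for the question "What is the invoice
# number?" (the question text is a placeholder):
#   <s_docvqa><s_question>What is the invoice number?</s_question><s_answer>
# Generation continues after <s_answer>, and token2json converts the decoded
# sequence into a dict that includes an "answer" key.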
@app.post(DOCUMENT_QA)
async def document_qa(question: str = Form(...), file: UploadFile = File(...)):
try:
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension:
return "Image must be jpg or png format!"
        # read the image contents
contents = await file.read()
pil_image = Image.open(io.BytesIO(contents))
# tmp_file = f"{TMP_DIR}/tmp.png"
# pil_image.save(tmp_file)
# answer_git_large = generate_answer_git(git_processor_large, git_model_large, image, question)
answer = process_document(pil_image, question)["answer"]
return {"answer": answer}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@app.post(IMAGE_SIMILARITY_DEMO)
async def image_search_local(
images_to_search: List[UploadFile], query_image: UploadFile = File(...), top_k: int = 5
):
    print(
        f"Received {len(images_to_search)} images; will retrieve the top k = {top_k} most similar images"
    )
try:
        extension = query_image.filename.split(".")[-1] in ("jpg", "jpeg", "png")
        if not extension:
            return "Image must be jpg or png format!"
search_images = []
search_filenames = []
print("Processing request...")
for image in images_to_search:
if image.filename.split(".")[-1] not in ("jpg", "jpeg", "png"):
return "Image must be jpg or png format!"
            # read the image contents
search_filenames.append(image.filename)
contents = await image.read()
search_images.append(Image.open(io.BytesIO(contents)))
print("Indexing images to search...")
corpus_embeddings = image_sim_model.encode(
search_images, convert_to_tensor=True, show_progress_bar=True
)
        # read the query image contents
contents = await query_image.read()
query_image = Image.open(io.BytesIO(contents))
print("Indexing query image...")
prompt_embedding = image_sim_model.encode(query_image, convert_to_tensor=True)
print("Searching query image...")
hits = util.semantic_search(prompt_embedding, corpus_embeddings, top_k=top_k)
# hits = pd.DataFrame(hits[0], columns=['corpus_id', 'score'])
print("Creating the result..")
similar_images = []
print("hits ", hits)
        for hit in hits[0]:
            # encode each matched image as a base64 PNG for the JSON response
            open_cv_image = np.array(search_images[hit["corpus_id"]].convert("RGB"))[:, :, ::-1]
            _, encoded_img = cv2.imencode(".png", open_cv_image)
            encoded_img = base64.b64encode(encoded_img).decode("utf-8")
similar_images.append(
{
"filename": search_filenames[hit["corpus_id"]],
"dimensions": str(open_cv_image.shape),
"score": hit["score"],
"encoded_img": encoded_img,
}
)
print("Sending result..")
return {"similar_images": similar_images}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
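# A sketch of a local similarity-search call (host/port are assumptions). The
# corpus is uploaded alongside the query, so nothing persists between requests:
#   curl -X POST "http://localhost:8000/find-similar-image/?top_k=3" \
#        -F "images_to_search=@a.jpg" -F "images_to_search=@b.jpg" \
#        -F "query_image=@query.jpg"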
@app.post(IMAGE_SIMILARITY_PINECONE_DEMO)
async def image_search_pinecone(
images_to_search: Optional[List[UploadFile]] = File(None),
query_image: Optional[UploadFile] = File(None),
top_k: int = 5,
namespace="av_local",
action="query",
):
try:
# Function to delete all files from the database
print(f"Received request with images_to_search: {images_to_search} and query_image: {query_image} with action: {action}")
if action == "delete":
index = pinecone.Index(INDEX_NAME)
delete_response = index.delete(delete_all=True, namespace=namespace)
return {f"Deleted the namespace: {namespace}": delete_response}
elif action == "query" and query_image is not None:
extension = query_image.filename.split(".")[-1] in ("jpg", "jpeg", "png", "JPG", "PNG", "JPEG")
if not extension:
return "Image must be jpg or png format!"
            # read the image contents
contents = await query_image.read()
query_image = Image.open(io.BytesIO(contents))
print("Indexing query image...")
query_image = enhance_image(query_image)
prompt_embedding = image_sim_model.encode(query_image, convert_to_tensor=True).tolist()
if INDEX_NAME not in pinecone.list_indexes():
return {"similar_images": [], "status": "No index found for images"}
else:
index = pinecone.Index(INDEX_NAME)
query_response = index.query(
namespace=namespace,
top_k=top_k,
include_values=True,
include_metadata=True,
vector=prompt_embedding,
)
result_images = [d["metadata"]["file_path"] for d in query_response["matches"]]
print("Creating the result..")
similar_images = []
print("Retrieved matches ", query_response["matches"])
for file_path in result_images:
                try:
                    # read the stored file from disk and encode it for the response
                    open_cv_image = cv2.imread(file_path)
                    _, encoded_img = cv2.imencode(".png", open_cv_image)
                    encoded_img = base64.b64encode(encoded_img).decode("utf-8")
similar_images.append(
{
"filename": file_path,
"dimensions": str(open_cv_image.shape),
"score": 0,
"encoded_img": encoded_img,
}
)
                except Exception:
similar_images.append(
{
"filename": file_path,
"dimensions": None,
"score": 0,
"encoded_img": None,
}
)
print("Sending result..")
return {"similar_images": similar_images}
        elif action == "index" and images_to_search:
            print(f"Action indexing is executing for {len(images_to_search)} images")
            # if the target index does not already exist, drop any stale indexes
            # and create a fresh one
            print("checking pinecone Index")
            if INDEX_NAME not in pinecone.list_indexes():
                for delete_index in pinecone.list_indexes():
                    print(f"Deleting existing pinecone Index : {delete_index}")
                    pinecone.delete_index(delete_index)
                print(f"Creating new pinecone Index : {INDEX_NAME}")
                pinecone.create_index(INDEX_NAME, dimension=INDEX_DIMENSION, metric="cosine")
# instantiate connection to your Pinecone index
print(f"Connecting to pinecone Index : {INDEX_NAME}")
index = pinecone.Index(INDEX_NAME)
search_images = []
meta_datas = []
ids = []
print("Processing request...")
for image in images_to_search:
if image.filename.split(".")[-1] not in ("jpg", "jpeg", "png", "JPG", "PNG", "JPEG"):
return "Image must be jpg or png format!"
                # read the image contents
contents = await image.read()
pil_image = Image.open(io.BytesIO(contents))
tmp_file = f"{TMP_DIR}/{image.filename}"
pil_image.save(tmp_file)
meta_datas.append({"file_path": tmp_file})
search_images.append(pil_image)
ids.append(str(uuid.uuid1()).replace("-",""))
print("Encoding images to vectors...")
corpus_embeddings = image_sim_model.encode(
search_images, convert_to_tensor=True, show_progress_bar=True
).tolist()
print(f"Indexing images to pinecone Index : {INDEX_NAME}")
index.upsert(
vectors=list(zip(ids, corpus_embeddings, meta_datas)), namespace=namespace
)
            return {"similar_images": [], "status": "Indexing successful for uploaded files"}
else:
return {"similar_images": []}
    except Exception as e:
        print(f"exception happened {e}\n{traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=str(e))
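# The Pinecone endpoint above multiplexes three actions via its `action` parameter:
# "index" embeds the uploaded images with CLIP and upserts them (ids, vectors,
# file-path metadata) into the namespace, "query" searches that namespace with the
# enhanced query image, and "delete" drops the whole namespace.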
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Fast API exposing YOLOv5 model")
parser.add_argument("--port", default=8000, type=int, help="port number")
# parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
opt = parser.parse_args()
uvicorn.run(app, port=opt.port)