YAML Metadata Warning: empty or missing yaml metadata in repo card (https://huggingface.co/docs/hub/model-cards#model-card-metadata)

Create and activate a virtual environment

python3 -m venv dtron2
source dtron2/bin/activate

Upgrade pip

pip install --upgrade pip

Install PyTorch

pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu

Clone the Detectron2 repository

git clone https://github.com/facebookresearch/detectron2.git

Navigate to the repository and build Detectron2

cd detectron2
python -m pip install -e .

Create Django app

python manage.py startapp cancer_detection

Add the app to your Django project's INSTALLED_APPS in settings.py

INSTALLED_APPS = [
    ...,
    'cancer_detection',  # register the cancer-detection app so Django discovers its views/urls
]

In your project, create a Python file named detectron2_utils.py for loading and running the model

import numpy as np
import cv2
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from django.http import JsonResponse, HttpResponse

Define the paths to the trained weights (model_final.pth) and the Detectron2 config (.yaml) file

# Detectron2 Mask R-CNN YAML config used to rebuild the model architecture
config_path = "mask_rcnn_config.yaml"
# Trained model weights checkpoint loaded into the predictor
model_path = "model_final.pth"

Define the model-loading function

def load_model():
    """Load the Detectron2 predictor into the module-level ``predictor``.

    Builds a config from ``config_path``, points it at the weights in
    ``model_path``, and constructs a :class:`DefaultPredictor` on CPU.

    On failure, ``predictor`` is set to ``None`` (instead of being left
    unbound, which would make later calls fail with ``NameError``) so
    callers can detect the missing model and report a clear error.
    """
    global predictor
    try:
        # Build the Detectron2 configuration from the YAML file on disk
        cfg = get_cfg()
        cfg.merge_from_file(config_path)
        cfg.MODEL.WEIGHTS = model_path
        cfg.MODEL.DEVICE = "cpu"  # Use "cuda" for GPU

        # Create the predictor; this loads the weights eagerly
        predictor = DefaultPredictor(cfg)
        print("Model loaded successfully.")
    except Exception as e:
        # Keep the server running, but leave a sentinel so downstream
        # code can distinguish "model failed to load" from a crash.
        predictor = None
        print(f"Error loading model: {e}")

Define the prediction function

def predict_image(image_file):
    """Run instance segmentation on an uploaded image and return it annotated.

    Args:
        image_file: A Django ``UploadedFile`` holding an encoded image
            (JPEG/PNG/etc.).

    Returns:
        ``HttpResponse`` with the JPEG-encoded visualization on success,
        or a ``JsonResponse`` with an ``"error"`` key on failure.
    """
    try:
        # Decode the uploaded bytes into a BGR OpenCV image
        npimg = np.frombuffer(image_file.read(), np.uint8)
        image = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
        if image is None:
            # imdecode returns None (not an exception) on unreadable input
            return JsonResponse({"error": "Could not decode the uploaded image."})

        # Make predictions
        outputs = predictor(image)

        # Visualizer expects RGB, so reverse OpenCV's BGR channel order
        metadata = MetadataCatalog.get("medical_train")  # Replace with your dataset name
        visualizer = Visualizer(image[:, :, ::-1], metadata, scale=1.2)
        output_image = visualizer.draw_instance_predictions(outputs["instances"].to("cpu")).get_image()

        # get_image() returns RGB; convert back to BGR before cv2.imencode,
        # otherwise the returned JPEG has red and blue channels swapped.
        _, img_encoded = cv2.imencode('.jpg', output_image[:, :, ::-1])
        return HttpResponse(img_encoded.tobytes(), content_type="image/jpeg")
    except Exception as e:
        return JsonResponse({"error": str(e)})

Create the view in views.py

from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from .detectron2_utils import load_model, predict_image

# Load the model once at import time so requests don't pay the startup cost
load_model()

@csrf_exempt
def process_image(request):
    """Accept a POSTed 'image' file and return the model's annotated output.

    Any other request shape gets a JSON error payload.
    """
    has_upload = request.method == 'POST' and bool(request.FILES.get('image'))
    if not has_upload:
        return JsonResponse({"error": "Invalid request. Please send an image."})
    return predict_image(request.FILES['image'])

Configure the app's urls.py

from django.urls import path
from . import views

urlpatterns = [
    # POST an image to /process-image/ to receive the annotated JPEG back
    path('process-image/', views.process_image, name='process_image'),
]

# run server
python manage.py runserver
Downloads last month

-

Downloads are not tracked for this model. How to track
Inference Providers NEW
This model is not currently available via any of the supported Inference Providers.
The model cannot be deployed to the HF Inference API: The model has no library tag.