inline double test_shape_predictor_with_images_py (
    const py::list& pyimages,
    const py::list& pydetections,
    const py::list& pyscales,
    const shape_predictor& predictor
)
{
    const unsigned long num_images = py::len(pyimages);
    const unsigned long num_scales = py::len(pyscales);
    if (num_images != py::len(pydetections))
        throw dlib::error("The length of the detections list must match the length of the images list.");
    if (num_scales > 0 && num_scales != num_images)
        throw dlib::error("The length of the scales list must match the length of the detections list.");

    std::vector<std::vector<full_object_detection> > detections(num_images);
    std::vector<std::vector<double> > scales;
    if (num_scales > 0)
        scales.resize(num_scales);

    dlib::array<numpy_image<unsigned char>> images(num_images);

    // Now copy the data into dlib based objects so we can call the testing routine.
    for (unsigned long i = 0; i < num_images; ++i)
    {
        const unsigned long num_boxes = py::len(pydetections[i]);
        for (py::iterator det_it = pydetections[i].begin();
             det_it != pydetections[i].end();
             ++det_it)
            detections[i].push_back(det_it->cast<full_object_detection>());

        assign_image(images[i], pyimages[i].cast<py::array>());

        // If per-detection scales were supplied, there must be exactly one
        // scale for each detection box in this image.
        if (num_scales > 0)
        {
            if (num_boxes != py::len(pyscales[i]))
                throw dlib::error("The length of the scales list must match the length of the detections list.");
            for (py::iterator scale_it = pyscales[i].begin(); scale_it != pyscales[i].end(); ++scale_it)
                scales[i].push_back(scale_it->cast<double>());
        }
    }
    return test_shape_predictor_with_images(images, detections, scales, predictor);
}
inline double test_shape_predictor_with_images_no_scales_py (
    const py::list& pyimages,
    const py::list& pydetections,
    const shape_predictor& predictor
)
{
    py::list pyscales;
    return test_shape_predictor_with_images_py(pyimages, pydetections, pyscales, predictor);
}
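
/*
    A sketch of hypothetical Python usage of the two testing routines above.
    This excerpt does not show how they are registered with pybind11, so the
    Python-level name dlib.test_shape_predictor is an assumption, as is the
    model file name:

        import numpy as np
        import dlib

        predictor = dlib.shape_predictor("predictor.dat")   # hypothetical model file

        img = np.zeros((100, 100), dtype=np.uint8)          # one grayscale test image
        rect = dlib.rectangle(10, 10, 90, 90)
        parts = [dlib.point(30, 30), dlib.point(60, 60)]
        det = dlib.full_object_detection(rect, parts)

        images = [img]
        detections = [[det]]

        # No per-box scales: an empty scales list is forwarded internally.
        mean_err = dlib.test_shape_predictor(images, detections, predictor)

        # With scales: scales[i] needs one entry per detection in image i.
        mean_err = dlib.test_shape_predictor(images, detections, [[1.0]], predictor)
*/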

// ----------------------------------------------------------------------------------------

void bind_shape_predictors(py::module &m)
{
    {
        typedef full_object_detection type;
        py::class_<type, std::shared_ptr<type>>(m, "full_object_detection",
            "This object represents the location of an object in an image along with the \
            positions of each of its constituent parts.")
            .def(py::init(&full_obj_det_init), py::arg("rect"), py::arg("parts"),
                "requires \n\
                - rect: dlib rectangle \n\
                - parts: list of dlib.point, or a dlib.points object.")
            .def_property_readonly("rect", &full_obj_det_get_rect,
                "Bounding box from the underlying detector. Parts can be outside the box if appropriate.")
            .def_property_readonly("num_parts", &full_obj_det_num_parts,
                "The number of parts of the object.")
            .def("part", &full_obj_det_part, py::arg("idx"),
                "A single part of the object as a dlib point.")
            .def("parts", &full_obj_det_parts,
                "A vector of dlib points representing all of the parts.")
            .def(py::pickle(&getstate<type>, &setstate<type>));
    }
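
    /*
        A sketch of hypothetical Python usage of the full_object_detection
        binding above; the constructor and accessors mirror the .def calls in
        this file:

            import dlib

            rect = dlib.rectangle(10, 10, 90, 90)             # bounding box
            parts = [dlib.point(30, 30), dlib.point(60, 60)]  # constituent parts
            det = dlib.full_object_detection(rect, parts)

            det.rect        # the bounding box passed to the constructor
            det.num_parts   # -> 2
            det.part(0)     # a single part as a dlib.point
            det.parts()     # all parts as a dlib.points vector
    */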
    {
        typedef shape_predictor_training_options type;
        py::class_<type>(m, "shape_predictor_training_options",
            "This object is a container for the options to the train_shape_predictor() routine.")
            .def(py::init())
            .def_readwrite("be_verbose", &type::be_verbose,
                "If true, train_shape_predictor() will print out a lot of information to stdout while training.")
            .def_readwrite("cascade_depth", &type::cascade_depth,
                "The number of cascades created to train the model with.")
            .def_readwrite("tree_depth", &type::tree_depth,
                "The depth of the trees used in each cascade. There are pow(2, get_tree_depth()) leaves in each tree.")
            .def_readwrite("num_trees_per_cascade_level", &type::num_trees_per_cascade_level,
                "The number of trees created for each cascade.")
            .def_readwrite("nu", &type::nu,
                "The regularization parameter. Larger values of this parameter \
                will cause the algorithm to fit the training data better but may also \
                cause overfitting. The value must be in the range (0, 1].")
            .def_readwrite("oversampling_amount", &type::oversampling_amount,
                "The number of randomly selected initial starting points sampled for each training example.")
            .def_readwrite("oversampling_translation_jitter", &type::oversampling_translation_jitter,
                "The amount of translation jittering to apply to bounding boxes; a good value is in the range [0, 0.5].")
            .def_readwrite("feature_pool_size", &type::feature_pool_size,
                "Number of pixels used to generate features for the random trees.")
            .def_readwrite("lambda_param", &type::lambda_param,
                "Controls how tight the feature sampling should be. Lower values enforce closer features.")
            .def_readwrite("num_test_splits", &type::num_test_splits,
                "Number of split features sampled at each node. The one that gives the best split is chosen.")
            .def_readwrite("landmark_relative_padding_mode", &type::landmark_relative_padding_mode,
                "If True then features are drawn only from the box around the landmarks, otherwise they come from the bounding box and landmarks together. See the feature_pool_region_padding doc for more details.")
            .def_readwrite("feature_pool_region_padding", &type::feature_pool_region_padding,
                /*!
                    This algorithm works by comparing the relative intensity of pairs of
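
    /*
        A sketch of hypothetical Python usage of the
        shape_predictor_training_options binding above. The attribute names
        follow the .def_readwrite calls in this file; dlib.train_shape_predictor
        and the file names are assumptions not shown in this excerpt:

            import dlib

            options = dlib.shape_predictor_training_options()
            options.be_verbose = True
            options.cascade_depth = 10            # number of cascades
            options.tree_depth = 4                # 2**4 = 16 leaves per tree
            options.num_trees_per_cascade_level = 500
            options.nu = 0.1                      # regularization, must be in (0, 1]
            options.oversampling_amount = 20      # starting points per training example

            # "training.xml" is a hypothetical dataset in dlib's image-XML format.
            dlib.train_shape_predictor("training.xml", "predictor.dat", options)
    */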