template <typename scalar_t>
static void max_pool3d_with_indices_out_frame(
    scalar_t *input_data, scalar_t *output_data, int64_t *indices_data,
    int64_t nbatch, int64_t nslices,
    int64_t istride, int64_t ostride,
    int64_t itime, int64_t iwidth, int64_t iheight,
    int64_t otime, int64_t owidth, int64_t oheight,
    int kT, int kW, int kH,
    int dT, int dW, int dH,
    int pT, int pW, int pH,
    int dilationT, int dilationW, int dilationH)
{
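  // Split the work across the batch dimension: each batch element is offset by
  // `istride` values in the input buffer and `ostride` values in the output and
  // indices buffers, and is processed as an independent frame.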
  at::parallel_for(0, nbatch, 0, [&](int64_t start, int64_t end) {
    for (const auto p : c10::irange(start, end)) {
      max_pool3d_with_indices_single_out_frame(
          input_data + p * istride,
          output_data + p * ostride,
          indices_data + p * ostride,
          nslices,
          itime, iwidth, iheight,
          otime, owidth, oheight,
          kT, kW, kH,
          dT, dW, dH,
          pT, pW, pH,
          dilationT, dilationW, dilationH
      );
    }
  });
}

void max_pool3d_with_indices_out_cpu_template(
    Tensor& output,
    Tensor& indices,
    const Tensor& input_,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    IntArrayRef dilation,
    bool ceil_mode)
{
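  // Parse kernel_size/stride/padding/dilation (each given as a single int or a
  // 3-tuple), derive and validate the output shape, then dispatch on dtype.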
  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
    "max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
  const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);

  TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
    "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
  const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
  const int dH = stride.empty() ? kH :
                 stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
  const int dW = stride.empty() ? kW :
                 stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);

  TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
    "max_pool3d: padding must either be a single int, or a tuple of three ints");
  const int pT = safe_downcast<int, int64_t>(padding[0]);
  const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
  const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);

  TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
    "max_pool3d: dilation must be either a single int, or a tuple of three ints");
  const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
  const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
  const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);

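  // Input is either a non-batched (C, T, H, W) tensor or a batched (N, C, T, H, W) tensor.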
  TORCH_CHECK((input_.ndimension() == 4 || input_.ndimension() == 5),
    "non-empty 4D or 5D (batch mode) tensor expected for input");

  const int64_t nslices = input_.size(-4);
  const int64_t itime = input_.size(-3);
  const int64_t iheight = input_.size(-2);
  const int64_t iwidth = input_.size(-1);

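  // Per dimension: out = floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1,
  // rounding up instead of down when ceil_mode is set.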
  const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode);
  const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode);
  const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode);

  pool3d_shape_check(
    input_,
    nslices,
    kT, kH, kW,
    dT, dH, dW,
    pT, pH, pW,
    dilationT, dilationH, dilationW,
    itime, iheight, iwidth,
    otime, oheight, owidth,
    "max_pool3d_with_indices_out_cpu_template()");

  /* get contiguous input */
  Tensor input = input_.contiguous();

  if (input.dim() == 4) { /* non-batch mode */
    /* resize output */
    output.resize_({nslices, otime, oheight, owidth});
    /* indices will contain ti,i,j locations for each output point */
    indices.resize_({nslices, otime, oheight, owidth});

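    // Single frame: run the per-frame kernel directly over the (C, T, H, W) buffers.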
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(),
      "max_pool3d_with_indices_cpu",
      [&] {
        scalar_t *input_data = input.data_ptr<scalar_t>();
        scalar_t *output_data = output.data_ptr<scalar_t>();
        int64_t *indices_data = indices.data_ptr<int64_t>();
        max_pool3d_with_indices_single_out_frame(