| text | id | metadata | __index_level_0__ |
| --- | --- | --- | --- |
| string (lengths 5–22M) | string (lengths 12–177) | dict | int64 (0–1.37k) |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dynamicconv_cuda
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.unfold import unfold1d
from torch import nn
from torch.autograd import Function
class dynamicconvFunction(Function):
@staticmethod
def forward(ctx, x, weights, padding_l):
ctx.padding_l = padding_l
outputs = dynamicconv_cuda.forward(x, weights, padding_l)
variables = [x, weights]
ctx.save_for_backward(*variables)
return outputs[0]
@staticmethod
def backward(ctx, grad_output):
outputs = dynamicconv_cuda.backward(
grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors
)
grad_input, grad_weights = outputs
return grad_input, grad_weights, None
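# Shape sketch for the custom autograd op above (a hedged reading of how it is
# called from DynamicconvLayer.forward below, not an official API contract):
# the CUDA kernel expects batch-first, time-last tensors.
#
#   x_bct   = x.permute(1, 2, 0).contiguous()          # B x C x T
#   w_bhkt  = weight.permute(1, 2, 3, 0).contiguous()  # B x H x K x T
#   out_bct = dynamicconvFunction.apply(x_bct, w_bhkt, padding_l)  # B x C x T
#   out_tbc = out_bct.permute(2, 0, 1)                 # back to T x B x C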
@with_incremental_state
class DynamicconvLayer(nn.Module):
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
weight_softmax=False,
num_heads=1,
weight_dropout=0.0,
bias=False,
renorm_padding=False,
conv_bias=False,
query_size=None,
):
super(DynamicconvLayer, self).__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight_dropout_module = FairseqDropout(
weight_dropout, module_name=self.__class__.__name__
)
self.renorm_padding = renorm_padding
self.bias = bias
self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_linear.weight)
        if self.conv_bias is not None:
            nn.init.constant_(self.conv_bias, 0.0)
        if self.weight_linear.bias is not None:
            nn.init.constant_(self.weight_linear.bias, 0.0)
def forward(self, x, incremental_state=None, query=None, unfold=None):
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
# R = C // H
# during inference time, incremental BMM is faster
if incremental_state is not None:
unfold = (
x.size(0) > 512 if unfold is None else unfold
) # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
# during training time, use CUDA kernel
else:
weight = self.weight_linear(x).view(T, B, H, K)
if self.weight_softmax:
weight = F.softmax(weight, dim=-1)
if self.weight_dropout_module.p:
weight = self.weight_dropout_module(weight)
weight = weight.permute(1, 2, 3, 0).contiguous()
self.filters = weight
x = x.permute(1, 2, 0).contiguous()
output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(
2, 0, 1
)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
def _forward_unfolded(self, x, incremental_state, query):
"""The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right."""
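        # Rough picture of the unfolding step (hedged; see unfold1d for the
        # exact padding semantics): each output position t sees the K input
        # frames in a window that slides one step to the right per position,
        # with padding_l zero frames prepended on the left:
        #   x:        T x B x C
        #   x_unfold: T x B x C x K  ->  reshaped to (T*B*H) x R x K
        # so each head convolves its own R = C // H channels.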
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T * B * H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(
incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :]
)
x_unfold = x_unfold.view(T * B * H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K - 1:
weight = weight.narrow(1, K - T, T)
K, padding_l = T, T - 1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T * B * H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2) :]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
    def _forward_expanded(self, x, incremental_state, query):
"""Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
"""
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T * B * H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B * H, K).transpose(0, 1)
x = x.view(T, B * H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf"))
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
# For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
if K > T and P == K - 1:
weight = weight.narrow(2, K - T, T)
K, P = T, T - 1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False)
weight_expanded.as_strided(
(B * H, T, K), (T * (T + K - 1), T + K, 1)
).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
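# Minimal usage sketch (hedged; the shapes and hyper-parameters below are
# illustrative assumptions, not values taken from a particular model):
#
#   layer = DynamicconvLayer(
#       input_size=512, kernel_size=3, padding_l=2,
#       num_heads=8, weight_softmax=True,
#   )
#   x = torch.randn(20, 4, 512)   # T x B x C
#   # Training path (requires the built dynamicconv_cuda extension and a GPU):
#   #   out = layer(x.cuda())
#   # Incremental decoding path (pure PyTorch, one step at a time):
#   #   out = layer(x, incremental_state={})
#   # Both return a tensor of shape T x B x C.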
|
COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py",
"repo_id": "COCO-LM",
"token_count": 4118
}
| 207 |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
#include "lightconv_cuda_forward.cu"
#include "lightconv_cuda_backward.cu"
#include "../cuda_utils.cu"
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void lightconv_forward_kernel(const scalar_t* input,
const scalar_t* filters,
int minibatch, int sequenceLength,
int numFeatures, int numFiltersInBlock,
scalar_t* output) {
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int featureIdx = blockIdx.y;
const int filterIdx = featureIdx / numFiltersInBlock;
const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
scalar_t* outputFeature = &output[IOOffset];
const scalar_t* inputFilter = &filters[filterIdx * FS];
assert(blockDim.x == SB);
scalar_t filter[FS];
#pragma unroll
for (int i = 0; i < FS; ++i) {
filter[i] = inputFilter[i];
}
__shared__ scalar_t temp[SB + FS];
zeroSharedMem<FS, SB, padding_l>(temp);
const int numIterations = divUp<int, int>(sequenceLength, SB);
for (int i = 0; i < numIterations; ++i) {
// Read input into shared memory
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength,
i, numIterations, (numIterations == 1), temp);
__syncthreads();
scalar_t out = 0;
#pragma unroll
for (int j = 0; j < FS; ++j) {
out += filter[j] * temp[tid + j];
}
// Write output
const int outputOffset = inputOffset;
if ((outputOffset + tid) < sequenceLength) {
outputFeature[outputOffset + tid] = out;
}
__syncthreads();
}
}
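// Presumed host-side launch shape for the kernel above (inferred from the
// index arithmetic, not taken from the dispatch code in
// lightconv_cuda_forward.cu): one block per (batch element, feature channel)
// pair and SB threads per block, so each thread owns one output time step per
// SB-sized tile:
//
//   dim3 grid(minibatch, numFeatures);
//   dim3 block(SB);                       // matches assert(blockDim.x == SB)
//   lightconv_forward_kernel<FS, SB, padding_l, scalar_t>
//       <<<grid, block>>>(input, filters, minibatch, sequenceLength,
//                         numFeatures, numFiltersInBlock, output);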
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void lightconv_grad_wrt_input_kernel(
const scalar_t* input,
const scalar_t* filters,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
scalar_t* output) {
// input grad kernel is similar to forward kernel
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int featureIdx = blockIdx.y;
const int filterIdx = featureIdx / numFiltersInBlock;
const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
scalar_t* outputFeature = &output[IOOffset];
const scalar_t* inputFilter = &filters[filterIdx * FS];
assert(blockDim.x == SB);
scalar_t filter[FS];
// The only change is loading the filter in reverse
#pragma unroll
for (int i = 0; i < FS; ++i) {
filter[i] = inputFilter[FS - i - 1];
}
__shared__ scalar_t temp[SB + FS];
const int padding = FS - padding_l - 1;
zeroSharedMem<FS, SB, padding>(temp);
__syncthreads();
const int numIterations = divUp<int, int>(sequenceLength, SB);
for (int i = 0; i < numIterations; ++i) {
// Read input into shared memory
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding>(inputFeature, inputOffset, sequenceLength,
i, numIterations, false, temp);
__syncthreads();
scalar_t out = 0;
#pragma unroll
for (int j = 0; j < FS; ++j) {
out += filter[j] * temp[tid + j];
}
// Write output
const int outputOffset = inputOffset;
if ((outputOffset + tid) < sequenceLength) {
outputFeature[outputOffset + tid] = out;
}
__syncthreads();
}
}
// This is by far the most expensive kernel in terms of time taken.
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void lightconv_grad_wrt_weights_firstpass_short_kernel(
const scalar_t* input,
const scalar_t* gradInput,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
float* output) {
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int filterIdx = blockIdx.y;
const int numIterations = divUp<int, int>(sequenceLength, SB);
float* tempOutputGradWeight = &output[filterIdx * FS * minibatch];
assert(blockDim.x == SB);
__shared__ scalar_t tempInput[SB + FS];
__shared__ scalar_t tempGradInput[SB + FS];
// local weight accumulation
float accumWeights[FS];
// Initialize memory
for (int i = 0; i < FS; ++i) {
accumWeights[i] = float(0.0);
}
// loop over each sequence within filterblock
for (int idxInFilterBlock = 0; idxInFilterBlock < numFiltersInBlock; ++idxInFilterBlock) {
const int featureOffset = batchIdx * numFeatures * sequenceLength + (filterIdx * numFiltersInBlock + idxInFilterBlock) * sequenceLength;
const scalar_t* inputFeature = &input[featureOffset];
const scalar_t* gradInputFeature = &gradInput[featureOffset];
zeroSharedMem<FS, SB, padding_l>(tempInput);
zeroSharedMem<FS, SB, (FS/2)>(tempGradInput);
__syncthreads();
for (int i = 0; i < numIterations; ++i) {
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength,
i, numIterations, false, tempInput);
load_input_to_shared<FS, SB, (FS/2)>(gradInputFeature, inputOffset, sequenceLength,
i, numIterations, false, tempGradInput);
__syncthreads();
const int gradIndex = (FS/2) + tid;
scalar_t tempGrad = tempGradInput[gradIndex];
#pragma unroll
for (int j = 0; j < FS; j++) {
const int inputIndex = tid + j;
accumWeights[j] += tempInput[inputIndex] * tempGrad;
}
__syncthreads();
}
}
// Row-major sum
for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) {
float temp;
if (tid < sequenceLength) {
temp = accumWeights[filterWeightIdx];
} else {
temp = float(0.0);
}
const int outputOffset = filterWeightIdx * minibatch + batchIdx;
temp = blockReduce(temp);
if (tid == 0) {
tempOutputGradWeight[outputOffset] = temp;
}
}
}
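// Hedged summary of the two-pass reduction used by the *_short kernels: the
// first pass (above) writes one partial sum per (filter, tap, batch element)
// into a float buffer laid out as
//   output[filterIdx * FS * minibatch + filterWeightIdx * minibatch + batchIdx],
// and the second pass (below) reduces that buffer over the minibatch dimension
// to produce the FS gradient values per filter. The non-short variants further
// split the batch dimension by the position within the filter block and reduce
// over minibatch * numFiltersInBlock instead.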
template<int FS, int SB, typename scalar_t>
__global__
void lightconv_grad_wrt_weights_secondpass_short_kernel(
const float* input,
const int minibatch,
const int numFiltersInBlock,
scalar_t* output) {
assert(blockDim.x == SB);
const int tid = threadIdx.x;
const int filterIdx = blockIdx.x;
const int filterWeightIdx = blockIdx.y;
const int inputOffset = filterIdx * FS * minibatch +
filterWeightIdx * minibatch;
const float* tempInput = &input[inputOffset];
// read into shared memory for reduction
int readIndex = tid;
float sum = 0.0;
while (readIndex < minibatch) {
sum += tempInput[readIndex];
readIndex += SB;
}
float temp = blockReduce(sum);
if (tid == 0) {
output[blockIdx.x * FS + blockIdx.y] = temp;
}
}
// This is by far the most expensive kernel in terms of time taken.
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void lightconv_grad_wrt_weights_firstpass_kernel(
const scalar_t* input,
const scalar_t* gradInput,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
float* output) {
assert(blockDim.x == SB);
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int featureIdx = blockIdx.y;
const int filterIdx = featureIdx / numFiltersInBlock;
const int idxInFilterBlock = featureIdx % numFiltersInBlock;
const int numIterations = divUp<int, int>(sequenceLength, SB);
float temp;
__shared__ scalar_t tempInput[SB + FS];
__shared__ scalar_t tempGradInput[SB + FS];
zeroSharedMem<FS, SB, padding_l>(tempInput);
zeroSharedMem<FS, SB, (FS/2)>(tempGradInput);
__syncthreads();
float accumWeights[FS];
for (int i = 0; i < FS; ++i) {
accumWeights[i] = float(0.0);
}
const int IOOffset = batchIdx * numFeatures * sequenceLength + featureIdx * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
const scalar_t* gradInputFeature = &gradInput[IOOffset];
float* tempOutputGradWeight = &output[filterIdx * FS * minibatch * numFiltersInBlock];
for (int i = 0; i < numIterations; ++i) {
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset, sequenceLength,
i, numIterations, false, tempInput);
load_input_to_shared<FS, SB, (FS/2)>(gradInputFeature, inputOffset, sequenceLength,
i, numIterations, false, tempGradInput);
__syncthreads();
#pragma unroll
for (int j = 0; j < FS; ++j) {
accumWeights[j] += tempInput[tid + j] * tempGradInput[tid + (FS/2)];
}
__syncthreads();
}
// Row-major sum
for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) {
// Write to shared memory before reduction
if (tid < sequenceLength) {
temp = accumWeights[filterWeightIdx];
} else {
temp = float(0.0);
}
temp = blockReduce(temp);
const int outputOffset = filterWeightIdx * minibatch * numFiltersInBlock +
batchIdx * numFiltersInBlock +
idxInFilterBlock;
if (tid == 0) {
tempOutputGradWeight[outputOffset] = temp;
}
}
}
template<int FS, int SB, typename scalar_t>
__global__
void lightconv_grad_wrt_weights_secondpass_kernel(
const float* input,
const int minibatch,
const int numFiltersInBlock,
scalar_t* output) {
assert(blockDim.x == SB);
const int tid = threadIdx.x;
// What is the id within a minibatch
const int filterIdx = blockIdx.x;
const int filterWeightIdx = blockIdx.y;
const int inputOffset = filterIdx * FS * minibatch * numFiltersInBlock +
filterWeightIdx * minibatch * numFiltersInBlock;
const float* tempInput = &input[inputOffset];
int readIndex = tid;
float sum = float(0.0);
while (readIndex < (minibatch * numFiltersInBlock)) {
sum += tempInput[readIndex];
readIndex += SB;
}
float temp = blockReduce(sum);
if (tid == 0) {
output[blockIdx.x * FS + blockIdx.y] = temp;
}
}
|
COCO-LM/fairseq/fairseq/modules/lightconv_layer/lightconv_cuda_kernel.cu/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/lightconv_layer/lightconv_cuda_kernel.cu",
"repo_id": "COCO-LM",
"token_count": 4201
}
| 208 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
from operator import attrgetter, itemgetter
import numpy as np
import torch.distributed as dist
import torch.nn as nn
from .modules import PQConv2d, PQEmbedding, PQLinear
from .pq import PQ
def quantize_model_(
model,
size_tracker,
layers_to_quantize,
block_sizes_config,
n_centroids_config,
step=0,
n_iter=15,
eps=1e-6,
max_tentatives=100,
verbose=True,
):
"""
Quantize a model in-place by stages. All the targeted
layers are replaced by their quantized counterpart,
and the model is ready for the finetuning of the
centroids in a standard training loop (no modifications
required). Note that we do not quantize biases.
Args:
- model: a nn.Module
        - size_tracker: useful for tracking quantization statistics
- layers_to_quantize: a list containing regexps for
filtering the layers to quantize at each stage according
to their name (as in model.named_parameters())
- block_sizes_config: dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
- n_centroids_config: dict like
{
'Conv2d': ('kernel_size', {'*': 256}),
'Linear': ('in_features', {'*': 256})
}
For instance, all conv2d layers are quantized with 256 centroids
        - step: index into layers_to_quantize; the layers matching
          layers_to_quantize[step] are quantized in place at this call
"""
quantized_layers = get_layers(model, layers_to_quantize[step])
for layer in quantized_layers:
# book-keeping
is_master_process = (not dist.is_initialized()) or (
dist.is_initialized() and dist.get_rank() == 0
)
verbose = verbose and is_master_process
# get block size and centroids
module = attrgetter(layer)(model)
block_size = get_param(module, layer, block_sizes_config)
n_centroids = get_param(module, layer, n_centroids_config)
if verbose:
logging.info(
f"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids"
)
# quantize layer
weight = module.weight.data.clone()
is_bias = "bias" in [x[0] for x in module.named_parameters()]
bias = module.bias.data.clone() if is_bias else None
quantizer = PQ(
weight,
block_size,
n_centroids=n_centroids,
n_iter=n_iter,
eps=eps,
max_tentatives=max_tentatives,
verbose=verbose,
)
# quantization performed on all GPUs with same seed
quantizer.encode()
centroids = quantizer.centroids.contiguous()
assignments = quantizer.assignments.contiguous()
# broadcast results to make sure weights are up-to-date
if dist.is_initialized():
dist.broadcast(centroids, 0)
dist.broadcast(assignments, 0)
# instantiate the quantized counterpart
if isinstance(module, nn.Linear):
out_features, in_features = map(
lambda k: module.__dict__[k], ["out_features", "in_features"]
)
quantized_module = PQLinear(
centroids, assignments, bias, in_features, out_features
)
elif isinstance(module, nn.Embedding):
num_embeddings, embedding_dim = map(
lambda k: module.__dict__[k], ["num_embeddings", "embedding_dim"]
)
quantized_module = PQEmbedding(
centroids, assignments, num_embeddings, embedding_dim
)
elif isinstance(module, nn.Conv2d):
out_channels, in_channels, kernel_size = map(
lambda k: module.__dict__[k],
["out_channels", "in_channels", "kernel_size"],
)
stride, padding, dilation, groups, padding_mode = map(
lambda k: module.__dict__[k],
["stride", "padding", "dilation", "groups", "padding_mode"],
)
quantized_module = PQConv2d(
centroids,
assignments,
bias,
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
padding_mode=padding_mode,
)
else:
raise ValueError(f"Module {module} not yet supported for quantization")
# replace layer by its quantized counterpart
attrsetter(layer)(model, quantized_module)
# update statistics
size_tracker.update(weight, block_size, n_centroids)
# return name of quantized layers
return quantized_layers
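# Hedged usage sketch for quantize_model_ (the model, the tracker and the
# layer regexp below are illustrative assumptions):
#
#   size_tracker = SizeTracker(model)
#   layers_to_quantize = ["decoder\\.layers\\.\\d+\\.fc[12]"]
#   block_sizes_config = {"Linear": ("in_features", {"*": 8})}
#   n_centroids_config = {"Linear": ("in_features", {"*": 256})}
#   quantized = quantize_model_(
#       model, size_tracker, layers_to_quantize,
#       block_sizes_config, n_centroids_config, step=0,
#   )
#   print(size_tracker)  # reports indexing + centroid + uncompressed sizes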
def get_layers(model, filter_regexp):
"""
Filters out the layers according to a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
            down_layers\\.[123456]\\.(conv[12]|identity\\.conv)
          keeps the blocks down_layers 1 to 6, and inside each
          block keeps conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
    # remove .weight in all other names (or .weight_orig if spectral norm is used)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers))
def get_param(module, layer_name, param_config):
"""
Given a quantization configuration, get the right parameter
for the module to be quantized.
Args:
- module: a nn.Module
- layer_name: the name of the layer
- param_config: a dict like
{
'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),
'Linear': ('in_features', {'*': 8})
}
For instance, all conv2d layers with kernel size 3x3 have
a block size of 9 and all Linear layers are quantized with
a block size of 8, irrespective of their size.
Remarks:
- if 'fuzzy_name' is passed as a parameter, layers whose layer_name
include 'fuzzy_name' will be assigned the given parameter.
In the following example, conv.expand layers will have a block
size of 9 while conv.reduce will have a block size of 4 and all
other layers will have a block size of 2.
{
'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}),
'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4})
}
"""
layer_type = module.__class__.__name__
if layer_type not in param_config:
raise KeyError(f"Layer type {layer_type} not in config for layer {module}")
feature, params = param_config[module.__class__.__name__]
if feature != "fuzzy_name":
feature_value = str(getattr(module, feature))
if feature_value not in params:
if "*" in params:
feature_value = "*"
else:
raise KeyError(
f"{feature}={feature_value} not in config for layer {module}"
)
else:
feature_values = [name for name in params if name in layer_name]
if len(feature_values) == 0:
if "*" in params:
feature_value = "*"
else:
raise KeyError(f"name={layer_name} not in config for {module}")
else:
feature_value = feature_values[0]
return params[feature_value]
class SizeTracker(object):
"""
Class to keep track of the compressed network size with iPQ.
Args:
- model: a nn.Module
Remarks:
- The compressed size is the sum of three components
for each layer in the network:
(1) Storing the centroids given by iPQ in fp16
(2) Storing the assignments of the blocks in int8
(3) Storing all non-compressed elements such as biases
        - This cost is only valid if we use 256 centroids (then
          indexing can indeed be done with int8).
"""
def __init__(self, model):
self.model = model
self.size_non_compressed_model = self.compute_size()
self.size_non_quantized = self.size_non_compressed_model
self.size_index = 0
self.size_centroids = 0
self.n_quantized_layers = 0
def compute_size(self):
"""
Computes the size of the model (in MB).
"""
res = 0
for _, p in self.model.named_parameters():
res += p.numel()
return res * 4 / 1024 / 1024
def update(self, W, block_size, n_centroids):
"""
Updates the running statistics when quantizing a new layer.
"""
# bits per weights
bits_per_weight = np.log2(n_centroids) / block_size
self.n_quantized_layers += 1
# size of indexing the subvectors of size block_size (in MB)
size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024
self.size_index += size_index_layer
# size of the centroids stored in float16 (in MB)
size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024
self.size_centroids += size_centroids_layer
# size of non-compressed layers, e.g. LayerNorms or biases (in MB)
size_uncompressed_layer = W.numel() * 4 / 1024 / 1024
self.size_non_quantized -= size_uncompressed_layer
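        # Worked example (hedged, using the formulas above): with 256 centroids
        # and block_size 8, bits_per_weight = log2(256) / 8 = 1, so a 10M-weight
        # layer costs ~1.19 MB of int8-indexable assignments plus
        # 256 * 8 * 2 bytes = 4 KB of fp16 centroids, versus ~38 MB uncompressed.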
def __repr__(self):
size_compressed = (
self.size_index + self.size_centroids + self.size_non_quantized
)
compression_ratio = self.size_non_compressed_model / size_compressed # NOQA
return (
f"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. "
f"After quantizing {self.n_quantized_layers} layers, size "
f"(indexing + centroids + other): {self.size_index:.2f} MB + "
f"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = "
f"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x"
)
def attrsetter(*items):
def resolve_attr(obj, attr):
attrs = attr.split(".")
head = attrs[:-1]
tail = attrs[-1]
for name in head:
obj = getattr(obj, name)
return obj, tail
def g(obj, val):
for attr in items:
resolved_obj, resolved_attr = resolve_attr(obj, attr)
setattr(resolved_obj, resolved_attr, val)
return g
|
COCO-LM/fairseq/fairseq/modules/quantization/pq/utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/pq/utils.py",
"repo_id": "COCO-LM",
"token_count": 5248
}
| 209 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.args = args
self.embed_dim = args.encoder_embed_dim
self.quant_noise = getattr(args, 'quant_noise_pq', 0)
self.quant_noise_block_size = getattr(args, 'quant_noise_pq_block_size', 8) or 8
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu') or "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.encoder_normalize_before
self.fc1 = self.build_fc1(
self.embed_dim,
args.encoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.encoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor] = None):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
need_weights=False,
attn_mask=attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
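# Hedged construction sketch for TransformerEncoderLayer (the Namespace fields
# below are the ones the class reads without a default; the values are
# illustrative assumptions, not defaults of any particular fairseq model):
#
#   from argparse import Namespace
#   args = Namespace(
#       encoder_embed_dim=512, encoder_ffn_embed_dim=2048,
#       encoder_attention_heads=8, dropout=0.1, attention_dropout=0.1,
#       encoder_normalize_before=False,
#   )
#   layer = TransformerEncoderLayer(args)
#   x = torch.randn(10, 2, 512)                # seq_len x batch x embed_dim
#   out = layer(x, encoder_padding_mask=None)  # same shape as x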
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self, embed_dim, args, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
|
COCO-LM/fairseq/fairseq/modules/transformer_layer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/transformer_layer.py",
"repo_id": "COCO-LM",
"token_count": 8124
}
| 210 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from omegaconf import II, DictConfig
import logging
try:
import deepspeed
has_deepspeed = True
except ImportError as e:
has_deepspeed = False
def _get_cpu_adam():
try:
from deepspeed.ops.op_builder import CPUAdamBuilder
return CPUAdamBuilder().load()
except ImportError:
# fbcode
from deepspeed.ops.adam import DeepSpeedCPUAdam as ds_opt_adam
return ds_opt_adam
@dataclass
class FairseqCPUAdamConfig(FairseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
fp16_adam_stats: bool = field(
default=False, metadata={"help": "use FP16 stats (with automatic scaling)"}
)
# TODO common vars below in parent
lr: List[float] = II("optimization.lr")
@register_optimizer("cpu_adam", dataclass=FairseqCPUAdamConfig)
class FairseqCPUAdam(FairseqOptimizer):
"""Adam optimizer for fairseq, optimized for CPU tensors.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
self._optimizer = CPUAdam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
"use_fp16_stats": self.cfg.fp16_adam_stats,
}
class CPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(
self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
use_fp16_stats=False,
):
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
}
super().__init__(params, defaults)
self.use_fp16_stats = use_fp16_stats
self.FLOAT16_MAX = 65504.0
if not has_deepspeed:
raise ImportError("Please install DeepSpeed: pip install deepspeed")
self.opt_id = CPUAdam.optimizer_id
CPUAdam.optimizer_id = CPUAdam.optimizer_id + 1
self.ds_opt_adam = _get_cpu_adam()
adamw_mode = True
self.ds_opt_adam.create_adam(
self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode
)
@property
def supports_flat_params(self):
return True
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group["params"]):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
state["step"] = 0
dtype = torch.float16 if self.use_fp16_stats else p.data.dtype
# gradient momentums
state["exp_avg"] = torch.zeros_like(
p.data, dtype=dtype, device="cpu"
)
# gradient variances
state["exp_avg_sq"] = torch.zeros_like(
p.data, dtype=dtype, device="cpu"
)
if self.use_fp16_stats:
assert torch.is_floating_point(p.data)
state["exp_avg_scale"] = 1.0
state["exp_avg_sq_scale"] = 1.0
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
p_data_bak = p.data # backup of the original data pointer
p.data = p.data.to(dtype=torch.float32, device="cpu")
p.grad.data = p.grad.data.to(dtype=torch.float32, device="cpu")
if self.use_fp16_stats:
exp_avg = exp_avg.float() * state["exp_avg_scale"]
exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"]
state["step"] += 1
beta1, beta2 = group["betas"]
self.ds_opt_adam.adam_update(
self.opt_id,
state["step"],
group["lr"],
beta1,
beta2,
group["eps"],
group["weight_decay"],
group["bias_correction"],
p.data,
p.grad.data,
exp_avg,
exp_avg_sq,
)
if p_data_bak.data_ptr() != p.data.data_ptr():
p_data_bak.copy_(p.data)
p.data = p_data_bak
if self.use_fp16_stats:
def inf_norm(t):
return torch.norm(t, float("inf"))
# from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py
state["exp_avg_scale"], state["exp_avg_sq_scale"] = (
1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX,
1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX,
)
state["exp_avg"], state["exp_avg_sq"] = (
(exp_avg / state["exp_avg_scale"]).half(),
(exp_avg_sq / state["exp_avg_sq_scale"]).half(),
)
return loss
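# Hedged usage sketch (requires a working DeepSpeed install, since CPUAdam
# builds the DeepSpeed CPU Adam op in __init__; the toy model below is an
# illustrative assumption):
#
#   model = torch.nn.Linear(16, 16)
#   opt = CPUAdam(model.parameters(), lr=1e-3, weight_decay=0.01,
#                 use_fp16_stats=False)
#   loss = model(torch.randn(4, 16)).sum()
#   loss.backward()
#   opt.step()   # Adam moments live on CPU, optionally stored in fp16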
|
COCO-LM/fairseq/fairseq/optim/cpu_adam.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/cpu_adam.py",
"repo_id": "COCO-LM",
"token_count": 3472
}
| 211 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
@dataclass
class TriangularLRScheduleConfig(FairseqDataclass):
max_lr: float = field(
default="???", metadata={"help": "max learning rate, must be more than cfg.lr"}
)
lr_period_updates: float = field(
default=5000,
metadata={"help": "initial number of updates per period (cycle length)"},
)
lr_shrink: float = field(
default=0.1, metadata={"help": "shrink factor for annealing"}
)
shrink_min: bool = field(
default=False, metadata={"help": "if set, also shrinks min lr"}
)
lr: List[float] = II("optimization.lr")
@register_lr_scheduler("triangular", dataclass=TriangularLRScheduleConfig)
class TriangularLRSchedule(FairseqLRScheduler):
"""Assign LR based on a triangular cyclical schedule.
See https://arxiv.org/pdf/1506.01186.pdf for details.
"""
def __init__(self, cfg: TriangularLRScheduleConfig, optimizer):
super().__init__(cfg, optimizer)
if len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with triangular."
" Consider --lr-scheduler=fixed instead."
)
lr = cfg.lr[0]
assert cfg.max_lr > lr, "max_lr must be more than lr"
self.min_lr = lr
self.max_lr = cfg.max_lr
self.stepsize = cfg.lr_period_updates // 2
self.lr_shrink = cfg.lr_shrink
self.shrink_min = cfg.shrink_min
# initial learning rate
self.lr = self.min_lr
self.optimizer.set_lr(self.lr)
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
cycle = math.floor(num_updates / (2 * self.stepsize))
lr_shrink = self.lr_shrink ** cycle
max_lr = self.max_lr * lr_shrink
if self.shrink_min:
min_lr = self.min_lr * lr_shrink
else:
min_lr = self.min_lr
x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1)
self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x))
self.optimizer.set_lr(self.lr)
return self.lr
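if __name__ == "__main__":
    # Standalone sketch of the schedule computed in step_update above (the
    # numbers are illustrative assumptions, not fairseq defaults): the LR rises
    # linearly from min_lr to max_lr over `stepsize` updates, falls back over
    # the next `stepsize`, and each completed cycle shrinks max_lr by lr_shrink.
    min_lr, max_lr, stepsize, lr_shrink = 1e-4, 1e-3, 5000, 0.5
    for num_updates in (0, 2500, 5000, 7500, 10000, 15000):
        cycle = math.floor(num_updates / (2 * stepsize))
        cycle_max_lr = max_lr * lr_shrink ** cycle
        x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)
        lr = min_lr + (cycle_max_lr - min_lr) * max(0, (1 - x))
        print(f"update {num_updates:>6d}: lr = {lr:.6f}")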
|
COCO-LM/fairseq/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py",
"repo_id": "COCO-LM",
"token_count": 1166
}
| 212 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent, populate_dataclass
from hydra.core.config_store import ConfigStore
from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(cfg: FairseqDataclass, **kwargs):
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
# legacy tasks
task = TASK_REGISTRY[task_name]
if task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = populate_dataclass(dc(), cfg)
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert task is not None, f"Could not infer task type from {cfg}. Available tasks: {TASK_REGISTRY.keys()}"
return task.setup_task(cfg, **kwargs)
def register_task(name, dataclass=None):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError("Cannot register duplicate task ({})".format(name))
if not issubclass(cls, FairseqTask):
raise ValueError(
"Task ({}: {}) must extend FairseqTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError(
"Cannot register task with duplicate class name ({})".format(
cls.__name__
)
)
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
TASK_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="task", node=node, provider="fairseq")
return cls
return register_task_cls
def get_task(name):
return TASK_REGISTRY[name]
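# Hedged registration sketch (the task name and config class are illustrative
# assumptions): pairing a dataclass with the task also places its defaults in
# Hydra's ConfigStore under the "task" group, as done in register_task above.
#
#   from dataclasses import dataclass, field
#
#   @dataclass
#   class MyTranslationConfig(FairseqDataclass):
#       data: str = field(default="", metadata={"help": "path to data"})
#
#   @register_task("my_translation", dataclass=MyTranslationConfig)
#   class MyTranslationTask(FairseqTask):
#       ...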
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("fairseq.tasks." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group("Additional command-line arguments")
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
|
COCO-LM/fairseq/fairseq/tasks/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/__init__.py",
"repo_id": "COCO-LM",
"token_count": 1856
}
| 213 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
import numpy as np
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
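# Hedged illustration of the on-disk layout that split_exists and the prefix
# logic above expect (a made-up de-en pair; the actual file extensions depend
# on dataset_impl, e.g. .bin/.idx for the binarized formats):
#
#   data_path/
#     train.de-en.de.*   valid.de-en.de.*
#     train.de-en.en.*   valid.de-en.en.*
#     dict.de.txt        dict.en.txt
#     train.align.de-en.*   (only when load_alignments is set)
#
# Extra shards train1.*, train2.*, ... are picked up by the itertools.count()
# loop when combine=True.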
@dataclass
class TranslationConfig(FairseqDataclass):
data: Optional[str] = field(
default=None,
metadata={
"help": "colon separated path to data directories list, will be iterated upon during epochs "
"in round-robin manner; however, valid and test data are always in the first directory "
"to avoid the need for repeating them in all directories"
},
)
source_lang: Optional[str] = field(
default=None,
metadata={
"help": "source language",
"argparse_alias": "-s",
},
)
target_lang: Optional[str] = field(
default=None,
metadata={
"help": "target language",
"argparse_alias": "-t",
},
)
load_alignments: bool = field(
default=False, metadata={"help": "load the binarized alignments"}
)
left_pad_source: bool = field(
default=True, metadata={"help": "pad the source on the left"}
)
left_pad_target: bool = field(
default=False, metadata={"help": "pad the target on the left"}
)
max_source_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the target sequence"}
)
upsample_primary: int = field(
        default=-1, metadata={"help": "amount by which to upsample the primary dataset"}
)
truncate_source: bool = field(
default=False, metadata={"help": "truncate source to max-source-positions"}
)
num_batch_buckets: int = field(
default=0,
metadata={
"help": "if >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations"
},
)
train_subset: str = II("dataset.train_subset")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
# options for reporting BLEU during validation
eval_bleu: bool = field(
default=False, metadata={"help": "evaluation with BLEU scores"}
)
eval_bleu_args: Optional[str] = field(
default="{}",
metadata={
"help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string'
},
)
eval_bleu_detok: str = field(
default="space",
metadata={
"help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; "
"use 'space' to disable detokenization; see fairseq.data.encoders for other options"
},
)
eval_bleu_detok_args: Optional[str] = field(
default="{}",
metadata={"help": "args for building the tokenizer, if needed, as JSON string"},
)
eval_tokenized_bleu: bool = field(
default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
)
eval_bleu_remove_bpe: Optional[str] = field(
default=None,
metadata={
"help": "remove BPE before computing BLEU",
"argparse_const": "@@ ",
},
)
eval_bleu_print_samples: bool = field(
default=False, metadata={"help": "print sample generations during validation"}
)
@register_task("translation", dataclass=TranslationConfig)
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
"""
cfg: TranslationConfig
def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):
super().__init__(cfg)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, cfg: TranslationConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
            cfg (TranslationConfig): configuration for setting up the task
"""
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
# find language pair automatically
if cfg.source_lang is None or cfg.target_lang is None:
cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])
if cfg.source_lang is None or cfg.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict)))
return cls(cfg, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
if split != self.cfg.train_subset:
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
load_alignments=self.cfg.load_alignments,
truncate_source=self.cfg.truncate_source,
num_buckets=self.cfg.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.cfg.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, cfg):
model = super().build_model(cfg)
if self.cfg.eval_bleu:
detok_args = json.loads(self.cfg.eval_bleu_detok_args)
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
)
gen_args = json.loads(self.cfg.eval_bleu_args)
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.cfg.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.cfg.eval_bleu:
def sum_logs(key):
import torch
result = sum(log.get(key, 0) for log in logging_outputs)
if torch.is_tensor(result):
result = result.cpu()
return result
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.cfg.max_source_positions, self.cfg.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.cfg.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.cfg.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.cfg.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
|
COCO-LM/fairseq/fairseq/tasks/translation.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/translation.py",
"repo_id": "COCO-LM",
"token_count": 8408
}
| 214 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import logging
import os
import shutil
import sys
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool
from fairseq import options, tasks, utils
from fairseq.binarizer import Binarizer
from fairseq.data import indexed_dataset
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.preprocess")
def main(args):
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(
logging.FileHandler(
filename=os.path.join(args.destdir, "preprocess.log"),
)
)
logger.info(args)
task = tasks.get_task(args.task)
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
target = not args.only_source
if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
raise FileExistsError(dict_path(args.source_lang))
if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert (
not args.srcdict or not args.tgtdict
), "cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]},
src=True,
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert (
args.trainpref
), "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab)))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
input_file = "{}{}".format(
input_prefix, ("." + lang) if lang is not None else ""
)
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
lang,
offsets[worker_id],
offsets[worker_id + 1],
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
merge_result(
Binarizer.binarize(
input_file, vocab, lambda t: ds.add_item(t), offset=0, end=offsets[1]
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
logger.info(
"[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
lang,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result["nseq"]
input_file = input_prefix
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize_alignments,
(
args,
input_file,
utils.parse_alignment,
prefix,
offsets[worker_id],
offsets[worker_id + 1],
),
callback=merge_result,
)
pool.close()
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl
)
merge_result(
Binarizer.binarize_alignments(
input_file,
utils.parse_alignment,
lambda t: ds.add_item(t),
offset=0,
end=offsets[1],
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0]))
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(
vocab, validpref, outprefix, lang, num_workers=args.workers
)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.trainpref + "." + args.align_suffix,
"train.align",
num_workers=args.workers,
)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.validpref + "." + args.align_suffix,
"valid.align",
num_workers=args.workers,
)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
make_binary_alignment_dataset(
args.testpref + "." + args.align_suffix,
"test.align",
num_workers=args.workers,
)
make_all(args.source_lang, src_dict)
if target:
make_all(args.target_lang, tgt_dict)
if args.align_suffix:
make_all_alignments()
logger.info("Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
freq_map = {}
with open(args.alignfile, "r", encoding="utf-8") as align_file:
with open(src_file_name, "r", encoding="utf-8") as src_file:
with open(tgt_file_name, "r", encoding="utf-8") as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w",
encoding="utf-8",
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl,
vocab_size=len(vocab),
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(
filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
ds = indexed_dataset.make_builder(
dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl,
vocab_size=None,
)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize_alignments(
filename, parse_alignment, consumer, offset=offset, end=end
)
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
return res
def dataset_dest_prefix(args, output_prefix, lang):
base = "{}/{}".format(args.destdir, output_prefix)
if lang is not None:
lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang)
elif args.only_source:
lang_part = ""
else:
lang_part = ".{}-{}".format(args.source_lang, args.target_lang)
return "{}{}".format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
base = dataset_dest_prefix(args, output_prefix, lang)
return "{}.{}".format(base, extension)
def get_offsets(input_file, num_workers):
return Binarizer.find_offsets(input_file, num_workers)
def cli_main():
parser = options.get_preprocessing_parser()
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
|
COCO-LM/fairseq/fairseq_cli/preprocess.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq_cli/preprocess.py",
"repo_id": "COCO-LM",
"token_count": 7433
}
| 215 |
try:
import torch
import fused_layernorm_cuda
from .fused_layer_norm import FusedLayerNorm
del torch
del fused_layernorm_cuda
del fused_layer_norm
except ImportError as err:
print("cannot import kernels, please install the package")
|
COCO-LM/fairseq/fused_ops/fused_ops/layernorm/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/fused_ops/layernorm/__init__.py",
"repo_id": "COCO-LM",
"token_count": 91
}
| 216 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
    parser = argparse.ArgumentParser(description="symmetric alignment builder")
# fmt: off
parser.add_argument('--fast_align_dir',
help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir',
help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic',
help='heuristic to use for symmetrization',
default='grow-diag-final-and')
parser.add_argument('--source_file',
help='path to a file with sentences '
'in the source language')
parser.add_argument('--target_file',
help='path to a file with sentences '
'in the target language')
parser.add_argument('--output_dir',
help='output directory')
# fmt: on
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, "fast_align")
symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal")
sym_fast_align_bin = os.path.join(
args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl"
)
# create joined file
joined_file = os.path.join(args.output_dir, "text.joined")
with open(args.source_file, "r", encoding="utf-8") as src, open(
args.target_file, "r", encoding="utf-8"
) as tgt:
with open(joined_file, "w", encoding="utf-8") as joined:
for s, t in zip_longest(src, tgt):
print("{} ||| {}".format(s.strip(), t.strip()), file=joined)
bwd_align_file = os.path.join(args.output_dir, "align.backward")
# run forward alignment
fwd_align_file = os.path.join(args.output_dir, "align.forward")
fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file
)
assert os.system(fwd_fast_align_cmd) == 0
# run backward alignment
bwd_align_file = os.path.join(args.output_dir, "align.backward")
bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format(
FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file
)
assert os.system(bwd_fast_align_cmd) == 0
# run symmetrization
sym_out_file = os.path.join(args.output_dir, "aligned")
sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format(
SYMFASTALIGN=sym_fast_align_bin,
FWD=fwd_align_file,
BWD=bwd_align_file,
SRC=args.source_file,
TGT=args.target_file,
OUT=sym_out_file,
HEURISTIC=args.sym_heuristic,
SYMAL=symal_bin,
)
assert os.system(sym_cmd) == 0
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/scripts/build_sym_alignment.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/build_sym_alignment.py",
"repo_id": "COCO-LM",
"token_count": 1626
}
| 217 |
#!/usr/bin/env bash
rm -rf fsdp_dummy
mkdir -p fsdp_dummy
fairseq-train /private/home/sshleifer/data-bin/stories_mmap \
--ddp-backend fully_sharded --fp16 --fp16-init-scale 4 \
--cpu-offload --checkpoint-activations \
--task language_modeling --tokens-per-sample 256 --batch-size 8 \
--arch transformer_lm_gpt2_tiny \
--optimizer cpu_adam --adam-betas "(0.9,0.98)" \
--lr 0.0001 --lr-scheduler polynomial_decay --warmup-updates 5 --total-num-update 10 \
--max-update 10 --log-format json --log-interval 1 \
--save-interval-updates 10 --save-dir fsdp_dummy \
--restore-file x.pt "$@"
|
COCO-LM/fairseq/scripts/test_fsdp.sh/0
|
{
"file_path": "COCO-LM/fairseq/scripts/test_fsdp.sh",
"repo_id": "COCO-LM",
"token_count": 260
}
| 218 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from examples.speech_recognition.data import data_utils
class DataUtilsTest(unittest.TestCase):
def test_normalization(self):
sample_len1 = torch.tensor(
[
[
-0.7661,
-1.3889,
-2.0972,
-0.9134,
-0.7071,
-0.9765,
-0.8700,
-0.8283,
0.7512,
1.3211,
2.1532,
2.1174,
1.2800,
1.2633,
1.6147,
1.6322,
2.0723,
3.1522,
3.2852,
2.2309,
2.5569,
2.2183,
2.2862,
1.5886,
0.8773,
0.8725,
1.2662,
0.9899,
1.1069,
1.3926,
1.2795,
1.1199,
1.1477,
1.2687,
1.3843,
1.1903,
0.8355,
1.1367,
1.2639,
1.4707,
]
]
)
out = data_utils.apply_mv_norm(sample_len1)
assert not torch.isnan(out).any()
assert (out == sample_len1).all()
|
COCO-LM/fairseq/tests/speech_recognition/test_data_utils.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/speech_recognition/test_data_utils.py",
"repo_id": "COCO-LM",
"token_count": 1263
}
| 219 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import unittest
import torch
from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
from omegaconf import OmegaConf
@unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
class TestGradientScaling(unittest.TestCase):
def setUp(self):
self.x = torch.tensor([2.0]).cuda().half()
weight = 3.0
bias = 5.0
self.error = 1.0
self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half()
self.loss_fn = torch.nn.L1Loss()
self.model = torch.nn.Linear(1, 1)
self.model.weight.data = torch.tensor([[weight]])
self.model.bias.data = torch.tensor([bias])
self.model.cuda().half()
self.params = list(self.model.parameters())
self.cfg_dls = OmegaConf.create(
{
"optimization": {
"lr": [0.1],
},
"optimizer": {
"_name": "adam",
"lr": [0.1],
"adam_betas": "(0.9, 0.999)",
"adam_eps": 1e-8,
"weight_decay": 0.0,
},
"common": {
"fp16_init_scale": 1,
"fp16_scale_window": 1,
"fp16_scale_tolerance": 1,
"threshold_loss_scale": 1,
"min_loss_scale": 1e-4,
"tpu": False,
},
}
)
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def run_iter(self, model, params, optimizer):
optimizer.zero_grad()
y = model(self.x)
loss = self.loss_fn(y, self.target)
optimizer.backward(loss)
self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16))
grad_norm = optimizer.clip_grad_norm(0)
self.assertAlmostEqual(grad_norm.item(), 2.2361, 4)
optimizer.step()
self.assertEqual(
model.weight,
torch.tensor(
[[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(
model.bias,
torch.tensor(
[5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True
),
)
self.assertEqual(optimizer.scaler.loss_scale, 2.0)
def test_mixed_precision(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
self.assertTrue(
all(
torch.all(
fp32_params.eq(
torch.tensor(
[3.1000, 5.1000], device="cuda:0", requires_grad=True
)
)
)
for fp32_params in optimizer.fp32_params.values()
)
)
def test_memory_efficient(self):
model = copy.deepcopy(self.model)
params = list(model.parameters())
optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params)
self.run_iter(model, params, optimizer)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_fp16_optimizer.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_fp16_optimizer.py",
"repo_id": "COCO-LM",
"token_count": 1905
}
| 220 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import math
import numpy as np
import tests.utils as test_utils
import torch
from fairseq import search
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.sequence_generator import EnsembleModel, SequenceGenerator
from fairseq.ngram_repeat_block import NGramRepeatBlock
from fairseq.tasks.fairseq_task import LegacyFairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(LegacyFairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.src_dict = self.dictionary
self.tgt_dict = self.dictionary
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), n=1000)
return dummy_dict
def get_dummy_task_and_parser():
"""
    To build a fairseq model, we need a dummy parser and task. This function
    creates a dummy task and parser to facilitate model/criterion tests.
    Note: we use DummyTask (defined above) as the dummy task; you may want to
    use another task by providing a different function.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
class TestJitSequenceGeneratorBase(unittest.TestCase):
def setUp(self):
self.task, self.parser = get_dummy_task_and_parser()
eos = self.task.tgt_dict.eos()
src_tokens = torch.randint(3, 50, (2, 10)).long()
src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1)
src_lengths = torch.LongTensor([2, 10])
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
TransformerModel.add_args(self.parser)
args = self.parser.parse_args([])
args.encoder_layers = 2
args.decoder_layers = 1
self.transformer_model = TransformerModel.build_model(args, self.task)
def assertOutputEqual(self, hypo, pos_probs):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores)
self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel())
def assertTensorSizeEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
def assertHypoEqual(self, h1, h2):
"Check two hypos are equal"
self.assertTensorEqual(h1["tokens"], h2["tokens"])
self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"])
self.assertLess(abs(h1["score"] - h2["score"]), 1e-6)
self.assertAlmostEqual(h1["attention"], h2["attention"])
def _test_save_and_load(self, scripted_module):
with tempfile.NamedTemporaryFile() as f:
scripted_module.save(f.name)
torch.jit.load(f.name)
JIT_MSG = "Targeting OSS scriptability for the 1.6 release"
@unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG)
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase):
def test_export_transformer(self):
model = self.transformer_model
torch.jit.script(model)
def test_ensemble_sequence_generator(self):
model = self.transformer_model
generator = SequenceGenerator(
[model],
self.task.tgt_dict,
beam_size=2,
no_repeat_ngram_size=2,
max_len_b=10,
)
scripted_model = torch.jit.script(generator)
self._test_save_and_load(scripted_model)
def test_export_ensemble_model(self):
model = self.transformer_model
ensemble_models = EnsembleModel([model])
torch.jit.script(ensemble_models)
class TestExportSearch(unittest.TestCase):
def setUp(self):
task, _ = get_dummy_task_and_parser()
self.tgt_dict = task.tgt_dict
self.min_top1_prob = 0.4
def test_export_diverse_bs(self):
search_strategy = search.DiverseBeamSearch(
self.tgt_dict, num_groups=2, diversity_strength=0.0
)
torch.jit.script(search_strategy)
def test_export_sampling(self):
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
torch.jit.script(search_strategy)
def test_export_diverse_siblings_search(self):
search_strategy = search.DiverseSiblingsSearch(
self.tgt_dict, diversity_rate=0.5
)
torch.jit.script(search_strategy)
class TestSequenceGeneratorBase(unittest.TestCase):
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
class TestSequenceGenerator(TestSequenceGeneratorBase):
def setUp(self):
(
self.tgt_dict,
self.w1,
self.w2,
src_tokens,
src_lengths,
self.model,
) = test_utils.sequence_generator_setup()
self.sample = {
"net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
}
def test_with_normalization(self):
generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])
def test_without_normalization(self):
# Sentence 1: unchanged from the normalized case
# Sentence 2: beams swap order
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, normalize_scores=False
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)
def test_with_lenpen_favoring_short_hypos(self):
lenpen = 0.6
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
def test_with_lenpen_favoring_long_hypos(self):
lenpen = 5.0
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)
def test_maxlen(self):
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, max_len_b=2
)
hypos = generator.forward(self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
def test_encoder_with_different_output_len(self):
args = self.model.encoder.args
task = test_utils.TestTranslationTask.setup_task(
args, self.tgt_dict, self.tgt_dict
)
reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
generator = SequenceGenerator(
[reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2
)
hypos = generator.forward(self.sample)
for sent in [0, 1]:
for beam in [0, 1]:
assert hypos[sent][beam]["attention"] is not None
def test_generation_with_additional_input(self):
args = self.model.encoder.args
task = test_utils.TestTranslationTask.setup_task(
args, self.tgt_dict, self.tgt_dict
)
add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task)
generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2)
sample = self.sample.copy()
sample["net_input"]["fancy_other_input"] = sample["net_input"]["src_tokens"]
        hypos = generator.forward(sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
@unittest.skipUnless(torch.cuda.is_available(), "")
class TestRepeatNgramBlocking(TestSequenceGeneratorBase):
@classmethod
def setUpClass(cls):
(
cls.tgt_dict,
cls.w1,
cls.w2,
src_tokens,
src_lengths,
cls.model,
) = test_utils.sequence_generator_setup()
return cls
def test_finds_repetitive_tokens(self):
bsz, vocab_size, beam_size, step = 2, 4, 1, 3
generated_tok = torch.tensor(
[[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda"
)
lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda")
desired_result = lprobs.new_tensor(
[[0.0, 0.0, -math.inf, 0.0], [0.0, 0.0, 0.0, -math.inf]]
)
cuda_ext_result, baseline_result = self._compare_cuda_ext_to_default_implem(
bsz, beam_size, generated_tok, lprobs, step, 2
)
self.assertTensorEqual(cuda_ext_result, desired_result)
self.assertTensorEqual(baseline_result, desired_result)
@unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG)
def test_jit_no_extension(self):
bsz, vocab_size, beam_size, step = 2, 4, 1, 3
generated_tok = torch.tensor(
[[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda"
)
lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda")
blocker = NGramRepeatBlock(2, use_extension=False)
base_result = blocker(generated_tok, lprobs.clone(), bsz, beam_size, step)
scripted_blocker = torch.jit.script(blocker)
jit_result = scripted_blocker(
generated_tok, lprobs.clone(), bsz, beam_size, step
)
self.assertTensorEqual(base_result, jit_result)
def test_ngram_blocking_same_as_default_implem(self):
"""Test that cuda extension returns same things as default impl in many settings."""
vocab_size = 4
step = 6
for _ in range(2):
block_param = np.random.choice([1, 2, 3, 4])
batch_size = np.random.randint(1, 8)
beam_size = np.random.choice([1, 2, 4, 8])
lprobs = torch.zeros((beam_size * batch_size, vocab_size), device="cuda")
generated_tok = torch.tensor(
np.random.randint(
0, vocab_size, size=(batch_size * beam_size, step + 1)
),
device="cuda",
dtype=torch.long,
)
self._compare_cuda_ext_to_default_implem(
batch_size,
beam_size,
generated_tok,
lprobs,
step,
block_param,
)
def _compare_cuda_ext_to_default_implem(
self, bsz, beam_size, generated_tok, lprobs, step, block_param
):
"""Assert that cuda extension and default implem return the same thing."""
blocker = NGramRepeatBlock(block_param)
assert blocker.use_extension, "Extension not compiled"
cuda_ext_result = blocker(
generated_tok,
lprobs.clone(),
bsz,
beam_size,
step,
)
blocker.use_extension = False
baseline_result = blocker(
generated_tok,
lprobs.clone(),
bsz,
beam_size,
step,
)
self.assertTensorEqual(cuda_ext_result, baseline_result)
blocker.use_extension = True
return cuda_ext_result, baseline_result
class TestDiverseBeamSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
]
)
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[0.0, unk, 0.6, 0.4],
[0.0, unk, 0.6, 0.4],
# sentence 2:
[0.25, unk, 0.35, 0.4],
[0.25, unk, 0.35, 0.4],
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
# sentence 1:
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
# sentence 2:
[0.9, unk, 0.1, 0.0],
[0.9, unk, 0.1, 0.0],
]
),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_diverse_beam_search(self):
search_strategy = search.DiverseBeamSearch(
self.tgt_dict, num_groups=2, diversity_strength=0.0
)
generator = SequenceGenerator(
[self.model],
self.tgt_dict,
beam_size=2,
search_strategy=search_strategy,
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
def assertHypoScore(
self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0
):
pos_scores = torch.FloatTensor(pos_probs).log()
pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate)
self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypo["score"]), 1e-6)
def test_diverse_beam_search(self):
search_strategy = search.DiverseSiblingsSearch(
self.tgt_dict, diversity_rate=0.5
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor(
[
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
]
)
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.0
# The minimal probability of top 2 tokens.
self.min_top2_prob = 0.75
# The minimal probability of the top 1 token.
self.min_top1_prob = 0.4
w1_prob = self.min_top1_prob
w2_prob = self.min_top2_prob - self.min_top1_prob
eos_prob = 1 - self.min_top2_prob
args.beam_probs = [
# step 0:
torch.FloatTensor(
[
# eos w1 w2
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
]
),
# step 1:
torch.FloatTensor(
[
# eos w1 w2
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
]
),
# step 2:
torch.FloatTensor(
[
# eos w1 w2
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
]
),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_topp_sampling_search_low_prob(self):
        # With sampling_topp this low, only the top-1 token can be
        # sampled, which always results in the same output.
low_sampling_topp = self.min_top1_prob / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=low_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1 = self.eos, self.w1
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])
def test_topp_sampling_search_high_prob(self):
        # With sampling_topp this high, any of the top-2 tokens could be
        # sampled, so different runs can produce different outputs.
high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0
search_strategy = search.Sampling(
self.tgt_dict, sampling_topp=high_sampling_topp
)
generator = SequenceGenerator(
[self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
)
sample = {
"net_input": {
"src_tokens": self.src_tokens,
"src_lengths": self.src_lengths,
}
}
hypos = generator.forward(sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertTrue(
self.hypoTokens(hypos[0][0], [w1, w1, eos])
or self.hypoTokens(hypos[0][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])
)
# sentence 1, beam 2
self.assertTrue(
self.hypoTokens(hypos[0][1], [w1, w1, eos])
or self.hypoTokens(hypos[0][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])
)
# sentence 2, beam 1
self.assertTrue(
self.hypoTokens(hypos[1][0], [w1, w1, eos])
or self.hypoTokens(hypos[1][0], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])
)
# sentence 2, beam 2
self.assertTrue(
self.hypoTokens(hypos[1][1], [w1, w1, eos])
or self.hypoTokens(hypos[1][1], [w1, w2, eos])
)
self.assertTrue(
self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0])
or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])
)
def hypoTokens(self, hypo, tokens):
return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens))
def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
pos_scores = torch.FloatTensor(pos_probs).log()
if not self.almostEqual(hypo["positional_scores"], pos_scores):
return False
if pos_scores.numel() != hypo["tokens"].numel():
return False
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
return abs(score - hypo["score"]) < 1e-6
def almostEqual(self, t1, t2):
return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4
def tensorEqual(self, t1, t2):
return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_sequence_generator.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_sequence_generator.py",
"repo_id": "COCO-LM",
"token_count": 14600
}
| 221 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Set pretrained model name, from ['cocolm-base', 'cocolm-large']
MODEL_NAME=$1
# GLUE task name, from ['MNLI', 'QQP', 'QNLI', 'SST-2', 'CoLA', 'RTE', 'MRPC', 'STS-B']
TASK=$2
# Path to GLUE dataset 'path/to/glue_data'
GLUE_PATH=$3
# Output path for results and fine-tuned model
OUT_PATH=$4
export DATASET_PATH=$GLUE_PATH/$TASK
export TASK_NAME=$(echo "$TASK" | tr '[:upper:]' '[:lower:]')
# Set max sequence length
export MAX_LEN=512
# Set path to cache train & dev features (tokenized, only use for this tokenizer!)
export TRAIN_CACHE=${DATASET_PATH}/$TASK_NAME.cocolm_cased.$MAX_LEN.cache
export DEV_CACHE=${DATASET_PATH}/$TASK_NAME.cocolm_cased.$MAX_LEN.cache
# Setting the hyperparameters for the run.
export BSZ=$5
export LR=$6
export EPOCH=$7
export WM=$8
export SEED=$9
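# Illustrative invocation (values are placeholders, not tuned hyperparameters):
#   bash run_glue.sh cocolm-base RTE /path/to/glue_data /path/to/output 16 2e-5 10 0.06 1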
# Set path to save the finetuned model and result score
export OUTPUT_PATH=$OUT_PATH/$TASK-$BSZ-$LR-$EPOCH-$WM-$SEED
mkdir -p $OUTPUT_PATH
touch $OUTPUT_PATH/train.log
python run_glue.py \
--model_type cocolm --model_name_or_path $MODEL_NAME --task_name $TASK_NAME \
--data_dir $DATASET_PATH --cached_train_file $TRAIN_CACHE --cached_dev_file $DEV_CACHE \
--config_name $MODEL_NAME \
--do_train --evaluate_during_training --logging_steps 1000 --output_dir $OUTPUT_PATH --max_grad_norm 0 --gradient_accumulation_steps 1 \
--max_seq_length $MAX_LEN --per_gpu_train_batch_size $BSZ --learning_rate $LR \
--num_train_epochs $EPOCH --weight_decay 0.01 --warmup_ratio $WM \
--adam_epsilon 1e-6 --adam_betas "0.9,0.98" \
--dropout_prob 0.1 --cls_dropout_prob 0.1 \
--seed $SEED \
--overwrite_output_dir |& tee $OUTPUT_PATH/train.log
# Add the following for fp16 training
# --fp16_init_loss_scale 128.0 --fp16 --fp16_opt_level O2
|
COCO-LM/huggingface/run_glue.sh/0
|
{
"file_path": "COCO-LM/huggingface/run_glue.sh",
"repo_id": "COCO-LM",
"token_count": 758
}
| 222 |
# ADE20k Semantic segmentation with CSWin
## Results and Models
| Backbone | Method | pretrain | Crop Size | Lr Schd | mIoU | mIoU (ms+flip) | #params | FLOPs | config | model | log |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| CSWin-T | UPerNet | ImageNet-1K | 512x512 | 160K | 49.3 | 50.7 | 60M | 959G | [`config`](configs/cswin/upernet_cswin_tiny.py) | [model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.2.0/upernet_cswin_tiny.pth) | [log](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.2.0/upernet_cswin_tiny.log.json) |
| CSWin-S | UperNet | ImageNet-1K | 512x512 | 160K | 50.4 | 51.5 | 65M | 1027G | [`config`](configs/cswin/upernet_cswin_small.py) |[model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.2.0/upernet_cswin_small.pth) | [log](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.2.0/upernet_cswin_small.log.json) |
| CSWin-B | UperNet | ImageNet-1K | 512x512 | 160K | 51.1 | 52.2 | 109M | 1222G | [`config`](configs/cswin/upernet_cswin_base.py) |[model](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.2.0/upernet_cswin_base.pth) | [log](https://github.com/microsoft/CSWin-Transformer/releases/download/v0.2.0/upernet_cswin_base.log.json) |
## Getting started
1. Clone the [Swin_Segmentation](https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation) repository and install the required packages.
```bash
git clone https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation
bash install_req.sh
```
2. Move the CSWin configs and backbone file to the corresponding folder.
```bash
cp -r configs/cswin <MMSEG_PATH>/configs/
cp config/_base_/upernet_cswin.py <MMSEG_PATH>/config/_base_/models
cp backbone/cswin_transformer.py <MMSEG_PATH>/mmseg/models/backbones/
cp mmcv_custom/checkpoint.py <MMSEG_PATH>/mmcv_custom/
```
3. Install [apex](https://github.com/NVIDIA/apex) for mixed-precision training
```bash
git clone https://github.com/NVIDIA/apex
cd apex
pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
```
4. Follow the guide in [mmseg](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/dataset_prepare.md) to prepare the ADE20k dataset.
## Fine-tuning
Command format:
```
tools/dist_train.sh <CONFIG_PATH> <NUM_GPUS> --options model.pretrained=<PRETRAIN_MODEL_PATH>
```
For example, using a CSWin-T backbone with UperNet:
```bash
bash tools/dist_train.sh \
configs/cswin/upernet_cswin_tiny.py 8 \
--options model.pretrained=<PRETRAIN_MODEL_PATH>
```
Pretrained models can be found on the [main page](https://github.com/microsoft/CSWin-Transformer).
More config files can be found at [`configs/cswin`](configs/cswin).
## Evaluation
Command format:
```
tools/dist_test.sh <CONFIG_PATH> <CHECKPOINT_PATH> <NUM_GPUS> --eval mIoU
tools/dist_test.sh <CONFIG_PATH> <CHECKPOINT_PATH> <NUM_GPUS> --eval mIoU --aug-test
```
For example, evaluate a CSWin-T backbone with UperNet:
```bash
bash tools/dist_test.sh configs/cswin/upernet_cswin_tiny.py \
<CHECKPOINT_PATH> 8 --eval mIoU
```
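For the `mIoU (ms+flip)` column in the table above, multi-scale + flip testing uses the same command with `--aug-test` added (a sketch; the checkpoint path is a placeholder):
```bash
bash tools/dist_test.sh configs/cswin/upernet_cswin_tiny.py \
    <CHECKPOINT_PATH> 8 --eval mIoU --aug-test
```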
---
## Acknowledgment
This code is built using the [mmsegmentation](https://github.com/open-mmlab/mmsegmentation) library, [Timm](https://github.com/rwightman/pytorch-image-models) library, the [Swin](https://github.com/microsoft/Swin-Transformer) repository.
|
CSWin-Transformer/segmentation/README.md/0
|
{
"file_path": "CSWin-Transformer/segmentation/README.md",
"repo_id": "CSWin-Transformer",
"token_count": 1359
}
| 223 |
# tags: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch/tags?quick-deploy=false
ARG BASE_IMAGE=openmpi4.1.0-cuda11.3-cudnn8-ubuntu20.04:latest
FROM mcr.microsoft.com/azureml/${BASE_IMAGE}
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --allow-downgrades --allow-change-held-packages --no-install-recommends \
build-essential \
cmake \
g++-7 \
git \
gpg \
curl \
vim \
wget \
ca-certificates \
libjpeg-dev \
libpng-dev \
librdmacm1 \
libibverbs1 \
ibverbs-providers \
openssh-client \
openssh-server \
libsm6 \
libxext6 \
ffmpeg \
libfontconfig1 \
libxrender1 \
libgl1-mesa-glx &&\
apt-get clean && rm -rf /var/lib/apt/lists/*
COPY environment.yml /tmp/environment.yml
# Create environment
ENV PATH=/opt/conda/bin:${PATH}
RUN conda update -y -n base conda
RUN conda env create -f /tmp/environment.yml
SHELL ["conda", "run", "-n", "climaX", "/bin/bash", "-c"]
ENV PATH /opt/conda/envs/climaX/bin:$PATH
ENV CONDA_DEFAULT_ENV climaX
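# Illustrative build command (the image tag is an assumption, not part of this repo's docs):
#   docker build -t climax -f docker/Dockerfile .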
|
ClimaX/docker/Dockerfile/0
|
{
"file_path": "ClimaX/docker/Dockerfile",
"repo_id": "ClimaX",
"token_count": 483
}
| 224 |
[data-md-color-scheme="climax"] {
--md-primary-fg-color: #4C8D91;
--md-primary-fg-color--light: #91504c;
--md-primary-fg-color--dark: #16292a;
}
|
ClimaX/docs/stylesheets/extra.css/0
|
{
"file_path": "ClimaX/docs/stylesheets/extra.css",
"repo_id": "ClimaX",
"token_count": 86
}
| 225 |
datadir: /data/CMIP6/CMCC
name: u_component_of_wind
cmip_name: ua
era_name: u
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/CMCC/config_u_component_of_wind.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/CMCC/config_u_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 66
}
| 226 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: http://esgf-data1.llnl.gov/thredds/fileServer/css03_data/CMIP6/CMIP
name: specific_humidity
cmip_name: hus
era_name: q
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190815
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/MPI-ESM/config_specific_humidity.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_specific_humidity.yml",
"repo_id": "ClimaX",
"token_count": 119
}
| 227 |
### Adapted from https://github.com/duncanwp/ClimateBench/blob/main/prep_input_data.ipynb
import os
import numpy as np
import torch
import xarray as xr
from torch.utils.data import Dataset
from torchvision.transforms import transforms
def load_x_y(data_path, list_simu, out_var):
x_all, y_all = {}, {}
for simu in list_simu:
input_name = 'inputs_' + simu + '.nc'
output_name = 'outputs_' + simu + '.nc'
if 'hist' in simu:
# load inputs
input_xr = xr.open_dataset(os.path.join(data_path, input_name))
# load outputs
output_xr = xr.open_dataset(os.path.join(data_path, output_name)).mean(dim='member')
output_xr = output_xr.assign({
"pr": output_xr.pr * 86400,
"pr90": output_xr.pr90 * 86400
}).rename({
'lon':'longitude',
'lat': 'latitude'
}).transpose('time','latitude', 'longitude').drop(['quantile'])
# Concatenate with historical data in the case of scenario 'ssp126', 'ssp370' and 'ssp585'
else:
# load inputs
input_xr = xr.open_mfdataset([
os.path.join(data_path, 'inputs_historical.nc'),
os.path.join(data_path, input_name)
]).compute()
# load outputs
output_xr = xr.concat([
xr.open_dataset(os.path.join(data_path, 'outputs_historical.nc')).mean(dim='member'),
xr.open_dataset(os.path.join(data_path, output_name)).mean(dim='member')
], dim='time').compute()
output_xr = output_xr.assign({
"pr": output_xr.pr * 86400,
"pr90": output_xr.pr90 * 86400
}).rename({
'lon':'longitude',
'lat': 'latitude'
}).transpose('time','latitude', 'longitude').drop(['quantile'])
print(input_xr.dims, output_xr.dims, simu)
x = input_xr.to_array().to_numpy()
x = x.transpose(1, 0, 2, 3).astype(np.float32) # N, C, H, W
x_all[simu] = x
y = output_xr[out_var].to_array().to_numpy() # 1, N, H, W
# y = np.expand_dims(y, axis=1) # N, 1, H, W
y = y.transpose(1, 0, 2, 3).astype(np.float32)
y_all[simu] = y
temp = xr.open_dataset(os.path.join(data_path, 'inputs_' + list_simu[0] + '.nc')).compute()
if 'latitude' in temp:
lat = np.array(temp['latitude'])
lon = np.array(temp['longitude'])
else:
lat = np.array(temp['lat'])
lon = np.array(temp['lon'])
return x_all, y_all, lat, lon
def input_for_training(x, skip_historical, history, len_historical):
time_length = x.shape[0]
# If we skip historical data, the first sequence created has as last element the first scenario data point
if skip_historical:
X_train_to_return = np.array([
x[i:i+history] for i in range(len_historical-history+1, time_length-history+1)
])
# Else we just go through the whole dataset historical + scenario (does not matter in the case of 'hist-GHG' and 'hist_aer')
else:
X_train_to_return = np.array([x[i:i+history] for i in range(0, time_length-history+1)])
return X_train_to_return
def output_for_training(y, skip_historical, history, len_historical):
time_length = y.shape[0]
# If we skip historical data, the first sequence created has as target element the first scenario data point
if skip_historical:
Y_train_to_return = np.array([
y[i+history-1] for i in range(len_historical-history+1, time_length-history+1)
])
# Else we just go through the whole dataset historical + scenario (does not matter in the case of 'hist-GHG' and 'hist_aer')
else:
Y_train_to_return = np.array([y[i+history-1] for i in range(0, time_length-history+1)])
return Y_train_to_return
def split_train_val(x, y, train_ratio=0.9):
shuffled_ids = np.random.permutation(x.shape[0])
train_len = int(train_ratio * x.shape[0])
train_ids = shuffled_ids[:train_len]
val_ids = shuffled_ids[train_len:]
return x[train_ids], y[train_ids], x[val_ids], y[val_ids]
class ClimateBenchDataset(Dataset):
def __init__(self, X_train_all, Y_train_all, variables, out_variables, lat, partition='train'):
super().__init__()
self.X_train_all = X_train_all
self.Y_train_all = Y_train_all
self.len_historical = 165
self.variables = variables
self.out_variables = out_variables
self.lat = lat
self.partition = partition
if partition == 'train':
self.inp_transform = self.get_normalize(self.X_train_all)
# self.out_transform = self.get_normalize(self.Y_train_all)
self.out_transform = transforms.Normalize(np.array([0.]), np.array([1.]))
else:
self.inp_transform = None
self.out_transform = None
if partition == 'test':
# only use 2080 - 2100 according to ClimateBench
self.X_train_all = self.X_train_all[-21:]
self.Y_train_all = self.Y_train_all[-21:]
self.get_rmse_normalization()
def get_normalize(self, data):
mean = np.mean(data, axis=(0, 1, 3, 4))
std = np.std(data, axis=(0, 1, 3, 4))
return transforms.Normalize(mean, std)
def set_normalize(self, inp_normalize, out_normalize): # for val and test
self.inp_transform = inp_normalize
self.out_transform = out_normalize
def get_rmse_normalization(self):
y_avg = torch.from_numpy(self.Y_train_all).squeeze(1).mean(0) # H, W
w_lat = np.cos(np.deg2rad(self.lat)) # (H,)
w_lat = w_lat / w_lat.mean()
w_lat = torch.from_numpy(w_lat).unsqueeze(-1).to(dtype=y_avg.dtype, device=y_avg.device) # (H, 1)
self.y_normalization = torch.abs(torch.mean(y_avg * w_lat))
def __len__(self):
return self.X_train_all.shape[0]
def __getitem__(self, index):
inp = self.inp_transform(torch.from_numpy(self.X_train_all[index]))
out = self.out_transform(torch.from_numpy(self.Y_train_all[index]))
# lead times = 0
lead_times = torch.Tensor([0.0]).to(dtype=inp.dtype)
return inp, out, lead_times, self.variables, self.out_variables
|
ClimaX/src/climax/climate_projection/dataset.py/0
|
{
"file_path": "ClimaX/src/climax/climate_projection/dataset.py",
"repo_id": "ClimaX",
"token_count": 3190
}
| 228 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# credits: https://github.com/ashleve/lightning-hydra-template/blob/main/src/models/mnist_module.py
from typing import Any
import torch
from pytorch_lightning import LightningModule
from torchvision.transforms import transforms
from climax.regional_forecast.arch import RegionalClimaX
from climax.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from climax.utils.metrics import (
lat_weighted_acc,
lat_weighted_mse,
lat_weighted_mse_val,
lat_weighted_rmse,
)
from climax.utils.pos_embed import interpolate_pos_embed
class RegionalForecastModule(LightningModule):
"""Lightning module for regional forecasting with the ClimaX model.
Args:
net (ClimaX): ClimaX model.
pretrained_path (str, optional): Path to pre-trained checkpoint.
lr (float, optional): Learning rate.
beta_1 (float, optional): Beta 1 for AdamW.
beta_2 (float, optional): Beta 2 for AdamW.
weight_decay (float, optional): Weight decay for AdamW.
warmup_epochs (int, optional): Number of warmup epochs.
max_epochs (int, optional): Number of total epochs.
warmup_start_lr (float, optional): Starting learning rate for warmup.
eta_min (float, optional): Minimum learning rate.
"""
def __init__(
self,
net: RegionalClimaX,
pretrained_path: str = "",
lr: float = 5e-4,
beta_1: float = 0.9,
beta_2: float = 0.99,
weight_decay: float = 1e-5,
warmup_epochs: int = 10000,
max_epochs: int = 200000,
warmup_start_lr: float = 1e-8,
eta_min: float = 1e-8,
):
super().__init__()
self.save_hyperparameters(logger=False, ignore=["net"])
self.net = net
if len(pretrained_path) > 0:
self.load_pretrained_weights(pretrained_path)
def load_pretrained_weights(self, pretrained_path):
if pretrained_path.startswith("http"):
checkpoint = torch.hub.load_state_dict_from_url(pretrained_path)
else:
checkpoint = torch.load(pretrained_path, map_location=torch.device("cpu"))
print("Loading pre-trained checkpoint from: %s" % pretrained_path)
checkpoint_model = checkpoint["state_dict"]
# interpolate positional embedding
interpolate_pos_embed(self.net, checkpoint_model, new_size=self.net.img_size)
state_dict = self.state_dict()
if self.net.parallel_patch_embed:
if "token_embeds.proj_weights" not in checkpoint_model.keys():
raise ValueError(
"Pretrained checkpoint does not have token_embeds.proj_weights for parallel processing. Please convert the checkpoints first or disable parallel patch_embed tokenization."
)
# checkpoint_keys = list(checkpoint_model.keys())
for k in list(checkpoint_model.keys()):
if "channel" in k:
checkpoint_model[k.replace("channel", "var")] = checkpoint_model[k]
del checkpoint_model[k]
for k in list(checkpoint_model.keys()):
if k not in state_dict.keys() or checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# load pre-trained model
msg = self.load_state_dict(checkpoint_model, strict=False)
print(msg)
def set_denormalization(self, mean, std):
self.denormalization = transforms.Normalize(mean, std)
def set_lat_lon(self, lat, lon):
self.lat = lat
self.lon = lon
def set_pred_range(self, r):
self.pred_range = r
def set_val_clim(self, clim):
self.val_clim = clim
def set_test_clim(self, clim):
self.test_clim = clim
def get_patch_size(self):
return self.net.patch_size
def training_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables, region_info = batch
loss_dict, _ = self.net.forward(
x, y, lead_times, variables, out_variables, [lat_weighted_mse], lat=self.lat, region_info=region_info
)
loss_dict = loss_dict[0]
for var in loss_dict.keys():
self.log(
"train/" + var,
loss_dict[var],
on_step=True,
on_epoch=False,
prog_bar=True,
)
loss = loss_dict["loss"]
return loss
def validation_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables, region_info = batch
if self.pred_range < 24:
log_postfix = f"{self.pred_range}_hours"
else:
days = int(self.pred_range / 24)
log_postfix = f"{days}_days"
all_loss_dicts = self.net.evaluate(
x,
y,
lead_times,
variables,
out_variables,
transform=self.denormalization,
metrics=[lat_weighted_mse_val, lat_weighted_rmse, lat_weighted_acc],
lat=self.lat,
clim=self.val_clim,
log_postfix=log_postfix,
region_info=region_info,
)
loss_dict = {}
for d in all_loss_dicts:
for k in d.keys():
loss_dict[k] = d[k]
for var in loss_dict.keys():
self.log(
"val/" + var,
loss_dict[var],
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
return loss_dict
def test_step(self, batch: Any, batch_idx: int):
x, y, lead_times, variables, out_variables, region_info = batch
if self.pred_range < 24:
log_postfix = f"{self.pred_range}_hours"
else:
days = int(self.pred_range / 24)
log_postfix = f"{days}_days"
all_loss_dicts = self.net.evaluate(
x,
y,
lead_times,
variables,
out_variables,
transform=self.denormalization,
metrics=[lat_weighted_mse_val, lat_weighted_rmse, lat_weighted_acc],
lat=self.lat,
clim=self.test_clim,
log_postfix=log_postfix,
region_info=region_info,
)
loss_dict = {}
for d in all_loss_dicts:
for k in d.keys():
loss_dict[k] = d[k]
for var in loss_dict.keys():
self.log(
"test/" + var,
loss_dict[var],
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
return loss_dict
def configure_optimizers(self):
decay = []
no_decay = []
for name, m in self.named_parameters():
if "var_embed" in name or "pos_embed" in name or "time_pos_embed" in name:
no_decay.append(m)
else:
decay.append(m)
optimizer = torch.optim.AdamW(
[
{
"params": decay,
"lr": self.hparams.lr,
"betas": (self.hparams.beta_1, self.hparams.beta_2),
"weight_decay": self.hparams.weight_decay,
},
{
"params": no_decay,
"lr": self.hparams.lr,
"betas": (self.hparams.beta_1, self.hparams.beta_2),
"weight_decay": 0,
},
]
)
lr_scheduler = LinearWarmupCosineAnnealingLR(
optimizer,
self.hparams.warmup_epochs,
self.hparams.max_epochs,
self.hparams.warmup_start_lr,
self.hparams.eta_min,
)
scheduler = {"scheduler": lr_scheduler, "interval": "step", "frequency": 1}
return {"optimizer": optimizer, "lr_scheduler": scheduler}
|
ClimaX/src/climax/regional_forecast/module.py/0
|
{
"file_path": "ClimaX/src/climax/regional_forecast/module.py",
"repo_id": "ClimaX",
"token_count": 4055
}
| 229 |
import os
import skimage.util as util
from skimage import io
from skimage.transform import resize
with open('train.txt', 'r') as fd:
image_files = fd.readlines()
total = len(image_files)
cnt = 0
# path/to/deepfashion directory
root = '/path/to/deepfashion'
# path/to/save directory
save_root = 'path/to/save'
for image_file in image_files:
image_file = os.path.join(root, image_file).strip()
image = io.imread(image_file)
pad_width_1 = (1101-750) // 2
pad_width_2 = (1101-750) // 2 + 1
image_pad = util.pad(image, ((0,0),(pad_width_1, pad_width_2),(0,0)), constant_values=232)
image_resize = resize(image_pad, (1024, 1024))
image_resize = (image_resize * 255).astype('uint8')
dst_file = os.path.dirname(image_file).replace(root, save_root)
os.makedirs(dst_file, exist_ok=True)
dst_file = os.path.join(dst_file, os.path.basename(image_file))
# dst_file = dst_file.replace('.jpg', '.png')
io.imsave(dst_file, image_resize)
cnt += 1
if cnt % 20 == 0:
print('Processing: %d / %d' % (cnt, total))
|
CoCosNet-v2/data/preprocess.py/0
|
{
"file_path": "CoCosNet-v2/data/preprocess.py",
"repo_id": "CoCosNet-v2",
"token_count": 447
}
| 230 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from models.networks.convgru import BasicUpdateBlock
from models.networks.ops import *
"""patch match"""
class Evaluate(nn.Module):
def __init__(self, temperature):
super().__init__()
self.filter_size = 3
self.temperature = temperature
def forward(self, left_features, right_features, offset_x, offset_y):
device = left_features.get_device()
batch_size, num, height, width = offset_x.size()
channel = left_features.size()[1]
matching_inds = offset_to_inds(offset_x, offset_y)
matching_inds = matching_inds.view(batch_size, num, height * width).permute(0, 2, 1).long()
base_batch = torch.arange(batch_size).to(device).long() * (height * width)
base_batch = base_batch.view(-1, 1, 1)
matching_inds_add_base = matching_inds + base_batch
right_features_view = right_features
match_cost = []
# using A[:, idx]
for i in range(matching_inds_add_base.size()[-1]):
idx = matching_inds_add_base[:, :, i]
idx = idx.contiguous().view(-1)
right_features_select = right_features_view[:, idx]
right_features_select = right_features_select.view(channel, batch_size, -1).transpose(0, 1)
match_cost_i = torch.sum(left_features * right_features_select, dim=1, keepdim=True) / self.temperature
match_cost.append(match_cost_i)
match_cost = torch.cat(match_cost, dim=1).transpose(1, 2)
match_cost = F.softmax(match_cost, dim=-1)
match_cost_topk, match_cost_topk_indices = torch.topk(match_cost, num//self.filter_size, dim=-1)
matching_inds = torch.gather(matching_inds, -1, match_cost_topk_indices)
matching_inds = matching_inds.permute(0, 2, 1).view(batch_size, -1, height, width).float()
offset_x, offset_y = inds_to_offset(matching_inds)
corr = match_cost_topk.permute(0, 2, 1)
return offset_x, offset_y, corr
class PropagationFaster(nn.Module):
def __init__(self):
super().__init__()
def forward(self, offset_x, offset_y, propagation_type="horizontal"):
device = offset_x.get_device()
self.horizontal_zeros = torch.zeros((offset_x.size()[0], offset_x.size()[1], offset_x.size()[2], 1)).to(device)
self.vertical_zeros = torch.zeros((offset_x.size()[0], offset_x.size()[1], 1, offset_x.size()[3])).to(device)
if propagation_type is "horizontal":
offset_x = torch.cat((torch.cat((self.horizontal_zeros, offset_x[:, :, :, :-1]), dim=3),
offset_x,
torch.cat((offset_x[:, :, :, 1:], self.horizontal_zeros), dim=3)), dim=1)
offset_y = torch.cat((torch.cat((self.horizontal_zeros, offset_y[:, :, :, :-1]), dim=3),
offset_y,
torch.cat((offset_y[:, :, :, 1:], self.horizontal_zeros), dim=3)), dim=1)
else:
offset_x = torch.cat((torch.cat((self.vertical_zeros, offset_x[:, :, :-1, :]), dim=2),
offset_x,
torch.cat((offset_x[:, :, 1:, :], self.vertical_zeros), dim=2)), dim=1)
offset_y = torch.cat((torch.cat((self.vertical_zeros, offset_y[:, :, :-1, :]), dim=2),
offset_y,
torch.cat((offset_y[:, :, 1:, :], self.vertical_zeros), dim=2)), dim=1)
return offset_x, offset_y
class PatchMatchOnce(nn.Module):
def __init__(self, opt):
super().__init__()
self.propagation = PropagationFaster()
self.evaluate = Evaluate(opt.temperature)
def forward(self, left_features, right_features, offset_x, offset_y):
prob = random.random()
if prob < 0.5:
offset_x, offset_y = self.propagation(offset_x, offset_y, "horizontal")
offset_x, offset_y, _ = self.evaluate(left_features, right_features, offset_x, offset_y)
offset_x, offset_y = self.propagation(offset_x, offset_y, "vertical")
offset_x, offset_y, corr = self.evaluate(left_features, right_features, offset_x, offset_y)
else:
offset_x, offset_y = self.propagation(offset_x, offset_y, "vertical")
offset_x, offset_y, _ = self.evaluate(left_features, right_features, offset_x, offset_y)
offset_x, offset_y = self.propagation(offset_x, offset_y, "horizontal")
offset_x, offset_y, corr = self.evaluate(left_features, right_features, offset_x, offset_y)
return offset_x, offset_y, corr
class PatchMatchGRU(nn.Module):
def __init__(self, opt):
super().__init__()
self.patch_match_one_step = PatchMatchOnce(opt)
self.temperature = opt.temperature
self.iters = opt.iteration_count
input_dim = opt.nef
hidden_dim = 32
norm = nn.InstanceNorm2d(hidden_dim, affine=False)
relu = nn.ReLU(inplace=True)
"""
concat left and right features
"""
self.initial_layer = nn.Sequential(
nn.Conv2d(input_dim*2, hidden_dim, kernel_size=3, padding=1, stride=1),
norm,
relu,
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
norm,
relu,
)
self.refine_net = BasicUpdateBlock()
def forward(self, left_features, right_features, right_input, initial_offset_x, initial_offset_y):
device = left_features.get_device()
batch_size, channel, height, width = left_features.size()
num = initial_offset_x.size()[1]
initial_input = torch.cat((left_features, right_features), dim=1)
hidden = self.initial_layer(initial_input)
left_features = left_features.view(batch_size, -1, height * width)
right_features = right_features.view(batch_size, -1, height * width)
right_features_view = right_features.transpose(0, 1).contiguous().view(channel, -1)
with torch.no_grad():
offset_x, offset_y = initial_offset_x, initial_offset_y
for it in range(self.iters):
with torch.no_grad():
offset_x, offset_y, corr = self.patch_match_one_step(left_features, right_features_view, offset_x, offset_y)
"""GRU refinement"""
flow = torch.cat((offset_x, offset_y), dim=1)
corr = corr.view(batch_size, -1, height, width)
hidden, delta_offset_x, delta_offset_y = self.refine_net(hidden, corr, flow)
offset_x = offset_x + delta_offset_x
offset_y = offset_y + delta_offset_y
with torch.no_grad():
matching_inds = offset_to_inds(offset_x, offset_y)
matching_inds = matching_inds.view(batch_size, num, height * width).permute(0, 2, 1).long()
base_batch = torch.arange(batch_size).to(device).long() * (height * width)
base_batch = base_batch.view(-1, 1, 1)
matching_inds_plus_base = matching_inds + base_batch
match_cost = []
# using A[:, idx]
for i in range(matching_inds_plus_base.size()[-1]):
idx = matching_inds_plus_base[:, :, i]
idx = idx.contiguous().view(-1)
right_features_select = right_features_view[:, idx]
right_features_select = right_features_select.view(channel, batch_size, -1).transpose(0, 1)
match_cost_i = torch.sum(left_features * right_features_select, dim=1, keepdim=True) / self.temperature
match_cost.append(match_cost_i)
match_cost = torch.cat(match_cost, dim=1).transpose(1, 2)
match_cost = F.softmax(match_cost, dim=-1)
right_input_view = right_input.transpose(0, 1).contiguous().view(right_input.size()[1], -1)
warp = torch.zeros_like(right_input)
# using A[:, idx]
for i in range(match_cost.size()[-1]):
idx = matching_inds_plus_base[:, :, i]
idx = idx.contiguous().view(-1)
right_input_select = right_input_view[:, idx]
right_input_select = right_input_select.view(right_input.size()[1], batch_size, -1).transpose(0, 1)
warp = warp + right_input_select * match_cost[:, :, i].unsqueeze(dim=1)
return matching_inds, warp
|
CoCosNet-v2/models/networks/patch_match.py/0
|
{
"file_path": "CoCosNet-v2/models/networks/patch_match.py",
"repo_id": "CoCosNet-v2",
"token_count": 3978
}
| 231 |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import random
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess_mode == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess_mode == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
elif opt.preprocess_mode == 'scale_shortside_and_crop':
ss, ls = min(w, h), max(w, h) # shortside and longside
width_is_shorter = w == ss
ls = int(opt.load_size * ls / ss)
new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss)
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params, method=Image.BICUBIC, normalize=True, toTensor=True):
transform_list = []
if opt.dataset_mode == 'flickr' and method == Image.NEAREST:
transform_list.append(transforms.Lambda(lambda img: __add1(img)))
if 'resize' in opt.preprocess_mode:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, interpolation=method))
elif 'scale_width' in opt.preprocess_mode:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method)))
elif 'scale_shortside' in opt.preprocess_mode:
transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, method)))
if 'crop' in opt.preprocess_mode:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if opt.preprocess_mode == 'none':
base = 32
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
if opt.preprocess_mode == 'fixed':
w = opt.crop_size
h = round(opt.crop_size / opt.aspect_ratio)
transform_list.append(transforms.Lambda(lambda img: __resize(img, w, h, method)))
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if opt.isTrain and 'rotate' in params.keys():
transform_list.append(transforms.Lambda(lambda img: __rotate(img, params['rotate'], method)))
if toTensor:
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def normalize():
return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
def __resize(img, w, h, method=Image.BICUBIC):
return img.resize((w, h), method)
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
def __scale_width(img, target_width, method=Image.BICUBIC):
ow, oh = img.size
if (ow == target_width):
return img
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), method)
def __scale_shortside(img, target_width, method=Image.BICUBIC):
ow, oh = img.size
ss, ls = min(ow, oh), max(ow, oh) # shortside and longside
width_is_shorter = ow == ss
if (ss == target_width):
return img
ls = int(target_width * ls / ss)
nw, nh = (ss, ls) if width_is_shorter else (ls, ss)
return img.resize((nw, nh), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
return img.crop((x1, y1, x1 + tw, y1 + th))
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __rotate(img, deg, method=Image.BICUBIC):
return img.rotate(deg, resample=method)
def __add1(img):
return Image.fromarray(np.array(img) + 1)
|
CoCosNet/data/base_dataset.py/0
|
{
"file_path": "CoCosNet/data/base_dataset.py",
"repo_id": "CoCosNet",
"token_count": 1932
}
| 232 |
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.nn as nn
from torch.nn import init
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def print_network(self):
if isinstance(self, list):
self = self[0]
num_params = 0
for param in self.parameters():
num_params += param.numel()
print('Network [%s] was created. Total number of parameters: %.1f million. '
'To see the architecture, do print(network).'
% (type(self).__name__, num_params / 1000000))
def init_weights(self, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if classname.find('BatchNorm2d') != -1:
if hasattr(m, 'weight') and m.weight is not None:
init.normal_(m.weight.data, 1.0, gain)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'xavier_uniform':
init.xavier_uniform_(m.weight.data, gain=1.0)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
elif init_type == 'none': # uses pytorch's default init method
m.reset_parameters()
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
self.apply(init_func)
# propagate to children
for m in self.children():
if hasattr(m, 'init_weights'):
m.init_weights(init_type, gain)
|
CoCosNet/models/networks/base_network.py/0
|
{
"file_path": "CoCosNet/models/networks/base_network.py",
"repo_id": "CoCosNet",
"token_count": 1225
}
| 233 |
# CodeExecutor
This repo provides the code for reproducing the experiments in [Code Execution with Pre-trained Language Models](https://arxiv.org/pdf/2305.05383.pdf). **CodeExecutor** is a pre-trained model that learns to predict the execution traces using a code execution pre-training task and curriculum learning.
The pre-trained checkpoint of CodeExecutor is available on [Huggingface](https://huggingface.co/microsoft/codeexecutor).
Our dataset is available on [Zenodo](https://zenodo.org/record/8062703).
## 1. Dependency
- pip install pytorch
- pip install transformers
- pip install python-Levenshtein
## 2. Data
The **Python Code Execution datasets** are a series of datasets following an easy-to-hard paradigm, including the **SingleLine** **dataset**, **Tutorial** **dataset**, and **CodeNetMut** **dataset**. We provide each test set of the three on [Zenodo](https://zenodo.org/record/8062703).
Demo data (simplified version):
``` python
{
"id": 0,
"code": "s = ['x', 'y', 'z']",
"code_tokens": ["<0>", "s", "=", "[", "'x'", ",", "'y'", ",", "'z'", "]"],
"trace": ["<line> <0> <state> s : [ x , y , z ] </state>"],
"trace_tokens": ["<line>", "<0>", "<state>", "s", ":", "[", "x", ",", "y", ",", "z", "]", "</state>"]
}
```
We also construct a new dataset for the **zero-shot code-to-code search task**, by collecting 9,987 Python functions from CodeNet. Each function solves one of the 48 problems.
Demo data (simplified version):
``` python
{
"id": 0,
"code_id": "s204511158",
"problem_id": 340, # solve which problem
"original_code": "s = list(input())", # code without providing the test case
"code": "s = ['x', 'y', 'z']", # code provided with a test case
"code_tokens": ["<0>", "s", "=", "[", "'x'", ",", "'y'", ",", "'z'", "]"],
"trace": ["<line> <0> <state> s : [ x , y , z ] </state>"],
"trace_tokens": ["<line>", "<0>", "<state>", "s", ":", "[", "x", ",", "y", ",", "z", "]", "</state>"]
}
```
## 3. Pre-training
```bash
# prepare model checkpoint and datasets
cd pretrain
bash run.sh
```
A demo bash script (run.sh) is shown:
```bash
# Change the arguments as required:
# output_dir: the output directory to save inference results
# data_cache_dir: the output directory to save the data cache
# train_data_path: the path of the pre-training file
# eval_data_path: the path of the test file
# model_name_or_path: the path of the model to be evaluated
PER_NODE_GPU=8
python -m torch.distributed.launch --nproc_per_node=${PER_NODE_GPU} run.py \
--output_dir ../saved_models/pretrain_codeexecutor_stage_3 \
--data_cache_dir ../saved_models/pretrain_codeexecutor_stage_3 \
--train_data_path /drive/pretrain_codenetmut.json \
--another_train_data_path /drive/pretrain_tutorial.json \
--third_train_data_path /drive/single_line_hard_3_million.json \
--eval_data_path ../data/codenetmut_test.json \
--model_name_or_path ../saved_models/pretrain_codeexecutor_stage_2 \
--block_size 1024 \
--per_gpu_train_batch_size 4 \
--per_gpu_eval_batch_size 8 \
--gradient_accumulation_steps 8 \
--learning_rate 4e-4 \
--node_index=0 \
--gpu_per_node $PER_NODE_GPU \
--weight_decay 0.01 \
--adam_epsilon 1e-6 \
--max_grad_norm 1.0 \
--max_steps 1000000 \
--warmup_steps 10000 \
--save_steps 5000 \
--seed 123
```
## 3. Inference
Please download the [datasets](https://zenodo.org/record/8062703) first. Unzip it and move it to `./data`.
```bash
# prepare model checkpoint and datasets
cd inference
bash run.sh
```
A demo bash script (run.sh) is shown:
```bash
# Change the arguments as required:
# prefix: dataset type (codenet/tutorial/singleline)
# output_dir: the output directory to save inference results
# data_cache_dir: the output directory to save the data cache
# eval_data_path: the path of the test file
# model_name_or_path: the path of the model to be evaluated
CUDA_VISIBLE_DEBVISES=0 python run.py \
--prefix codenet \
--output_dir ../../saved_models/inference \
--data_cache_dir ../../saved_models/inference \
--eval_data_path ../data/codenetmut_test.json \
--model_name_or_path microsoft/codeexecutor \
--block_size 1024 \
--per_gpu_train_batch_size 8 \
--per_gpu_eval_batch_size 16 \
--gradient_accumulation_steps 8 \
--learning_rate 1e-4 \
--node_index 0 \
--weight_decay 0.01 \
--adam_epsilon 1e-6 \
--max_grad_norm 1.0 \
--max_steps 1000 \
--warmup_steps 10000 \
--save_steps 5000 \
--seed 123456
```
## 4. Downstream tasks
We apply CodeExecutor on code intelligence tasks, such as the Zero-shot Code-to-code Search task.
Here, we provide example code in which the baseline model is UniXcoder.
First, generate traces for the code-to-code search test set. We provide the prediction file `code_to_code_search_preds.txt` on [Zenodo](https://zenodo.org/record/8062703).
Or use the following script to generate the prediciton file (will be `../saved_models/code_to_code_search/preds.txt`).
```bash
# prepare model checkpoint and datasets
cd inference
CUDA_VISIBLE_DEBVISES=0 python run.py \
--prefix codenet \
--output_dir ../saved_models/code_to_code_search \
--data_cache_dir ../saved_models/code_to_code_search \
--eval_data_path ../data/code_to_code_search_test.json \
--model_name_or_path microsoft/codeexecutor \
--block_size 1024 \
--per_gpu_train_batch_size 8 \
--per_gpu_eval_batch_size 16 \
--gradient_accumulation_steps 8 \
--learning_rate 1e-4 \
--node_index 0 \
--weight_decay 0.01 \
--adam_epsilon 1e-6 \
--max_grad_norm 1.0 \
--max_steps 1000 \
--warmup_steps 10000 \
--save_steps 5000 \
--seed 123456
```
Second, utilize the program outputs extracted from the execution trace generated by CodeExecutor to facilitate the code-to-code search task.
```bash
cd downstream
bash run.sh
```
A demo bash script (run.sh) is shown:
```bash
# Change the arguments as required:
# trace_file: the path to the prediction file either downloaded or generated in the last step
source_lang=python
target_lang=python
python run.py \
--model_name_or_path microsoft/unixcoder-base \
--query_data_file ../data/code_to_code_search_test.json \
--candidate_data_file ../data/code_to_code_search_test.json \
--trace_file ../data/code_to_code_search_preds.txt \
--query_lang ${source_lang} \
--candidate_lang ${target_lang} \
--code_length 512 \
--eval_batch_size 256
```
# Reference
If you use this code or CodeExecutor, please consider citing us.
```
@article{liu2023code,
title={Code Execution with Pre-trained Language Models},
author={Liu, Chenxiao and Lu, Shuai and Chen, Weizhu and Jiang, Daxin and Svyatkovskiy, Alexey and Fu, Shengyu and Sundaresan, Neel and Duan, Nan},
journal={arXiv preprint arXiv:2305.05383},
year={2023}
}
```
|
CodeBERT/CodeExecutor/README.md/0
|
{
"file_path": "CodeBERT/CodeExecutor/README.md",
"repo_id": "CodeBERT",
"token_count": 2591
}
| 234 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: BLEU Score
#
# Copyright (C) 2001-2020 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# Contributors: Björn Mattsson, Dmitrijs Milajevs, Liling Tan
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""BLEU score implementation."""
import math
import sys
from fractions import Fraction
import warnings
from collections import Counter
from evaluator.CodeBLEU.utils import ngrams
def sentence_bleu(
references,
hypothesis,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
):
"""
Calculate BLEU score (Bilingual Evaluation Understudy) from
Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
"BLEU: a method for automatic evaluation of machine translation."
In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
0.5045...
If there is no ngrams overlap for any order of n-grams, BLEU returns the
value 0. This is because the precision for the order of n-grams without
overlap is 0, and the geometric mean in the final BLEU score computation
multiplies the 0 with the precision of other n-grams. This results in 0
(independently of the precision of the othe n-gram orders). The following
example has zero 3-gram and 4-gram overlaps:
>>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
0.0
To avoid this harsh behaviour when no ngram overlaps are found a smoothing
function can be used.
>>> chencherry = SmoothingFunction()
>>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
0.0370...
The default BLEU calculates a score for up to 4-grams using uniform
weights (this is called BLEU-4). To evaluate your translations with
higher/lower order ngrams, use customized weights. E.g. when accounting
for up to 5-grams with uniform weights (this is called BLEU-5) use:
>>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
0.3920...
:param references: reference sentences
:type references: list(list(str))
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The sentence-level BLEU score.
:rtype: float
"""
return corpus_bleu(
[references], [hypothesis], weights, smoothing_function, auto_reweigh
)
def corpus_bleu(
list_of_references,
hypotheses,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence level BLEU scores (i.e. marco-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pairs before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below show that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
assert len(list_of_references) == len(hypotheses), (
"The number of hypotheses and their reference(s) should be the " "same "
)
# Iterate through each hypothesis and their corresponding references.
for references, hypothesis in zip(list_of_references, hypotheses):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(references, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(references, hyp_len)
# Calculate corpus-level brevity penalty.
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweigh:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
# If there's no smoothing, set use method0 from SmoothinFunction class.
if not smoothing_function:
smoothing_function = SmoothingFunction().method1
# Smoothen the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
return s
def modified_precision(references, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Extract a union of references' counts.
# max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = {}
for reference in references:
reference_counts = (
Counter(ngrams(reference, n)) if len(reference) >= n else Counter()
)
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {
ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()
}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False)
def closest_ref_length(references, hyp_len):
"""
This function finds the reference that is the closest length to the
hypothesis. The closest reference length is referred to as *r* variable
from the brevity penalty formula in Papineni et. al. (2002)
:param references: A list of reference translations.
:type references: list(list(str))
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int
"""
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(
ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
)
return closest_ref_len
def brevity_penalty(closest_ref_len, hyp_len):
"""
Calculate brevity penalty.
As the modified n-gram precision still has the problem from the short
length sentence, brevity penalty is used to modify the overall BLEU
score according to length.
An example from the paper. There are three references with length 12, 15
and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less then the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest references for every hypotheses.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float
"""
if hyp_len > closest_ref_len:
return 1
# If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
elif hyp_len == 0:
return 0
else:
return math.exp(1 - closest_ref_len / hyp_len)
class SmoothingFunction:
"""
This is an implementation of the smoothing techniques
for segment-level BLEU scores that was presented in
Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
Smoothing Techniques for Sentence-Level BLEU. In WMT14.
http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
"""
def __init__(self, epsilon=0.1, alpha=5, k=5):
"""
This will initialize the parameters required for the various smoothing
techniques, the default values are set to the numbers used in the
experiments from Chen and Cherry (2014).
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures',
... 'that', 'the', 'military', 'always', 'obeys', 'the',
... 'commands', 'of', 'the', 'party']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures',
... 'that', 'the', 'military', 'will', 'forever', 'heed',
... 'Party', 'commands']
>>> chencherry = SmoothingFunction()
>>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS
0.4118...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS
0.4118...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS
0.4118...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS
0.4489...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS
0.4118...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS
0.4118...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS
0.4905...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS
0.4135...
>>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS
0.4905...
:param epsilon: the epsilon value use in method 1
:type epsilon: float
:param alpha: the alpha value use in method 6
:type alpha: int
:param k: the k value use in method 4
:type k: int
"""
self.epsilon = epsilon
self.alpha = alpha
self.k = k
def method0(self, p_n, *args, **kwargs):
"""
No smoothing.
"""
p_n_new = []
for i, p_i in enumerate(p_n):
if p_i.numerator != 0:
p_n_new.append(p_i)
else:
_msg = str(
"\nThe hypothesis contains 0 counts of {}-gram overlaps.\n"
"Therefore the BLEU score evaluates to 0, independently of\n"
"how many N-gram overlaps of lower order it contains.\n"
"Consider using lower n-gram order or use "
"SmoothingFunction()"
).format(i + 1)
warnings.warn(_msg)
# When numerator==0 where denonminator==0 or !=0, the result
# for the precision score should be equal to 0 or undefined.
# Due to BLEU geometric mean computation in logarithm space,
# we we need to take the return sys.float_info.min such that
# math.log(sys.float_info.min) returns a 0 precision score.
p_n_new.append(sys.float_info.min)
return p_n_new
def method1(self, p_n, *args, **kwargs):
"""
Smoothing method 1: Add *epsilon* counts to precision with 0 counts.
"""
return [
(p_i.numerator + self.epsilon) / p_i.denominator
if p_i.numerator == 0
else p_i
for p_i in p_n
]
def method2(self, p_n, *args, **kwargs):
"""
Smoothing method 2: Add 1 to both numerator and denominator from
Chin-Yew Lin and Franz Josef Och (2004) Automatic evaluation of
machine translation quality using longest common subsequence and
skip-bigram statistics. In ACL04.
"""
return [
Fraction(p_i.numerator + 1, p_i.denominator + 1, _normalize=False)
for p_i in p_n
]
def method3(self, p_n, *args, **kwargs):
"""
Smoothing method 3: NIST geometric sequence smoothing
The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each
precision score whose matching n-gram count is null.
k is 1 for the first 'n' value for which the n-gram match count is null/
For example, if the text contains:
- one 2-gram match
- and (consequently) two 1-gram matches
the n-gram count for each individual precision score would be:
- n=1 => prec_count = 2 (two unigrams)
- n=2 => prec_count = 1 (one bigram)
- n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1)
- n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)
"""
incvnt = 1 # From the mteval-v13a.pl, it's referred to as k.
for i, p_i in enumerate(p_n):
if p_i.numerator == 0:
p_n[i] = 1 / (2 ** incvnt * p_i.denominator)
incvnt += 1
return p_n
def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
"""
Smoothing method 4:
Shorter translations may have inflated precision values due to having
smaller denominators; therefore, we give them proportionally
smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry
suggests dividing by 1/ln(len(T)), where T is the length of the translation.
"""
hyp_len = hyp_len if hyp_len else len(hypothesis)
for i, p_i in enumerate(p_n):
if p_i.numerator == 0 and hyp_len != 0:
incvnt = i + 1 * self.k / math.log(
hyp_len
) # Note that this K is different from the K from NIST.
p_n[i] = incvnt / p_i.denominator
return p_n
def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
"""
Smoothing method 5:
The matched counts for similar values of n should be similar. To a
calculate the n-gram matched count, it averages the n−1, n and n+1 gram
matched counts.
"""
hyp_len = hyp_len if hyp_len else len(hypothesis)
m = {}
# Requires an precision value for an addition ngram order.
p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)]
m[-1] = p_n[0] + 1
for i, p_i in enumerate(p_n):
p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3
m[i] = p_n[i]
return p_n
def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
"""
Smoothing method 6:
Interpolates the maximum likelihood estimate of the precision *p_n* with
a prior estimate *pi0*. The prior is estimated by assuming that the ratio
between pn and pn−1 will be the same as that between pn−1 and pn−2; from
Gao and He (2013) Training MRF-Based Phrase Translation Models using
Gradient Ascent. In NAACL.
"""
hyp_len = hyp_len if hyp_len else len(hypothesis)
# This smoothing only works when p_1 and p_2 is non-zero.
# Raise an error with an appropriate message when the input is too short
# to use this smoothing technique.
assert p_n[2], "This smoothing method requires non-zero precision for bigrams."
for i, p_i in enumerate(p_n):
if i in [0, 1]: # Skips the first 2 orders of ngrams.
continue
else:
pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2]
# No. of ngrams in translation that matches the reference.
m = p_i.numerator
# No. of ngrams in translation.
l = sum(1 for _ in ngrams(hypothesis, i + 1))
# Calculates the interpolated precision.
p_n[i] = (m + self.alpha * pi0) / (l + self.alpha)
return p_n
def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
"""
Smoothing method 7:
Interpolates methods 4 and 5.
"""
hyp_len = hyp_len if hyp_len else len(hypothesis)
p_n = self.method4(p_n, references, hypothesis, hyp_len)
p_n = self.method5(p_n, references, hypothesis, hyp_len)
return p_n
|
CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/bleu.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/evaluator/CodeBLEU/bleu.py",
"repo_id": "CodeBERT",
"token_count": 11567
}
| 235 |
echo -e "import nltk\nnltk.download('punkt')" > ttmp.py
python ttmp.py
rm ttmp.py
|
CodeBERT/CodeReviewer/code/sh/test_nltk.sh/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/sh/test_nltk.sh",
"repo_id": "CodeBERT",
"token_count": 36
}
| 236 |
# Code Search
## Data Preprocess
Different from the setting of [CodeSearchNet](husain2019codesearchnet), the answer of each query is retrieved from the whole development and testing code corpus instead of 1,000 candidate codes. Besides, we observe that some queries contain content unrelated to the code, such as a link ``http://..." that refers to external resources. Therefore, we filter following examples to improve the quality of the dataset.
- Remove comments in the code
- Remove examples whose code cannot be parsed into an abstract syntax tree.
- Remove examples whose documents contain fewer than 3 or more than 256 tokens.
- Remove examples whose documents contain special tokens (e.g. `<img ...>` or `https:...`).
- Remove examples whose documents are not written in English.
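The sketch below illustrates the document-level filters in plain Python; `parse_ok` and `is_english` are hypothetical helper callables standing in for the actual AST parser and language detector used by the preprocessing scripts.
```python
# Rough illustration of the filtering rules listed above; `parse_ok` and
# `is_english` are hypothetical stand-ins, not functions from this repository.
import re

def keep_example(code: str, doc: str, parse_ok, is_english) -> bool:
    tokens = doc.split()
    if not (3 <= len(tokens) <= 256):            # document length filter
        return False
    if re.search(r"<img[^>]*>|https?:", doc):    # special tokens / external links
        return False
    if not is_english(doc):                      # language filter
        return False
    return parse_ok(code)                        # code must parse into an AST
```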
Data statistics of the cleaned dataset for code search are shown in the table below.
| PL | Training | Dev | Test | Candidates code |
| :--------- | :------: | :----: | :----: | :-------------: |
| Python | 251,820 | 13,914 | 14,918 | 43,827 |
| PHP | 241,241 | 12,982 | 14,014 | 52,660 |
| Go | 167,288 | 7,325 | 8,122 | 28,120 |
| Java | 164,923 | 5,183 | 10,955 | 40,347 |
| JavaScript | 58,025 | 3,885 | 3,291 | 13,981 |
| Ruby | 24,927 | 1,400 | 1,261 | 4,360 |
You can download and preprocess data using the following command.
```shell
unzip dataset.zip
cd dataset
bash run.sh
cd ..
```
## Dependency
- pip install torch
- pip install transformers
- pip install tree_sitter
### Tree-sitter (optional)
If the built file "parser/my-languages.so" doesn't work for you, please rebuild it with the following commands:
```shell
cd parser
bash build.sh
cd ..
```
## Fine-Tune
We fine-tuned the model on 2*V100-16G GPUs.
```shell
lang=ruby
mkdir -p ./saved_models/$lang
python run.py \
--output_dir=./saved_models/$lang \
--config_name=microsoft/graphcodebert-base \
--model_name_or_path=microsoft/graphcodebert-base \
--tokenizer_name=microsoft/graphcodebert-base \
--lang=$lang \
--do_train \
--train_data_file=dataset/$lang/train.jsonl \
--eval_data_file=dataset/$lang/valid.jsonl \
--test_data_file=dataset/$lang/test.jsonl \
--codebase_file=dataset/$lang/codebase.jsonl \
--num_train_epochs 10 \
--code_length 256 \
--data_flow_length 64 \
--nl_length 128 \
--train_batch_size 32 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456 2>&1| tee saved_models/$lang/train.log
```
## Inference and Evaluation
```shell
lang=ruby
python run.py \
--output_dir=./saved_models/$lang \
--config_name=microsoft/graphcodebert-base \
--model_name_or_path=microsoft/graphcodebert-base \
--tokenizer_name=microsoft/graphcodebert-base \
--lang=$lang \
--do_eval \
--do_test \
--train_data_file=dataset/$lang/train.jsonl \
--eval_data_file=dataset/$lang/valid.jsonl \
--test_data_file=dataset/$lang/test.jsonl \
--codebase_file=dataset/$lang/codebase.jsonl \
--num_train_epochs 10 \
--code_length 256 \
--data_flow_length 64 \
--nl_length 128 \
--train_batch_size 32 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--seed 123456 2>&1| tee saved_models/$lang/test.log
```
## Results
The results on the filtered dataset are shown in the table below:
| Model | Ruby | Javascript | Go | Python | Java | PHP | Overall |
| -------------- | :-------: | :--------: | :-------: | :-------: | :-------: | :-------: | :-------: |
| NBow | 0.162 | 0.157 | 0.330 | 0.161 | 0.171 | 0.152 | 0.189 |
| CNN | 0.276 | 0.224 | 0.680 | 0.242 | 0.263 | 0.260 | 0.324 |
| BiRNN | 0.213 | 0.193 | 0.688 | 0.290 | 0.304 | 0.338 | 0.338 |
| SelfAtt | 0.275 | 0.287 | 0.723 | 0.398 | 0.404 | 0.426 | 0.419 |
| RoBERTa | 0.587 | 0.517 | 0.850 | 0.587 | 0.599 | 0.560 | 0.617 |
| RoBERTa (code) | 0.628 | 0.562 | 0.859 | 0.610 | 0.620 | 0.579 | 0.643 |
| CodeBERT | 0.679 | 0.620 | 0.882 | 0.672 | 0.676 | 0.628 | 0.693 |
| GraphCodeBERT | **0.703** | **0.644** | **0.897** | **0.692** | **0.691** | **0.649** | **0.713** |
## Model and Demo
A pretrained model, an additional training script with its dataset, and a demo of a fine-tuned CodeBERT model for the code search task can be found here: https://drive.google.com/file/d/1ZO-xVIzGcNE6Gz9DEg2z5mIbBv4Ft1cK/view.
|
CodeBERT/GraphCodeBERT/codesearch/README.md/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/codesearch/README.md",
"repo_id": "CodeBERT",
"token_count": 1990
}
| 237 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from tree_sitter import Language, Parser
Language.build_library(
# Store the library in the `build` directory
'my-languages.so',
# Include one or more languages
[
'tree-sitter-go',
'tree-sitter-javascript',
'tree-sitter-python',
'tree-sitter-php',
'tree-sitter-java',
'tree-sitter-ruby',
'tree-sitter-c-sharp',
]
)
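Once `my-languages.so` has been built, it can be loaded for parsing. Below is a minimal sketch against the pre-0.21 `tree_sitter` API (the same generation of the API that provides `Language.build_library`); the snippet being parsed is an arbitrary example:
```python
# Sketch: load the compiled grammar bundle and parse a small Python snippet.
from tree_sitter import Language, Parser

PY_LANGUAGE = Language('my-languages.so', 'python')

parser = Parser()
parser.set_language(PY_LANGUAGE)

tree = parser.parse(b"def add(a, b):\n    return a + b\n")
print(tree.root_node.sexp())  # S-expression view of the syntax tree
```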
|
CodeBERT/GraphCodeBERT/refinement/parser/build.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/refinement/parser/build.py",
"repo_id": "CodeBERT",
"token_count": 162
}
| 238 |
# Clone Detection (BigCloneDetection)
## Data Download
```bash
mkdir dataset
cd dataset
wget https://github.com/microsoft/CodeXGLUE/raw/main/Code-Code/Clone-detection-BigCloneBench/dataset/data.jsonl
wget https://github.com/microsoft/CodeXGLUE/raw/main/Code-Code/Clone-detection-BigCloneBench/dataset/test.txt
wget https://github.com/microsoft/CodeXGLUE/raw/main/Code-Code/Clone-detection-BigCloneBench/dataset/train.txt
wget https://github.com/microsoft/CodeXGLUE/raw/main/Code-Code/Clone-detection-BigCloneBench/dataset/valid.txt
cd ..
```
## Dependency
- pip install torch
- pip install transformers
## Fine-Tune
Here we provide fine-tuning settings for clone detection, whose results are reported in the paper.
```shell
# Training
python run.py \
--output_dir saved_models \
--model_name_or_path microsoft/unixcoder-base \
--do_train \
--train_data_file dataset/train.txt \
--eval_data_file dataset/valid.txt \
--num_train_epochs 1 \
--block_size 512 \
--train_batch_size 16 \
--eval_batch_size 32 \
--learning_rate 5e-5 \
--max_grad_norm 1.0 \
--seed 123456
# Evaluating
python run.py \
--output_dir saved_models \
--model_name_or_path microsoft/unixcoder-base \
--do_test \
--test_data_file dataset/test.txt \
--num_train_epochs 1 \
--block_size 512 \
--train_batch_size 16 \
--eval_batch_size 32 \
--learning_rate 5e-5 \
--max_grad_norm 1.0 \
--seed 123456
```
|
CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/README.md/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/BCB/README.md",
"repo_id": "CodeBERT",
"token_count": 586
}
| 239 |
pip install torch==1.6.0+cu92 torchvision==0.7.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html > log.txt 2>&1
pip install sklearn scipy transformers tqdm >> log.txt 2>&1
export CUDA_VISIBLE_DEVICES=15,12,13,14  # make the GPU restriction visible to run.py
lang=java #programming language
lr=5e-5
batch_size=32
accm_steps=1
beam_size=3
source_length=512
target_length=150
data_dir=../../dataset
output_dir=saved_models/$lang
train_file=$data_dir/train.json
dev_file=$data_dir/dev.json
epochs=30
pretrained_model=../../../pretrained-model/UniXcoder-base/
mkdir -p $output_dir
python run.py \
--do_train \
--do_eval \
--model_name_or_path $pretrained_model \
--train_filename $train_file \
--dev_filename $dev_file \
--tokenizer_name roberta-base \
--output_dir $output_dir \
--max_source_length $source_length \
--max_target_length $target_length \
--beam_size $beam_size \
--train_batch_size $batch_size \
--eval_batch_size $batch_size \
--learning_rate $lr \
--gradient_accumulation_steps $accm_steps \
--num_train_epochs $epochs 2>&1| tee $output_dir/train.log
batch_size=64
dev_file=$data_dir/dev.json
test_file=$data_dir/test.json
test_model=$output_dir/checkpoint-best-score/pytorch_model.bin #checkpoint for test
python run.py \
--do_test \
--model_name_or_path $pretrained_model \
--load_model_path $test_model \
--dev_filename $dev_file \
--test_filename $test_file \
--output_dir $output_dir \
--max_source_length $source_length \
--max_target_length $target_length \
--beam_size $beam_size \
--gradient_accumulation_steps $accm_steps \
--eval_batch_size $batch_size 2>&1| tee $output_dir/test.log
|
CodeBERT/UniXcoder/downstream-tasks/code-generation/run.sh/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-generation/run.sh",
"repo_id": "CodeBERT",
"token_count": 609
}
| 240 |
## CLUTRR
### Download the Code-Davinci-002 Inference Results
[Download Link](https://bdmbabel.blob.core.windows.net/public/clutrr.zip)
### Original Dataset
The dataset is synthesized from https://github.com/facebookresearch/clutrr by running the following command:
`python main.py --train_tasks 1.2,1.3 --test_tasks 1.2,1.3`
This corresponds to the first line of Table 2 in [this paper](https://arxiv.org/pdf/1908.06177.pdf).
|
CodeT/DIVERSE/data/clutrr.md/0
|
{
"file_path": "CodeT/DIVERSE/data/clutrr.md",
"repo_id": "CodeT",
"token_count": 149
}
| 241 |
import os
import time
from pathlib import Path
from prompt_file import *
def get_command_result(input, prompt_file):
"""
Checks if the input is a command and if so, executes it
Currently supported commands:
- start multi-turn
- stop multi-turn
- default context
- show context <n>
- view context
- save context
- clear context
- load context <filename>
- set engine <engine>
- set temperature <temperature>
- set max_tokens <max_tokens>
- set shell <shell>
Returns: command result or "" if no command matched
"""
    if prompt_file is None:
return "", None
config = prompt_file.config
# configuration setting commands
if input.__contains__("set"):
# set temperature <temperature>
if input.__contains__("temperature"):
input = input.split()
if len(input) == 4:
config['temperature'] = float(input[3])
prompt_file.set_config(config)
print("# Temperature set to " + str(config['temperature']))
return "config set", prompt_file
else:
return "", prompt_file
# set max_tokens <max_tokens>
elif input.__contains__("max_tokens"):
input = input.split()
if len(input) == 4:
config['max_tokens'] = int(input[3])
prompt_file.set_config(config)
print("# Max tokens set to " + str(config['max_tokens']))
return "config set", prompt_file
else:
return "", prompt_file
elif input.__contains__("shell"):
input = input.split()
if len(input) == 4:
config['shell'] = input[3]
prompt_file.set_config(config)
print("# Shell set to " + str(config['shell']))
return "config set", prompt_file
else:
return "", prompt_file
elif input.__contains__("engine"):
input = input.split()
if len(input) == 4:
config['engine'] = input[3]
prompt_file.set_config(config)
print("# Engine set to " + str(config['engine']))
return "config set", prompt_file
else:
return "", prompt_file
if input.__contains__("show config"):
prompt_file.show_config()
return "config shown", prompt_file
# multi turn/single turn commands
if input.__contains__("multi-turn"):
# start context
if input.__contains__("start"):
if config['multi_turn'] == 'off':
prompt_file.start_multi_turn()
return "multi turn mode on", prompt_file
return "multi turn mode on", prompt_file
# stop context
if input.__contains__("stop"):
prompt_file.stop_multi_turn()
return "multi turn mode off", prompt_file
# context file commands
if input.__contains__("context"):
if input.__contains__("default"):
prompt_file.default_context()
return "stopped context", prompt_file
# show context <n>
if input.__contains__("show"):
print('\n')
with open(prompt_file.file_name, 'r') as f:
lines = f.readlines()
lines = lines[6:] # skip headers
line_numbers = 0
if len(input.split()) > 3:
line_numbers = int(input.split()[3])
if line_numbers != 0:
for line in lines[-line_numbers:]:
print('\n# '+line, end='')
else:
print('\n# '.join(lines))
return "context shown", prompt_file
# edit context
if input.__contains__("view"):
# open the prompt file in text editor
if config['shell'] != 'powershell':
os.system('open {}'.format(prompt_file.file_path))
else:
os.system('start {}'.format(prompt_file.file_path))
return "context shown", prompt_file
# save context <filename>
if input.__contains__("save"):
# save the current prompt file to a new file
# if filename not specified use the current time (to avoid name conflicts)
filename = time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
if len(input.split()) == 4:
filename = input.split()[3]
prompt_file.save_to(filename)
return "context saved", prompt_file
# clear context
if input.__contains__("clear"):
            # reset the prompt file back to the default context
prompt_file.default_context()
return "unlearned interaction", prompt_file
# load context <filename>
if input.__contains__("load"):
# the input looks like # load context <filename>
# write everything from the file to the prompt file
input = input.split()
if len(input) == 4:
filename = input[3]
prompt_file.load_context(filename)
return "context loaded", prompt_file
print('\n#\tInvalid command format, did you specify which file to load?')
return "context loaded", prompt_file
return "", prompt_file
|
Codex-CLI/src/commands.py/0
|
{
"file_path": "Codex-CLI/src/commands.py",
"repo_id": "Codex-CLI",
"token_count": 2649
}
| 242 |
Contributing to Microsoft Cognitive Services Client Libraries & Samples
===============================================
So, you want to contribute to a client library or sample for one of the Microsoft Cognitive Services.
Here's what you need to know.
1. Each SDK should include both a client library and a sample showing the API in
action
2. When working on an SDK, it's important that we are consistent from project to project, so we ask you to follow the coding guidelines below:
- Windows [(Coding guidelines for C#)](https://msdn.microsoft.com/en-us/library/ff926074.aspx) -- also reference our [common Windows code](https://github.com/Microsoft/Cognitive-common-windows) for building samples
- Android [(Coding guidelines for
Java)](<http://source.android.com/source/code-style.html>)
- iOS Objective-C [(Coding guidelines for
Cocoa)](<https://developer.apple.com/library/mac/documentation/Cocoa/Conceptual/CodingGuidelines/CodingGuidelines.html>)
- Optional: Client Javascript ([Coding guidelines for
npm](<https://docs.npmjs.com/misc/coding-style>))
3. Samples are important for illustrating how to actually call into the API.
Samples should be as visual and reusable as possible.
- Do:
- Create a UI sample when possible.
- Make your sample user friendly. Expect that developers will want to try
different mainline scenarios and key APIs.
- Create code that's easy for other developers to copy/paste into their
own solutions
- Consider:
- Adding UI to allow devs to quickly copy/paste subscription keys, instead
of updating them in the code or using a config file. The
FaceAPI-WPF-Samples.sln provides an example.
- Don't:
- Leave your subscription key in the source of samples. You do not want your key to be abused by others.
Happy coding!
|
Cognitive-Face-Python/CONTRIBUTING.md/0
|
{
"file_path": "Cognitive-Face-Python/CONTRIBUTING.md",
"repo_id": "Cognitive-Face-Python",
"token_count": 577
}
| 243 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: person_group.py
Description: Person Group section of the Cognitive Face API.
"""
from . import util
def create(person_group_id, name=None, user_data=None):
"""Create a new person group with specified `person_group_id`, `name` and
user-provided `user_data`.
Args:
person_group_id: User-provided `person_group_id` as a string. The valid
characters include numbers, English letters in lower case, '-' and
            '_'. The maximum length of the personGroupId is 64.
name: Person group display name. The maximum length is 128.
user_data: User-provided data attached to the person group. The size
limit is 16KB.
Returns:
An empty response body.
"""
name = name or person_group_id
url = 'persongroups/{}'.format(person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PUT', url, json=json)
def delete(person_group_id):
"""Delete an existing person group. Persisted face images of all people in
the person group will also be deleted.
Args:
person_group_id: The `person_group_id` of the person group to be
deleted.
Returns:
An empty response body.
"""
url = 'persongroups/{}'.format(person_group_id)
return util.request('DELETE', url)
def get(person_group_id):
"""Retrieve the information of a person group, including its `name` and
`user_data`. This API returns person group information only, use
`person.lists` instead to retrieve person information under the person
group.
Args:
person_group_id: `person_group_id` of the target person group.
Returns:
The person group's information.
"""
url = 'persongroups/{}'.format(person_group_id)
return util.request('GET', url)
def get_status(person_group_id):
"""Retrieve the training status of a person group (completed or ongoing).
Training can be triggered by `person_group.train`. The training will
process for a while on the server side.
Args:
person_group_id: `person_group_id` of the target person group.
Returns:
The person group's training status.
"""
url = 'persongroups/{}/training'.format(person_group_id)
return util.request('GET', url)
def lists(start=None, top=None):
"""List person groups and their information.
Args:
        start: Optional parameter. List person groups starting from the
            smallest `person_group_id` greater than `start`. It contains no
            more than 64 characters. Default is empty.
top: The number of person groups to list, ranging in [1, 1000]. Default
is 1000.
Returns:
An array of person groups and their information (`person_group_id`,
`name` and `user_data`).
"""
url = 'persongroups'
params = {
'start': start,
'top': top,
}
return util.request('GET', url, params=params)
def train(person_group_id):
"""Queue a person group training task, the training task may not be started
immediately.
Args:
person_group_id: Target person group to be trained.
Returns:
An empty JSON body.
"""
url = 'persongroups/{}/train'.format(person_group_id)
return util.request('POST', url)
def update(person_group_id, name=None, user_data=None):
"""Update an existing person group's display `name` and `user_data`. The
properties which does not appear in request body will not be updated.
Args:
person_group_id: `person_group_id` of the person group to be updated.
name: Optional parameter. Person group display name. The maximum length
is 128.
user_data: Optional parameter. User-provided data attached to the
person group. The size limit is 16KB.
Returns:
An empty response body.
"""
url = 'persongroups/{}'.format(person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PATCH', url, json=json)
|
Cognitive-Face-Python/cognitive_face/person_group.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/person_group.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 1523
}
| 244 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: __init__.py
Description: Model components for Python SDK sample.
"""
from model.face import Face
|
Cognitive-Face-Python/sample/model/__init__.py/0
|
{
"file_path": "Cognitive-Face-Python/sample/model/__init__.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 52
}
| 245 |
export CUDA_VISIBLE_DEVICES=2
python t5_run_train.py \
--model_name_or_path t5-base \
--subtask Com \
--method ControlExp \
--train_file finetune \
--max_steps 50000 \
--save_steps 50000 \
--batch_size 8 \
--ebatch_size 16 \
--gas 1 \
--seed 1 \
--set set1
|
ContextualSP/abstraction_probing/code/t5_code/Com_ControlExp_finetune.sh/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Com_ControlExp_finetune.sh",
"repo_id": "ContextualSP",
"token_count": 104
}
| 246 |
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.models.t5.modeling_t5 import (
T5PreTrainedModel,
T5Block,
T5LayerNorm,
T5Attention,
T5LayerCrossAttention,
T5LayerFF,
)
from transformers.modeling_outputs import (
Seq2SeqLMOutput,
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions
)
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
import math
import copy
import warnings
from torch.utils.checkpoint import checkpoint
from transformers.models.t5.configuration_t5 import T5Config
from transformers.modeling_utils import find_pruneable_heads_and_indices, prune_linear_layer
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
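    # Illustration with the default arguments (bidirectional=True,
    # num_buckets=32, max_distance=128): a relative position of 0 maps to
    # bucket 0 and -3 maps to bucket 3, while +3 maps to bucket 16 + 3 = 19,
    # because half of the buckets are reserved for positive (future) offsets
    # in the bidirectional case; offsets at or beyond max_distance all share
    # the last bucket of their half.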
def compute_bias(self, query_length, key_length):
"""Compute binned relative position bias"""
context_position = torch.arange(
query_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[:, None]
memory_position = torch.arange(
key_length, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config))
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
class T5EncoderStack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.block))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
# Load onto devices
for k, v in self.device_map.items():
for layer in v:
cuda_device = "cuda:" + str(k)
self.block[layer] = self.block[layer].to(cuda_device)
# Set embed_tokens to first layer
self.embed_tokens = self.embed_tokens.to(self.first_device)
# Set final layer norm to last device
self.final_layer_norm = self.final_layer_norm.to(self.last_device)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
for i in range(len(self.block)):
self.block[i] = self.block[i].to("cpu")
self.embed_tokens = self.embed_tokens.to("cpu")
self.final_layer_norm = self.final_layer_norm.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, f":obj:`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if cross_attn_layer_head_mask is not None:
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
            # We share the position biases between the layers - the first layer stores them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
class T5DecoderStack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.block))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
# Load onto devices
for k, v in self.device_map.items():
for layer in v:
cuda_device = "cuda:" + str(k)
self.block[layer] = self.block[layer].to(cuda_device)
# Set embed_tokens to first layer
self.embed_tokens = self.embed_tokens.to(self.first_device)
# Set final layer norm to last device
self.final_layer_norm = self.final_layer_norm.to(self.last_device)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
for i in range(len(self.block)):
self.block[i] = self.block[i].to("cpu")
self.embed_tokens = self.embed_tokens.to("cpu")
self.final_layer_norm = self.final_layer_norm.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, f":obj:`use_cache` can only be set to `True` if {self} is used as a decoder"
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if cross_attn_layer_head_mask is not None:
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
            # We share the position biases between the layers - the first layer stores them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
class T5Generation(T5PreTrainedModel):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
r"encoder\.p0",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5EncoderStack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5DecoderStack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.decoder.first_device)
self.model_parallel = True
def deparallelize(self):
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to("cpu")
self.decoder = self.decoder.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,
config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
labels in ``[0, ..., config.vocab_size]``
Returns:
Examples::
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small')
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
"""
# pdb.set_trace()
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# If decoding with past key value states, only the last tokens
# should be given as an input
if past_key_values is not None:
assert labels is None, "Decoder should not use cached key value states when training."
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
if decoder_inputs_embeds is not None:
decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.lm_head = self.lm_head.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.lm_head.weight.device)
if self.config.tie_word_embeddings:
sequence_output = sequence_output * (self.model_dim ** -0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
# logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
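# Editor's note (illustrative, not part of the original file): during beam search,
# `generate()` calls `_reorder_cache(past, beam_idx)` after each step so the cached
# self-attention and cross-attention key/value tensors follow the surviving hypotheses.
# For example, with beam size 4 and beam_idx = torch.tensor([2, 2, 0, 1]), every cached
# tensor in each layer's past is re-gathered along dim 0 via index_select, matching the
# reordered beams.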
|
ContextualSP/abstraction_probing/code/t5_code/t5_model.py/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/t5_model.py",
"repo_id": "ContextualSP",
"token_count": 24657
}
| 247 |
theme: jekyll-theme-minimal
|
ContextualSP/adaptershare/_config.yml/0
|
{
"file_path": "ContextualSP/adaptershare/_config.yml",
"repo_id": "ContextualSP",
"token_count": 10
}
| 248 |
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
# Copyright 2021 Microsoft All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Post-processing utilities for question answering.
"""
import collections
import json
import logging
import os
from typing import Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
logger = logging.getLogger(__name__)
def load_json(path):
with open(path, "r", encoding="utf-8") as f:
return json.load(f)
def reduce_features_to_examples(features):
examples = []
seen_uids = set()
for feature in features:
if feature["uid"] not in exits_dict:
examples.append({"uid": feature["uid"], "context": feature["context"]})
seen_uids.add(feature["uid"])
return examples
def extract_answers_from_features(features, is_v2=False, use_label=False):
answers = {}
seen_uids = set()
for feature in features:
if feature["uid"] not in seen_uids:
answers[feature["uid"]] = feature["answer"]
if is_v2 and "label" in feature:
answers[feature["uid"]]["is_impossible"] = (
True if feature["label"] else False
)
seen_uids.add(feature["uid"])
return answers
def postprocess_qa_predictions(
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
prefix: Optional[str] = None,
is_world_process_zero: bool = True,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
original contexts. This is the base postprocessing function for models that only return start and end logits.
Args:
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether this process is the main process or not (used to determine if logging/saves should be done).
"""
assert (
len(predictions) == 2
), "`predictions` should be a tuple with two elements (start_logits, end_logits)."
all_start_logits, all_end_logits = predictions
assert len(predictions[0]) == len(
features
), f"Got {len(predictions[0])} predictions and {len(features)} features."
examples = reduce_features_to_examples(features)
# Build a map example to its corresponding features.
example_id_to_index = {example["uid"]: i for i, example in enumerate(examples)}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["uid"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)
logger.info(
f"Post-processing {len(examples)} example predictions split into {len(features)} features."
)
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
# This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get(
"token_is_max_context", None
)
# Update minimum null prediction.
null_ans_index = features[feature_index].get("null_ans_index", 0)
feature_null_score = (
start_logits[null_ans_index] + end_logits[null_ans_index]
)
if (
min_null_prediction is None
or min_null_prediction["score"] > feature_null_score
):
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[null_ans_index],
"end_logit": end_logits[null_ans_index],
}
# Go through all possibilities for the `n_best_size` greater start and end logits.
start_indexes = np.argsort(start_logits)[
-1 : -n_best_size - 1 : -1
].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if (
end_index < start_index
or end_index - start_index + 1 > max_answer_length
):
continue
# Don't consider answers that don't have the maximum context available (if such information is
# provided).
if (
token_is_max_context is not None
and not token_is_max_context.get(str(start_index), False)
):
continue
prelim_predictions.append(
{
"offsets": (
offset_mapping[start_index][0],
offset_mapping[end_index][1],
),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(
prelim_predictions, key=lambda x: x["score"], reverse=True
)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(
p["offsets"] == (0, 0) for p in predictions
):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0 or (
len(predictions) == 1 and predictions[0]["text"] == ""
):
predictions.insert(
0, {"text": "", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}
)
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["uid"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
if predictions:
best_non_null_pred = predictions[0]
for pred in predictions:
if pred["text"] != "":
best_non_null_pred = pred
break
# Then we compare to the null prediction using the threshold.
score_diff = (
null_score
- best_non_null_pred["start_logit"]
- best_non_null_pred["end_logit"]
)
scores_diff_json[example["uid"]] = float(
score_diff
) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["uid"]] = ""
else:
all_predictions[example["uid"]] = best_non_null_pred["text"]
else:
all_predictions[example["uid"]] = ""
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["uid"]] = [
{
k: (
float(v)
if isinstance(v, (np.float16, np.float32, np.float64))
else v
)
for k, v in pred.items()
}
for pred in predictions
]
answers = extract_answers_from_features(features, version_2_with_negative)
return all_predictions, answers
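# --- Illustrative usage (editor's addition, not part of the original module). ---
# The feature dict below is hypothetical; real features come from the preprocessing
# pipeline and carry the "uid", "context", "offset_mapping" and "answer" fields used above.
if __name__ == "__main__":
    demo_features = [{
        "uid": "q1",
        "context": "Paris is the capital of France.",
        "offset_mapping": [(0, 5), (6, 8), (9, 12), (13, 20), (21, 23), (24, 30)],
        "answer": {"text": "Paris", "answer_start": 0},
    }]
    # One row of start/end logits per feature; token 0 ("Paris") gets the highest score.
    start_logits = np.array([[5.0, 0.1, 0.1, 0.1, 0.1, 0.1]])
    end_logits = np.array([[5.0, 0.1, 0.1, 0.1, 0.1, 0.1]])
    preds, golds = postprocess_qa_predictions(
        demo_features, (start_logits, end_logits), n_best_size=5, max_answer_length=10
    )
    print(preds)  # expected: OrderedDict([('q1', 'Paris')])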
|
ContextualSP/adaptershare/data_utils/utils_qa.py/0
|
{
"file_path": "ContextualSP/adaptershare/data_utils/utils_qa.py",
"repo_id": "ContextualSP",
"token_count": 5608
}
| 249 |
# Copyright (c) Microsoft. All rights reserved.
from random import shuffle
from data_utils.metrics import calc_metrics
def load_scitail(file):
"""Loading data of scitail"""
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
blocks = line.strip().split("\t")
assert len(blocks) > 2
if blocks[0] == "-":
continue
sample = {
"uid": str(cnt),
"premise": blocks[0],
"hypothesis": blocks[1],
"label": blocks[2],
}
rows.append(sample)
cnt += 1
return rows
def load_snli(file, header=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 10
if blocks[-1] == "-":
continue
lab = blocks[-1]
if lab is None:
import pdb
pdb.set_trace()
sample = {
"uid": blocks[0],
"premise": blocks[7],
"hypothesis": blocks[8],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_mnli(file, header=True, multi_snli=False, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 9
if blocks[-1] == "-":
continue
lab = "contradiction"
if is_train:
lab = blocks[-1]
if lab is None:
import pdb
pdb.set_trace()
sample = {
"uid": blocks[0],
"premise": blocks[8],
"hypothesis": blocks[9],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_mrpc(file, header=True, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 4
lab = 0
if is_train:
lab = int(blocks[0])
sample = {
"uid": cnt,
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_qnli(file, header=True, is_train=True):
"""QNLI for classification"""
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 2
lab = "not_entailment"
if is_train:
lab = blocks[-1]
if lab is None:
import pdb
pdb.set_trace()
sample = {
"uid": blocks[0],
"premise": blocks[1],
"hypothesis": blocks[2],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_qqp(file, header=True, is_train=True):
rows = []
cnt = 0
skipped = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 6:
skipped += 1
continue
if not is_train:
assert len(blocks) == 3
lab = 0
if is_train:
lab = int(blocks[-1])
sample = {
"uid": cnt,
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": lab,
}
else:
sample = {
"uid": int(blocks[0]),
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_rte(file, header=True, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 4:
continue
if not is_train:
assert len(blocks) == 3
lab = "not_entailment"
if is_train:
lab = blocks[-1]
sample = {
"uid": int(blocks[0]),
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": lab,
}
else:
sample = {
"uid": int(blocks[0]),
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_wnli(file, header=True, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 4:
continue
if not is_train:
assert len(blocks) == 3
lab = 0
if is_train:
lab = int(blocks[-1])
sample = {
"uid": cnt,
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": lab,
}
else:
sample = {
"uid": cnt,
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": lab,
}
rows.append(sample)
cnt += 1
return rows
def load_diag(file, header=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 3
sample = {
"uid": cnt,
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": blocks[-1],
}
rows.append(sample)
cnt += 1
return rows
def load_sst(file, header=True, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 2:
continue
lab = 0
if is_train:
lab = int(blocks[-1])
sample = {"uid": cnt, "premise": blocks[0], "label": lab}
else:
sample = {"uid": int(blocks[0]), "premise": blocks[1], "label": lab}
cnt += 1
rows.append(sample)
return rows
def load_cola(file, header=True, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
if is_train and len(blocks) < 2:
continue
lab = 0
if is_train:
lab = int(blocks[1])
sample = {"uid": cnt, "premise": blocks[-1], "label": lab}
else:
sample = {"uid": cnt, "premise": blocks[-1], "label": lab}
rows.append(sample)
cnt += 1
return rows
def load_stsb(file, header=True, is_train=True):
rows = []
cnt = 0
with open(file, encoding="utf8") as f:
for line in f:
if header:
header = False
continue
blocks = line.strip().split("\t")
assert len(blocks) > 8
score = "0.0"
if is_train:
score = blocks[-1]
sample = {
"uid": cnt,
"premise": blocks[-3],
"hypothesis": blocks[-2],
"label": score,
}
else:
sample = {
"uid": cnt,
"premise": blocks[-2],
"hypothesis": blocks[-1],
"label": score,
}
rows.append(sample)
cnt += 1
return rows
def load_qnnli(file, header=True, is_train=True):
"""QNLI for ranking"""
rows = []
mis_matched_cnt = 0
cnt = 0
with open(file, encoding="utf8") as f:
lines = f.readlines()
if header:
lines = lines[1:]
assert len(lines) % 2 == 0
for idx in range(0, len(lines), 2):
block1 = lines[idx].strip().split("\t")
block2 = lines[idx + 1].strip().split("\t")
# train shuffle
assert len(block1) > 2 and len(block2) > 2
if is_train and block1[1] != block2[1]:
mis_matched_cnt += 1
continue
assert block1[1] == block2[1]
lab1, lab2 = "entailment", "entailment"
if is_train:
blocks = [block1, block2]
shuffle(blocks)
block1 = blocks[0]
block2 = blocks[1]
lab1 = block1[-1]
lab2 = block2[-1]
if lab1 == lab2:
mis_matched_cnt += 1
continue
assert "," not in lab1
assert "," not in lab2
assert "," not in block1[0]
assert "," not in block2[0]
sample = {
"uid": cnt,
"ruid": "%s,%s" % (block1[0], block2[0]),
"premise": block1[1],
"hypothesis": [block1[2], block2[2]],
"label": "%s,%s" % (lab1, lab2),
}
cnt += 1
rows.append(sample)
return rows
def submit(path, data, label_dict=None):
header = "index\tprediction"
with open(path, "w") as writer:
predictions, uids = data["predictions"], data["uids"]
writer.write("{}\n".format(header))
assert len(predictions) == len(uids)
# sort label
paired = [(int(uid), predictions[idx]) for idx, uid in enumerate(uids)]
paired = sorted(paired, key=lambda item: item[0])
for uid, pred in paired:
if label_dict is None:
writer.write("{}\t{}\n".format(uid, pred))
else:
assert type(pred) is int
writer.write("{}\t{}\n".format(uid, label_dict[pred]))
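# Editor's example (hypothetical values): with uids ["1", "0"], predictions [2, 1] and
# label_dict = {0: "entailment", 1: "neutral", 2: "contradiction"}, submit() writes the
# rows sorted by uid:
#   index	prediction
#   0	neutral
#   1	contradiction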
|
ContextualSP/adaptershare/experiments/glue/glue_utils.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/glue/glue_utils.py",
"repo_id": "ContextualSP",
"token_count": 6831
}
| 250 |
## Quickstart
### Example of XNLI based on XLM-R
1. Download the XNLI data <br/>
2. Preprocess <br/>
> python experiments\xnli\xnli_prepro.py <br/>
> python prepro_std.py --model xlm-roberta-base --task_def experiments\xnli\xnli_task_def.yml --root_dir [XNLI-DIR]
3. Train
> python train.py --data_dir data\canonical_data\xlm_base_cased\ --train_data xnli --test_data xnli --init_checkpoint xlm-roberta-base --task_def experiments\xnli\xnli_task_def.yml --encoder_type 5
|
ContextualSP/adaptershare/experiments/xnli/README.md/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/xnli/README.md",
"repo_id": "ContextualSP",
"token_count": 198
}
| 251 |
cola:
# PremiseOnly + Classification
data_format: PremiseOnly
dropout_p: 0.05
enable_san: false
metric_meta:
- ACC
- MCC
loss: CeCriterion
kd_loss: MseCriterion
n_class: 2
split_names:
- train
task_type: Classification
mnli:
# PremiseAndOneHypothesis + Classification
data_format: PremiseAndOneHypothesis
dropout_p: 0.3
enable_san: true
labels:
- contradiction
- neutral
- entailment
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
n_class: 3
split_names:
- train
task_type: Classification
stsb:
# PremiseAndOneHypotheise + Regression
data_format: PremiseAndOneHypothesis
enable_san: false
metric_meta:
- Pearson
- Spearman
n_class: 1
split_names:
- train
loss: MseCriterion
kd_loss: MseCriterion
task_type: Regression
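# Editor's note: the loss / kd_loss names above appear to map to the criterion classes
# registered in mt_dnn/loss.py (CeCriterion, MseCriterion, ...), while data_format and
# task_type control how each task's data is batched and scored.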
|
ContextualSP/adaptershare/int_test_data/glue/input/prepro_std/glue_task_def.yml/0
|
{
"file_path": "ContextualSP/adaptershare/int_test_data/glue/input/prepro_std/glue_task_def.yml",
"repo_id": "ContextualSP",
"token_count": 305
}
| 252 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import torch
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
import torch.nn as nn
from enum import IntEnum
def stable_kl(logit, target, epsilon=1e-6, reduce=True):
logit = logit.view(-1, logit.size(-1)).float()
target = target.view(-1, target.size(-1)).float()
bs = logit.size(0)
p = F.log_softmax(logit, 1).exp()
y = F.log_softmax(target, 1).exp()
rp = -(1.0 / (p + epsilon) - 1 + epsilon).detach().log()
ry = -(1.0 / (y + epsilon) - 1 + epsilon).detach().log()
if reduce:
return (p * (rp - ry) * 2).sum() / bs
else:
return (p * (rp - ry) * 2).sum()
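# Editor's note: rp and ry above are numerically stable log-odds transforms of the softmax
# probabilities p and y (clamped away from 0/1 by epsilon), so the returned quantity behaves
# like a KL-style divergence between the two logit distributions but stays finite even when
# some probabilities are (near) zero. The 2x scaling and the detach() on the transforms
# follow the original implementation; this comment is descriptive only.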
class Criterion(_Loss):
def __init__(self, alpha=1.0, name="criterion"):
super().__init__()
"""Alpha is used to weight each loss term
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight"""
return
class CeCriterion(Criterion):
def __init__(self, alpha=1.0, name="Cross Entropy Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight"""
if weight is not None:
loss = torch.sum(
F.cross_entropy(
input,
target,
reduction="none",
ignore_index=ignore_index,
)
* weight
)
else:
loss = F.cross_entropy(input, target, ignore_index=ignore_index)
loss = loss * self.alpha
return loss
class SeqCeCriterion(CeCriterion):
def __init__(self, alpha=1.0, name="Seq Cross Entropy Criterion"):
super().__init__(alpha, name)
def forward(self, input, target, weight=None, ignore_index=-1):
target = target.view(-1)
if weight is not None:
loss = torch.mean(
F.cross_entropy(input, target, reduce=False, ignore_index=ignore_index)
* weight
)
else:
loss = F.cross_entropy(input, target, ignore_index=ignore_index)
loss = loss * self.alpha
return loss
class MseCriterion(Criterion):
def __init__(self, alpha=1.0, name="MSE Regression Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight"""
if weight is not None:
loss = torch.mean(
F.mse_loss(input.squeeze(), target, reduce=False)
* weight.reshape((target.shape[0], 1))
)
else:
loss = F.mse_loss(input.squeeze(), target)
loss = loss * self.alpha
return loss
class KlCriterion(Criterion):
def __init__(self, alpha=1.0, name="KL Div Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits"""
input = input.float()
target = target.float()
loss = F.kl_div(
F.log_softmax(input, dim=-1, dtype=torch.float32),
F.softmax(target, dim=-1, dtype=torch.float32),
reduction="batchmean",
)
loss = loss * self.alpha
return loss
class NsKlCriterion(Criterion):
def __init__(self, alpha=1.0, name="KL Div Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits"""
input = input.float()
target = target.float()
loss = stable_kl(input, target.detach())
loss = loss * self.alpha
return loss
class SymKlCriterion(Criterion):
def __init__(self, alpha=1.0, name="KL Div Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(
self, input, target, weight=None, ignore_index=-1, reduction="batchmean"
):
"""input/target: logits"""
input = input.float()
target = target.float()
loss = F.kl_div(
F.log_softmax(input, dim=-1, dtype=torch.float32),
F.softmax(target.detach(), dim=-1, dtype=torch.float32),
reduction=reduction,
) + F.kl_div(
F.log_softmax(target, dim=-1, dtype=torch.float32),
F.softmax(input.detach(), dim=-1, dtype=torch.float32),
reduction=reduction,
)
loss = loss * self.alpha
return loss
class NsSymKlCriterion(Criterion):
def __init__(self, alpha=1.0, name="KL Div Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""input/target: logits"""
input = input.float()
target = target.float()
loss = stable_kl(input, target.detach()) + stable_kl(target, input.detach())
loss = loss * self.alpha
return loss
class JSCriterion(Criterion):
def __init__(self, alpha=1.0, name="JS Div Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(
self, input, target, weight=None, ignore_index=-1, reduction="batchmean"
):
"""input/target: logits"""
input = input.float()
target = target.float()
m = F.softmax(target.detach(), dim=-1, dtype=torch.float32) + F.softmax(
input.detach(), dim=-1, dtype=torch.float32
)
m = 0.5 * m
loss = F.kl_div(
F.log_softmax(input, dim=-1, dtype=torch.float32), m, reduction=reduction
) + F.kl_div(
F.log_softmax(target, dim=-1, dtype=torch.float32), m, reduction=reduction
)
loss = loss * self.alpha
return loss
class HLCriterion(Criterion):
def __init__(self, alpha=1.0, name="Hellinger Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(
self, input, target, weight=None, ignore_index=-1, reduction="batchmean"
):
"""input/target: logits"""
input = input.float()
target = target.float()
si = F.softmax(target.detach(), dim=-1, dtype=torch.float32).sqrt_()
st = F.softmax(input.detach(), dim=-1, dtype=torch.float32).sqrt_()
loss = F.mse_loss(si, st)
loss = loss * self.alpha
return loss
class RankCeCriterion(Criterion):
def __init__(self, alpha=1.0, name="Cross Entropy Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1, pairwise_size=1):
input = input.view(-1, pairwise_size)
target = target.contiguous().view(-1, pairwise_size)[:, 0]
if weight is not None:
loss = torch.mean(
F.cross_entropy(input, target, reduce=False, ignore_index=ignore_index)
* weight
)
else:
loss = F.cross_entropy(input, target, ignore_index=ignore_index)
loss = loss * self.alpha
return loss
class SpanCeCriterion(Criterion):
def __init__(self, alpha=1.0, name="Span Cross Entropy Criterion"):
super().__init__()
"""This is for extractive MRC, e.g., SQuAD, ReCoRD ... etc
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight"""
assert len(input) == 2
start_input, end_input = input
if len(target) == 3:
start_target, end_target, _ = target
else:
assert len(target) == 2
start_target, end_target = target
if weight is not None:
b = torch.mean(
F.cross_entropy(
start_input, start_target, reduce=False, ignore_index=ignore_index
)
* weight
)
e = torch.mean(
F.cross_entropy(
end_input, end_target, reduce=False, ignore_index=ignore_index
)
* weight
)
else:
b = F.cross_entropy(start_input, start_target, ignore_index=ignore_index)
e = F.cross_entropy(end_input, end_target, ignore_index=ignore_index)
loss = 0.5 * (b + e) * self.alpha
return loss
class SpanYNCeCriterion(Criterion):
def __init__(self, alpha=1.0, name="Span Cross Entropy Criterion"):
super().__init__()
"""This is for extractive MRC, e.g., SQuAD, ReCoRD ... etc
"""
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""weight: sample weight"""
assert len(input) == 3
start_input, end_input, labels_input = input
# start/end/yesno
start_target, end_target, labels_target = target
if weight is not None:
b = torch.mean(
F.cross_entropy(
start_input, start_target, reduce=False, ignore_index=ignore_index
)
* weight
)
e = torch.mean(
F.cross_entropy(
end_input, end_target, reduce=False, ignore_index=ignore_index
)
* weight
)
# yes/no
c = torch.mean(
F.cross_entropy(
labels_input, labels_target, reduce=False, ignore_index=ignore_index
)
* weight
)
else:
b = F.cross_entropy(start_input, start_target, ignore_index=ignore_index)
e = F.cross_entropy(end_input, end_target, ignore_index=ignore_index)
c = F.cross_entropy(labels_input, labels_target, ignore_index=ignore_index)
loss = 0.5 * (b + e) * self.alpha + c
return loss
class MlmCriterion(Criterion):
def __init__(self, alpha=1.0, name="BERT pre-train Criterion"):
super().__init__()
self.alpha = alpha
self.name = name
def forward(self, input, target, weight=None, ignore_index=-1):
"""TODO: support sample weight, xiaodl"""
mlm_y, y = target
mlm_p, nsp_p = input
mlm_p = mlm_p.view(-1, mlm_p.size(-1))
mlm_y = mlm_y.view(-1)
mlm_loss = F.cross_entropy(mlm_p, mlm_y, ignore_index=ignore_index)
nsp_loss = F.cross_entropy(nsp_p, y)
loss = mlm_loss + nsp_loss
loss = loss * self.alpha
return loss
class LossCriterion(IntEnum):
CeCriterion = 0
MseCriterion = 1
RankCeCriterion = 2
SpanCeCriterion = 3
SeqCeCriterion = 4
MlmCriterion = 5
KlCriterion = 6
SymKlCriterion = 7
NsKlCriterion = 8
NsSymKlCriterion = 9
JSCriterion = 10
HLCriterion = 11
LOSS_REGISTRY = {
LossCriterion.CeCriterion: CeCriterion,
LossCriterion.MseCriterion: MseCriterion,
LossCriterion.RankCeCriterion: RankCeCriterion,
LossCriterion.SpanCeCriterion: SpanCeCriterion,
LossCriterion.SeqCeCriterion: SeqCeCriterion,
LossCriterion.MlmCriterion: MlmCriterion,
LossCriterion.KlCriterion: KlCriterion,
LossCriterion.SymKlCriterion: SymKlCriterion,
LossCriterion.NsKlCriterion: NsKlCriterion,
LossCriterion.NsSymKlCriterion: NsSymKlCriterion,
LossCriterion.JSCriterion: JSCriterion,
LossCriterion.HLCriterion: HLCriterion,
}
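# Usage sketch (editor's addition; the logits/labels below are made up). A task definition
# typically stores a LossCriterion id, which is resolved through LOSS_REGISTRY and then
# called like any criterion:
#
#   criterion_cls = LOSS_REGISTRY[LossCriterion.CeCriterion]
#   criterion = criterion_cls(alpha=1.0)
#   logits = torch.randn(8, 3)            # (batch, n_class)
#   labels = torch.randint(0, 3, (8,))    # gold class ids
#   loss = criterion(logits, labels)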
|
ContextualSP/adaptershare/mt_dnn/loss.py/0
|
{
"file_path": "ContextualSP/adaptershare/mt_dnn/loss.py",
"repo_id": "ContextualSP",
"token_count": 5654
}
| 253 |
import torch
import torch.nn as nn
from torch.optim import Optimizer, Adam
class WarmupPolynomialLRScheduler:
optimizer: Optimizer
num_warmup_steps: int
start_lr: float
end_lr: float
decay_steps: int
power: float
def __init__(self, optimizer: Optimizer, start_lr: float, num_warmup_steps: int = 2000, end_lr: float = 0.0, decay_steps: int = 98000, power: float = 1.0) -> None:
self.optimizer = optimizer
self.num_warmup_steps = num_warmup_steps
self.start_lr = start_lr
self.end_lr = end_lr
self.decay_steps = decay_steps
self.power = power
def update(self, step: int):
if step < self.num_warmup_steps:
warmup_frac_done = step / self.num_warmup_steps
new_lr = self.start_lr * warmup_frac_done
elif step < (self.num_warmup_steps + self.decay_steps):
new_lr = (self.start_lr - self.end_lr) * (
1 - (step - self.num_warmup_steps) / self.decay_steps
) ** self.power + self.end_lr
else:
new_lr = self.end_lr
for param_group in self.optimizer.param_groups:
param_group["lr"] = new_lr
class BertWarmupPolynomialLRScheduler(WarmupPolynomialLRScheduler):
bert_factor: float
def __init__(self, optimizer: Optimizer, start_lr: float, bert_factor: float, num_warmup_steps: int = 2000, end_lr: float = 0.0, decay_steps: int = 98000, power: float = 1.0) -> None:
super().__init__(optimizer, start_lr, num_warmup_steps=num_warmup_steps, end_lr=end_lr, decay_steps=decay_steps, power=power)
self.bert_factor = bert_factor
def update(self, step):
super(BertWarmupPolynomialLRScheduler, self).update(step)
for param_group in self.optimizer.param_groups:
if param_group["name"] == "bert":
param_group["lr"] /= self.bert_factor
def _is_bert_parameter(param_name: str):
return param_name.startswith('bert') or param_name.startswith('encoder.bert')
def get_optimizer_and_lr_scheduler(model: nn.Module, lr: float, num_warmup_steps: int = 2000, bert_factor: float = 8):
optimizer = Adam(params=[
{
"name": "no-bert",
"params": (
parameters
for name, parameters in model.named_parameters()
if not _is_bert_parameter(name)
),
},
{
"name": "bert",
"params": (
parameters
for name, parameters in model.named_parameters()
if _is_bert_parameter(name)
),
}])
lr_scheduler = BertWarmupPolynomialLRScheduler(optimizer=optimizer, start_lr=lr, bert_factor=bert_factor, num_warmup_steps=num_warmup_steps)
return optimizer, lr_scheduler
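# Usage sketch (editor's addition; `model`, `total_steps`, `compute_loss` and `batch` are
# placeholders, not part of this module):
#
#   optimizer, lr_scheduler = get_optimizer_and_lr_scheduler(
#       model, lr=1e-4, num_warmup_steps=2000, bert_factor=8)
#   for step in range(total_steps):
#       lr_scheduler.update(step)            # linear warmup, then polynomial decay to end_lr
#       loss = compute_loss(model, batch)    # hypothetical training-loss helper
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()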
|
ContextualSP/awakening_latent_grounding/models/optmizers.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/models/optmizers.py",
"repo_id": "ContextualSP",
"token_count": 1305
}
| 254 |
from enum import Enum
import re
import json
from collections import defaultdict
from typing import List, Dict, Tuple
from dataclasses import dataclass, field
from transformers import BertTokenizer
"""
Constant values
"""
SOS_Token = '<sos>'
EOS_Token = '<eos>'
UNK_Token = '<unk>'
TBL_Token = '<tbl>'
VAL_Token = '<val>'
Tbl_Col_Sep = '[TC_SEP]'
Col_Val_Sep = '[CV_SEP]'
DB_Col_Keys = ['[Null_Key]', '[Primary_Key]', '[Foreign_Key]', '[PF_Key]']
Bert_Special_Tokens = {
TBL_Token: '[unused10]',
'*': '[unused15]',
'text': '[unused21]',
'number': '[unused22]',
'time': '[unused23]',
'boolean': '[unused24]',
'real': '[unused25]',
'integer': '[unused26]',
Tbl_Col_Sep: '[unused30]',
DB_Col_Keys[0]: '[unused40]',
DB_Col_Keys[1]: '[unused41]',
DB_Col_Keys[2]: '[unused42]',
DB_Col_Keys[3]: '[unused43]',
}
Max_Decoding_Steps = 100
@dataclass(order=False, frozen=True)
class Token:
index: int # token index in utterance
token: str # token original value
lemma: str # lemmatise value
pieces: List[str] # bert pieces
def to_json(self) -> Dict:
return self.__dict__
@staticmethod
def from_json(obj: Dict):
return Token(**obj)
def __str__(self):
return self.token
@dataclass
class Utterance:
text: str
tokens: List[Token]
pieces: List[str] = field(init=False)
token2pieces: List[Tuple[int, int]] = field(init=False)
piece2token: List[int] = field(init=False)
def __post_init__(self):
pieces, token2pieces, piece2token = [], [], []
for i, token in enumerate(self.tokens):
n_pieces = len(token.pieces)
token2pieces += [(len(pieces), len(pieces) + n_pieces - 1)]
pieces += token.pieces
piece2token += [i] * n_pieces
self.pieces = pieces
self.token2pieces = token2pieces
self.piece2token = piece2token
def __str__(self):
return self.text
def __len__(self):
return len(self.tokens)
@property
def num_tokens(self):
return len(self.tokens)
@property
def text_tokens(self) -> List[str]:
return [token.token for token in self.tokens]
def to_json(self) -> Dict:
return {
'text': self.text,
'tokens': [x.to_json() for x in self.tokens]
}
@classmethod
def from_json(cls, obj: Dict):
return Utterance(
text=obj['text'],
tokens=[Token.from_json(x) for x in obj['tokens']] if obj['tokens'] is not None else None
)
def get_token2pieces(self):
token2pieces = []
count = 0
for i, token in enumerate(self.tokens):
token2pieces += [(count, count + len(token.pieces) - 1)]
count += len(token.pieces)
return token2pieces
def get_piece2token(self):
piece2token = []
for tok_idx, token in enumerate(self.tokens):
for piece in token.pieces:
piece2token.append(tok_idx)
return piece2token
def get_pieces(self):
pieces = []
for token in self.tokens:
pieces += token.pieces
return pieces
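# Editor's example (hypothetical tokens): for tokens ["who", "walked"] whose BERT pieces are
# ["who"] and ["walk", "##ed"], the fields derived in __post_init__ are
#   pieces       = ["who", "walk", "##ed"]
#   token2pieces = [(0, 0), (1, 2)]   # inclusive piece span per token
#   piece2token  = [0, 1, 1]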
@dataclass
class STSchema:
"""
Single-table schema
"""
table_id: str
column_names: List[str]
column_types: List[str]
id_map: Dict[str, int] = field(init=False) # column names to index
def __post_init__(self):
assert len(self.column_names) == len(self.column_types)
id_map = {}
for name in self.column_names:
assert name not in id_map
id_map[name] = len(id_map)
self.id_map = id_map
@property
def num_columns(self):
return len(self.column_names)
def to_json(self) -> Dict:
return {
'table_id': self.table_id,
'column_names': self.column_names,
'column_types': self.column_types
}
@classmethod
def from_json(cls, obj: Dict):
return STSchema(**obj)
def to_string(self):
column_with_types = ["{}/{}".format(c, t) for c, t in zip(self.column_names, self.column_types)]
return "{}:\t{}".format(self.table_id, " || ".join(column_with_types))
@dataclass
class WTQSchema:
table_id: str
column_headers: List[str]
column_names_internal: List[str] # Internal column name used in WikiTableQuestion to generate SQL
column_types_internal: List[str]
internal_to_header: List[int]
header_to_internals: Dict[int, List[int]] = field(init=False)
column_header_to_id: Dict[str, int] = field(init=False)
internal_name_to_id: Dict[str, int] = field(init=False)
column_id_to_suffix_types: Dict[int, List[str]] = field(init=False)
def __post_init__(self):
header_to_internals = defaultdict(list)
for internal_id, header_id in enumerate(self.internal_to_header):
header_to_internals[header_id].append(internal_id)
assert len(header_to_internals) == len(self.column_headers)
self.header_to_internals = header_to_internals
column_header_to_id = {}
for idx, name in enumerate(self.column_headers):
if name in column_header_to_id:
continue
column_header_to_id[name] = idx
self.column_header_to_id = column_header_to_id
internal_name_to_id = {}
for idx, name in enumerate(self.column_names_internal):
if name in internal_name_to_id:
continue
internal_name_to_id[name] = idx
self.internal_name_to_id = internal_name_to_id
column_id_to_suffix_types = {}
for idx in range(len(self.column_headers)):
suffix_types = set([])
for internal_id in self.header_to_internals[idx]:
suffix_types.add(self.get_suffix_type(self.column_names_internal[internal_id]))
column_id_to_suffix_types[idx] = list(suffix_types)
self.column_id_to_suffix_types = column_id_to_suffix_types
@staticmethod
def get_suffix_type(internal_name: str) -> str:
if not re.match(r'^c\d', internal_name):
return ''
return re.sub(r'^c\d+', '', internal_name).strip()
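# Editor's note: WikiTableQuestions internal column names follow a "c<idx><suffix>" convention
# (e.g. "c1", "c1_number"), so get_suffix_type("c1_number") returns "_number" and
# get_suffix_type("c1") returns ""; names that do not match the pattern also yield "".
# The exact suffix inventory depends on the dataset preprocessing.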
def lookup_header_id(self, column_name: str):
header_id = self.column_header_to_id[column_name]
return header_id
def lookup_header_id_from_internal(self, internal_name: str):
internal_id = self.internal_name_to_id[internal_name]
header_id = self.internal_to_header[internal_id]
return header_id
def lookup_header_and_suffix(self, internal_name: str):
header_id = self.lookup_header_id_from_internal(internal_name)
return self.column_headers[header_id], self.get_suffix_type(internal_name)
def to_json(self):
return {
'table_id': self.table_id,
'column_headers': self.column_headers,
'column_names_internal': self.column_names_internal,
'column_types_internal': self.column_types_internal,
'internal_to_header': self.internal_to_header,
}
@classmethod
def from_json(cls, obj: Dict):
return WTQSchema(**obj)
def to_string(self):
out_strs = []
for _, header in enumerate(self.column_headers):
out_strs.append(header)
return "{}: {}".format(self.table_id, " || ".join(out_strs))
@dataclass
class SpiderSchema:
db_id: str
column_names: List[str]
column_types: List[str]
column_names_lemma: List[str]
column_names_original: List[str]
table_names: List[str]
table_names_lemma: List[str]
table_names_original: List[str]
table_to_columns: Dict[int, List[int]]
column_to_table: Dict[int, int]
primary_keys: List[int]
foreign_keys: List[Tuple[int, int]]
id_map: Dict[str, int] = field(init=False) # column full name & table name to index
def __post_init__(self):
self.id_map = self._build()
def _build(self):
idMap = {}
for i, _ in enumerate(self.column_names_original):
idMap[self.get_column_full_name(i)] = i
for i, tab in enumerate(self.table_names_original):
key = tab.lower()
idMap[key] = i
return idMap
@property
def num_tables(self) -> int:
return len(self.table_names_original)
@property
def num_columns(self) -> int:
return len(self.column_names_original)
def build_column2ids(self) -> Dict[str, int]:
col2ids = defaultdict(list)
for c_idx, c_name in enumerate(self.column_names):
col2ids[c_name.lower()].append(c_idx)
return col2ids
@classmethod
def from_json(cls, obj: Dict):
table_to_columns = {}
for i, ids in obj['table_to_columns'].items():
table_to_columns[int(i)] = ids
obj['table_to_columns'] = table_to_columns
column_to_table = {}
for c_idx, t_idx in obj['column_to_table'].items():
column_to_table[int(c_idx)] = int(t_idx)
obj['column_to_table'] = column_to_table
obj.pop('id_map', None)
return SpiderSchema(**obj)
def to_json(self) -> Dict:
return self.__dict__
@property
def schema(self):
tables = defaultdict(list)
for tbl_idx, tbl_name in enumerate(self.table_names_original):
for col_idx in self.table_to_columns[tbl_idx]:
tables[tbl_name.lower()].append(self.column_names_original[col_idx].lower())
return tables
@property
def idMap(self):
return self.id_map
def get_column_full_name(self, column_idx: int) -> str:
if self.column_names_original[column_idx] == '*':
return '*'
table_name = self.table_names_original[self.column_to_table[column_idx]]
return '{}.{}'.format(table_name, self.column_names_original[column_idx]).lower()
def get_col_identifier_name(self, index: int) -> str:
if self.column_names_original[index] == '*':
return '*'
table_name = self.table_names_original[self.column_to_table[index]]
return '{}.{}'.format(table_name, self.column_names_original[index]).lower()
def get_tbl_identifier_name(self, index: int) -> str:
return self.table_names_original[index].lower()
def get_identifier_name(self, type: str, index: int) -> str:
if type in ['tbl', 'table']:
return self.get_tbl_identifier_name(index)
elif type in ['col', 'column']:
return self.get_col_identifier_name(index)
else:
raise NotImplementedError()
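# Editor's note: get_column_key_code below packs key information into a 2-bit flag:
# bit 0 is set when the column is a primary key and bit 1 when it appears in any
# foreign-key pair, so 0 = plain column, 1 = PK only, 2 = FK only, 3 = both.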
def get_column_key_code(self, column_idx: int) -> int:
key = 0
if column_idx in self.primary_keys:
key |= 1
for c1, c2 in self.foreign_keys:
if column_idx in [c1, c2]:
key |= 2
return key
def to_string(self, sep: str = '\n'):
schema_strs = ["db id: {}".format(self.db_id)]
for tbl_id, col_ids in self.table_to_columns.items():
if tbl_id == -1:
continue
tbl_name = self.table_names[tbl_id]
col_strs = []
for col_id in col_ids:
col_str = '{}/{}'.format(self.column_names[col_id], self.column_types[col_id])
if col_id in self.primary_keys:
col_str += '(PK)'
col_strs += [col_str]
schema_strs.append("{}: {}".format(tbl_name, " || ".join(col_strs)))
fk_strs = []
for c_idx1, c_idx2 in self.foreign_keys:
fk_strs.append(
'{}::{} - {}::{}'.format(self.table_names[self.column_to_table[c_idx1]], self.column_names[c_idx1],
self.table_names[self.column_to_table[c_idx2]], self.column_names[c_idx2]))
schema_strs.append("FKs: {}".format(" || ".join(fk_strs)))
return sep.join(schema_strs)
def get_name(self, e_type: str, e_id: int):
if e_type == 'tbl':
return self.table_names[e_id]
elif e_type == 'col':
tbl_id = self.column_to_table[e_id]
return '{}[{}]'.format(self.column_names[e_id], self.table_names[tbl_id])
else:
col_name = self.get_name('col', e_id)
return 'val_{}'.format(col_name)
def get_table_with_columns(self) -> Dict[str, List[str]]:
tables = defaultdict(list)
for tbl_idx, tbl_name in enumerate(self.table_names_original):
for col_idx in self.table_to_columns[tbl_idx]:
tables[tbl_name].append(self.column_names_original[col_idx])
return tables
class SQLTokenType(int, Enum):
null = 0
keyword = 1
table = 2
column = 3
value = 4
def __str__(self):
return self.name
@property
def abbr(self):
return ['null', 'keyword', 'tbl', 'col', 'val'][int(self)]
class SQLFieldType(int, Enum):
Select = 0
From = 1
GroupBy = 2
Where = 3
Having = 4
Sort = 5
class SQLToken:
token_type: SQLTokenType
value: str
def __init__(self, token_type: SQLTokenType, value: str) -> None:
self.token_type = token_type
self.value = value
def __repr__(self):
return str(self)
def __str__(self):
return self.value
def __eq__(self, other: object) -> bool:
if not isinstance(other, SQLToken):
return False
return other.token_type == self.token_type and other.value == self.value
def to_json(self) -> Dict:
return {
'token_type': self.token_type,
'value': self.value,
}
@classmethod
def from_json(cls, obj: Dict):
token_type = SQLTokenType(obj['token_type'])
if token_type == SQLTokenType.keyword:
return KeywordToken.from_json(obj)
elif token_type == SQLTokenType.table:
return TableToken.from_json(obj)
elif token_type == SQLTokenType.column:
return ColumnToken.from_json(obj)
elif token_type == SQLTokenType.value:
return ValueToken.from_json(obj)
elif token_type == SQLTokenType.null:
return SQLToken(SQLTokenType.null, None)
else:
raise NotImplementedError("Not supported type: {}".format(token_type))
class KeywordToken(SQLToken):
def __init__(self, keyword: str) -> None:
super().__init__(SQLTokenType.keyword, keyword)
@property
def keyword(self) -> str:
return self.value
def __eq__(self, other: object) -> bool:
return isinstance(other, KeywordToken) and self.keyword == other.keyword
@classmethod
def from_json(cls, obj: Dict):
return KeywordToken(keyword=obj['value'])
class TableToken(SQLToken):
def __init__(self, table_name: str = None) -> None:
super().__init__(SQLTokenType.table, table_name)
def __str__(self):
if not self.table_name:
return 'T'
return self.table_name
@property
def table_name(self) -> str:
return self.value
def __eq__(self, other: object) -> bool:
return isinstance(other, TableToken) and self.table_name == other.table_name
@classmethod
def from_json(cls, obj: Dict):
return TableToken(table_name=obj['value'])
class ColumnToken(SQLToken):
suffix_type: str # Used for WTQ
def __init__(self, column_header: str, suffix_type: str) -> None:
super().__init__(SQLTokenType.column, column_header)
self.suffix_type = suffix_type
@property
def column_name(self):
return self.value
def __str__(self):
return self.column_name + self.suffix_type
def __eq__(self, other: object) -> bool:
return isinstance(other,
ColumnToken) and other.column_name == self.column_name and other.suffix_type == self.suffix_type
def to_json(self) -> Dict:
return {
'token_type': self.token_type,
'value': self.value,
'suffix_type': self.suffix_type
}
@classmethod
def from_json(cls, obj: Dict):
return ColumnToken(column_header=obj['value'], suffix_type=obj['suffix_type'])
class ValueToken(SQLToken):
columns: List[str]
span: Tuple[int, int]
def __init__(self, value: str, span: Tuple[int, int] = None, columns: List[str] = None) -> None:
assert value is not None or span is not None
super().__init__(SQLTokenType.value, value)
self.span = span
self.columns = columns
def __str__(self):
if self.span is None:
if isinstance(self.value, str):
return self.value
return str(self.value)
return "{}[{}:{}]".format(self.value, self.span[0], self.span[1])
def to_json(self):
return {
'token_type': self.token_type,
'value': self.value,
'columns': self.columns,
'span': self.span
}
def __eq__(self, other: object) -> bool:
if self.span is not None:
return isinstance(other, ValueToken) and self.span == other.span
return isinstance(other, ValueToken) and self.value == other.value
def __ne__(self, other: object) -> bool:
return not other == self
@property
def start(self) -> int:
return self.span[0]
@property
def end(self) -> int:
return self.span[1]
@classmethod
def from_json(cls, obj: Dict):
return ValueToken(value=obj['value'], span=obj['span'], columns=obj['columns'])
@dataclass(frozen=True)
class SQLExpression:
tokens: List[SQLToken]
db_id: str = field(default=None)
@property
def sql(self):
return " ".join([str(term) for term in self.tokens]).replace('\n', '\\n')
def __len__(self):
return len(self.tokens)
def __str__(self):
return self.sql
def __repr__(self) -> str:
return self.sql
def to_json(self) -> Dict:
return {'tokens': [x.to_json() for x in self.tokens]}
def __eq__(self, other: object) -> bool:
if not isinstance(other, SQLExpression) or len(self.tokens) != len(other.tokens):
return False
for i in range(len(self.tokens)):
if not self.tokens[i] == other.tokens[i]:
return False
return True
@classmethod
def from_json(cls, obj: Dict):
return SQLExpression(tokens=[SQLToken.from_json(x) for x in obj['tokens']])
@dataclass
class SQLTokenHypothesis:
tokens: List[SQLToken]
scores: List[float]
total_score: float
def is_finished(self):
last_token = self.tokens[-1]
return last_token.token_type == SQLTokenType.keyword and last_token.value == EOS_Token
def __len__(self):
return len(self.tokens)
@property
def num_steps(self):
return len(self.tokens)
def update(self, token: SQLToken, score: float):
return SQLTokenHypothesis(tokens=self.tokens + [token], scores=self.scores + [score],
total_score=self.total_score + score)
def to_sql(self):
if self.is_finished():
return SQLExpression(tokens=self.tokens[1:-1])
else:
return SQLExpression(tokens=self.tokens[1:])
@classmethod
def from_token(cls, token: SQLToken):
return SQLTokenHypothesis(tokens=[token], scores=[0.0], total_score=0.0)
@classmethod
def from_sos(cls):
return SQLTokenHypothesis(tokens=[KeywordToken(SOS_Token)], scores=[0], total_score=0)
class AlignmentLabel:
token: Token
align_type: SQLTokenType
align_value: str
confidence: float = 1.0
def __init__(self, token: Token, align_type: SQLTokenType, align_value: str, confidence: float = 1.0):
self.token = token
self.align_type = align_type
self.align_value = align_value
self.confidence = confidence
def to_json(self) -> Dict:
return {'token': self.token.to_json(), 'align_type': self.align_type, 'align_value': self.align_value,
'confidence': self.confidence}
@classmethod
def from_json(cls, obj: Dict):
return AlignmentLabel(
token=Token.from_json(obj['token']),
align_type=SQLTokenType(obj['align_type']),
align_value=obj['align_value'],
confidence=obj['confidence']
)
def to_slsql(self, schema: SpiderSchema) -> Dict:
if self.align_type == SQLTokenType.null:
return None
elif self.align_type == SQLTokenType.table:
tbl_id = schema.id_map[self.align_value]
return {'type': 'tbl', 'id': tbl_id, 'token': self.token.token, 'value': self.align_value}
elif self.align_type == SQLTokenType.column:
col_id = schema.id_map[self.align_value]
return {'type': 'col', 'id': col_id, 'token': self.token.token, 'value': self.align_value}
elif self.align_type == SQLTokenType.value:
column_name = self.align_value.replace("VAL_", "")
col_id = schema.id_map[column_name]
return {'type': 'val', 'id': col_id, 'token': self.token.token, 'value': self.align_value}
else:
raise NotImplementedError()
def __str__(self):
if self.align_type == SQLTokenType.null:
return self.token.token
return "{}/{}/{:.3f}".format(self.token.token, self.align_value, self.confidence)
def __eq__(self, value):
assert isinstance(value, AlignmentLabel)
return self.token.index == value.token.index and self.align_type == value.align_type and self.align_value == value.align_value
class SchemaRelation(int, Enum):
null = 0
table_table = 1
table_column = 2
column_table = 3
column_column = 4
column_value = 5
value_column = 6
table_column_pk = 7
column_table_pk = 8
column_column_fk_fw = 9
column_column_fk_bw = 10
def save_json_objects(objects: List, path: str):
with open(path, 'w', encoding='utf-8') as fw:
fw.write('[\n')
for idx, obj in enumerate(objects):
if idx == len(objects) - 1:
fw.write(json.dumps(obj) + "\n")
else:
fw.write(json.dumps(obj) + ",\n")
fw.write(']\n')
def generate_utterance(tokenizer: BertTokenizer, text: str, tokens: List[str] = None,
lemma: List[str] = None) -> Utterance:
assert text is not None or tokens is not None
if text is None:
text = " ".join(tokens)
if tokens is None:
tokens = text.split()
if lemma is None:
lemma = [x.lower() for x in tokens]
assert len(tokens) == len(lemma)
new_tokens = []
for i, (tok, lem) in enumerate(zip(tokens, lemma)):
pieces = tokenizer.tokenize(lem)
token = Token(index=i, token=tok, lemma=lem, pieces=pieces)
new_tokens += [token]
return Utterance(text=text, tokens=new_tokens)
|
ContextualSP/awakening_latent_grounding/utils/data_types.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/utils/data_types.py",
"repo_id": "ContextualSP",
"token_count": 10629
}
| 255 |
import pdb
import random
import statistics
from itertools import chain
import math
import torch.nn.functional as F
from torch import nn
from masked_cross_entropy import *
from utils import Categorical
from modules.BinaryTreeBasedModule import BinaryTreeBasedModule
from utils import clamp_grad
import torch
USE_CUDA = torch.cuda.is_available()
PAD_token = 0
SOS_token = 1
EOS_token = 2
x1_token = 3
x2_token = 4
x3_token = 5
x4_token = 6
all_action_words = ["i_look", "i_jump", "i_walk", "i_run", "i_turn_right", "i_turn_left"]
available_src_vars = ['x1', 'x2', 'x3', 'x4']
MAX_LENGTH = 10
def flatten(l):
for el in l:
if hasattr(el, "__iter__") and not isinstance(el, str):
for sub in flatten(el):
yield sub
else:
yield el
class BottomUpTreeComposer(BinaryTreeBasedModule):
def __init__(self, input_dim, hidden_dim, vocab_size, leaf_transformation, trans_hidden_dim,
self_attention_in_tree=False, dropout_prob=None):
super().__init__(input_dim, hidden_dim, leaf_transformation, trans_hidden_dim, dropout_prob)
self.embd_parser = nn.Embedding(vocab_size, input_dim)
self.sr_linear = nn.Linear(in_features=hidden_dim, out_features=2)
self.use_self_attention = self_attention_in_tree
if self.use_self_attention:
self.q = nn.Parameter(torch.empty(size=(hidden_dim * 2,), dtype=torch.float32))
else:
self.q = nn.Parameter(torch.empty(size=(hidden_dim,), dtype=torch.float32))
# if use self attention, we should employ these parameters
self.bilinear_w = nn.Bilinear(hidden_dim, hidden_dim, 1)
self.hidden_dim = hidden_dim
self.reset_parameters()
def reset_parameters(self):
super().reset_parameters()
nn.init.normal_(self.q, mean=0, std=0.01)
nn.init.uniform_(self.bilinear_w.weight, -0.1, 0.1)
def forward(self, pair, x, mask,
relaxed=False, tau_weights=None, straight_through=False, noise=None,
eval_actions=None, eval_sr_actions=None, eval_swr_actions=None, debug_info=None):
input_span = pair[0].split()
span_start_end = [[i, i] for i in range(len(input_span))]
x = self.embd_parser(x)
probs = []
gumbel_noise = []
actions = []
entropy = []
normalized_entropy = []
log_prob = []
sr_probs = []
sr_gumbel_noise = []
sr_actions = []
sr_entropy = []
sr_normalized_entropy = []
sr_log_prob = []
hidden, cell = self._transform_leafs(x, mask)
swr_probs = []
swr_gumbel_noise = []
swr_actions = []
swr_entropy = []
swr_normalized_entropy = []
swr_log_prob = []
reduce_span = []
all_span = []
tree_sr_log_prob = []
# make debug info List of List
debug_reduce_probs = []
debug_merge_probs = []
for i in range(x.shape[1]):
noise_i = None if noise is None else noise[i]
eval_swr_actions_i = None if eval_swr_actions is None else eval_swr_actions[i]
swr_cat_distr, swr_gumbel_noise_i, swr_actions_i = self._swr_make_step(hidden, i, relaxed, tau_weights,
straight_through, noise_i,
eval_swr_actions_i)
hidden, cell = self.swr_abst_embed(hidden, cell, i, swr_actions_i)
if swr_actions_i[0, 0] == 1:
input_span[i] = "x1"
debug_reduce_probs.append(float(swr_cat_distr.probs[0][0]))
swr_probs.append(swr_cat_distr.probs)
swr_gumbel_noise.append(swr_gumbel_noise_i)
swr_actions.append(swr_actions_i)
swr_entropy.append(swr_cat_distr.entropy)
swr_normalized_entropy.append(swr_cat_distr.normalized_entropy)
swr_log_prob.append(-swr_cat_distr.log_prob(swr_actions_i))
span = [i, i]
all_span.append(span)
tree_sr_log_prob.append(-swr_cat_distr.log_prob(swr_actions_i))
if swr_actions_i[0, 0] == 1 or x.shape[1] == 1:
reduce_span.append(span)
# swr_log_prob = None if relaxed else sum(swr_log_prob)
swr_entropy = sum(swr_entropy)
swr_normalized_entropy = sum(swr_normalized_entropy) / (torch.sum(mask[:, 0:], dim=-1) + 1e-17)
swr_rl_info = [swr_entropy, swr_normalized_entropy, swr_actions, swr_log_prob]
if x.shape[1] == 1:
actions = []
sr_actions = []
entropy, normalized_entropy, log_prob = 0, 0, 0
sr_entropy, sr_normalized_entropy, sr_log_prob = 0, 0, 0
else:
for i in range(1, x.shape[1]):
noise_i = None if noise is None else noise[i - 1]
ev_actions_i = None if eval_actions is None else eval_actions[i - 1]
eval_sr_actions_i = None if eval_sr_actions is None else eval_sr_actions[i - 1]
cat_distr, gumbel_noise_i, actions_i, hidden, cell = self._make_step(hidden, cell, mask[:, i:], relaxed,
tau_weights,
straight_through, noise_i,
ev_actions_i)
# add merge prob distribution
debug_merge_probs.extend([float(ele) for ele in cat_distr.probs[0]])
probs.append(cat_distr.probs)
gumbel_noise.append(gumbel_noise_i)
actions.append(actions_i)
entropy.append(cat_distr.entropy)
normalized_entropy.append(cat_distr.normalized_entropy)
log_prob.append(-cat_distr.log_prob(actions_i))
sr_cat_distr, sr_gumbel_noise_i, sr_actions_i = self._sr_make_step(hidden, actions_i, relaxed,
tau_weights,
straight_through, noise_i,
eval_sr_actions_i)
action_idx = actions_i[0].argmax().item()
merged_span = " ".join(input_span[action_idx:action_idx + 2])
if len(merged_span.split()) >= 3:
sr_actions_i = torch.tensor([[1., 0.]]).cuda()
if merged_span.count(available_src_vars[0]) >= 2:
sr_actions_i = torch.tensor([[1., 0.]]).cuda()
if sr_actions_i[0, 0] == 1:
merged_span = available_src_vars[0]
input_span = input_span[:action_idx] + [merged_span] + input_span[action_idx + 2:]
hidden, cell = self.abst_embed(hidden, cell, actions_i, sr_actions_i)
debug_reduce_probs.append(float(sr_cat_distr.probs[0][0]))
sr_probs.append(sr_cat_distr.probs)
sr_gumbel_noise.append(sr_gumbel_noise_i)
sr_actions.append(sr_actions_i)
sr_entropy.append(sr_cat_distr.entropy)
sr_normalized_entropy.append(sr_cat_distr.normalized_entropy)
sr_log_prob.append(-sr_cat_distr.log_prob(sr_actions_i))
log_prob_step = - cat_distr.log_prob(actions_i) - sr_cat_distr.log_prob(sr_actions_i)
span_left = span_start_end[action_idx]
span_right = span_start_end[action_idx + 1]
span = [span_left[0], span_right[1]]
all_span.append(span)
tree_sr_log_prob.append(log_prob_step)
if (sr_actions_i[0, 0] == 1) or (i == x.shape[1] - 1):
reduce_span.append(span)
span_start_end = span_start_end[:action_idx] + [span] + span_start_end[action_idx + 2:]
# log_prob = None if relaxed else sum(log_prob)
entropy = sum(entropy)
# normalize by the number of layers - 1.
# -1 because the last layer contains only one possible action and the entropy is zero anyway.
normalized_entropy = sum(normalized_entropy) / (torch.sum(mask[:, 2:], dim=-1) + 1e-17)
# sr_log_prob = None if relaxed else sum(sr_log_prob)
sr_entropy = sum(sr_entropy)
sr_normalized_entropy = sum(sr_normalized_entropy) / (torch.sum(mask[:, 1:], dim=-1) + 1e-17)
assert relaxed is False
tree_rl_infos = [entropy, normalized_entropy, actions, log_prob]
sr_rl_infos = [sr_entropy, sr_normalized_entropy, sr_actions, sr_log_prob]
reduce_span = [reduce_span[-1]]
spans_info = []
for span in all_span:
span_start = span[0]
span_end = span[1]
for fspan in reduce_span:
fspan_start = fspan[0]
fspan_end = fspan[1]
if fspan_start <= span_start and span_end <= fspan_end:
distance = (span_start - fspan_start) + (fspan_end - span_end)
span_info = [span, fspan, distance]
spans_info.append(span_info)
break
if debug_info is not None:
debug_info["merge_prob"] = debug_merge_probs
debug_info["reduce_prob"] = debug_reduce_probs
return tree_rl_infos, sr_rl_infos, swr_rl_info, tree_sr_log_prob, spans_info
def swr_abst_embed(self, hidden, cell, i, swr_actions_i):
if swr_actions_i[0, 0] == 1:
word_index = i
x_mask = torch.tensor([[1.]]).cuda()
src_var_id = x1_token
h_x, c_x = self._transform_leafs(self.embd_parser(torch.tensor([[src_var_id]]).cuda()), mask=x_mask)
h_p_new = torch.cat([hidden[:, :word_index], h_x, hidden[:, word_index + 1:]], dim=1)
c_p_new = torch.cat([cell[:, :word_index], c_x, cell[:, word_index + 1:]], dim=1)
else:
h_p_new, c_p_new = hidden, cell
return h_p_new, c_p_new
def abst_embed(self, hidden, cell, actions_i, sr_actions_i):
if sr_actions_i[0, 0] == 1:
actions_index = actions_i[0].argmax().item()
x_mask = torch.tensor([[1.]]).cuda()
src_var_id = x1_token
h_x, c_x = self._transform_leafs(self.embd_parser(torch.tensor([[src_var_id]]).cuda()), mask=x_mask)
h_p_new = torch.cat([hidden[:, :actions_index], h_x, hidden[:, actions_index + 1:]], dim=1)
c_p_new = torch.cat([cell[:, :actions_index], c_x, cell[:, actions_index + 1:]], dim=1)
else:
h_p_new, c_p_new = hidden, cell
return h_p_new, c_p_new
def _swr_make_step(self, hidden, i, relaxed, tau_weights, straight_through, gumbel_noise, ev_swr_actions):
        # ==== calculate the prob distribution over the reduce decision for this single word and sample it ====
word_index = i
h_word = hidden[:, word_index]
sr_score = self.sr_linear(h_word)
sr_mask = torch.ones_like(sr_score)
sr_cat_distr = Categorical(sr_score, sr_mask)
if ev_swr_actions is None:
sr_actions, gumbel_noise = self._sample_action(sr_cat_distr, sr_mask, relaxed, tau_weights,
straight_through,
gumbel_noise)
else:
sr_actions = ev_swr_actions
return sr_cat_distr, gumbel_noise, sr_actions
def _sr_make_step(self, hidden, actions_i, relaxed, tau_weights, straight_through, gumbel_noise, ev_sr_actions):
        # ==== calculate the prob distribution over the reduce decision for the merged span and sample it ====
actions_index = actions_i.argmax(dim=-1)[0]
h_act = hidden[:, actions_index]
sr_score = self.sr_linear(h_act)
sr_mask = torch.ones_like(sr_score)
sr_cat_distr = Categorical(sr_score, sr_mask)
if ev_sr_actions is None:
sr_actions, gumbel_noise = self._sample_action(sr_cat_distr, sr_mask, relaxed, tau_weights,
straight_through,
gumbel_noise)
else:
sr_actions = ev_sr_actions
return sr_cat_distr, gumbel_noise, sr_actions
def _make_step(self, hidden, cell, mask, relaxed, tau_weights, straight_through, gumbel_noise, ev_actions):
# ==== calculate the prob distribution over the merge actions and sample one ====
h_l, c_l = hidden[:, :-1], cell[:, :-1]
h_r, c_r = hidden[:, 1:], cell[:, 1:]
h_p, c_p = self.tree_lstm_cell(h_l, c_l, h_r, c_r)
if self.use_self_attention:
cand_size = h_p.shape[1]
query_vector = h_p.unsqueeze(dim=2).repeat(1, 1, cand_size, 1). \
view(-1, cand_size * cand_size, self.hidden_dim)
value_vector = h_p.unsqueeze(dim=1).repeat(1, cand_size, 1, 1). \
view(-1, cand_size * cand_size, self.hidden_dim)
attn_score = torch.tanh(self.bilinear_w(query_vector, value_vector))
attn_weights = F.softmax(attn_score.view(-1, cand_size, cand_size), dim=2).view(-1, cand_size * cand_size,
1)
value_vector_flatten = value_vector * attn_weights
attn_vector = value_vector_flatten.view(-1, cand_size, cand_size, self.hidden_dim).sum(dim=2)
q_mul_vector = torch.cat([h_p, attn_vector], dim=-1)
else:
q_mul_vector = h_p
score = torch.matmul(q_mul_vector, self.q) # (N x L x d, d) -> (N x L)
cat_distr = Categorical(score, mask)
if ev_actions is None:
actions, gumbel_noise = self._sample_action(cat_distr, mask, relaxed, tau_weights, straight_through,
gumbel_noise)
else:
actions = ev_actions
# ==== incorporate sampled action into the agent's representation of the environment state ====
h_p, c_p = BinaryTreeBasedModule._merge(actions, h_l, c_l, h_r, c_r, h_p, c_p, mask)
return cat_distr, gumbel_noise, actions, h_p, c_p
def _sample_action(self, cat_distr, mask, relaxed, tau_weights, straight_through, gumbel_noise):
if self.training:
if relaxed:
N = mask.sum(dim=-1, keepdim=True)
tau = tau_weights[0] + tau_weights[1].exp() * torch.log(N + 1) + tau_weights[2].exp() * N
actions, gumbel_noise = cat_distr.rsample(temperature=tau, gumbel_noise=gumbel_noise)
if straight_through:
actions_hard = torch.zeros_like(actions)
actions_hard.scatter_(-1, actions.argmax(dim=-1, keepdim=True), 1.0)
actions = (actions_hard - actions).detach() + actions
actions = clamp_grad(actions, -0.5, 0.5)
else:
actions, gumbel_noise = cat_distr.rsample(gumbel_noise=gumbel_noise)
else:
actions = torch.zeros_like(cat_distr.probs)
actions.scatter_(-1, torch.argmax(cat_distr.probs, dim=-1, keepdim=True), 1.0)
gumbel_noise = None
return actions, gumbel_noise
class BinaryTreeEncoder(BinaryTreeBasedModule):
def __init__(self, input_dim, hidden_dim, vocab_size, input_lang,
leaf_transformation=BinaryTreeBasedModule.no_transformation, trans_hidden_dim=None, dropout_prob=None):
super().__init__(input_dim, hidden_dim, leaf_transformation, trans_hidden_dim, dropout_prob)
self.input_lang = input_lang
self.embd_tree = nn.Embedding(vocab_size, input_dim)
def forward(self, input_token, parse_tree, mask, reduce_info, actions_scale):
input_embed = self.embd_tree(input_token)
hidden_reduce, cell_reduce = self._transform_leafs(input_embed, mask)
stop_idx = reduce_info['stop_idx']
reduce_idx = reduce_info['reduce_idx']
reduce_idx2reduce_x = reduce_info['reduce_idx2reduce_x']
mask_idx = 1
hidden_subtrees_dict = {}
for idx in range(hidden_reduce.shape[1]):
hidden_subtree = hidden_reduce[:, idx:idx + 1, :]
hidden_subtrees_dict[str([idx, idx])] = hidden_subtree
for i in range(stop_idx + 1):
if isinstance(parse_tree[i], int):
hidden, cell = hidden_reduce, cell_reduce
merge_pos = parse_tree[i]
if i in reduce_idx:
reduce_x = reduce_idx2reduce_x[i]
x_token = self.input_lang.word2index[reduce_x]
x_embed = self.embd_tree(torch.tensor([[x_token]]).cuda())
x_mask = torch.tensor([[1.]]).cuda()
hidden_x, cell_x = self._transform_leafs(x_embed, x_mask)
hidden_reduce = torch.cat([hidden[:, :merge_pos, :], hidden_x, hidden[:, merge_pos + 1:, :]], dim=1)
cell_reduce = torch.cat([cell[:, :merge_pos, :], cell_x, cell[:, merge_pos + 1:, :]], dim=1)
else:
hidden_reduce, cell_reduce = hidden, cell
elif parse_tree[i] is None:
hidden, cell = hidden_reduce, cell_reduce
merge_pos = parse_tree[i - 1]
else:
h_l, c_l = hidden_reduce[:, :-1], cell_reduce[:, :-1]
h_r, c_r = hidden_reduce[:, 1:], cell_reduce[:, 1:]
h_p, c_p = self.tree_lstm_cell(h_l, c_l, h_r, c_r)
hidden, cell = self._merge(parse_tree[i], h_l, c_l, h_r, c_r, h_p, c_p, mask[:, mask_idx:])
mask_idx += 1
merge_pos = (parse_tree[i][0] == 1).nonzero()[0, 0]
if i in reduce_idx:
reduce_x = reduce_idx2reduce_x[i]
x_token = self.input_lang.word2index[reduce_x]
x_embed = self.embd_tree(torch.tensor([[x_token]]).cuda())
x_mask = torch.tensor([[1.]]).cuda()
hidden_x, cell_x = self._transform_leafs(x_embed, x_mask)
hidden_reduce = torch.cat([hidden[:, :merge_pos, :], hidden_x, hidden[:, merge_pos + 1:, :]], dim=1)
cell_reduce = torch.cat([cell[:, :merge_pos, :], cell_x, cell[:, merge_pos + 1:, :]], dim=1)
else:
hidden_reduce, cell_reduce = hidden, cell
if i < stop_idx:
scale = actions_scale[i][0]
hidden_subtrees_dict[str(scale)] = hidden_reduce[:, merge_pos:merge_pos + 1, :]
if reduce_idx:
for reduce_i in reduce_idx:
start, end = actions_scale[reduce_i][0]
for scale in hidden_subtrees_dict.copy():
scale_start, scale_end = scale.lstrip('[').rstrip(']').split(',')
scale_start, scale_end = int(scale_start), int(scale_end)
if (scale_start > start and scale_end <= end) or (scale_start >= start and scale_end < end):
hidden_subtrees_dict.pop(scale)
scale_stop = actions_scale[stop_idx][0]
start, end = scale_stop
hidden_subtree_list = []
for scale in hidden_subtrees_dict:
scale_start, scale_end = scale.lstrip('[').rstrip(']').split(',')
scale_start, scale_end = int(scale_start), int(scale_end)
if scale_start >= start and scale_end <= end:
hidden_subtree_list.append(hidden_subtrees_dict[scale])
hidden_subtree = torch.cat(hidden_subtree_list, dim=1)
final_merge_hidden = hidden[:, merge_pos:merge_pos + 1, :]
final_merge_cell = cell[:, merge_pos:merge_pos + 1, :]
return final_merge_hidden.transpose(0, 1), final_merge_cell.transpose(0, 1), hidden_subtree.transpose(0, 1),
class EncoderRNN(nn.Module):
def __init__(self, input_size, embed_size, hidden_size, n_layers=1, dropout=0.0, bidirectional=False):
super(EncoderRNN, self).__init__()
self.bidirectional = bidirectional
self.input_size = input_size
self.hidden_size = hidden_size
self.embed_size = embed_size
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(input_size, embed_size)
self.lstm = nn.LSTM(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=bidirectional)
def forward(self, input_seqs, input_lengths):
        '''
        :param input_seqs:
            tensor of shape (num_step(T), batch_size(B)), sorted decreasingly by length (for packing)
        :param input_lengths:
            list of sequence lengths
        :returns:
            the last hidden state and cell state of the LSTM, each of shape (layers*directions, B, H),
            and the LSTM outputs of shape (T, B, hidden_size(H))
        '''
embedded = self.embedding(input_seqs)
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
# outputs, hidden = self.gru(packed, hidden)
outputs, (hidden, cell) = self.lstm(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded)
if self.bidirectional:
outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs
return hidden, cell, outputs
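
# Illustrative helper (added for clarity; not used by the training code): a minimal shape
# check for EncoderRNN. Sequences must be sorted by decreasing length so that
# pack_padded_sequence accepts them with its default enforce_sorted behaviour.
def _encoder_rnn_shape_example():
    enc = EncoderRNN(input_size=20, embed_size=8, hidden_size=16)
    seqs = torch.tensor([[1, 4], [2, 5], [3, 0]])   # (T=3, B=2); 0 is the PAD id
    hidden, cell, outputs = enc(seqs, [3, 2])       # lengths sorted decreasingly
    assert outputs.shape == (3, 2, 16)              # (T, B, hidden_size)
    return hidden, cell, outputs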
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, embed_size, output_size, n_layers=1, dropout_p=0.1):
super(DecoderRNN, self).__init__()
# Define parameters
self.hidden_size = hidden_size
self.embed_size = embed_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
# Define layers
self.embedding = nn.Embedding(output_size, embed_size)
self.dropout = nn.Dropout(dropout_p)
self.gru = nn.GRU(embed_size, hidden_size, n_layers)
# self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p)
# self.attn_combine = nn.Linear(hidden_size + embed_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, word_input, last_hidden):
        '''
        :param word_input:
            word input for the current time step, of shape (B)
        :param last_hidden:
            last hidden state of the decoder, of shape (layers*directions, B, H)
        :return
            decoder output of shape (B, output_size) and the new hidden state
        Note: we run this one step at a time, i.e. you should use an outer loop
            to process the whole sequence.
        Tip (update):
            EncoderRNN may be bidirectional or have multiple layers, so the shape of its hidden
            state can differ from that of DecoderRNN. You may have to make the dimensions match
            outside this function, e.g. by selecting the encoder hidden state of the forward/backward pass.
        '''
# Get the embedding of the current input word (last output word)
word_embedded = self.embedding(word_input).view(1, word_input.size(0), -1) # (1,B,V)
word_embedded = self.dropout(word_embedded)
rnn_input = word_embedded
output, hidden = self.gru(rnn_input, last_hidden)
output = output.squeeze(0) # (1,B,V)->(B,V)
# output = F.log_softmax(self.out(output), dim=-1)
output = self.out(output)
# Return final output, hidden state
return output, hidden
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1. / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
def forward(self, hidden, encoder_outputs, src_len=None):
        '''
        :param hidden:
            previous hidden state of the decoder (last layer), of shape (B, H)
        :param encoder_outputs:
            encoder outputs from the Encoder, of shape (T, B, H)
        :param src_len:
            used for masking. None, or a tensor of shape (B) giving the source sequence lengths
        :return
            normalized attention weights of shape (B, 1, T)
        '''
max_len = encoder_outputs.size(0)
H = hidden.repeat(max_len, 1, 1).transpose(0, 1)
encoder_outputs = encoder_outputs.transpose(0, 1) # [B*T*H]
attn_energies = self.score(H, encoder_outputs) # compute attention score
if src_len is not None:
mask = []
for b in range(src_len.size(0)):
mask.append([0] * src_len[b].item() + [1] * (encoder_outputs.size(1) - src_len[b].item()))
            mask = torch.tensor(mask, dtype=torch.bool).unsqueeze(1).cuda()  # [B,1,T]; boolean mask required by masked_fill
attn_energies = attn_energies.masked_fill(mask, -1e18)
        return F.softmax(attn_energies, dim=1).unsqueeze(1)  # normalize over the time dimension
def score(self, hidden, encoder_outputs):
        energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2)))  # [B,T,2H] -> [B,T,H]
energy = energy.transpose(2, 1) # [B*H*T]
v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) # [B*1*H]
energy = torch.bmm(v, energy) # [B*1*T]
return energy.squeeze(1) # [B*T]
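
# Illustrative helper (added for clarity; not used by the training code): with a decoder
# hidden state of shape (B, H) and encoder outputs of shape (T, B, H), Attn returns
# attention weights of shape (B, 1, T) that sum to 1 over the time dimension.
def _attn_shape_example():
    attn = Attn('concat', hidden_size=16)
    dec_hidden = torch.zeros(2, 16)       # (B, H)
    enc_outputs = torch.zeros(5, 2, 16)   # (T, B, H)
    weights = attn(dec_hidden, enc_outputs)
    assert weights.shape == (2, 1, 5)
    return weights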
class BahdanauAttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, embed_size, output_size, n_layers=1, dropout_p=0.1):
super(BahdanauAttnDecoderRNN, self).__init__()
# Define parameters
self.hidden_size = hidden_size
self.embed_size = embed_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
# Define layers
self.embedding = nn.Embedding(output_size, embed_size)
self.dropout = nn.Dropout(dropout_p)
self.attn = Attn('concat', hidden_size)
# self.gru = nn.GRU(hidden_size + embed_size, hidden_size, n_layers)
self.lstm = nn.LSTM(hidden_size + embed_size, hidden_size, n_layers)
# self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p)
# self.attn_combine = nn.Linear(hidden_size + embed_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, word_input, last_hidden, last_cell, encoder_outputs):
        '''
        :param word_input:
            word input for the current time step, of shape (B)
        :param last_hidden:
            last hidden state of the decoder, of shape (layers*directions, B, H)
        :param last_cell:
            last cell state of the decoder, same shape as last_hidden
        :param encoder_outputs:
            encoder outputs of shape (T, B, H)
        :return
            decoder output of shape (B, output_size), plus the new hidden and cell states
        Note: we run this one step at a time, i.e. you should use an outer loop
            to process the whole sequence.
        Tip (update):
            EncoderRNN may be bidirectional or have multiple layers, so the shape of its hidden
            state can differ from that of the decoder. You may have to make the dimensions match
            outside this function, e.g. by selecting the encoder hidden state of the forward/backward pass.
        '''
word_embedded = self.embedding(word_input).view(1, word_input.size(0), -1) # (1,B,V)
word_embedded = self.dropout(word_embedded)
attn_weights = self.attn(last_hidden[-1], encoder_outputs)
context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # (B,1,V)
context = context.transpose(0, 1) # (1,B,V)
rnn_input = torch.cat((word_embedded, context), 2)
output, (hidden, cell) = self.lstm(rnn_input, (last_hidden, last_cell))
output = output.squeeze(0) # (1,B,V)->(B,V)
output = self.out(output)
return output, hidden, cell
class EncoderDecoderSolver(nn.Module):
def __init__(self, word_dim, hidden_dim, vocab_size, label_dim, input_lang, output_lang,
encode_mode=None, x_ratio_rate=None):
super().__init__()
self.input_lang = input_lang
self.output_lang = output_lang
self.encode_mode = encode_mode
self.encoder = EncoderRNN(vocab_size, word_dim, hidden_dim)
self.decoder = BahdanauAttnDecoderRNN(hidden_dim, word_dim, label_dim)
self.x_ratio_rate = x_ratio_rate
def get_action_scale_infer(self, actions, all_reduce_idx):
actions_scale = []
all_scale = []
for idx in range(len(actions)):
action = actions[idx]
if isinstance(action, int):
action_pos = [action, action]
all_scale.append([action_pos, []])
action_scale = [all_scale[action][0], all_scale[action][1]]
actions_scale.append(action_scale)
if idx in all_reduce_idx:
all_scale[action] = [action_pos, [action_pos]]
continue
elif action is None:
assert len(actions) == 2
action_scale = [all_scale[0][0], all_scale[0][1]]
actions_scale.append(action_scale)
continue
action_index = action.argmax(dim=-1)[0]
scale_left_pos = all_scale[action_index][0][0]
scale_right_pos = all_scale[action_index + 1][0][1]
action_scale_pos = [scale_left_pos, scale_right_pos]
scale_left_x = all_scale[action_index][1]
scale_right_x = all_scale[action_index + 1][1]
action_scale_x = scale_left_x + scale_right_x
action_scale = [action_scale_pos, action_scale_x]
if idx not in all_reduce_idx:
actions_scale.append(action_scale)
elif idx in all_reduce_idx:
actions_scale.append(action_scale)
action_scale = [action_scale_pos, [action_scale_pos]]
all_scale = all_scale[:action_index] + [action_scale] + all_scale[action_index + 2:]
return actions_scale
def forward(self, pair, actions, sr_actions, swr_actions, input_batches, input_mask, epoch, debug_info=None):
rewards = []
reward_scale = []
reward_scale2idx = {}
all_reduce_idx = []
global_memory_output = {}
all_actions = [idx for idx in range(len(swr_actions))] + actions
all_sr_actions = swr_actions + sr_actions
actions_scale = self.get_action_scale_infer(all_actions, all_reduce_idx)
if len(pair[0].split()) == 1:
scale2idx = {'[0, 0]': 0}
else:
scale2idx = {}
for idx, scale in enumerate(actions_scale):
scale2idx[str(scale[0])] = idx
compose_infos = []
for idx in range(len(actions_scale)):
compose_info = {'step': idx,
'stop_idx': idx,
'reduce_idx': [],
'reduce_idx2reduce_x': {},
'reduce_x2reduce_idx': {}
}
compose_infos.append(compose_info)
normalized_entropy = []
log_probs = []
decoded_words = []
count_x_ratio = []
debug_decoder_results = []
debug_encoder_inputs = []
for idx in range(len(all_actions)):
compose_info = compose_infos[idx]
if self.encode_mode == 'seq':
input_sen = pair[0].split()
span_start_pos, span_end_pos = actions_scale[idx][0][0], actions_scale[idx][0][1]
x_spans = actions_scale[idx][1]
if len(x_spans) == 0:
input_var_constant_seq = input_sen[span_start_pos:span_end_pos + 1]
elif len(x_spans) == 1:
src_slot = x_spans[0]
src_slot_idx = scale2idx[str(src_slot)]
src_var = compose_info['reduce_idx2reduce_x'][src_slot_idx]
src_slot_start_pos, src_slot_end_pos = src_slot[0], src_slot[1]
input_var_constant_seq = input_sen[span_start_pos:src_slot_start_pos] + \
[src_var] + input_sen[src_slot_end_pos + 1:span_end_pos + 1]
elif len(x_spans) == 2:
src_slot_first = x_spans[0]
src_slot_second = x_spans[1]
assert src_slot_second[0] > src_slot_first[1]
src_slot_first_idx = scale2idx[str(src_slot_first)]
src_slot_second_idx = scale2idx[str(src_slot_second)]
src_var_first = compose_info['reduce_idx2reduce_x'][src_slot_first_idx]
src_var_second = compose_info['reduce_idx2reduce_x'][src_slot_second_idx]
src_slot_first_start_pos, src_slot_first_end_pos = src_slot_first[0], src_slot_first[1]
src_slot_second_start_pos, src_slot_second_end_pos = src_slot_second[0], src_slot_second[1]
input_var_constant_seq = input_sen[span_start_pos:src_slot_first_start_pos] + \
[src_var_first] + input_sen[
src_slot_first_end_pos + 1:src_slot_second_start_pos] + \
[src_var_second] + input_sen[src_slot_second_end_pos + 1:span_end_pos + 1]
input_var_constant_idx = [self.input_lang.word2index[word]
for word in input_var_constant_seq] + [EOS_token]
input_batches = torch.tensor(input_var_constant_idx).unsqueeze(1).cuda()
input_length = [len(input_batches)]
encoder_hidden, encoder_cell, hidden_subtree = self.encoder(input_batches, input_length)
else:
encoder_hidden, encoder_cell, hidden_subtree = self.encoder(input_batches, all_actions, input_mask,
compose_info,
actions_scale)
if all_sr_actions[idx][0, 0] == 1:
if self.training:
get_sup, possible_tokens = self.jud_sup(idx, all_actions, compose_info, global_memory_output, pair)
if random.random() < 0.8:
use_sup = False
else:
use_sup = True
else:
get_sup, possible_tokens = False, {}
use_sup = False
# get_sup = False
if get_sup is False or use_sup is False:
decoded_words, normalized_entropy_step, log_prob_step, decoded_words_ori = self.get_sub_output(
encoder_hidden, encoder_cell, hidden_subtree,
compose_info,
all_actions[idx], global_memory_output)
else:
decoded_words, normalized_entropy_step, log_prob_step, decoded_words_ori = self.get_sub_output_sup(
encoder_hidden, encoder_cell, hidden_subtree,
compose_info,
all_actions[idx], global_memory_output,
possible_tokens, pair)
normalized_entropy.append(normalized_entropy_step)
log_probs.append(log_prob_step)
all_reduce_idx.append(idx)
# memorize all intermediate outputs
global_memory_output[idx] = decoded_words
# add debug outputs
debug_decoder_results.append(" ".join([self.output_lang.index2word[idx]
for idx in decoded_words_ori]))
actions_scale = self.get_action_scale_infer(all_actions, all_reduce_idx)
reward = self.cal_reward(decoded_words, pair[1])
if idx >= len(swr_actions):
temp_count_x = 0
for src_var in available_src_vars:
if src_var in compose_info['reduce_x2reduce_idx']:
tgt_var_idx = available_src_vars.index(src_var) + x1_token
if tgt_var_idx in decoded_words_ori:
temp_count_x += decoded_words_ori.count(tgt_var_idx)
# sum of count
count_x_ratio.append(temp_count_x / len(decoded_words_ori))
rewards.append(reward)
reward_scale.append(actions_scale[idx])
reward_scale2idx[str(actions_scale[idx][0])] = len(rewards) - 1
if all_sr_actions[idx][0, 0] == 0 and idx == len(all_actions) - 1:
rewards.append(0.)
reward_scale.append(actions_scale[idx])
reward_scale2idx[str(actions_scale[idx][0])] = len(rewards) - 1
log_probs.append(torch.tensor([0.]).cuda())
if all_sr_actions[idx][0, 0] == 1:
# add debug inputs
debug_reduce_info = compose_infos[idx]
# fetch out the anonymize span
debug_action_scale = actions_scale[idx]
tokens = pair[0].split(" ")
if len(debug_reduce_info['reduce_idx']) == 0:
temp_span = debug_action_scale[0]
debug_encoder_inputs.append(" ".join(tokens[temp_span[0]: temp_span[1] + 1]))
else:
reduce_steps = debug_reduce_info['reduce_idx']
anno_tokens = list(tokens)
for step in reduce_steps:
temp_span = actions_scale[step][0]
anno_tokens[temp_span[0]] = debug_reduce_info['reduce_idx2reduce_x'][step]
for i in range(temp_span[0] + 1, temp_span[1] + 1):
anno_tokens[i] = ""
anno_tokens = anno_tokens[actions_scale[idx][0][0]: actions_scale[idx][0][1] + 1]
debug_encoder_inputs.append(" ".join([token for token in anno_tokens
if token != '']))
else:
debug_decoder_results.append("NONE")
debug_encoder_inputs.append("NONE")
compose_infos = []
for idy in range(len(actions_scale)):
compose_info = {'step': idy,
'stop_idx': idy,
'reduce_idx': [],
'reduce_idx2reduce_x': {},
'reduce_x2reduce_idx': {}
}
action_scale = actions_scale[idy]
if action_scale[1] != []:
# x_index = 1
x_index = 0
src_var_list = available_src_vars[:2]
if self.training:
random.shuffle(src_var_list)
for scale in action_scale[1]:
scale_idx = scale2idx[str(scale)]
compose_info['reduce_idx'].append(scale_idx)
# x_str = 'x' + str(x_index)
if x_index > 1:
# print("x num > 1")
x_index = 0
x_str = src_var_list[x_index]
compose_info['reduce_idx2reduce_x'][scale_idx] = x_str
compose_info['reduce_x2reduce_idx'][x_str] = scale_idx
x_index += 1
compose_infos.append(compose_info)
if decoded_words and all_sr_actions[-1][0, 0] == 1:
final_output = [word for word in flatten(decoded_words)]
final_output_words = [self.output_lang.index2word[token] for token in final_output]
pred_labels = " ".join(final_output_words)
else:
pred_labels = ""
if normalized_entropy:
normalized_entropy = sum(normalized_entropy) / len(normalized_entropy)
else:
normalized_entropy = 0.
avg_full_var_ratio = statistics.mean(
[1.0 if count_x_ratio[i] >= 0.99 else 0.0 for i in range(len(count_x_ratio))]) \
if len(count_x_ratio) > 0 else 0.0
if pred_labels == pair[1]:
rewards[-1] = rewards[-1] + avg_full_var_ratio * self.x_ratio_rate
rewards = self.iter_rewards(rewards, reward_scale, reward_scale2idx)
span2reward = {}
for reward, scale in zip(rewards, reward_scale):
span2reward[str(scale[0])] = reward
if debug_info is not None:
debug_info["decoder_outputs"] = debug_decoder_results
debug_info["decoder_inputs"] = debug_encoder_inputs
return pred_labels, normalized_entropy, log_probs, rewards, span2reward
def jud_sub_right(self, idx, all_actions, reduce_info, all_sub_output, pair):
sub_right = False
if idx == len(all_actions) - 1:
if reduce_info['reduce_idx2reduce_x']:
x_output_pair = []
for reduce_idx in reduce_info['reduce_idx2reduce_x']:
sub_output = all_sub_output[reduce_idx]
if sub_output:
sub_output_flat = [word for word in flatten(sub_output)]
final_output_words = [self.output_lang.index2word[token] for token in sub_output_flat]
sub_pred_labels = " ".join(final_output_words)
else:
sub_pred_labels = ""
x_output_pair.append([reduce_info['reduce_idx2reduce_x'][reduce_idx], sub_pred_labels])
if len(x_output_pair) == 1:
if x_output_pair[0][1] != '':
if x_output_pair[0][1] in pair[1]:
sub_right = True
else:
assert len(x_output_pair) == 2
if x_output_pair[0][1] != '' and x_output_pair[1][1] != '':
if x_output_pair[0][1] in pair[1] and x_output_pair[1][1] in pair[1]:
sub_right = True
return sub_right
def jud_sup(self, idx, all_actions, reduce_info, all_sub_output, pair):
get_sup = False
possible_tokens = {len(pair[1].split()): [EOS_token]}
output_tokens = [self.output_lang.word2index[word] for word in pair[1].split()]
        for pos, token in enumerate(output_tokens):  # separate loop variable so the `idx` argument is not clobbered
            possible_tokens[pos] = [token]
if idx == len(all_actions) - 1:
if reduce_info['reduce_idx2reduce_x']:
x_output_pair = []
for reduce_idx in reduce_info['reduce_idx2reduce_x']:
sub_output = all_sub_output[reduce_idx]
sub_output_flat = [word for word in flatten(sub_output)]
final_output_words = [self.output_lang.index2word[token] for token in sub_output_flat]
sub_pred_labels = " ".join(final_output_words)
x_output_pair.append([reduce_info['reduce_idx2reduce_x'][reduce_idx], sub_pred_labels])
if len(x_output_pair) == 1:
if x_output_pair[0][1] in pair[1]:
get_sup = True
output_with_x = pair[1]
sub_labels = x_output_pair[0][1]
x_used = x_output_pair[0][0]
output_with_x = output_with_x.replace(sub_labels, x_used)
plus_idy = 0
plus_length = len(sub_labels.split()) - 1
possible_tokens[x_used] = plus_length
for idy, word in enumerate(output_with_x.split()):
if word == x_used:
possible_tokens[plus_idy + idy].append(self.output_lang.word2index[x_used])
plus_idy = plus_idy + plus_length
else:
assert len(x_output_pair) == 2
if x_output_pair[0][1] in pair[1] and x_output_pair[1][1] in pair[1]:
get_sup = True
output_with_x = pair[1]
sub_labels = x_output_pair[0][1]
x_used = x_output_pair[0][0]
output_with_x = output_with_x.replace(sub_labels, x_used)
plus_idy = 0
plus_length = len(sub_labels.split()) - 1
possible_tokens[x_used] = plus_length
for idy, word in enumerate(output_with_x.split()):
if word == x_used:
possible_tokens[plus_idy + idy].append(self.output_lang.word2index[x_used])
plus_idy = plus_idy + plus_length
output_with_x = pair[1]
sub_labels = x_output_pair[1][1]
x_used = x_output_pair[1][0]
output_with_x = output_with_x.replace(sub_labels, x_used)
plus_idy = 0
plus_length = len(sub_labels.split()) - 1
possible_tokens[x_used] = plus_length
for idy, word in enumerate(output_with_x.split()):
if word == x_used:
possible_tokens[plus_idy + idy].append(self.output_lang.word2index[x_used])
plus_idy = plus_idy + plus_length
return get_sup, possible_tokens
def get_local_refer(self, sr_scale_sent, final_sent):
if sr_scale_sent in self.composes_instrs:
return self.composes_outputs[sr_scale_sent]
else:
return final_sent
    def same_rewards(self, rewards):
        # broadcast the final (root) reward to every step
        rewards_all_same = [rewards[-1] for _ in rewards]
        return rewards_all_same
def iter_rewards(self, rewards, reward_scale, reward_scale2idx):
rewards_num = len(rewards)
for idx in reversed(range(rewards_num)):
reward_global = rewards[idx]
scale = reward_scale[idx]
scales_affect = scale[1]
if not scales_affect:
continue
else:
for scale_affect in scales_affect:
scale_affect_idx = reward_scale2idx[str(scale_affect)]
rewards[scale_affect_idx] = reward_global
return rewards
def cal_reward(self, decoded_words, target_sent):
if decoded_words:
final_output = [word for word in flatten(decoded_words)]
final_output_words = [self.output_lang.index2word[token] for token in final_output]
pred_labels = " ".join(final_output_words)
else:
pred_labels = ""
common_labels = self.get_longest_common_substring(pred_labels, target_sent)
common_labels_length = len(common_labels)
pred_labels_length = len(pred_labels.split())
target_labels_length = len(target_sent.split())
iou_similar = common_labels_length / (pred_labels_length + target_labels_length - common_labels_length)
reward = iou_similar
return reward
def get_sub_output_sup(self, encoder_hidden, encoder_cell, hidden_subtree, reduce_info, action, all_sub_output,
possible_tokens,
pair):
x_tokens = reduce_info['reduce_x2reduce_idx']
################################################################################
if isinstance(action, int):
possible_output_tokens_first = [self.output_lang.word2index[x] for x in all_action_words]
possible_output_tokens = possible_output_tokens_first + [EOS_token]
else:
# possible_output_tokens = [EOS_token]
possible_output_tokens_first = [self.output_lang.word2index[x] for x in all_action_words]
for x in available_src_vars:
if x in x_tokens:
possible_output_tokens_first.append(self.output_lang.word2index[x])
else:
continue
possible_output_tokens = possible_output_tokens_first + [EOS_token]
mask_first = [1. if token in possible_output_tokens_first else 0. for token in range(self.output_lang.n_words)]
decode_mask_first = torch.tensor([mask_first]).cuda()
mask = [1. if token in possible_output_tokens else 0. for token in range(self.output_lang.n_words)]
decode_mask = torch.tensor([mask]).cuda()
decoder_input = torch.LongTensor([SOS_token])
decoder_hidden = encoder_hidden
decoder_cell = encoder_cell
if USE_CUDA:
decoder_input = decoder_input.cuda()
decoded_words = []
decoded_words_ori = []
normalized_entropy = []
log_prob = []
di = 0
while True:
assert di <= len(pair[1].split())
decoder_output, decoder_hidden, decoder_cell = self.decoder(
decoder_input, decoder_hidden, decoder_cell, hidden_subtree
)
decode_score = decoder_output
# if reduce_info['step'] == 4:
# pdb.set_trace()
if di == 0:
cat_distr = Categorical(decode_score, decode_mask_first)
else:
cat_distr = Categorical(decode_score, decode_mask)
mask_di = [1. if token in possible_tokens[di] else 0. for token in range(self.output_lang.n_words)]
decode_mask_di = torch.tensor([mask_di]).cuda()
cat_distr_di = Categorical(decode_score, decode_mask_di)
decode_actions, gumbel_noise = self._sample_action(cat_distr_di, False, None)
normalized_entropy.append(cat_distr.normalized_entropy)
log_prob.append(-cat_distr.log_prob(decode_actions))
topv, topi = decode_actions.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
if di == 0:
pdb.set_trace()
# decoded_words.append('<EOS>')
break
else:
ni_word = self.output_lang.index2word[ni.item()]
if ni_word in x_tokens:
x_reduce_idx = reduce_info['reduce_x2reduce_idx'][ni_word]
x_sub_output = all_sub_output[x_reduce_idx]
decoded_words.append(x_sub_output)
di = di + possible_tokens[ni_word]
else:
decoded_words.append(ni.item())
decoded_words_ori.append(ni.item())
decoder_input = torch.LongTensor([ni]).cuda()
di += 1
normalized_entropy = sum(normalized_entropy) / len(normalized_entropy)
log_prob = sum(log_prob)
return decoded_words, normalized_entropy, log_prob, decoded_words_ori
def get_sub_output_length_sup(self, encoder_hidden, hidden_subtree, reduce_info, action, all_sub_output, pair):
x_tokens = reduce_info['reduce_x2reduce_idx']
min_length = len(pair[1].split())
# final_output_tokens = [self.output_lang.word2index[word] for word in final_output_sent.split()]
################################################################################
if isinstance(action, int):
possible_output_tokens = [self.output_lang.word2index[x] for x in all_action_words] + [EOS_token]
max_length = MAX_LENGTH
else:
possible_output_tokens = [EOS_token]
# possible_output_tokens = [self.output_lang.word2index[x] for x in all_output_prims] + [EOS_token]
for x in available_src_vars:
if x in x_tokens:
possible_output_tokens.append(self.output_lang.word2index[x])
else:
continue
max_length = MAX_LENGTH
mask = [1. if token in possible_output_tokens else 0. for token in range(self.output_lang.n_words)]
decode_mask = torch.tensor([mask]).cuda()
while True:
if x_tokens:
expected_tokens = [token for token in x_tokens]
else:
expected_tokens = []
decoder_input = Variable(torch.LongTensor([SOS_token]))
decoder_hidden = encoder_hidden
if USE_CUDA:
decoder_input = decoder_input.cuda()
decoded_words = []
decoded_words_ori = []
normalized_entropy = []
log_prob = []
total_length = 0
for di in range(max_length):
decoder_output, decoder_hidden = self.decoder(
decoder_input, decoder_hidden, hidden_subtree
)
decode_score = decoder_output
# if reduce_info['step'] == 4:
# pdb.set_trace()
cat_distr = Categorical(decode_score, decode_mask)
decode_actions, gumbel_noise = self._sample_action(cat_distr, False, None)
if total_length < min_length:
pdb.set_trace()
normalized_entropy.append(cat_distr.normalized_entropy)
log_prob.append(-cat_distr.log_prob(decode_actions))
topv, topi = decode_actions.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
# decoded_words.append('<EOS>')
break
else:
ni_word = self.output_lang.index2word[ni.item()]
if ni_word in x_tokens:
x_reduce_idx = reduce_info['reduce_x2reduce_idx'][ni_word]
x_sub_output = all_sub_output[x_reduce_idx]
decoded_words.append(x_sub_output)
length_step = len([word for word in flatten(x_sub_output)])
if ni_word in expected_tokens:
expected_tokens.remove(ni_word)
else:
decoded_words.append(ni.item())
length_step = 1
decoded_words_ori.append(ni.item())
total_length = total_length + length_step
decoder_input = torch.LongTensor([ni]).cuda()
break
normalized_entropy = sum(normalized_entropy) / len(normalized_entropy)
# normalized_entropy = sum(normalized_entropy)
log_prob = sum(log_prob)
return decoded_words, normalized_entropy, log_prob, decoded_words_ori
def get_sub_output(self, encoder_hidden, encoder_cell, hidden_subtree, reduce_info, action, all_sub_output):
used_src_vars = reduce_info['reduce_x2reduce_idx']
if isinstance(action, int):
possible_output_tokens_first = [self.output_lang.word2index[x] for x in all_action_words]
max_length = MAX_LENGTH
possible_output_tokens = possible_output_tokens_first + [EOS_token]
else:
# possible_output_tokens = [EOS_token]
possible_output_tokens_first = [self.output_lang.word2index[x] for x in all_action_words]
for src_var in available_src_vars:
if src_var in used_src_vars:
tgt_var = self.output_lang.word2index[src_var]
possible_output_tokens_first.append(tgt_var)
else:
continue
max_length = MAX_LENGTH
possible_output_tokens = possible_output_tokens_first + [EOS_token]
mask_first = [1. if token in possible_output_tokens_first else 0. for token in range(self.output_lang.n_words)]
decode_mask_first = torch.tensor([mask_first]).cuda()
mask = [1. if token in possible_output_tokens else 0. for token in range(self.output_lang.n_words)]
decode_mask = torch.tensor([mask]).cuda()
while True:
if used_src_vars:
expected_tokens = [token for token in used_src_vars]
else:
expected_tokens = []
decoder_input = Variable(torch.LongTensor([SOS_token]))
decoder_hidden = encoder_hidden
decoder_cell = encoder_cell
if USE_CUDA:
decoder_input = decoder_input.cuda()
decoded_words = []
decoded_words_ori = []
normalized_entropy = []
log_prob = []
for di in range(max_length):
decoder_output, decoder_hidden, decoder_cell = self.decoder(
decoder_input, decoder_hidden, decoder_cell, hidden_subtree
)
decode_score = decoder_output
if di == 0:
cat_distr = Categorical(decode_score, decode_mask_first)
else:
cat_distr = Categorical(decode_score, decode_mask)
decode_actions, gumbel_noise = self._sample_action(cat_distr, False, None)
normalized_entropy.append(cat_distr.normalized_entropy)
log_prob.append(-cat_distr.log_prob(decode_actions))
topv, topi = decode_actions.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
break
else:
ni_word = self.output_lang.index2word[ni.item()]
if ni_word in used_src_vars:
x_reduce_idx = reduce_info['reduce_x2reduce_idx'][ni_word]
x_sub_output = all_sub_output[x_reduce_idx]
decoded_words.append(x_sub_output)
if ni_word in expected_tokens:
expected_tokens.remove(ni_word)
else:
decoded_words.append(ni.item())
decoded_words_ori.append(ni.item())
decoder_input = Variable(torch.LongTensor([ni])).cuda()
break
normalized_entropy = sum(normalized_entropy) / len(normalized_entropy)
log_prob = sum(log_prob)
return decoded_words, normalized_entropy, log_prob, decoded_words_ori
def _sample_action(self, cat_distr, relaxed, gumbel_noise):
if self.training:
assert relaxed is False
actions, gumbel_noise = cat_distr.rsample(gumbel_noise=gumbel_noise)
else:
actions = torch.zeros_like(cat_distr.probs)
actions.scatter_(-1, torch.argmax(cat_distr.probs, dim=-1, keepdim=True), 1.0)
gumbel_noise = None
return actions, gumbel_noise
def get_longest_common_substring(self, pred_sent, target_sent):
output_ori2syb = {"i_look": "a",
"i_jump": "b",
"i_walk": "c",
"i_run": "d",
"i_turn_right": "e",
"i_turn_left": "f",
" ": ""}
for ori in output_ori2syb:
pred_sent = pred_sent.replace(ori, output_ori2syb[ori])
target_sent = target_sent.replace(ori, output_ori2syb[ori])
lstr1 = len(pred_sent)
lstr2 = len(target_sent)
record = [[0 for i in range(lstr2 + 1)] for j in range(lstr1 + 1)]
maxNum = 0
p = 0
for i in range(lstr1):
for j in range(lstr2):
if pred_sent[i] == target_sent[j]:
record[i + 1][j + 1] = record[i][j] + 1
if record[i + 1][j + 1] > maxNum:
maxNum = record[i + 1][j + 1]
p = i + 1
return pred_sent[p - maxNum:p]
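# Worked example (added for clarity): with pred_sent "i_walk i_jump" and target_sent
# "i_walk i_run", get_longest_common_substring maps both sentences to the symbol strings
# "cb" and "cd"; their longest common substring has length 1, so cal_reward returns
# 1 / (2 + 2 - 1) = 1/3.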
class HRLModel(nn.Module):
def __init__(self, vocab_size, word_dim, hidden_dim, label_dim,
decay_r, encode_mode, x_ratio_rate,
composer_leaf=BinaryTreeBasedModule.no_transformation, composer_trans_hidden=None,
input_lang=None, output_lang=None):
super().__init__()
self.input_lang = input_lang
self.output_lang = output_lang
self.label_dim = label_dim
self.composer = BottomUpTreeComposer(word_dim, hidden_dim, vocab_size, composer_leaf,
composer_trans_hidden)
self.solver = EncoderDecoderSolver(word_dim, hidden_dim, vocab_size, label_dim, input_lang,
output_lang,
encode_mode=encode_mode,
x_ratio_rate=x_ratio_rate)
self.criterion = nn.CrossEntropyLoss(reduction='none')
# self.reset_parameters()
self.is_test = False
self.decay_r = decay_r
def get_policy_parameters(self):
return list(chain(self.composer.parameters()))
def get_environment_parameters(self):
return list(chain(self.solver.parameters()))
def forward(self, pair, x, mask, is_test=False, epoch=None, debug_info=None):
self.is_test = is_test
normalized_entropy, tree_actions, sr_actions, swr_actions, pred_labels, tree_sr_log_prob, tree_sr_rewards, decoder_log_probs, decode_rewards = self._forward(
pair, x, mask, epoch, debug_info=debug_info)
return pred_labels, tree_sr_log_prob, tree_sr_rewards, decoder_log_probs, decode_rewards, \
tree_actions, sr_actions, swr_actions, normalized_entropy
def _forward(self, pair, x, mask, epoch, debug_info):
tree_rl_infos, sr_rl_infos, swr_rl_info, tree_sr_log_prob, spans_info = self.composer(pair, x, mask,
debug_info=debug_info)
tree_entropy, tree_normalized_entropy, tree_actions, tree_log_prob = tree_rl_infos
sr_entropy, sr_normalized_entropy, sr_actions, sr_log_prob = sr_rl_infos
swr_entropy, swr_normalized_entropy, swr_actions, swr_log_prob = swr_rl_info
# actions = self.get_left_actions(x)
pred_labels, decoder_normalized_entropy, decoder_log_probs, decode_rewards, span2reward = self.solver(
pair, tree_actions,
sr_actions, swr_actions, x, mask,
epoch, debug_info=debug_info)
assert len(decoder_log_probs) == len(decode_rewards) == len(span2reward)
tree_sr_rewards = []
decode_from_root_rewards = []
swr_sr_actions = swr_actions + sr_actions
decay_r = self.decay_r
for idx, span_info in enumerate(spans_info):
fspan = span_info[1]
freward = span2reward[str(fspan)]
fr = span_info[2]
tree_sr_reward = freward * (decay_r ** fr)
tree_sr_rewards.append(tree_sr_reward)
if swr_sr_actions[idx][0, 0] == 1 or idx == len(swr_sr_actions) - 1:
decode_from_root_rewards.append(tree_sr_reward)
assert len(decode_rewards) == len(decode_from_root_rewards)
assert len(tree_sr_rewards) == len(tree_sr_log_prob)
normalized_entropy = tree_normalized_entropy + sr_normalized_entropy + swr_normalized_entropy + decoder_normalized_entropy
return normalized_entropy, tree_actions, sr_actions, swr_actions, pred_labels, tree_sr_log_prob, tree_sr_rewards, decoder_log_probs, decode_from_root_rewards
|
ContextualSP/compositional_generalization/model.py/0
|
{
"file_path": "ContextualSP/compositional_generalization/model.py",
"repo_id": "ContextualSP",
"token_count": 33973
}
| 256 |
#!/usr/bin/env bash
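# Illustrative usage note (added): run this script from the src/ directory of the
# incomplete_utterance_rewriting module so that the relative ../configs and ../dataset
# paths below resolve, e.g.:  bash train_rewrite.sh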
export model_file=../checkpoints/run_rewrite
export config_file=../configs/rewrite.jsonnet
export train_data_path=../dataset/Rewrite/train.txt
export validation_data_path=../dataset/Rewrite/dev.txt
export seed=2
allennlp train -s ${model_file} ${config_file} \
--include-package data_reader \
--include-package model \
-o "{\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\", \"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\"}"
|
ContextualSP/incomplete_utterance_rewriting/src/train_rewrite.sh/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/train_rewrite.sh",
"repo_id": "ContextualSP",
"token_count": 186
}
| 257 |
import re
from collections import defaultdict
from typing import Dict, Tuple, List, Set
from allennlp.data import Tokenizer, Token
from ordered_set import OrderedSet
from unidecode import unidecode
from .utils import TableColumn, read_dataset_schema, read_dataset_values
from allennlp.semparse.contexts.knowledge_graph import KnowledgeGraph
# == stop words that will be omitted by ContextGenerator
STOP_WORDS = {"", "", "all", "being", "-", "over", "through", "yourselves", "its", "before",
"hadn", "with", "had", ",", "should", "to", "only", "under", "ours", "has", "ought", "do",
"them", "his", "than", "very", "cannot", "they", "not", "during", "yourself", "him",
"nor", "did", "didn", "'ve", "this", "she", "each", "where", "because", "doing", "some", "we", "are",
"further", "ourselves", "out", "what", "for", "weren", "does", "above", "between", "mustn", "?",
"be", "hasn", "who", "were", "here", "shouldn", "let", "hers", "by", "both", "about", "couldn",
"of", "could", "against", "isn", "or", "own", "into", "while", "whom", "down", "wasn", "your",
"from", "her", "their", "aren", "there", "been", ".", "few", "too", "wouldn", "themselves",
":", "was", "until", "more", "himself", "on", "but", "don", "herself", "haven", "those", "he",
"me", "myself", "these", "up", ";", "below", "'re", "can", "theirs", "my", "and", "would", "then",
"is", "am", "it", "doesn", "an", "as", "itself", "at", "have", "in", "any", "if", "!",
"again", "'ll", "no", "that", "when", "same", "how", "other", "which", "you", "many", "shan",
"'t", "'s", "our", "after", "most", "'d", "such", "'m", "why", "a", "off", "i", "yours", "so",
"the", "having", "once"}
class SparcDBContext:
db_schemas = {}
db_schemas_id_col = {}
db_schemas_id_tab = {}
db_tables_data = {}
def __init__(self, db_id: str, tokenizer: Tokenizer, tables_file: str, database_path: str, utterance: List[Token]):
self.database_path = database_path
self.tables_file = tables_file
self.db_id = db_id
self.tokenized_utterance = utterance
if db_id not in SparcDBContext.db_schemas:
SparcDBContext.db_schemas, SparcDBContext.db_schemas_id_col, SparcDBContext.db_schemas_id_tab \
= read_dataset_schema(self.tables_file)
self.schema = SparcDBContext.db_schemas[db_id]
# get id to column/table
self.id_to_col = SparcDBContext.db_schemas_id_col[db_id]
self.id_to_tab = SparcDBContext.db_schemas_id_tab[db_id]
self.knowledge_graph = self.get_db_knowledge_graph(db_id)
entity_texts = [self.knowledge_graph.entity_text[entity].lower()
for entity in self.knowledge_graph.entities]
entity_tokens = tokenizer.batch_tokenize(entity_texts)
self.entity_tokens = entity_tokens
@staticmethod
def entity_key_for_column(table_name: str, column: TableColumn) -> str:
if column.foreign_key is not None:
column_type = "foreign"
elif column.is_primary_key:
column_type = "primary"
else:
column_type = column.column_type
return f"column:{column_type.lower()}:{table_name.lower()}:{column.name.lower()}"
def get_db_knowledge_graph(self, db_id: str) -> KnowledgeGraph:
entities: Set[str] = set()
neighbors: Dict[str, OrderedSet[str]] = defaultdict(OrderedSet)
entity_text: Dict[str, str] = {}
foreign_keys_to_column: Dict[str, str] = {}
db_schema = self.schema
tables = db_schema.values()
if db_id not in self.db_tables_data:
self.db_tables_data[db_id] = read_dataset_values(db_id, self.database_path, tables)
tables_data = self.db_tables_data[db_id]
string_column_mapping: Dict[str, set] = defaultdict(set)
for table, table_data in tables_data.items():
for table_row in table_data:
# TODO: special case for column *
if db_schema[table.name].columns[0].name == '*':
columns = db_schema[table.name].columns[1:]
else:
columns = db_schema[table.name].columns
assert len(columns) == len(table_row)
                for column, cell_value in zip(columns, table_row):  # use the filtered columns so cells stay aligned
if column.column_type == 'text' and type(cell_value) is str:
cell_value_normalized = self.normalize_string(cell_value)
column_key = self.entity_key_for_column(table.name, column)
string_column_mapping[cell_value_normalized].add(column_key)
for table in tables:
table_key = f"table:{table.name.lower()}"
entities.add(table_key)
entity_text[table_key] = table.text
for column in db_schema[table.name].columns:
entity_key = self.entity_key_for_column(table.name, column)
entities.add(entity_key)
neighbors[entity_key].add(table_key)
neighbors[table_key].add(entity_key)
entity_text[entity_key] = column.text
# dynamic entities of values in question
        # TODO: we should disable the string-match entities during training,
        # because they cause an inconsistency between train and test.
# value_entities = self.get_values_from_question(string_column_mapping)
#
# for value_repr, column_keys in value_entities:
# entities.add(value_repr)
# for column_key in column_keys:
# neighbors[value_repr].add(column_key)
# neighbors[column_key].add(value_repr)
# entity_text[value_repr] = value_repr.replace("string:", "").replace("_", " ")
# loop again after we have gone through all columns to link foreign keys columns
for table_name in db_schema.keys():
for column in db_schema[table_name].columns:
if column.foreign_key is None:
continue
for foreign_key in column.foreign_key:
other_column_table, other_column_name = foreign_key.split(':')
# must have exactly one by design
other_column = [col for col in db_schema[other_column_table].columns
if col.name == other_column_name][0]
entity_key = self.entity_key_for_column(table_name, column)
other_entity_key = self.entity_key_for_column(other_column_table, other_column)
neighbors[entity_key].add(other_entity_key)
neighbors[other_entity_key].add(entity_key)
foreign_keys_to_column[entity_key] = other_entity_key
kg = KnowledgeGraph(entities, dict(neighbors), entity_text)
kg.foreign_keys_to_column = foreign_keys_to_column
return kg
@staticmethod
def _string_in_table(candidate: str,
string_column_mapping: Dict[str, set]) -> List[str]:
"""
Checks if the string occurs in the table, and if it does, returns the names of the columns
under which it occurs. If it does not, returns an empty list.
"""
candidate_column_names: List[str] = []
# First check if the entire candidate occurs as a cell.
if candidate in string_column_mapping:
            candidate_column_names = list(string_column_mapping[candidate])
        # If not, check if it is a substring of any cell value.
if not candidate_column_names:
for cell_value, column_names in string_column_mapping.items():
if candidate in cell_value:
candidate_column_names.extend(column_names)
candidate_column_names = list(set(candidate_column_names))
return candidate_column_names
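    # Illustrative example (added): with string_column_mapping == {"paris": {"column:text:city:name"}},
    # _string_in_table("paris", ...) returns ["column:text:city:name"], and a partial token such as
    # "par" matches the same column through the substring check above.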
def get_values_from_question(self,
string_column_mapping: Dict[str, set]) -> List[Tuple[str, str]]:
entity_data = []
for i, token in enumerate(self.tokenized_utterance):
token_text = token.text
if token_text in STOP_WORDS:
continue
normalized_token_text = self.normalize_string(token_text)
if not normalized_token_text:
continue
token_columns = self._string_in_table(normalized_token_text, string_column_mapping)
if token_columns:
token_type = token_columns[0].split(":")[1]
entity_data.append({'value': normalized_token_text,
'token_start': i,
'token_end': i + 1,
'token_type': token_type,
'token_in_columns': token_columns})
# extracted_numbers = self._get_numbers_from_tokens(self.question_tokens)
# filter out number entities to avoid repetition
expanded_entities = []
for entity in self._expand_entities(self.tokenized_utterance, entity_data, string_column_mapping):
if entity["token_type"] == "text":
expanded_entities.append((f"string:{entity['value']}", entity['token_in_columns']))
# return expanded_entities, extracted_numbers #TODO(shikhar) Handle conjunctions
return expanded_entities
@staticmethod
def normalize_string(string: str) -> str:
"""
These are the transformation rules used to normalize cell in column names in Sempre. See
``edu.stanford.nlp.sempre.tables.StringNormalizationUtils.characterNormalize`` and
``edu.stanford.nlp.sempre.tables.TableTypeSystem.canonicalizeName``. We reproduce those
rules here to normalize and canonicalize cells and columns in the same way so that we can
match them against constants in logical forms appropriately.
"""
# Normalization rules from Sempre
# \u201A -> ,
string = re.sub("‚", ",", string)
string = re.sub("„", ",,", string)
string = re.sub("[·・]", ".", string)
string = re.sub("…", "...", string)
string = re.sub("ˆ", "^", string)
string = re.sub("˜", "~", string)
string = re.sub("‹", "<", string)
string = re.sub("›", ">", string)
string = re.sub("[‘’´`]", "'", string)
string = re.sub("[“”«»]", "\"", string)
string = re.sub("[•†‡²³]", "", string)
string = re.sub("[‐‑–—−]", "-", string)
# Oddly, some unicode characters get converted to _ instead of being stripped. Not really
# sure how sempre decides what to do with these... TODO(mattg): can we just get rid of the
# need for this function somehow? It's causing a whole lot of headaches.
string = re.sub("[ðø′″€⁄ªΣ]", "_", string)
# This is such a mess. There isn't just a block of unicode that we can strip out, because
# sometimes sempre just strips diacritics... We'll try stripping out a few separate
# blocks, skipping the ones that sempre skips...
string = re.sub("[\\u0180-\\u0210]", "", string).strip()
string = re.sub("[\\u0220-\\uFFFF]", "", string).strip()
string = string.replace("\\n", "_")
string = re.sub("\\s+", " ", string)
# canonicalization rules from sempre.
string = re.sub("[^\\w]", "_", string)
string = re.sub("_+", "_", string)
string = re.sub("_$", "", string)
return unidecode(string.lower())
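    # Illustrative example (added): normalize_string("João's Café & Bar") -> "joao_s_cafe_bar";
    # punctuation and whitespace collapse to single underscores and accents are transliterated
    # by unidecode at the end.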
def _expand_entities(self, question, entity_data, string_column_mapping: Dict[str, set]):
new_entities = []
for entity in entity_data:
# to ensure the same strings are not used over and over
if new_entities and entity['token_end'] <= new_entities[-1]['token_end']:
continue
current_start = entity['token_start']
current_end = entity['token_end']
current_token = entity['value']
current_token_type = entity['token_type']
current_token_columns = entity['token_in_columns']
while current_end < len(question):
next_token = question[current_end].text
next_token_normalized = self.normalize_string(next_token)
if next_token_normalized == "":
current_end += 1
continue
candidate = "%s_%s" % (current_token, next_token_normalized)
candidate_columns = self._string_in_table(candidate, string_column_mapping)
candidate_columns = list(set(candidate_columns).intersection(current_token_columns))
if not candidate_columns:
break
candidate_type = candidate_columns[0].split(":")[1]
if candidate_type != current_token_type:
break
current_end += 1
current_token = candidate
current_token_columns = candidate_columns
new_entities.append({'token_start': current_start,
'token_end': current_end,
'value': current_token,
'token_type': current_token_type,
'token_in_columns': current_token_columns})
return new_entities
|
ContextualSP/interactive_text_to_sql/src/context/db_context.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/context/db_context.py",
"repo_id": "ContextualSP",
"token_count": 6280
}
| 258 |
# coding: utf-8
question_template = "What do you mean by the word {0}? " \
                    "Is that an attribute name, an attribute value, or something else? " \
                    "Select a proper answer if you think we have a misunderstanding of it."
|
ContextualSP/interactive_text_to_sql/src/utils/templates.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/utils/templates.py",
"repo_id": "ContextualSP",
"token_count": 98
}
| 259 |
import git
def commit_diff(c):
"""Return the set of changed files.
Args:
c (git.Commit)
Returns:
set[str]: a set of file paths (relative to the git repo's root directory).
"""
changed = set()
def add_path(blob):
if blob is not None:
changed.add(blob.path)
prev_c = c.parents[0]
for x in c.diff(prev_c):
add_path(x.a_blob)
add_path(x.b_blob)
return changed
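# Illustrative usage (added for clarity; assumes a repository checkout whose HEAD commit has a parent):
#   repo = git.Repo('.')
#   commit_diff(repo.head.commit)   # e.g. {'README.md', 'src/module.py'}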
|
ContextualSP/lemon/executor/gtd/git_utils.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/git_utils.py",
"repo_id": "ContextualSP",
"token_count": 211
}
| 260 |
# Copyright (C) 2006, 2008, 2009, 2010 by Canonical Ltd
# Written by John Arbash Meinel <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""A custom importer and regex compiler which logs time spent."""
import sys
import time
import re
_parent_stack = []
_total_stack = {}
_info = {}
_cur_id = 0
_timer = time.time
if sys.platform == 'win32':
_timer = time.clock
def stack_add(name, frame_name, frame_lineno, scope_name=None):
"""Start a new record on the stack"""
global _cur_id
_cur_id += 1
this_stack = (_cur_id, name)
if _parent_stack:
_total_stack[_parent_stack[-1]].append(this_stack)
_total_stack[this_stack] = []
_parent_stack.append(this_stack)
_info[this_stack] = [len(_parent_stack)-1, frame_name, frame_lineno, scope_name]
return this_stack
def stack_finish(this, cost):
"""Finish a given entry, and record its cost in time"""
global _parent_stack
assert _parent_stack[-1] == this, \
'import stack does not end with this %s: %s' % (this, _parent_stack)
_parent_stack.pop()
_info[this].append(cost)
def log_stack_info(out_file, sorted=True, hide_fast=True):
    # Find all of the root entries (those recorded at depth 0)
out_file.write('%5s %5s %-40s @ %s:%s\n'
% ('cum', 'inline', 'name', 'file', 'line'))
todo = [(value[-1], key) for key,value in _info.items() if value[0] == 0]
if sorted:
todo.sort()
while todo:
cum_time, cur = todo.pop()
children = _total_stack[cur]
c_times = []
info = _info[cur]
if hide_fast and info[-1] < 0.0001:
continue
# Compute the module time by removing the children times
mod_time = info[-1]
for child in children:
c_info = _info[child]
mod_time -= c_info[-1]
c_times.append((c_info[-1], child))
# indent, cum_time, mod_time, name,
# scope_name, frame_name, frame_lineno
out_file.write('%5.1f %5.1f %-40s @ %s:%d\n'
% (info[-1]*1000., mod_time*1000.,
('+'*info[0] + cur[1]),
info[1], info[2]))
if sorted:
c_times.sort()
else:
c_times.reverse()
todo.extend(c_times)
_real_import = __import__
def timed_import(name, globals=None, locals=None, fromlist=None, level=None):
"""Wrap around standard importer to log import time"""
# normally there are 4, but if this is called as __import__ eg by
# /usr/lib/python2.6/email/__init__.py then there may be only one
# parameter
# level is only passed by python2.6
if globals is None:
# can't determine the scope name afaics; we could peek up the stack to
# see where this is being called from, but it should be a rare case.
scope_name = None
else:
scope_name = globals.get('__name__', None)
if scope_name is None:
scope_name = globals.get('__file__', None)
if scope_name is None:
scope_name = list(globals.keys())
else:
# Trim out paths before bzrlib
loc = scope_name.find('bzrlib')
if loc != -1:
scope_name = scope_name[loc:]
# For stdlib, trim out early paths
loc = scope_name.find('python2.4')
if loc != -1:
scope_name = scope_name[loc:]
# Figure out the frame that is doing the importing
frame = sys._getframe(1)
frame_name = frame.f_globals.get('__name__', '<unknown>')
extra = ''
if frame_name.endswith('demandload'):
# If this was demandloaded, we have 3 frames to ignore
extra = '(demandload) '
frame = sys._getframe(4)
frame_name = frame.f_globals.get('__name__', '<unknown>')
elif frame_name.endswith('lazy_import'):
# If this was lazily imported, we have 3 frames to ignore
extra = '[l] '
frame = sys._getframe(4)
frame_name = frame.f_globals.get('__name__', '<unknown>')
if fromlist:
extra += ' [%s]' % (', '.join(map(str, fromlist)),)
frame_lineno = frame.f_lineno
this = stack_add(extra + name, frame_name, frame_lineno, scope_name)
tstart = _timer()
try:
# Do the import
mod = _real_import(name, globals, locals, fromlist)
finally:
tload = _timer()-tstart
stack_finish(this, tload)
return mod
_real_compile = re._compile
def timed_compile(*args, **kwargs):
"""Log how long it takes to compile a regex"""
# And who is requesting this?
frame = sys._getframe(2)
frame_name = frame.f_globals.get('__name__', '<unknown>')
extra = ''
if frame_name.endswith('lazy_regex'):
# If this was lazily compiled, we have 3 more frames to ignore
extra = '[l] '
frame = sys._getframe(5)
frame_name = frame.f_globals.get('__name__', '<unknown>')
frame_lineno = frame.f_lineno
this = stack_add(extra+repr(args[0]), frame_name, frame_lineno)
tstart = _timer()
try:
# Measure the compile time
comp = _real_compile(*args, **kwargs)
finally:
tcompile = _timer() - tstart
stack_finish(this, tcompile)
return comp
def install():
"""Install the hooks for measuring import and regex compile time."""
__builtins__['__import__'] = timed_import
re._compile = timed_compile
def uninstall():
"""Remove the import and regex compile timing hooks."""
__builtins__['__import__'] = _real_import
re._compile = _real_compile
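# Illustrative usage sketch (added for clarity; assumes this file is importable as `profile_imports`):
#   import sys
#   import profile_imports
#   profile_imports.install()                   # imports and re.compile calls are timed from here on
#   import xml.dom.minidom                      # ...application imports happen here...
#   profile_imports.log_stack_info(sys.stderr)  # print cumulative/inline timings per import
#   profile_imports.uninstall()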
|
ContextualSP/lemon/executor/gtd/profile_imports.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/profile_imports.py",
"repo_id": "ContextualSP",
"token_count": 2583
}
| 261 |
from abc import ABCMeta, abstractmethod
class PredicatesComputer(object, metaclass=ABCMeta):
"""Compute the set of possible LF predicates for a context, along with
their alignments to the utterance tokens.
The resulting predicates are used as `choices` in ParseCase.
The alignments are used for soft copying and delexicalization.
"""
@abstractmethod
def compute_predicates(self, tokens):
"""Compute the possible predicates for the tokens of the utterance.
Args:
tokens (list[unicode])
Returns:
list[(Predicate, alignment)]
where alignment is list[(utterance token index, alignment strength)]
"""
raise NotImplementedError
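# Illustrative contract (added for clarity, not part of the original file): for the
# tokens ['red', 'block'] a concrete subclass might return something like
#   [(<Predicate color-red>, [(0, 1.0)]),
#    (<Predicate block>,     [(1, 1.0)])]
# where each alignment entry is (utterance token index, alignment strength).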
|
ContextualSP/lemon/executor/strongsup/predicates_computer.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/predicates_computer.py",
"repo_id": "ContextualSP",
"token_count": 256
}
| 262 |
from abc import ABCMeta, abstractmethod
class RLongState(object, metaclass=ABCMeta):
"""Represents a row of objects, each of which has various properties.
Used in:
- RLongWorld as the initial state
- RLongDenotation as the current state during execution
- RLongValue as the final state
"""
__slots__ = ['_objects']
def __init__(self, objects):
"""Create a new RLongState.
Args:
objects (list).
"""
self._objects = objects
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self._objects == other._objects)
def __hash__(self):
        return hash(tuple(self._objects))
def __repr__(self):
return ' '.join(repr(x) for x in self._objects)
def __getitem__(self, i):
return self._objects[i]
def __len__(self):
return len(self._objects)
def dump_human_readable(self, fout):
"""Dump a human-readable representation to a file object.
By default, print repr(self).
"""
print(self, file=fout)
@property
def objects(self):
return self._objects
@property
def all_objects(self):
return self._objects
@classmethod
def from_raw_string(cls, raw_string):
"""Create a new RLongState from dataset string.
This is a CLASS METHOD.
"""
raise NotImplementedError
@abstractmethod
def apply_join(self, value, prop):
"""Return the result of joining the property with the value.
Args:
value: Property value
prop (str): Property name
Returns:
A result (object)
"""
raise NotImplementedError
@abstractmethod
def apply_double_join(self, value1, value2, prop):
"""Return the result of joining the property with 2 values.
Args:
value1: Property value
value2: Property value
prop (str): Property name
Returns:
A result (object)
"""
raise NotImplementedError
@abstractmethod
def apply_action(self, action, stack):
"""Apply an action and return the new state.
Relevant arguments should be popped from the stack.
Args:
action (str)
stack (list)
Returns:
(new_state, history_entry)
new_state (RLongState): State after the action is applied
history_entry (tuple): An entry to be added to history
"""
raise NotImplementedError
@abstractmethod
def resolve_argument(self, argument):
"""Return a RLongObject that corresponds to the history argument
but with the current properties.
Args:
argument (RLongObject)
Returns:
RLongObject
"""
raise NotImplementedError
def reverse_action(self, action):
"""(Optional method) Return the reversed action.
Args:
action (str)
Returns:
reversed action (str)
"""
raise NotImplementedError
class RLongObject(object):
__slots__ = ()
# just a marker class
################################
# Helper methods
def get_single_object(stack_entry):
if isinstance(stack_entry, list):
assert len(stack_entry) == 1, 'Cannot operate on > 1 objects'
return stack_entry[0]
return stack_entry
################################
# Alchemy domain
class RLongAlchemyObject(tuple, RLongObject):
__slots__ = ()
def __new__(self, position, chemicals):
"""Create a new RLongAlchemyObject.
Args:
position (int): Position of the beaker (starting with 1)
chemicals (str): The beaker's content.
Each character represents 1 unit of chemical of that color.
An empty string represents an empty beaker.
"""
color = (None if not chemicals
or any(x != chemicals[0] for x in chemicals)
else chemicals[0])
return tuple.__new__(RLongAlchemyObject, (position, chemicals, color))
@property
def position(self):
"""Return the beaker's position (int)."""
return self[0]
@property
def chemicals(self):
"""Return the beaker's content (str).
Each character represents 1 unit of chemical of that color.
An empty string represents an empty beaker.
"""
return self[1]
@property
def color(self):
"""If the beaker is not empty and has homogeneous content,
return the beaker's chemical color (1-character str).
Otherwise, return None.
"""
return self[2]
@property
def amount(self):
"""Return the amount of chemical (int)."""
return len(self[1])
def __repr__(self):
return '{}:{}'.format(self.position, self.chemicals or '_')
class RLongAlchemyState(RLongState):
"""State for alchemy domain.
Properties: position, color, amount
Actions: pour, mix, drain
"""
__slots__ = ()
@classmethod
def from_raw_string(cls, raw_string):
"""Create a new RLongAlchemyState from dataset string.
Format for each object: {position}:{chemicals}
"""
objects = []
for raw_object in raw_string.split():
raw_position, raw_chemicals = raw_object.split(':')
objects.append(RLongAlchemyObject(
int(raw_position),
'' if raw_chemicals == '_' else raw_chemicals))
return cls(objects)
def apply_join(self, value, prop):
if prop == 'Color':
return [x for x in self._objects if x.color == value]
else:
raise ValueError('Unknown property {}'.format(prop))
def apply_double_join(self, value1, value2, prop):
raise ValueError('Unknown property {}'.format(prop))
def apply_action(self, action, stack):
if action == 'Pour':
# Object Object Pour
target_pos = get_single_object(stack.pop()).position
source_pos = get_single_object(stack.pop()).position
assert source_pos != target_pos, \
'Cannot pour: Source and target are the same'
target = self._objects[target_pos - 1]
source = self._objects[source_pos - 1]
assert source.color is not None, \
'Cannot pour: Source does not have a pourable content'
assert source.amount + target.amount <= 4, \
'Cannot pour: Overflow'
new_objects = self._objects[:]
new_objects[target_pos - 1] = RLongAlchemyObject(
target_pos, target.chemicals + source.chemicals)
new_objects[source_pos - 1] = RLongAlchemyObject(source_pos, '')
return type(self)(new_objects), ('Pour', source, target)
elif action == 'Mix':
# Object Mix; the chemical becomes brown
target_pos = get_single_object(stack.pop()).position
target = self._objects[target_pos - 1]
assert target.amount, \
'Cannot mix: No content'
assert target.color is None, \
'Cannot mix: The content is already homogeneous'
new_objects = self._objects[:]
new_objects[target_pos - 1] = RLongAlchemyObject(
target_pos, 'b' * target.amount)
return type(self)(new_objects), ('Mix', target)
elif action == 'Drain':
# Object Number Drain
drain_amount = stack.pop()
target_pos = get_single_object(stack.pop()).position
target = self._objects[target_pos - 1]
assert target.amount, \
'Cannot drain: No content'
new_objects = self._objects[:]
if isinstance(drain_amount, str) and drain_amount[0] == 'X':
# Fraction
numer, denom = int(drain_amount[1]), int(drain_amount[3])
assert target.amount % denom == 0, \
'Cannot drain: Invalid fraction'
drain_amount = int(target.amount * numer / denom)
assert (isinstance(drain_amount, int)
and 0 < drain_amount <= target.amount), \
'Cannot drain: Invalid drain amount'
remaining = target.amount - drain_amount
new_objects[target_pos - 1] = RLongAlchemyObject(
target_pos, target.chemicals[:remaining])
return type(self)(new_objects), ('Drain', target, drain_amount)
else:
raise ValueError('Unknown action {}'.format(action))
def resolve_argument(self, argument):
# Beaker is uniquely determined by position
return self._objects[argument.position - 1]
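# Illustrative example for the alchemy domain (added for clarity, not part of the original file):
#   state = RLongAlchemyState.from_raw_string('1:ggg 2:_ 3:rr')
#   stack = [state[0:1], state[1:2]]                      # source = beaker 1, target = beaker 2
#   new_state, entry = state.apply_action('Pour', stack)
#   repr(new_state)                                       # -> '1:_ 2:ggg 3:rr'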
################################
# Scene Domain
class RLongSceneObject(tuple, RLongObject):
__slots__ = ()
def __new__(self, position, shirt, hat, id_):
"""Create a new RLongSceneObject.
An empty space is not an object.
Args:
position (int): Position of the person (starting with 1)
shirt (str): The shirt color.
hat (str): The hat color. Special color `e` means no hat.
id_ (int): The hidden ID used when retrieving with H1, H2, ...
"""
return tuple.__new__(RLongSceneObject, (position, shirt, hat, id_))
@property
def position(self):
"""Return the person's position (int)."""
return self[0]
@property
def shirt(self):
"""Return the shirt color (str)."""
return self[1]
@property
def hat(self):
"""Return the hat color (str)."""
return self[2]
@property
def apparent(self):
"""Return the non-ID part."""
return self[:3]
@property
def id_(self):
"""Return the ID (int)."""
return self[3]
def __repr__(self):
return '{}:{}{}'.format(self.position,
self.shirt or '_', self.hat or '_')
class RLongSceneState(RLongState):
"""State for the scene domain.
Properties: position, shirt, hat
Actions: create, delete, move, swaphat
"""
STAGE_LENGTH = 10
__slots__ = ['_next_id']
def __init__(self, objects, next_id):
"""Create a new RLongSceneState.
Args:
objects (list).
next_id (int): The next available object ID.
"""
RLongState.__init__(self, objects)
self._next_id = next_id
def __eq__(self, other):
return (isinstance(other, self.__class__)
and len(self._objects) == len(other._objects)
and all(self._objects[i].apparent == other._objects[i].apparent
for i in range(len(self._objects))))
@classmethod
def from_raw_string(cls, raw_string):
"""Create a new RLongSceneState from dataset string.
Format for each object: {position}:{shirt}{hat}
"""
objects = []
id_ = 0
for raw_object in raw_string.split():
raw_position, raw_colors = raw_object.split(':')
if raw_colors != '__':
objects.append(RLongSceneObject(
int(raw_position), raw_colors[0],
'e' if raw_colors[1] == '_' else raw_colors[1],
id_))
id_ += 1
return cls(objects, id_)
def get_object_with_id(self, id_):
target = [x for x in self._objects if x.id_ == id_]
assert target, 'No object matching ID'
assert len(target) == 1, 'Multiple objects matching ID'
return target[0]
def apply_join(self, value, prop):
if prop == 'Shirt':
return [x for x in self._objects if x.shirt == value]
elif prop == 'Hat':
return [x for x in self._objects if x.hat == value]
elif prop == 'Left':
target_id = get_single_object(value).id_
target = self.get_object_with_id(target_id)
assert target.position > 1, \
'Cannot call left on leftmost person'
return target.position - 1
elif prop == 'Right':
target_id = get_single_object(value).id_
target = self.get_object_with_id(target_id)
assert target.position < self.STAGE_LENGTH, \
'Cannot call right on rightmost person'
return target.position + 1
else:
raise ValueError('Unknown property {}'.format(prop))
def apply_double_join(self, value1, value2, prop):
if prop == 'ShirtHat':
return [x for x in self._objects if x.shirt == value1
and x.hat == value2]
else:
raise ValueError('Unknown property {}'.format(prop))
def apply_action(self, action, stack):
if action == 'Leave':
# Object Leave
target_id = get_single_object(stack.pop()).id_
target = self.get_object_with_id(target_id)
new_objects = [x for x in self._objects if x.id_ != target_id]
return type(self)(new_objects, self._next_id), \
('Leave', target)
elif action == 'SwapHats':
# Object Object SwapHats
target1_id = get_single_object(stack.pop()).id_
target2_id = get_single_object(stack.pop()).id_
assert target1_id != target2_id, \
'Cannot swap hats: Two targets are the same'
target1 = self.get_object_with_id(target1_id)
target2 = self.get_object_with_id(target2_id)
new_objects = []
for x in self._objects:
if x.id_ == target1_id:
new_objects.append(RLongSceneObject(
x.position, x.shirt, target2.hat, x.id_))
elif x.id_ == target2_id:
new_objects.append(RLongSceneObject(
x.position, x.shirt, target1.hat, x.id_))
else:
new_objects.append(x)
return type(self)(new_objects, self._next_id), \
('SwapHats', target1, target2)
elif action == 'Move':
# Object Number Move
new_pos = stack.pop()
assert isinstance(new_pos, int), \
'Cannot move: Position is not an integer'
if new_pos < 0:
new_pos = self.STAGE_LENGTH + 1 + new_pos
assert all(x.position != new_pos for x in self._objects), \
'Cannot move: Target position is occupied'
target_id = get_single_object(stack.pop()).id_
target = self.get_object_with_id(target_id)
assert target.position != new_pos, \
'Cannot move: Target and source positions are the same'
new_objects = []
for x in self._objects:
if x == target:
new_objects.append(RLongSceneObject(
new_pos, x.shirt, x.hat, x.id_))
else:
new_objects.append(x)
new_objects.sort(key=lambda x: x.position)
return type(self)(new_objects, self._next_id), \
('Move', target, new_pos)
elif action == 'Create':
# Number Color Color|e Create
hat = stack.pop()
shirt = stack.pop()
new_pos = stack.pop()
assert isinstance(hat, str) and len(hat) == 1, \
'Cannot create: Invalid hat color'
assert isinstance(shirt, str) and len(shirt) == 1, \
                'Cannot create: Invalid shirt color'
assert isinstance(new_pos, int), \
'Cannot create: Position is not an integer'
if new_pos < 0:
new_pos = self.STAGE_LENGTH + 1 + new_pos
assert all(x.position != new_pos for x in self._objects), \
'Cannot create: Target position is occupied'
new_objects = self._objects[:]
new_person = RLongSceneObject(
new_pos, shirt, hat, self._next_id)
new_objects.append(new_person)
new_objects.sort(key=lambda x: x.position)
return type(self)(new_objects, self._next_id + 1), \
('Create', new_person)
else:
raise ValueError('Unknown action {}'.format(action))
def resolve_argument(self, argument):
# Person is uniquely determined by ID
# If the person is on the stage, get its identity.
for x in self._objects:
if x.id_ == argument.id_:
return x
# The object is not in the scene
return RLongSceneObject(0, argument.shirt, argument.hat, argument.id_)
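# Illustrative example for the scene domain (added for clarity, not part of the original file):
#   state = RLongSceneState.from_raw_string('1:rb 2:__ 3:ge')
#   state.apply_join('r', 'Shirt')          # -> [the person at position 1]
#   state.apply_join(state[0:1], 'Right')   # -> 2 (position just right of that person)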
################################
# Tangrams Domain
class RLongTangramsObject(tuple, RLongObject):
__slots__ = ()
def __new__(self, position, shape):
"""Create a new RLongTangramsObject.
Args:
position (int): Position of the tangram (starting with 1)
shape (str): Shape ID.
"""
return tuple.__new__(RLongTangramsObject, (position, shape))
@property
def position(self):
"""Return the person's position (int)."""
return self[0]
@property
def shape(self):
"""Return the shape ID (str)."""
return self[1]
def __repr__(self):
return '{}:{}'.format(self.position, self.shape)
class RLongTangramsState(RLongState):
"""State for the tangrams domain.
Properties: position, shape
Actions: add, delete, swap
"""
__slots__ = ()
@classmethod
def from_raw_string(cls, raw_string):
"""Create a new RLongTangramsState from dataset string.
Format for each object: {position}:{shape}
"""
objects = []
for raw_object in raw_string.split():
raw_position, raw_shape = raw_object.split(':')
objects.append(RLongTangramsObject(
int(raw_position), raw_shape))
return cls(objects)
def get_object_with_shape(self, shape):
target = [x for x in self._objects if x.shape == shape]
assert target, 'No object matching shape'
return target[0]
def apply_join(self, value, prop):
# Can only use indexing.
raise ValueError('Unknown property {}'.format(prop))
def apply_double_join(self, value1, value2, prop):
raise ValueError('Unknown property {}'.format(prop))
def apply_action(self, action, stack):
if action == 'Add':
# Number Object Add
target_shape = get_single_object(stack.pop()).shape
new_pos = stack.pop()
assert isinstance(new_pos, int), \
'Cannot add: Position is not an integer'
if new_pos < 0:
new_pos = len(self._objects) + 2 + new_pos
assert new_pos <= len(self._objects) + 1, \
'Cannot add: Position out of bound'
new_tangram = RLongTangramsObject(new_pos, target_shape)
new_objects = [new_tangram]
for x in self._objects:
assert x.shape != target_shape, \
'Cannot add: Repeated shape'
if x.position < new_pos:
new_objects.append(x)
else:
new_objects.append(RLongTangramsObject(
x.position + 1, x.shape))
new_objects.sort(key=lambda x: x.position)
return type(self)(new_objects), ('Add', new_pos, new_tangram)
elif action == 'Swap':
# Object Object Swap
target1_shape = get_single_object(stack.pop()).shape
target2_shape = get_single_object(stack.pop()).shape
assert target1_shape != target2_shape, \
'Cannot swap: Two targets are the same'
target1 = self.get_object_with_shape(target1_shape)
target2 = self.get_object_with_shape(target2_shape)
new_objects = []
for x in self._objects:
if x.shape == target1_shape:
new_objects.append(RLongTangramsObject(
x.position, target2.shape))
elif x.shape == target2_shape:
new_objects.append(RLongTangramsObject(
x.position, target1.shape))
else:
new_objects.append(x)
return type(self)(new_objects), ('Swap', target1, target2)
elif action == 'Remove':
            # Object Remove
target_shape = get_single_object(stack.pop()).shape
target = self.get_object_with_shape(target_shape)
new_objects = []
for x in self._objects:
if x.position < target.position:
new_objects.append(x)
elif x.position > target.position:
new_objects.append(RLongTangramsObject(
x.position - 1, x.shape))
return type(self)(new_objects), ('Remove', target)
else:
raise ValueError('Unknown action {}'.format(action))
def resolve_argument(self, argument):
# Tangram is uniquely determined by shape
# If the tangram is on the stage, get its identity.
for x in self._objects:
if x.shape == argument.shape:
return x
# The object is not in the scene
return RLongTangramsObject(0, argument.shape)
class RLongUndogramsState(RLongTangramsState):
"""State for the tangrams domain, but supports HUndo.
Properties: position, shape
Actions: add, delete, swap
"""
__slots__ = ()
def apply_action(self, action, stack):
new_state, command = RLongTangramsState.apply_action(self, action, stack)
if action == 'Remove':
# We also add position to the arguments to make it parallel to AAdd
command = (command[0], command[1].position, command[1])
return new_state, command
def reverse_action(self, action):
if action == 'Swap':
return 'Swap'
elif action == 'Remove':
return 'Add'
elif action == 'Add':
return 'Remove'
else:
raise ValueError('Unknown action {}'.format(action))
|
ContextualSP/lemon/executor/strongsup/rlong/state.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/rlong/state.py",
"repo_id": "ContextualSP",
"token_count": 10345
}
| 263 |
import numpy as np
from strongsup.predicate import Predicate
def softmax(stuff):
"""Quick and dirty way to compute softmax"""
return (np.exp(stuff) / np.sum(np.exp(stuff))).tolist()
class PredicateGenerator(object):
"""Generate predicates with the specified context."""
def __init__(self, context):
self.context = context
self.cache = {}
def __call__(self, name):
if name not in self.cache:
self.cache[name] = Predicate(name, self.context)
return self.cache[name]
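# Illustrative usage (added for clarity; `context` would come from the surrounding test):
#   predicate = PredicateGenerator(context)
#   predicate('argmax') is predicate('argmax')   # -> True, Predicate objects are cached by name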
|
ContextualSP/lemon/executor/strongsup/tests/utils.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tests/utils.py",
"repo_id": "ContextualSP",
"token_count": 202
}
| 264 |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/LICENSE/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/LICENSE",
"repo_id": "ContextualSP",
"token_count": 3168
}
| 265 |
name: grc
channels:
- defaults
dependencies:
- pip=20.2.2=py37_0
- python=3.7.5=h0371630_0
- pip:
- numpy==1.19.2
- overrides==3.1.0
- scikit-learn==0.23.2
- scipy==1.5.2
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/environment.yml/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/environment.yml",
"repo_id": "ContextualSP",
"token_count": 111
}
| 266 |
from process.process import Process, Conversion, Move, Input, Output
from process.summary import ProcessSummary
from process.action_file import ActionFile
from process.sentence_file import sentences_from_sentences_file
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/__init__.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/__init__.py",
"repo_id": "ContextualSP",
"token_count": 50
}
| 267 |
## SciTail Evaluator
This script evaluates predictions on the SciTail dataset and produces an accuracy score.
## Example
```bash
% python3 evaluator.py -a answers.jsonl -p predictions.csv -o metrics.json
% cat metrics.json
{"accuracy": 0.8}
```
## Usage
The script takes two input files and produces one output file.
### Input answers
A JSONL file with an `id` and `gold_label` for each item. For example:
```bash
% cat answers.jsonl
{ "id": "P1", "gold_label": "E" }
{ "id": "P2", "gold_label": "E" }
{ "id": "P3", "gold_label": "N" }
{ "id": "P4", "gold_label": "N" }
{ "id": "P5", "gold_label": "N" }
```
(Attributes besides `id` and `gold_label` in each object are ignored.)
### Input predictions
A CSV file with one `id,label` prediction per line. For example:
```bash
% cat predictions.csv
P1,E
P2,N
P3,N
P4,N
P5,N
```
### Output metrics
A JSON file that has an accuracy score in the range 0.0 to 1.0. For example:
```bash
% cat metrics.json
{"accuracy": 0.8}
```
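The accuracy above is simply the fraction of predictions whose label matches the gold label. A minimal sketch of that computation (this is not the official `evaluator.py`; the file names follow the examples above):
```python
import csv
import json
def accuracy(answers_path: str, predictions_path: str) -> float:
    """Fraction of predictions that match the gold labels."""
    gold = {}
    with open(answers_path) as answers_file:
        for line in answers_file:
            record = json.loads(line)
            gold[record["id"]] = record["gold_label"]
    correct = 0
    with open(predictions_path) as predictions_file:
        for question_id, predicted_label in csv.reader(predictions_file):
            if gold.get(question_id) == predicted_label:
                correct += 1
    return correct / len(gold)
print(json.dumps({"accuracy": accuracy("answers.jsonl", "predictions.csv")}))
```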
## Development
### Unit tests
Run unit tests with `python3 test_evaluator.py`.
### Docker
Ultimately this evaluator is run in a Docker container. To test that it works there, run `test.sh`.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/evaluator/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/evaluator/README.md",
"repo_id": "ContextualSP",
"token_count": 413
}
| 268 |
import collections
import logging
from typing import Any, Dict, List, Optional, Tuple, Union, NamedTuple
import numpy as np
import torch
import torch.nn as nn
from torch.cuda.amp import autocast  # needed by the use_amp branch in prediction_step
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.deepspeed import deepspeed_init  # needed by the deepspeed branch in evaluation_loop
from transformers.trainer_utils import PredictionOutput
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.trainer_utils import (
denumpify_detensorize,
EvalLoopOutput,
EvalPrediction)
from torch.utils.data import DataLoader, Dataset, IterableDataset, RandomSampler, SequentialSampler
logger = logging.getLogger(__name__)
if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl  # used by evaluation_loop on TPU
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class GenTrainer(Seq2SeqTrainer):
    def __init__(self, *args, num_return_seq=4, num_beams=4, gan_alpha=0.8, **kwargs):
super().__init__(*args, **kwargs)
self.num_return_seq = num_return_seq
self._num_beams = num_beams
# self.gan_alpha = gan_alpha
# self.kl_loss = nn.KLDivLoss(reduction="batchmean")
# print(f'The coefficient for teacher loss: {gan_alpha}')
def compute_loss(self, model, inputs):
outputs = model(**inputs, output_hidden_states=False)
return outputs.loss
'''
def compute_loss(self, model, inputs):
assert "labels" in inputs, """labels is required to compute loss"""
is_gold = inputs.pop("is_gold") # Should be torch.LongTensor
ver_prob = inputs.pop("ver_prob") # Should be torch.FloatTensor
outputs = model(**inputs, output_hidden_states=False)
entropy = outputs.loss # All loss
teacher_forcing_loss = is_gold * entropy # We only consider teacher forcing loss when is_gold=True
ver_prob = ((1 - is_gold) * ver_prob)[1:]
gen_score = 1 - ((1 - is_gold) * entropy)[1:]
ver_prob_norm = torch.softmax(ver_prob,dim=-1)
gen_score_norm = torch.softmax(gen_score,dim=-1)
gan_loss = self.kl_loss(ver_prob_norm,gen_score_norm)
# gen_score = gen_score
# ver_prob_rank = torch.argsort(ver_prob, dim=-1, descending=True).float().unsqueeze(0)
# gen_score_rank = torch.argsort(gen_score, dim=-1, descending=True).float().unsqueeze(0)
# gan_loss = 1 - torch.cosine_similarity(ver_prob_rank, gen_score_rank,
# dim=-1) # torch.cosine_embedding_loss(ver_prob_rank,gen_score_rank,target=torch.Tensor(1).cuda())
gan_alpha = self.gan_alpha # model.module.gan_alpha # self.gan_alpha
# print(gan_alpha)
ALPHA = gan_alpha
BETA = 1 - gan_alpha
loss = (ALPHA * teacher_forcing_loss).sum() + (BETA * gan_loss).sum()
# print(f"is_gold: {is_gold}")
# print(
# f"Verifier predict probability (ver_prob): {ver_prob}({ver_prob_norm}),(score prob) {gen_score} ({gen_score_norm})")
# print(f"Cross entropy loss: {entropy}")
# print(f"teacher_forcing_loss: {teacher_forcing_loss}")
# print(f"gan_loss: {gan_loss}")
# print(f"Total loss: {loss}")
# exit()
return loss
'''
'''
def compute_loss(self, model, inputs):
assert "labels" in inputs, """labels is required to compute loss"""
is_gold = inputs.pop("is_gold") # Should be torch.LongTensor
ver_prob = inputs.pop("ver_prob") # Should be torch.FloatTensor
outputs = model(**inputs, output_hidden_states=False)
entropy = outputs.loss # All loss
teacher_forcing_loss = is_gold * entropy # We only consider teacher forcing loss when is_gold=True
ver_prob = ((1-is_gold)*ver_prob)[1:]
gen_score = 1-((1-is_gold)*entropy)[1:]
gen_score = gen_score
ver_prob_rank = torch.argsort(ver_prob,dim=-1,descending=True).float().unsqueeze(0)
gen_score_rank = torch.argsort(gen_score,dim=-1,descending=True).float().unsqueeze(0)
gan_loss = 1-torch.cosine_similarity(ver_prob_rank,gen_score_rank,dim=-1)#torch.cosine_embedding_loss(ver_prob_rank,gen_score_rank,target=torch.Tensor(1).cuda())
gan_alpha = self.gan_alpha#model.module.gan_alpha # self.gan_alpha
# print(gan_alpha)
ALPHA = gan_alpha
BETA = 1 - gan_alpha
loss = (ALPHA * teacher_forcing_loss).sum() + (BETA * gan_loss).sum()
print(f"is_gold: {is_gold}")
print(f"Verifier predict probability (ver_prob): {ver_prob}({ver_prob_rank}),(score prob) {gen_score} ({gen_score_rank})")
print(f"Cross entropy loss: {entropy}")
print(f"teacher_forcing_loss: {teacher_forcing_loss}")
print(f"gan_loss: {gan_loss}")
print(f"Total loss: {loss}")
# exit()
return loss
'''
'''
def compute_loss(self, model, inputs):
assert "labels" in inputs, """labels is required to compute loss"""
is_gold = inputs.pop("is_gold") # Should be torch.LongTensor
ver_prob = inputs.pop("ver_prob") # Should be torch.FloatTensor
outputs = model(**inputs, output_hidden_states=False)
entropy = outputs.loss # All loss
teacher_forcing_loss = is_gold * entropy # We only consider teacher forcing loss when is_gold=True
normalized_ver_prob = (1 - is_gold) * ver_prob
gan_loss = normalized_ver_prob * entropy # We only add GAN-loss for is_gold=False
ALPHA = 1
BETA = 1
loss = (ALPHA * teacher_forcing_loss + BETA * gan_loss).sum()
print(f"is_gold: {is_gold}")
print(f"Verifier predict probability (ver_prob): {ver_prob}")
print(f"Cross entropy loss: {entropy}")
print(f"teacher_forcing_loss: {teacher_forcing_loss}")
print(f"gan_loss: {gan_loss}")
print(f"Total loss: {loss}")
# exit()
return loss
'''
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
)
has_labels = "labels" in inputs
inputs = self._prepare_inputs(inputs)
# XXX: adapt synced_gpus for fairscale as well
gen_kwargs = {
"max_length": self._max_length if self._max_length is not None else self.model.config.max_length,
"num_beams": self._num_beams if self._num_beams is not None else self.model.config.num_beams,
"synced_gpus": False, # True if is_deepspeed_zero3_enabled() else False,
"num_return_sequences": self.num_return_seq if self.num_return_seq else 10
}
# print(gen_kwargs['num_beams'],gen_kwargs['num_return_sequences'])
if self.tokenizer is not None:
generation_inputs = {k: v for k, v in inputs.items() if k in self.tokenizer.model_input_names}
# very ugly hack to make it work
generation_inputs["input_ids"] = generation_inputs.pop(self.tokenizer.model_input_names[0])
else:
generation_inputs = inputs["input_ids"]
generated_tokens = self.model.generate(
**generation_inputs,
**gen_kwargs,
)
# in case the batch is shorter than max length, the output should be padded
# print(all_generated_tokens.shape)
# for i in range(all_generated_tokens.size(1)):
# generated_tokens = all_generated_tokens[:,i,:]
# if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
# all_generated_tokens[:,i,:] = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
with torch.no_grad():
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if has_labels:
if self.label_smoother is not None:
loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
else:
loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
else:
loss = None
if self.args.prediction_loss_only:
return (loss, None, None)
labels = inputs["labels"]
if labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
generated_tokens = generated_tokens.view(-1, gen_kwargs["num_return_sequences"], gen_kwargs["max_length"])
# print(generated_tokens.size())
return (loss, generated_tokens, labels)
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
# if eval is called w/o train init deepspeed here
if self.args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, halve it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
logger.info(f"***** Running {description} *****")
if isinstance(dataloader.dataset, collections.abc.Sized):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = dataloader.dataset
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
# Will be useful when we have an iterable dataset so don't know its length.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# For batch samplers, batch_size is not known by the dataloader in advance.
if batch_size is None:
batch_size = observed_batch_size
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# Number of samples
if not isinstance(eval_dataset, IterableDataset):
num_samples = len(eval_dataset)
# The instance check is weird and does not actually check for the type, but whether the dataset has the right
# methods. Therefore we need to make sure it also has the attribute.
elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"):
num_samples = eval_dataset.num_examples
else:
num_samples = observed_num_examples
# Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
# samplers has been rounded to a multiple of batch_size, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
preds_for_eval = [preds[0] for preds in all_preds]
# print([len(pred) for pred in preds_for_eval])
# print(len(all_labels))
metrics = self.compute_metrics(EvalPrediction(predictions=preds_for_eval, label_ids=all_labels))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
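# Illustrative usage sketch (added for clarity; argument values are assumptions, not from this repo):
#   trainer = GenTrainer(
#       model=model, args=training_args,
#       train_dataset=train_dataset, eval_dataset=eval_dataset,
#       tokenizer=tokenizer, compute_metrics=compute_metrics,
#       num_return_seq=4, num_beams=4,
#   )
#   trainer.train()
#   metrics = trainer.evaluate()   # runs the overridden evaluation_loop above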
|
ContextualSP/logigan/pre-training/GenTrainer.py/0
|
{
"file_path": "ContextualSP/logigan/pre-training/GenTrainer.py",
"repo_id": "ContextualSP",
"token_count": 8488
}
| 269 |
from z3 import *
import random
from random import shuffle
from itertools import combinations, product
from typing import List, Tuple
from functools import partial
from tqdm import tqdm
import os
solver = Solver()
vars_all_candidates = [chr(i) for i in list(range(97, 122))]
for symbol in vars_all_candidates:
# init all variables
exec("{0} = Bool('{0}')".format(symbol))
def sample_single_logic(var_inputs: Tuple):
var_1, var_2 = var_inputs
# given two vars, sample a logic to represent these
# decide order of two vars
logic_var_1 = var_1
logic_var_2 = var_2
if random.random() > 0.5:
var_1 = "not {}".format(var_1)
logic_var_1 = "Not({})".format(logic_var_1)
if random.random() > 0.5:
var_2 = "not {}".format(var_2)
logic_var_2 = "Not({})".format(logic_var_2)
# implies of two logic var
if random.random() > 0.5:
var_1, var_2 = var_2, var_1
logic_var_1, logic_var_2 = logic_var_2, logic_var_1
text = "( {} -> {} ) ;".format(var_1, var_2)
logic = "Implies({}, {})".format(logic_var_1, logic_var_2)
return logic, text
def sample_simple_hypo(var_candidates: List):
# sample 1 or 2
sample_num = 1 if random.random() < 0.75 else 2
var_combinations = list(combinations(var_candidates, 2))
shuffle(var_combinations)
if sample_num == 1 or len(var_combinations) == 1:
# select the first one as the var_candidates
var_1, var_2 = var_combinations[0]
return sample_single_logic((var_1, var_2))
sample_predicate = "And" if random.random() < 0.75 else "Or"
var_1, var_2 = var_combinations[0]
logic_1 = sample_single_logic((var_1, var_2))
var_1, var_2 = var_combinations[1]
logic_2 = sample_single_logic((var_1, var_2))
# build both
logic_part = sample_predicate + "({}, {})".format(logic_1[0], logic_2[0])
if sample_predicate == "And":
text_part = logic_1[1] + " " + logic_2[1]
else:
text_part = logic_1[1] + " or " + logic_2[1]
return logic_part, text_part
def validate_statements(fact: List, hypo_linear: str = None):
solver.reset()
# construct the overall statement
if hypo_linear is None:
fact_linear = ", ".join(fact)
logic_state = "And({})".format(fact_linear)
exec("solver.add(" + logic_state + ")")
result = solver.check()
if result.r == 1:
return True
else:
return False
else:
fact_linear = ", ".join(fact)
logic_state = "Not(Implies(And({0}), {1}))".format(fact_linear, hypo_linear)
exec("solver.add(" + logic_state + ")")
result = solver.check()
if result.r == -1:
return True
else:
return False
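# Illustrative checks (added for clarity, not part of the original script):
#   validate_statements(["Implies(a, b)", "a"], "b")   # -> True: the facts entail the hypothesis
#   validate_statements(["Implies(a, b)"], "b")        # -> False: not entailed (a may be false)
#   validate_statements(["a", "Not(a)"])               # -> False: the facts themselves are unsatisfiable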
def sample_example():
vars_candidates = [chr(i) for i in list(range(97, 122))]
shuffle(vars_candidates)
# take the first 3-4 to construct the complete logic rules
# total_var_count = random.randint(4, 9)
total_var_count = 4
use_var_count = min(random.randint(2, total_var_count - 2), 4)
used_vars = vars_candidates[:use_var_count]
# take the 4-10 to construct negative examples
unused_vars = vars_candidates[use_var_count: total_var_count]
# for each pair of vars, firstly we construct var pairs
used_combines = list(combinations(used_vars, 2))
# we should verify that the logic is valid
context_logic_is_valid = False
logic_facts = []
text_facts = []
while not context_logic_is_valid:
shuffle(used_combines)
        # take 3~6 of them (randint is inclusive on both ends)
count_combines = random.randint(3, 6)
used_combines = used_combines[: count_combines]
logic_facts = list(map(sample_single_logic, used_combines))
# if the logic in context is valid, break
logic_facts, text_facts = zip(*logic_facts)
context_logic_is_valid = validate_statements(logic_facts)
logic_facts = list(logic_facts)
text_facts = list(text_facts)
# add some other facts
unused_combines = list(combinations(unused_vars, 2))
shuffle(unused_combines)
unused_text_facts = list(map(sample_single_logic, unused_combines[: random.randint(5, 15)]))
_, unused_text_facts = zip(*unused_text_facts)
unused_text_facts = list(unused_text_facts)
# add some used var and unused var facts which cannot affect the logic
add_negative_count = random.randint(1, 2)
all_combines = list(product(vars_candidates[:use_var_count], vars_candidates[use_var_count:total_var_count]))
shuffle(all_combines)
while add_negative_count > 0:
# try to add one
temp_logic_facts = logic_facts[:]
cur_logic, cur_text = sample_single_logic(all_combines[-add_negative_count])
temp_logic_facts.append(cur_logic)
if validate_statements(temp_logic_facts):
# update logic facts and textual facts
logic_facts.append(cur_logic)
text_facts.append(cur_text)
add_negative_count -= 1
# sample logic hypo
logic_hypo, text_hypo = None, ""
while logic_hypo is None:
logic_hypo, text_hypo = sample_simple_hypo(used_vars)
if logic_hypo in logic_facts:
# too trivial to verify
logic_hypo = None
# give an answer
text_facts = list(text_facts + unused_text_facts)
shuffle(text_facts)
text_final = text_hypo + " [SEP] " + " ".join(text_facts)
answer_final = "1" if validate_statements(logic_facts, logic_hypo) else "0"
return text_final, answer_final
def convert_logical_data(output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
train_src_f = open(os.path.join(output_dir, "train.raw.input0"), "w", encoding="utf8")
train_tgt_f = open(os.path.join(output_dir, "train.label"), "w", encoding="utf8")
dev_src_f = open(os.path.join(output_dir, "dev.raw.input0"), "w", encoding="utf8")
dev_tgt_f = open(os.path.join(output_dir, "dev.label"), "w", encoding="utf8")
for _ in tqdm(range(100000)):
input_line, output_line = sample_example()
train_src_f.write(input_line + "\n")
train_tgt_f.write(output_line + "\n")
for _ in tqdm(range(2000)):
input_line, output_line = sample_example()
dev_src_f.write(input_line + "\n")
dev_tgt_f.write(output_line + "\n")
train_src_f.close()
train_tgt_f.close()
dev_src_f.close()
dev_tgt_f.close()
if __name__ == '__main__':
convert_logical_data("pretrain_logic")
|
ContextualSP/poet/synthesize_logic_corpus.py/0
|
{
"file_path": "ContextualSP/poet/synthesize_logic_corpus.py",
"repo_id": "ContextualSP",
"token_count": 2798
}
| 270 |
import random
import torch
from torch import nn
from torch.nn.functional import softmax
from utils import Trie, Tree
MAX_LEN = 256
class Parser(nn.Module):
def __init__(self, src_dictionary, trg_dictionary, model, device):
super().__init__()
self.src_dictionary = src_dictionary
self.trg_dictionary = trg_dictionary
self.model = model
self.device = device
def forward(self, src_info, trg_info, teacher_force_rate = 1):
# src_info, trg_info, label_info, ori_idx = self.transform(src, trg, label)
src_ids, src_lengths = src_info
trg_ids, trg_lengths = trg_info
output, _ = self.model(src_info, trg_ids, teacher_force_rate)
# print("output shape:", output.shape)
return output
def inference(self, src_info):
src_ids, src_lengths = src_info
encoder_outputs, hidden = self.model.encoder(src_ids, src_lengths)
# print(self.device)
root = Tree(torch.LongTensor([self.trg_dictionary.SOS]).to(self.device))
mask = self.model.create_mask(src_ids)
with torch.no_grad():
alpha = torch.ones(self.trg_dictionary.size()).to(src_ids)
self.build_tree(root, hidden, encoder_outputs, mask, alpha, 0)
return Trie(node = root).get_path()
def build_tree(self, tree_node, hidden, encoder_outputs, mask, alpha, depth):
if depth > 20:
return
input = tree_node.value
if input == self.trg_dictionary.EOS:
return
output, hidden, _ = self.model.decoder(input, hidden, encoder_outputs, mask)
output = alpha * torch.sigmoid(output.squeeze(0))
for i in range(output.shape[0]):
if output[i] > 0.5:
child = Tree(torch.LongTensor([i]).to(self.device))
tree_node.children[i] = child
for k, child in tree_node.children.items():
self.build_tree(child, hidden, encoder_outputs, mask, alpha, depth+1)
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, pad_idx, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
self.pad_idx = pad_idx
def forward(self, src_, trg = None, teacher_forcing_ratio=1):
src, src_len = src_
batch_size = src.shape[1]
max_len = trg.shape[0] if trg is not None else MAX_LEN
trg_vocab_size = self.decoder.output_dim
outputs = torch.zeros(max_len-1, batch_size, trg_vocab_size).to(self.device)
attn_scores = []
encoder_outputs, hidden = self.encoder(src, src_len)
input = trg[0, :] if trg is not None else src[0, :]
mask = self.create_mask(src)
for t in range(1, max_len):
output, hidden, attn_score = self.decoder(input, hidden, encoder_outputs, mask)
outputs[t-1] = output
teacher_force = random.random() < teacher_forcing_ratio
top1 = output.argmax(1)
input = trg[t] if teacher_force else top1
attn_scores.append(attn_score)
return outputs, torch.cat(attn_scores, dim = 1).to(self.device)
def create_mask(self, src):
mask = (src != self.pad_idx).permute(1, 0)
return mask
class Encoder(nn.Module):
def __init__(self, input_dim, src_emb_dim, enc_hid_dim, dec_hid_dim, dropout, pad_idx, embed =None):
super().__init__()
self.nl_embedding = nn.Embedding(input_dim, src_emb_dim)
if embed is not None:
print('using the glove embedding')
self.nl_embedding.weight.data.copy_(embed)
else:
print("not using glove embedding")
self.vocab_size = input_dim
self.emb_dim = src_emb_dim
self.rnn = nn.GRU(self.emb_dim, enc_hid_dim, bidirectional=True)
self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
self.dropout_src = nn.Dropout(dropout)
def forward(self, src, src_len):
embedded = self.dropout_src(self.nl_embedding(src)* (self.vocab_size ** 0.5))
embedded = nn.utils.rnn.pack_padded_sequence(embedded, src_len.data, batch_first=False)
outputs, hidden = self.rnn(embedded)
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
hidden = torch.tanh(self.fc(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)))
return outputs, hidden
class Attention(nn.Module):
def __init__(self, enc_hid_dim, dec_hid_dim):
super().__init__()
self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
self.v = nn.Parameter(torch.rand(dec_hid_dim))
def forward(self, hidden, encoder_outputs, mask):
batch_size = encoder_outputs.shape[1]
src_len = encoder_outputs.shape[0]
hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
encoder_outputs = encoder_outputs.permute(1, 0, 2)
energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
energy = energy.permute(0, 2, 1)
v = self.v.repeat(batch_size, 1).unsqueeze(1)
attention = torch.bmm(v, energy).squeeze(1)
attention = attention.masked_fill(mask == 0, -1e10)
return softmax(attention, dim=1)
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
super().__init__()
self.output_dim = output_dim
self.attention = attention
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
self.out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, input, hidden, encoder_outputs, mask):
# input = [batch_size]
# hidden = [batch_size, dec_hid_dim]
# encoder_outputs = [src_sent_len, batch_size, enc_hid_dim * 2]
# print(input.shape, hidden.shape, encoder_outputs.shape)
input = input.unsqueeze(0) # [1, batch_size]
embedded = self.dropout(self.embedding(input)) # [1, batch_size, emb_dim]
# a = self.attention(hidden, encoder_outputs) # [batch_size, src_len]
a = self.attention(hidden, encoder_outputs, mask) # [batch_size, src_len]
a = a.unsqueeze(1) # [batch_size, 1, src_len]
encoder_outputs = encoder_outputs.permute(1, 0, 2)
weighted = torch.bmm(a, encoder_outputs)
weighted = weighted.permute(1, 0, 2)
rnn_input = torch.cat((embedded, weighted), dim=2)
# print("rnn_input shape:", rnn_input.shape)
output, hidden = self.rnn(rnn_input, hidden.unsqueeze(0))
assert (output == hidden).all()
embedded = embedded.squeeze(0)
output = output.squeeze(0)
weighted = weighted.squeeze(0)
output = self.out(torch.cat((output, weighted, embedded), dim=1))
return output, hidden.squeeze(0), a
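# Shape sketch (an assumption, not part of the original training code): exercises
# the Attention module above with random tensors to show the expected layouts.
# Encoder outputs are [src_len, batch, enc_hid_dim * 2]; the returned attention
# weights are [batch, src_len] and each row is a softmax over source positions.
def _demo_attention_shapes(enc_hid_dim: int = 8, dec_hid_dim: int = 16,
                           src_len: int = 5, batch_size: int = 3):
    attention = Attention(enc_hid_dim, dec_hid_dim)
    hidden = torch.randn(batch_size, dec_hid_dim)
    encoder_outputs = torch.randn(src_len, batch_size, enc_hid_dim * 2)
    mask = torch.ones(batch_size, src_len, dtype=torch.bool)  # no padding positions
    weights = attention(hidden, encoder_outputs, mask)
    assert weights.shape == (batch_size, src_len)
    assert torch.allclose(weights.sum(dim=1), torch.ones(batch_size))
    return weights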
|
ContextualSP/poset_decoding/sketch_prediction/model.py/0
|
{
"file_path": "ContextualSP/poset_decoding/sketch_prediction/model.py",
"repo_id": "ContextualSP",
"token_count": 3243
}
| 271 |
## Build Documentation:
#### Install Requirements
```bash
pip install -r requirements.txt
```
#### Build Documentation
```bash
# Enter docs folder.
cd docs
# Use sphinx autodoc to generate rst.
sphinx-apidoc -o source/ ../matchzoo/
# Generate html from rst
make clean
make html
```
This will install all the packages needed by the code, but it can fail in some environments (see this [issue](https://github.com/readthedocs/readthedocs.org/issues/5882)) and is not strictly necessary.
An alternative way to generate the documentation is to use `sphinx-autoapi`.
Follow this [link](https://sphinx-autoapi.readthedocs.io/en/latest/tutorials.html)
```bash
pip install sphinx-autoapi
```
then modify `conf.py`:
```python
extensions = ['autoapi.extension']
autoapi_dirs = ['../mypackage']
```
then run:
```bash
make html
```
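A minimal `conf.py` sketch with AutoAPI might look like the following (the values here are illustrative; point `autoapi_dirs` at the actual package, e.g. `../matchzoo`):
```python
# docs/conf.py -- minimal AutoAPI setup (illustrative values)
project = 'MatchZoo-py'
extensions = ['autoapi.extension']
autoapi_type = 'python'
autoapi_dirs = ['../matchzoo']   # package to document
html_theme = 'sphinx_rtd_theme'  # optional; any installed theme works
```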
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/Readme.md/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/docs/Readme.md",
"repo_id": "ContextualSP",
"token_count": 261
}
| 272 |
from .data_pack import DataPack, load_data_pack
from .pack import pack
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/data_pack/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/data_pack/__init__.py",
"repo_id": "ContextualSP",
"token_count": 21
}
| 273 |
from pathlib import Path
from .load_glove_embedding import load_glove_embedding
from .load_fasttext_embedding import load_fasttext_embedding
DATA_ROOT = Path(__file__).parent
EMBED_RANK = DATA_ROOT.joinpath('embed_rank.txt')
EMBED_10 = DATA_ROOT.joinpath('embed_10_word2vec.txt')
EMBED_10_GLOVE = DATA_ROOT.joinpath('embed_10_glove.txt')
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/embeddings/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/embeddings/__init__.py",
"repo_id": "ContextualSP",
"token_count": 131
}
| 274 |
"""The rank hinge loss."""
import torch
from torch import nn
import torch.nn.functional as F
class RankHingeLoss(nn.Module):
"""
Creates a criterion that measures rank hinge loss.
Given inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`,
and a label 1D mini-batch tensor :math:`y` (containing 1 or -1).
If :math:`y = 1` then it assumed the first input should be ranked
higher (have a larger value) than the second input, and vice-versa
for :math:`y = -1`.
The loss function for each sample in the mini-batch is:
.. math::
loss_{x, y} = max(0, -y * (x1 - x2) + margin)
"""
__constants__ = ['num_neg', 'margin', 'reduction']
def __init__(self, num_neg: int = 1, margin: float = 1.,
reduction: str = 'mean'):
"""
:class:`RankHingeLoss` constructor.
:param num_neg: Number of negative instances in hinge loss.
:param margin: Margin between positive and negative scores.
            Float. Has a default value of :math:`1`.
:param reduction: String. Specifies the reduction to apply to
the output: ``'none'`` | ``'mean'`` | ``'sum'``.
``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the
number of elements in the output,
``'sum'``: the output will be summed.
"""
super().__init__()
self.num_neg = num_neg
self.margin = margin
self.reduction = reduction
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor):
"""
Calculate rank hinge loss.
:param y_pred: Predicted result.
:param y_true: Label.
:return: Hinge loss computed by user-defined margin.
"""
y_pos = y_pred[::(self.num_neg + 1), :]
y_neg = []
for neg_idx in range(self.num_neg):
neg = y_pred[(neg_idx + 1)::(self.num_neg + 1), :]
y_neg.append(neg)
y_neg = torch.cat(y_neg, dim=-1)
y_neg = torch.mean(y_neg, dim=-1, keepdim=True)
y_true = torch.ones_like(y_pos)
return F.margin_ranking_loss(
y_pos, y_neg, y_true,
margin=self.margin,
reduction=self.reduction
)
@property
def num_neg(self):
"""`num_neg` getter."""
return self._num_neg
@num_neg.setter
def num_neg(self, value):
"""`num_neg` setter."""
self._num_neg = value
@property
def margin(self):
"""`margin` getter."""
return self._margin
@margin.setter
def margin(self, value):
"""`margin` setter."""
self._margin = value
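# Minimal usage sketch (an addition, not part of the original module): the loss
# expects predictions laid out as repeating groups of one positive followed by
# `num_neg` negatives along the batch dimension, e.g. [pos, neg, pos, neg, ...]
# for num_neg=1. The passed-in labels are ignored apart from their shape.
def _demo_rank_hinge_loss():
    loss_fn = RankHingeLoss(num_neg=1, margin=1.0)
    # Two (positive, negative) pairs: scores shaped [batch, 1].
    y_pred = torch.tensor([[2.0], [0.5], [1.0], [1.5]])
    y_true = torch.zeros(4, 1)  # placeholder labels; only positions matter
    # mean(max(0, -(2.0 - 0.5) + 1), max(0, -(1.0 - 1.5) + 1)) = 0.75
    return loss_fn(y_pred, y_true)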
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/losses/rank_hinge_loss.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/losses/rank_hinge_loss.py",
"repo_id": "ContextualSP",
"token_count": 1214
}
| 275 |
"""An implementation of BiMPM Model."""
import typing
import torch
import torch.nn as nn
from torch.nn import functional as F
from matchzoo.engine import hyper_spaces
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
class BiMPM(BaseModel):
"""
BiMPM Model.
Reference:
- https://github.com/galsang/BIMPM-pytorch/blob/master/model/BIMPM.py
Examples:
>>> model = BiMPM()
>>> model.params['num_perspective'] = 4
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=False
)
params.add(Param(name='mask_value', value=0,
desc="The value to be masked from inputs."))
params.add(Param(name='dropout', value=0.2,
desc="Dropout rate."))
params.add(Param(name='hidden_size', value=100,
hyper_space=hyper_spaces.quniform(
low=100, high=300, q=100),
desc="Hidden size."))
# BiMPM parameters
params.add(Param(name='num_perspective', value=20,
hyper_space=hyper_spaces.quniform(
low=20, high=100, q=20),
desc='num_perspective'))
return params
def build(self):
"""Make function layers."""
self.embedding = self._make_default_embedding_layer()
# Context Representation Layer
self.context_LSTM = nn.LSTM(
input_size=self._params['embedding_output_dim'],
hidden_size=self._params['hidden_size'],
num_layers=1,
bidirectional=True,
batch_first=True
)
# Matching Layer
for i in range(1, 9):
setattr(self, f'mp_w{i}',
nn.Parameter(torch.rand(self._params['num_perspective'],
self._params['hidden_size'])))
# Aggregation Layer
self.aggregation_LSTM = nn.LSTM(
input_size=self._params['num_perspective'] * 8,
hidden_size=self._params['hidden_size'],
num_layers=1,
bidirectional=True,
batch_first=True
)
# Prediction Layer
self.pred_fc1 = nn.Linear(
self._params['hidden_size'] * 4,
self._params['hidden_size'] * 2)
self.pred_fc2 = self._make_output_layer(
self._params['hidden_size'] * 2)
# parameters
self.reset_parameters()
def forward(self, inputs):
"""Forward."""
# Word Representation Layer
# (batch, seq_len) -> (batch, seq_len, word_dim)
# [B, L], [B, R]
p, h = inputs['text_left'].long(), inputs['text_right'].long()
# [B, L, D]
# [B, R, D]
p = self.embedding(p)
h = self.embedding(h)
p = self.dropout(p)
h = self.dropout(h)
# Context Representation Layer
# (batch, seq_len, hidden_size * 2)
con_p, _ = self.context_LSTM(p)
con_h, _ = self.context_LSTM(h)
con_p = self.dropout(con_p)
con_h = self.dropout(con_h)
# (batch, seq_len, hidden_size)
con_p_fw, con_p_bw = torch.split(con_p,
self._params['hidden_size'],
dim=-1)
con_h_fw, con_h_bw = torch.split(con_h,
self._params['hidden_size'],
dim=-1)
# 1. Full-Matching
# (batch, seq_len, hidden_size), (batch, hidden_size)
# -> (batch, seq_len, l)
mv_p_full_fw = mp_matching_func(
con_p_fw, con_h_fw[:, -1, :], self.mp_w1)
mv_p_full_bw = mp_matching_func(
con_p_bw, con_h_bw[:, 0, :], self.mp_w2)
mv_h_full_fw = mp_matching_func(
con_h_fw, con_p_fw[:, -1, :], self.mp_w1)
mv_h_full_bw = mp_matching_func(
con_h_bw, con_p_bw[:, 0, :], self.mp_w2)
# 2. Maxpooling-Matching
# (batch, seq_len1, seq_len2, l)
mv_max_fw = mp_matching_func_pairwise(con_p_fw, con_h_fw, self.mp_w3)
mv_max_bw = mp_matching_func_pairwise(con_p_bw, con_h_bw, self.mp_w4)
# (batch, seq_len, l)
mv_p_max_fw, _ = mv_max_fw.max(dim=2)
mv_p_max_bw, _ = mv_max_bw.max(dim=2)
mv_h_max_fw, _ = mv_max_fw.max(dim=1)
mv_h_max_bw, _ = mv_max_bw.max(dim=1)
# 3. Attentive-Matching
# (batch, seq_len1, seq_len2)
att_fw = attention(con_p_fw, con_h_fw)
att_bw = attention(con_p_bw, con_h_bw)
# (batch, seq_len2, hidden_size) -> (batch, 1, seq_len2, hidden_size)
# (batch, seq_len1, seq_len2) -> (batch, seq_len1, seq_len2, 1)
# output: -> (batch, seq_len1, seq_len2, hidden_size)
att_h_fw = con_h_fw.unsqueeze(1) * att_fw.unsqueeze(3)
att_h_bw = con_h_bw.unsqueeze(1) * att_bw.unsqueeze(3)
# (batch, seq_len1, hidden_size) -> (batch, seq_len1, 1, hidden_size)
# (batch, seq_len1, seq_len2) -> (batch, seq_len1, seq_len2, 1)
# output: -> (batch, seq_len1, seq_len2, hidden_size)
att_p_fw = con_p_fw.unsqueeze(2) * att_fw.unsqueeze(3)
att_p_bw = con_p_bw.unsqueeze(2) * att_bw.unsqueeze(3)
# (batch, seq_len1, hidden_size) / (batch, seq_len1, 1)
# output: -> (batch, seq_len1, hidden_size)
att_mean_h_fw = div_with_small_value(
att_h_fw.sum(dim=2),
att_fw.sum(dim=2, keepdim=True))
att_mean_h_bw = div_with_small_value(
att_h_bw.sum(dim=2),
att_bw.sum(dim=2, keepdim=True))
# (batch, seq_len2, hidden_size) / (batch, seq_len2, 1)
# output: -> (batch, seq_len2, hidden_size)
att_mean_p_fw = div_with_small_value(
att_p_fw.sum(dim=1),
att_fw.sum(dim=1, keepdim=True).permute(0, 2, 1))
att_mean_p_bw = div_with_small_value(
att_p_bw.sum(dim=1),
att_bw.sum(dim=1, keepdim=True).permute(0, 2, 1))
# (batch, seq_len, l)
mv_p_att_mean_fw = mp_matching_func(
con_p_fw, att_mean_h_fw, self.mp_w5)
mv_p_att_mean_bw = mp_matching_func(
con_p_bw, att_mean_h_bw, self.mp_w6)
mv_h_att_mean_fw = mp_matching_func(
con_h_fw, att_mean_p_fw, self.mp_w5)
mv_h_att_mean_bw = mp_matching_func(
con_h_bw, att_mean_p_bw, self.mp_w6)
# 4. Max-Attentive-Matching
# (batch, seq_len1, hidden_size)
att_max_h_fw, _ = att_h_fw.max(dim=2)
att_max_h_bw, _ = att_h_bw.max(dim=2)
# (batch, seq_len2, hidden_size)
att_max_p_fw, _ = att_p_fw.max(dim=1)
att_max_p_bw, _ = att_p_bw.max(dim=1)
# (batch, seq_len, l)
mv_p_att_max_fw = mp_matching_func(con_p_fw, att_max_h_fw, self.mp_w7)
mv_p_att_max_bw = mp_matching_func(con_p_bw, att_max_h_bw, self.mp_w8)
mv_h_att_max_fw = mp_matching_func(con_h_fw, att_max_p_fw, self.mp_w7)
mv_h_att_max_bw = mp_matching_func(con_h_bw, att_max_p_bw, self.mp_w8)
# (batch, seq_len, l * 8)
mv_p = torch.cat(
[mv_p_full_fw, mv_p_max_fw, mv_p_att_mean_fw, mv_p_att_max_fw,
mv_p_full_bw, mv_p_max_bw, mv_p_att_mean_bw, mv_p_att_max_bw],
dim=2)
mv_h = torch.cat(
[mv_h_full_fw, mv_h_max_fw, mv_h_att_mean_fw, mv_h_att_max_fw,
mv_h_full_bw, mv_h_max_bw, mv_h_att_mean_bw, mv_h_att_max_bw],
dim=2)
mv_p = self.dropout(mv_p)
mv_h = self.dropout(mv_h)
# Aggregation Layer
# (batch, seq_len, l * 8) -> (2, batch, hidden_size)
_, (agg_p_last, _) = self.aggregation_LSTM(mv_p)
_, (agg_h_last, _) = self.aggregation_LSTM(mv_h)
# 2 * (2, batch, hidden_size) -> 2 * (batch, hidden_size * 2)
# -> (batch, hidden_size * 4)
x = torch.cat(
[agg_p_last.permute(1, 0, 2).contiguous().view(
-1, self._params['hidden_size'] * 2),
agg_h_last.permute(1, 0, 2).contiguous().view(
-1, self._params['hidden_size'] * 2)],
dim=1)
x = self.dropout(x)
# Prediction Layer
x = torch.tanh(self.pred_fc1(x))
x = self.dropout(x)
x = self.pred_fc2(x)
return x
def reset_parameters(self):
"""Init Parameters."""
# Word Representation Layer
# <unk> vectors is randomly initialized
nn.init.uniform_(self.embedding.weight.data[0], -0.1, 0.1)
# Context Representation Layer
nn.init.kaiming_normal_(self.context_LSTM.weight_ih_l0)
nn.init.constant_(self.context_LSTM.bias_ih_l0, val=0)
nn.init.orthogonal_(self.context_LSTM.weight_hh_l0)
nn.init.constant_(self.context_LSTM.bias_hh_l0, val=0)
nn.init.kaiming_normal_(self.context_LSTM.weight_ih_l0_reverse)
nn.init.constant_(self.context_LSTM.bias_ih_l0_reverse, val=0)
nn.init.orthogonal_(self.context_LSTM.weight_hh_l0_reverse)
nn.init.constant_(self.context_LSTM.bias_hh_l0_reverse, val=0)
# Matching Layer
for i in range(1, 9):
w = getattr(self, f'mp_w{i}')
nn.init.kaiming_normal_(w)
# Aggregation Layer
nn.init.kaiming_normal_(self.aggregation_LSTM.weight_ih_l0)
nn.init.constant_(self.aggregation_LSTM.bias_ih_l0, val=0)
nn.init.orthogonal_(self.aggregation_LSTM.weight_hh_l0)
nn.init.constant_(self.aggregation_LSTM.bias_hh_l0, val=0)
nn.init.kaiming_normal_(self.aggregation_LSTM.weight_ih_l0_reverse)
nn.init.constant_(self.aggregation_LSTM.bias_ih_l0_reverse, val=0)
nn.init.orthogonal_(self.aggregation_LSTM.weight_hh_l0_reverse)
nn.init.constant_(self.aggregation_LSTM.bias_hh_l0_reverse, val=0)
# Prediction Layer ----
nn.init.uniform_(self.pred_fc1.weight, -0.005, 0.005)
nn.init.constant_(self.pred_fc1.bias, val=0)
# nn.init.uniform(self.pred_fc2.weight, -0.005, 0.005)
# nn.init.constant(self.pred_fc2.bias, val=0)
def dropout(self, v):
"""Dropout Layer."""
return F.dropout(v, p=self._params['dropout'], training=self.training)
def mp_matching_func(v1, v2, w):
"""
Basic mp_matching_func.
:param v1: (batch, seq_len, hidden_size)
:param v2: (batch, seq_len, hidden_size) or (batch, hidden_size)
:param w: (num_psp, hidden_size)
    :return: (batch, seq_len, num_psp)
"""
seq_len = v1.size(1)
num_psp = w.size(0)
# (1, 1, hidden_size, l)
w = w.transpose(1, 0).unsqueeze(0).unsqueeze(0)
# (batch, seq_len, hidden_size, l)
v1 = w * torch.stack([v1] * num_psp, dim=3)
if len(v2.size()) == 3:
v2 = w * torch.stack([v2] * num_psp, dim=3)
else:
v2 = w * torch.stack(
[torch.stack([v2] * seq_len, dim=1)] * num_psp, dim=3)
m = F.cosine_similarity(v1, v2, dim=2)
return m
def mp_matching_func_pairwise(v1, v2, w):
"""
Basic mp_matching_func_pairwise.
:param v1: (batch, seq_len1, hidden_size)
:param v2: (batch, seq_len2, hidden_size)
:param w: (num_psp, hidden_size)
    :return: (batch, seq_len1, seq_len2, num_psp)
"""
num_psp = w.size(0)
# (1, l, 1, hidden_size)
w = w.unsqueeze(0).unsqueeze(2)
# (batch, l, seq_len, hidden_size)
v1, v2 = (w * torch.stack([v1] * num_psp, dim=1),
w * torch.stack([v2] * num_psp, dim=1))
# (batch, l, seq_len, hidden_size->1)
v1_norm = v1.norm(p=2, dim=3, keepdim=True)
v2_norm = v2.norm(p=2, dim=3, keepdim=True)
# (batch, l, seq_len1, seq_len2)
n = torch.matmul(v1, v2.transpose(2, 3))
d = v1_norm * v2_norm.transpose(2, 3)
# (batch, seq_len1, seq_len2, l)
m = div_with_small_value(n, d).permute(0, 2, 3, 1)
return m
def attention(v1, v2):
"""
Attention.
:param v1: (batch, seq_len1, hidden_size)
:param v2: (batch, seq_len2, hidden_size)
:return: (batch, seq_len1, seq_len2)
"""
# (batch, seq_len1, 1)
v1_norm = v1.norm(p=2, dim=2, keepdim=True)
# (batch, 1, seq_len2)
v2_norm = v2.norm(p=2, dim=2, keepdim=True).permute(0, 2, 1)
# (batch, seq_len1, seq_len2)
a = torch.bmm(v1, v2.permute(0, 2, 1))
d = v1_norm * v2_norm
return div_with_small_value(a, d)
def div_with_small_value(n, d, eps=1e-8):
"""
Small values are replaced by 1e-8 to prevent it from exploding.
:param n: tensor
:param d: tensor
:return: n/d: tensor
"""
d = d * (d > eps).float() + eps * (d <= eps).float()
return n / d
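# Shape sketch for the matching helpers above (an illustration, not used by BiMPM
# itself): with batch=2, seq_len=4, hidden=8 and num_psp=3 perspectives,
# mp_matching_func returns per-position, per-perspective cosine similarities and
# mp_matching_func_pairwise returns them for every position pair.
def _demo_matching_shapes(batch: int = 2, seq_len: int = 4,
                          hidden: int = 8, num_psp: int = 3):
    v1 = torch.randn(batch, seq_len, hidden)
    v2 = torch.randn(batch, hidden)            # e.g. the last forward hidden state
    w = torch.randn(num_psp, hidden)
    full = mp_matching_func(v1, v2, w)          # (batch, seq_len, num_psp)
    pairwise = mp_matching_func_pairwise(
        v1, torch.randn(batch, seq_len, hidden), w)  # (batch, seq_len, seq_len, num_psp)
    assert full.shape == (batch, seq_len, num_psp)
    assert pairwise.shape == (batch, seq_len, seq_len, num_psp)
    return full, pairwise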
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/bimpm.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/bimpm.py",
"repo_id": "ContextualSP",
"token_count": 7150
}
| 276 |
"""An implementation of MVLSTM Model."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.base_callback import BaseCallback
from matchzoo.engine import hyper_spaces
from matchzoo.dataloader import callbacks
class MVLSTM(BaseModel):
"""
MVLSTM Model.
Examples:
>>> model = MVLSTM()
>>> model.params['hidden_size'] = 32
>>> model.params['top_k'] = 50
>>> model.params['mlp_num_layers'] = 2
>>> model.params['mlp_num_units'] = 20
>>> model.params['mlp_num_fan_out'] = 10
>>> model.params['mlp_activation_func'] = 'relu'
>>> model.params['dropout_rate'] = 0.0
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=True
)
params.add(Param(name='hidden_size', value=32,
desc="Integer, the hidden size in the "
"bi-directional LSTM layer."))
params.add(Param(name='num_layers', value=1,
desc="Integer, number of recurrent layers."))
params.add(Param(
'top_k', value=10,
hyper_space=hyper_spaces.quniform(low=2, high=100),
desc="Size of top-k pooling layer."
))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="Float, the dropout rate."
))
return params
@classmethod
def get_default_padding_callback(
cls,
fixed_length_left: int = 10,
fixed_length_right: int = 40,
pad_word_value: typing.Union[int, str] = 0,
pad_word_mode: str = 'pre',
with_ngram: bool = False,
fixed_ngram_length: int = None,
pad_ngram_value: typing.Union[int, str] = 0,
pad_ngram_mode: str = 'pre'
) -> BaseCallback:
"""
Model default padding callback.
The padding callback's on_batch_unpacked would pad a batch of data to
a fixed length.
:return: Default padding callback.
"""
return callbacks.BasicPadding(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_word_value=pad_word_value,
pad_word_mode=pad_word_mode,
with_ngram=with_ngram,
fixed_ngram_length=fixed_ngram_length,
pad_ngram_value=pad_ngram_value,
pad_ngram_mode=pad_ngram_mode
)
def build(self):
"""Build model structure."""
self.embedding = self._make_default_embedding_layer()
self.left_bilstm = nn.LSTM(
input_size=self._params['embedding_output_dim'],
hidden_size=self._params['hidden_size'],
num_layers=self._params['num_layers'],
batch_first=True,
dropout=self._params['dropout_rate'],
bidirectional=True
)
self.right_bilstm = nn.LSTM(
input_size=self._params['embedding_output_dim'],
hidden_size=self._params['hidden_size'],
num_layers=self._params['num_layers'],
batch_first=True,
dropout=self._params['dropout_rate'],
bidirectional=True
)
self.mlp = self._make_multi_layer_perceptron_layer(
self._params['top_k']
)
self.dropout = nn.Dropout(p=self._params['dropout_rate'])
self.out = self._make_output_layer(
self._params['mlp_num_fan_out']
)
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# H = LSTM hidden size
# K = size of top-k
# Left input and right input.
# shape = [B, L]
# shape = [B, R]
query, doc = inputs['text_left'], inputs['text_right']
# Process left and right input.
# shape = [B, L, D]
# shape = [B, R, D]
embed_query = self.embedding(query.long())
embed_doc = self.embedding(doc.long())
# Bi-directional LSTM
# shape = [B, L, 2 * H]
# shape = [B, R, 2 * H]
rep_query, _ = self.left_bilstm(embed_query)
rep_doc, _ = self.right_bilstm(embed_doc)
# Top-k matching
# shape = [B, L, R]
matching_matrix = torch.einsum(
'bld,brd->blr',
F.normalize(rep_query, p=2, dim=-1),
F.normalize(rep_doc, p=2, dim=-1)
)
# shape = [B, L * R]
matching_signals = torch.flatten(matching_matrix, start_dim=1)
# shape = [B, K]
matching_topk = torch.topk(
matching_signals,
k=self._params['top_k'],
dim=-1,
sorted=True
)[0]
# shape = [B, *]
dense_output = self.mlp(matching_topk)
# shape = [B, *]
out = self.out(self.dropout(dense_output))
return out
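# Standalone sketch of the core matching step above (an illustration, independent
# of the MatchZoo embedding/LSTM plumbing): cosine similarities between every
# query/doc position via a normalized einsum, flattened and reduced with top-k
# pooling, exactly as in MVLSTM.forward.
def _demo_topk_matching(batch: int = 2, left_len: int = 5, right_len: int = 7,
                        hidden: int = 16, top_k: int = 10):
    rep_query = F.normalize(torch.randn(batch, left_len, hidden), p=2, dim=-1)
    rep_doc = F.normalize(torch.randn(batch, right_len, hidden), p=2, dim=-1)
    matching_matrix = torch.einsum('bld,brd->blr', rep_query, rep_doc)
    matching_signals = torch.flatten(matching_matrix, start_dim=1)  # [B, L * R]
    matching_topk = torch.topk(matching_signals, k=top_k, dim=-1, sorted=True)[0]
    assert matching_topk.shape == (batch, top_k)
    return matching_topk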
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/mvlstm.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/mvlstm.py",
"repo_id": "ContextualSP",
"token_count": 2721
}
| 277 |
"""Basic Preprocessor."""
from tqdm import tqdm
import typing
from . import units
from matchzoo import DataPack
from matchzoo.engine.base_preprocessor import BasePreprocessor
from .build_vocab_unit import build_vocab_unit
from .build_unit_from_data_pack import build_unit_from_data_pack
from .chain_transform import chain_transform
tqdm.pandas()
class BasicPreprocessor(BasePreprocessor):
"""
    Basic preprocessor helper.
:param truncated_mode: String, mode used by :class:`TruncatedLength`.
Can be 'pre' or 'post'.
    :param truncated_length_left: Integer, maximum length of :attr:`left`
        in the data_pack.
    :param truncated_length_right: Integer, maximum length of :attr:`right`
        in the data_pack.
    :param filter_mode: String, mode used by :class:`FrequencyFilter`. Can
        be 'df', 'cf', and 'idf'.
    :param filter_low_freq: Float, lower bound value used by
        :class:`FrequencyFilter`.
    :param filter_high_freq: Float, upper bound value used by
        :class:`FrequencyFilter`.
:param remove_stop_words: Bool, use :class:`StopRemovalUnit` unit or not.
Example:
>>> import matchzoo as mz
>>> train_data = mz.datasets.toy.load_data('train')
>>> test_data = mz.datasets.toy.load_data('test')
>>> preprocessor = mz.preprocessors.BasicPreprocessor(
... truncated_length_left=10,
... truncated_length_right=20,
... filter_mode='df',
... filter_low_freq=2,
... filter_high_freq=1000,
... remove_stop_words=True
... )
>>> preprocessor = preprocessor.fit(train_data, verbose=0)
>>> preprocessor.context['vocab_size']
226
>>> processed_train_data = preprocessor.transform(train_data,
... verbose=0)
>>> type(processed_train_data)
<class 'matchzoo.data_pack.data_pack.DataPack'>
>>> test_data_transformed = preprocessor.transform(test_data,
... verbose=0)
>>> type(test_data_transformed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
"""
def __init__(self,
truncated_mode: str = 'pre',
truncated_length_left: int = None,
truncated_length_right: int = None,
filter_mode: str = 'df',
filter_low_freq: float = 1,
filter_high_freq: float = float('inf'),
remove_stop_words: bool = False,
ngram_size: typing.Optional[int] = None):
"""Initialization."""
super().__init__()
self._truncated_mode = truncated_mode
self._truncated_length_left = truncated_length_left
self._truncated_length_right = truncated_length_right
if self._truncated_length_left:
self._left_truncatedlength_unit = units.TruncatedLength(
self._truncated_length_left, self._truncated_mode
)
if self._truncated_length_right:
self._right_truncatedlength_unit = units.TruncatedLength(
self._truncated_length_right, self._truncated_mode
)
self._filter_unit = units.FrequencyFilter(
low=filter_low_freq,
high=filter_high_freq,
mode=filter_mode
)
self._units = self._default_units()
if remove_stop_words:
self._units.append(units.stop_removal.StopRemoval())
self._ngram_size = ngram_size
if ngram_size:
self._context['ngram_process_unit'] = units.NgramLetter(
ngram=ngram_size, reduce_dim=True
)
def fit(self, data_pack: DataPack, verbose: int = 1):
"""
Fit pre-processing context for transformation.
:param data_pack: data_pack to be preprocessed.
:param verbose: Verbosity.
:return: class:`BasicPreprocessor` instance.
"""
data_pack = data_pack.apply_on_text(chain_transform(self._units),
verbose=verbose)
fitted_filter_unit = build_unit_from_data_pack(self._filter_unit,
data_pack,
flatten=False,
mode='right',
verbose=verbose)
data_pack = data_pack.apply_on_text(fitted_filter_unit.transform,
mode='right', verbose=verbose)
self._context['filter_unit'] = fitted_filter_unit
vocab_unit = build_vocab_unit(data_pack, verbose=verbose)
self._context['vocab_unit'] = vocab_unit
vocab_size = len(vocab_unit.state['term_index'])
self._context['vocab_size'] = vocab_size
self._context['embedding_input_dim'] = vocab_size
if self._ngram_size:
data_pack = data_pack.apply_on_text(
self._context['ngram_process_unit'].transform,
mode='both',
verbose=verbose
)
ngram_unit = build_vocab_unit(data_pack, verbose=verbose)
self._context['ngram_vocab_unit'] = ngram_unit
self._context['ngram_vocab_size'] = len(
ngram_unit.state['term_index'])
return self
def transform(self, data_pack: DataPack, verbose: int = 1) -> DataPack:
"""
Apply transformation on data, create truncated length representation.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:`DataPack` object.
"""
data_pack = data_pack.copy()
data_pack.apply_on_text(chain_transform(self._units), inplace=True,
verbose=verbose)
data_pack.apply_on_text(self._context['filter_unit'].transform,
mode='right', inplace=True, verbose=verbose)
data_pack.apply_on_text(self._context['vocab_unit'].transform,
mode='both', inplace=True, verbose=verbose)
if self._truncated_length_left:
data_pack.apply_on_text(self._left_truncatedlength_unit.transform,
mode='left', inplace=True, verbose=verbose)
if self._truncated_length_right:
data_pack.apply_on_text(self._right_truncatedlength_unit.transform,
mode='right', inplace=True,
verbose=verbose)
data_pack.append_text_length(inplace=True, verbose=verbose)
data_pack.drop_empty(inplace=True)
return data_pack
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/basic_preprocessor.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/basic_preprocessor.py",
"repo_id": "ContextualSP",
"token_count": 3360
}
| 278 |
import nltk
from .unit import Unit
class Stemming(Unit):
"""
Process unit for token stemming.
:param stemmer: stemmer to use, `porter` or `lancaster`.
"""
def __init__(self, stemmer='porter'):
"""Initialization."""
self.stemmer = stemmer
def transform(self, input_: list) -> list:
"""
Reducing inflected words to their word stem, base or root form.
:param input_: list of string to be stemmed.
"""
if self.stemmer == 'porter':
porter_stemmer = nltk.stem.PorterStemmer()
return [porter_stemmer.stem(token) for token in input_]
        elif self.stemmer == 'lancaster' or self.stemmer == 'krovetz':
            # NOTE: 'krovetz' currently falls back to the Lancaster stemmer.
            lancaster_stemmer = nltk.stem.lancaster.LancasterStemmer()
return [lancaster_stemmer.stem(token) for token in input_]
else:
raise ValueError(
                'Not supported stemmer type: {}'.format(
self.stemmer))
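# Usage sketch (an addition; assumes nltk is installed): the unit maps a list of
# tokens to their stems.
def _demo_stemming():
    unit = Stemming(stemmer='porter')
    # e.g. ['studies', 'studying', 'cries'] -> ['studi', 'studi', 'cri']
    return unit.transform(['studies', 'studying', 'cries'])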
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/stemming.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/stemming.py",
"repo_id": "ContextualSP",
"token_count": 441
}
| 279 |
"""Download file."""
import typing
from pathlib import Path
import os
import hashlib
import shutil
import sys
import tarfile
import time
import zipfile
import collections
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlretrieve
import numpy as np
import matchzoo
class Progbar(object):
"""
Displays a progress bar.
:param target: Total number of steps expected, None if unknown.
:param width: Progress bar width on screen.
:param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
:param stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
:param interval: Minimum visual progress update interval (in seconds).
"""
def __init__(
self,
target,
width=30,
verbose=1,
interval=0.05,
):
"""Init."""
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self._dynamic_display = ((hasattr(sys.stdout,
'isatty') and sys.stdout.isatty()
) or 'ipykernel' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
self._start = time.time()
self._last_update = 0
def update(self, current):
"""Updates the progress bar."""
self._seen_so_far = current
now = time.time()
info = ' - {0:.0f}s'.format(now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and self.target is not
None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
bar = '{2:{0:d}d}/{1} ['.format(
numdigits, self.target, current)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '{0:7d}/Unknown'.format(current)
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = int(time_per_unit * (self.target - current))
if eta > 3600:
eta_format = ('{0:d}:{1:02d}:{2:02d}'.format(
eta // 3600, (eta % 3600) // 60, eta % 60))
elif eta > 60:
eta_format = '{0:d}:{1:02d}'.format(eta // 60, eta % 60)
else:
eta_format = '{0:d}s'.format(eta)
info = ' - ETA: {0}'.format(eta_format)
else:
if time_per_unit >= 1:
info += ' {0:.0f}s/step'.format(time_per_unit)
elif time_per_unit >= 1e-3:
info += ' {0:.0f}ms/step'.format(time_per_unit * 1e3)
else:
info += ' {0:.0f}us/step'.format(time_per_unit * 1e6)
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
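# Usage sketch (an illustration, not used elsewhere in this module): drive the
# progress bar manually by reporting how many units have been processed so far.
def _demo_progbar(total: int = 100):
    bar = Progbar(target=total, verbose=1, interval=0.0)
    for done in range(1, total + 1):
        bar.update(done)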
def _extract_archive(file_path, path='.', archive_format='auto'):
"""
Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
:param file_path: path to the archive file
:param path: path to extract the archive file
:param archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
:return: True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(
fname: str = None,
origin: str = None,
untar: bool = False,
extract: bool = False,
md5_hash: typing.Any = None,
file_hash: typing.Any = None,
hash_algorithm: str = 'auto',
archive_format: str = 'auto',
cache_subdir: typing.Union[Path, str] = 'data',
cache_dir: typing.Union[Path, str] = matchzoo.USER_DATA_DIR,
verbose: int = 1
) -> str:
"""
Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.matchzoo/datasets`, placed in the cache_subdir `data`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.matchzoo/datasets/data/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
:param fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
:param origin: Original URL of the file.
:param untar: Deprecated in favor of 'extract'. Boolean, whether the file
should be decompressed.
:param md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file
for verification.
:param file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
:param cache_subdir: Subdirectory under the cache dir where the file is
saved. If an absolute path `/path/to/folder` is specified the file
will be saved at that location.
:param hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'. The default 'auto' detects
the hash algorithm in use.
    :param extract: True tries extracting the file as an Archive, like tar
        or zip.
:param archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
:param cache_dir: Location to store cached files, when None it defaults to
the [matchzoo.USER_DATA_DIR](~/.matchzoo/datasets).
:param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
:return: Path to the downloaded file.
"""
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.matchzoo')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the file hash '
'does not match the original value of file_hash.'
' We will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(
target=total_size, verbose=verbose)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {} : {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""
Validates a file against a sha256 or md5 hash.
:param fpath: path to the file being validated
:param file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
:param algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
:param chunk_size: Bytes to read at a time, important for large files.
:return: Whether the file is valid.
"""
if ((algorithm == 'sha256') or (algorithm == 'auto' and len(
file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""
Calculates a file sha256 or md5 hash.
:param fpath: path to the file being validated
:param algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
:param chunk_size: Bytes to read at a time, important for large files.
:return: The file hash.
"""
if algorithm == 'sha256':
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
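# Usage sketch (hypothetical path, not part of the original utilities): hash a
# local file and verify it with validate_file, which auto-detects md5 vs sha256
# from the length of the supplied hash string.
def _demo_validate_file(tmp_path: str = '/tmp/matchzoo_hash_demo.txt'):
    with open(tmp_path, 'w') as handle:
        handle.write('hello matchzoo')
    digest = _hash_file(tmp_path, algorithm='sha256')
    assert validate_file(tmp_path, digest) is True            # 64 chars -> sha256
    assert validate_file(tmp_path, 'deadbeef' * 4, algorithm='md5') is False
    return digest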
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/get_file.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/get_file.py",
"repo_id": "ContextualSP",
"token_count": 5793
}
| 280 |
import pytest
import hyperopt.pyll.base
from matchzoo.engine import hyper_spaces
@pytest.fixture(scope='module', params=[
lambda x: x + 2,
lambda x: x - 2,
lambda x: x * 2,
lambda x: x / 2,
lambda x: x // 2,
lambda x: x ** 2,
lambda x: 2 + x,
lambda x: 2 - x,
lambda x: 2 * x,
lambda x: 2 / x,
lambda x: 2 // x,
lambda x: 2 ** x,
lambda x: -x
])
def op(request):
return request.param
@pytest.fixture(scope='module', params=[
hyper_spaces.choice(options=[0, 1]),
hyper_spaces.uniform(low=0, high=10),
hyper_spaces.quniform(low=0, high=10, q=2)
])
def proxy(request):
return request.param
def test_init(proxy):
assert isinstance(proxy.convert('label'), hyperopt.pyll.base.Apply)
def test_op(proxy, op):
assert isinstance(op(proxy).convert('label'), hyperopt.pyll.base.Apply)
def test_str(proxy):
assert isinstance(str(proxy), str)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/engine/test_hyper_spaces.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/engine/test_hyper_spaces.py",
"repo_id": "ContextualSP",
"token_count": 386
}
| 281 |
<jupyter_start><jupyter_code>%run init.ipynb
ranking_task = mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss(num_neg=1))
ranking_task.metrics = [
mz.metrics.NormalizedDiscountedCumulativeGain(k=3),
mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
mz.metrics.MeanAveragePrecision()
]
preprocessor = mz.models.aNMM.get_default_preprocessor()
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=300)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=2,
num_neg=1
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed
)
padding_callback = mz.models.aNMM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
sort=False,
shuffle=True,
callback=padding_callback,
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
sort=False,
shuffle=False,
callback=padding_callback,
)
model = mz.models.aNMM()
model.params['task'] = ranking_task
model.params['embedding'] = embedding_matrix
model.params['dropout_rate'] = 0.1
model.build()
print(model, sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adam(model.parameters(), lr = 3e-4)
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=15
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/anmm.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/anmm.ipynb",
"repo_id": "ContextualSP",
"token_count": 814
}
| 282 |
Coming soon.
|
ContextualSP/qaap/README.md/0
|
{
"file_path": "ContextualSP/qaap/README.md",
"repo_id": "ContextualSP",
"token_count": 3
}
| 283 |
set model_file=checkpoints_sparc/sparc_concat_none_model
set validation_file=dataset_sparc/dev.json
set validation_out_file=dataset_sparc/dev.jsonl
set prediction_out_file=predict.jsonl
python postprocess.py --valid_file %validation_file% --valid_out_file %validation_out_file%
allennlp predict ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
--include-package predictor.sparc_predictor ^
--predictor sparc ^
--dataset-reader-choice validation ^
--batch-size 1 ^
--cuda-device 0 ^
--output-file %model_file%/%prediction_out_file% ^
%model_file%/model.tar.gz %validation_out_file%
|
ContextualSP/semantic_parsing_in_context/bash_files/windows/predict.bat/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/windows/predict.bat",
"repo_id": "ContextualSP",
"token_count": 225
}
| 284 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict
from typing import List
from typing import Tuple
import editdistance
import numpy as np
from allennlp.common.checks import ConfigurationError
from allennlp.data import TokenIndexer, Tokenizer
from allennlp.data.fields.knowledge_graph_field import KnowledgeGraphField
from allennlp.data.tokenizers.token import Token
from allennlp.semparse.contexts.knowledge_graph import KnowledgeGraph
from overrides import overrides
from context.grammar import Grammar, Action
from context.world import SparcWorld
"""
Code mainly borrowed from https://github.com/benbogin/spider-schema-gnn
"""
class SparcKnowledgeGraphField(KnowledgeGraphField):
"""
This implementation calculates all non-graph-related features (i.e. no related_column),
then takes each one of the features to calculate related column features, by taking the max score of all neighbours
"""
def __init__(self,
knowledge_graph: KnowledgeGraph,
utterance_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
tokenizer: Tokenizer = None,
bert_mode: str = "v0",
feature_extractors: List[str] = None,
entity_tokens: List[List[Token]] = None,
linking_features: List[List[List[float]]] = None,
include_in_vocab: bool = True,
max_table_tokens: int = None) -> None:
if bert_mode == "v0":
feature_extractors = feature_extractors if feature_extractors is not None else [
# 'number_token_match',
'exact_token_match',
'contains_exact_token_match',
'lemma_match',
'contains_lemma_match',
'edit_distance',
'span_overlap_fraction',
'span_lemma_overlap_fraction']
else:
feature_extractors = feature_extractors if feature_extractors is not None else [
# 'number_token_match',
'exact_token_match',
'contains_exact_token_match',
'lemma_match',
'contains_lemma_match',
'edit_distance',
'span_overlap_fraction',
'span_lemma_overlap_fraction']
super().__init__(knowledge_graph, utterance_tokens, token_indexers,
tokenizer=tokenizer, feature_extractors=feature_extractors, entity_tokens=entity_tokens,
linking_features=linking_features, include_in_vocab=include_in_vocab,
max_table_tokens=max_table_tokens)
self.linking_features = self._compute_related_linking_features(self.linking_features)
# hack needed to fix calculation of feature extractors in the inherited as_tensor method
self._feature_extractors = feature_extractors * 2
def _compute_related_linking_features(self,
non_related_features: List[List[List[float]]]) -> List[List[List[float]]]:
linking_features = non_related_features
entity_to_index_map = {}
for entity_id, entity in enumerate(self.knowledge_graph.entities):
entity_to_index_map[entity] = entity_id
for entity_id, (entity, entity_text) in enumerate(zip(self.knowledge_graph.entities, self.entity_texts)):
# FIXME: if [CLS] and [SEP] in entity_text, remove them for cleaning features
for token_index, token in enumerate(self.utterance_tokens):
entity_token_features = linking_features[entity_id][token_index]
for feature_index, feature_extractor in enumerate(self._feature_extractors):
neighbour_features = []
for neighbor in self.knowledge_graph.neighbors[entity]:
# we only care about table/columns relations here, not foreign-primary
if entity.startswith('column') and neighbor.startswith('column'):
continue
neighbor_index = entity_to_index_map[neighbor]
neighbour_features.append(non_related_features[neighbor_index][token_index][feature_index])
entity_token_features.append(max(neighbour_features))
return linking_features
@overrides
def _edit_distance(self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token]) -> float:
entity_text = ' '.join(e.text for e in entity_text)
edit_distance = float(editdistance.eval(entity_text, token.text))
# normalize length
maximum_len = max(len(entity_text), len(token.text))
return 1.0 - edit_distance / maximum_len
@overrides
def empty_field(self) -> 'SparcKnowledgeGraphField':
# TODO: HACK the error. We use utterance mask to judge whether the position is masked, not the KG field.
return self
def index_entity_type(world: SparcWorld):
column_type_ids = ['@@PAD@@',
'boolean', 'foreign', 'number', 'others', 'primary', 'text', 'time', 'string', 'table']
    # 9 entity types plus the @@PAD@@ placeholder, 10 ids in total
assert len(column_type_ids) == 10
# record the entity index
entity_type_indices = []
for entity_index, entity in enumerate(world.db_context.knowledge_graph.entities):
parts = entity.split(':')
entity_main_type = parts[0]
if entity_main_type == 'column' or entity_main_type == 'string' or entity_main_type == 'table':
if entity_main_type in column_type_ids:
entity_type = column_type_ids.index(entity_main_type)
else:
column_type = parts[1]
entity_type = column_type_ids.index(column_type)
else:
raise ConfigurationError("Get the unknown entity: {}".format(entity))
# TODO: 0 for padding
entity_type_indices.append(entity_type)
return np.array(entity_type_indices)
def find_start_end(cus_list, pattern, min_start=0) -> Tuple:
"""
Find the start & end of pattern in cus_list. If none, return 0,0.
:param cus_list:
:param pattern:
:param min_start: at least from which position to match
:return:
"""
for i in range(len(cus_list)):
if i < min_start:
continue
if cus_list[i] == pattern[0] and cus_list[i:i + len(pattern)] == pattern:
return i, i + len(pattern)
return 0, 0
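# Quick illustration (an addition, not used elsewhere): find_start_end locates the
# first occurrence of `pattern` as a contiguous slice of `cus_list`, optionally
# starting the search at `min_start`; (0, 0) means "not found".
def _demo_find_start_end():
    seq = ['a', 'b', 'c', 'a', 'b']
    assert find_start_end(seq, ['a', 'b']) == (0, 2)
    assert find_start_end(seq, ['a', 'b'], min_start=1) == (3, 5)
    assert find_start_end(seq, ['z']) == (0, 0)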
def diff_tree(precedent_action_seq: List[Action],
action_seq: List[Action],
copy_rule_dict: Dict[int, Action],
ret_tree: bool = True):
"""
:param precedent_action_seq:
:param action_seq:
:param copy_rule_dict:
:param ret_tree: if return True, return the segment-level supervision; else return the token-level supervision.
:return:
"""
copy_subtree_list = []
action_seq_with_copy = []
precedent_tree = Grammar.extract_all_subtree(precedent_action_seq)
cur_tree = Grammar.extract_all_subtree(action_seq)
precedent_tree_match = [False] * len(precedent_tree)
cur_tree_match = [False] * len(cur_tree)
action_seq_match = [False] * len(action_seq)
precedent_action_seq_match = [False] * len(precedent_action_seq)
for pre_ind in range(len(precedent_tree)):
# we will change precedent_tree in the process
pre_sub_tree = precedent_tree[pre_ind]
# has matched, continue
if precedent_tree_match[pre_ind]:
continue
for cur_ind in range(len(cur_tree)):
cur_sub_tree = cur_tree[cur_ind]
# has matched, continue
if cur_tree_match[cur_ind]:
continue
if str(pre_sub_tree) == str(cur_sub_tree):
# find cur_sub_tree start/end in action_seq, and its corresponding str
cur_start = 0
pre_start = 0
while True:
cur_start, cur_end = find_start_end(action_seq, cur_sub_tree, min_start=cur_start)
pre_start, pre_end = find_start_end(precedent_action_seq, cur_sub_tree, min_start=pre_start)
pre_used = True in precedent_action_seq_match[pre_start: pre_end]
cur_used = True in action_seq_match[cur_start: cur_end]
if not pre_used and not cur_used:
break
elif pre_used and cur_used:
pre_start += 1
cur_start += 1
elif pre_used:
pre_start += 1
elif cur_used:
cur_start += 1
if cur_end != 0 and pre_end != 0:
# record the precedent copy index
copy_subtree_list.append((cur_start, cur_end, pre_ind, pre_start, pre_end))
# make all the subtrees marked as True
for ind in range(cur_start, cur_end):
action_seq_match[ind] = True
for ind in range(pre_start, pre_end):
precedent_action_seq_match[ind] = True
# mark the pre_ind and fol_ind as True
precedent_tree_match[pre_ind] = True
cur_tree_match[cur_ind] = True
# sort copy_subtree_list via start idx
copy_subtree_list = sorted(copy_subtree_list, key=lambda x: x[0])
ind = 0
copy_pointer = 0
while ind < len(action_seq):
if action_seq_match[ind] is False:
# add original action
action_seq_with_copy.append(action_seq[ind])
ind += 1
else:
cur_start, cur_end, pre_ind, pre_start, pre_end = copy_subtree_list[copy_pointer]
assert cur_start == ind
if ret_tree:
action_seq_with_copy.append(copy_rule_dict[pre_ind])
else:
for i in range(pre_start, pre_end):
action_seq_with_copy.append(copy_rule_dict[i])
ind = cur_end
copy_pointer += 1
return action_seq_with_copy
|
ContextualSP/semantic_parsing_in_context/dataset_reader/util.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/dataset_reader/util.py",
"repo_id": "ContextualSP",
"token_count": 4858
}
| 285 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from typing import Tuple
def get_span_representation(forward_encoder_out, backward_encoder_out, span_start, span_end):
"""
    Given a span's start/end positions, build the span representation by subtracting LSTM states and concatenating the forward and backward results.
    """
    # span_end is exclusive, so shift it to point at the last token inside the span
span_end -= 1
forward_span_repr = get_forward_span_repr(forward_encoder_out, span_start, span_end)
backward_span_repr = get_backward_span_repr(backward_encoder_out, span_start, span_end)
# cat two representations
span_repr = torch.cat((forward_span_repr, backward_span_repr))
return span_repr
def get_forward_span_repr(forward_encoder_out, span_start, span_end):
"""
Get forward span representation
"""
if span_end >= len(forward_encoder_out):
span_end = len(forward_encoder_out) - 1
assert span_start <= span_end
if span_start == 0:
forward_span_repr = forward_encoder_out[span_end]
else:
forward_span_repr = forward_encoder_out[span_end] - forward_encoder_out[span_start - 1]
return forward_span_repr
def get_backward_span_repr(backward_encoder_out, span_start, span_end):
"""
Get backward span representation
"""
assert span_start <= span_end
if span_end >= len(backward_encoder_out) - 1:
backward_span_repr = backward_encoder_out[span_start]
else:
backward_span_repr = backward_encoder_out[span_start] - backward_encoder_out[span_end + 1]
return backward_span_repr
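# A minimal shape sketch (illustrative sizes, not taken from the real model):
#   fwd = torch.randn(10, 200)   # forward LSTM states, one per position
#   bwd = torch.randn(10, 200)   # backward LSTM states
#   get_span_representation(fwd, bwd, span_start=2, span_end=5).shape == torch.Size([400])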
def find_start_end(cus_list, pattern) -> Tuple:
"""
    Find the start & end positions of `pattern` in `cus_list`. If not found, return (0, 0).
    :param cus_list: sequence to search in
    :param pattern: sub-sequence to look for
    :return: (start, end) indices, where end is exclusive
"""
for i in range(len(cus_list)):
if cus_list[i] == pattern[0] and cus_list[i:i + len(pattern)] == pattern:
return i, i + len(pattern)
return 0, 0
|
ContextualSP/semantic_parsing_in_context/models/util.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/models/util.py",
"repo_id": "ContextualSP",
"token_count": 751
}
| 286 |
{
"random_seed": 42,
"numpy_seed": 42,
"pytorch_seed": 42,
"dataset_reader": {
"type": "sparc",
"lazy": false,
"loading_limit": -1,
"context_mode": "turn",
"bert_mode": "v3",
"utterance_token_indexers": {
"bert": {
"type": "bert-pretrained",
"pretrained_model": "bert-base-uncased",
"do_lowercase": true,
"never_lowercase": [
"[UNK]",
"[SEP]",
"[PAD]",
"[CLS]",
"[MASK]"
],
"use_starting_offsets": false,
"truncate_long_sequences": false
}
}
},
"model": {
"type": "sparc",
"loss_mask": 8,
"serialization_dir": "",
"text_embedder": {
"bert": {
"type": "bert-pretrained",
"pretrained_model": "bert-base-uncased",
"top_layer_only": true,
"requires_grad": true
},
"allow_unmatched_keys": true,
"embedder_to_indexer_map": {
"bert": [
"bert",
"bert-offsets",
"bert-type-ids"
]
}
},
"action_embedding_dim": 100,
"entity_embedding_dim": 768,
"discourse_output_dim": 100,
"text_encoder": {
"type": "lstm",
"input_size": 868,
"hidden_size": 200,
"bidirectional": true,
"num_layers": 1
},
"decoder_beam_search": {
"beam_size": 10
},
"training_beam_size": 1,
"max_decoding_steps": 100,
"input_attention": {
"type": "dot_product"
},
"dropout_rate": 0.5,
"bert_mode": "v3",
"use_schema_encoder": true,
"use_feature_score": true,
"use_linking_embedding": true,
"use_discourse_encoder": true,
"use_attend_over_history": true,
"use_turn_position": true
},
"iterator": {
"type": "basic",
"batch_size": 1
},
"validation_iterator": {
"type": "basic",
"batch_size": 1
},
"trainer": {
"num_epochs": 100,
"cuda_device": 0,
"patience": 10,
"validation_metric": "+sql_exact_match",
"optimizer": {
"type": "adam",
"parameter_groups": [
[
[
".*text_embedder.*"
],
{
"lr": 1e-5
}
]
],
"lr": 1e-3
},
"learning_rate_scheduler": {
"type": "reduce_on_plateau",
"factor": 0.5,
"mode": "max",
"patience": 5
},
"num_serialized_models_to_keep": 10,
"should_log_learning_rate": true
}
}
|
ContextualSP/semantic_parsing_in_context/train_configs_bert/turn.none.jsonnet/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/train_configs_bert/turn.none.jsonnet",
"repo_id": "ContextualSP",
"token_count": 1110
}
| 287 |
#!/bin/bash
# Requirements:
#./data/spider
#./BART-large
# data/spider -> data/spider_schema_linking_tag
python step1_schema_linking.py --dataset=spider
# data/spider_schema_linking_tag -> dataset_post/spider_sl
python step2_serialization.py
### Training
python train.py \
--dataset_path ./dataset_post/spider_sl/bin/ \
--exp_name spider_sl_v1 \
--models_path ./models \
--total_num_update 10000 \
--max_tokens 1024 \
    --bart_model_path ./data/BART-large
### Evaluate
python step3_evaluate.py --constrain
|
ContextualSP/unified_parser_text_to_sql/running_pipeline.sh/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/running_pipeline.sh",
"repo_id": "ContextualSP",
"token_count": 202
}
| 288 |
## Data Preprocess
#### Get Parsed SQL Output
The SQL parsing script is `process_sql.py` in the main directory. Please refer to `parsed_sql_examples.sql` for the explanation of some parsed SQL output examples.
If you would like to use `process_sql.py` to parse SQL queries yourself, `parse_sql_one.py` provides an example of how the script is called. Alternatively, you can use `parse_raw_json.py` to update all parsed SQL results (the value of `sql`) in `train.json` and `dev.json`.
#### Get Table Info from Database
To generate the final `tables.json` file, run the command below. It reads sqlite files from the `database/` dir and the previous `tables.json` with hand-corrected names:
```
python process/get_tables.py [dir includes many subdirs containing database.sqlite files] [output file name e.g. output.json] [existing tables.json file to be inherited]
```
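For example, assuming the sqlite databases sit under `database/` and the hand-corrected schema file is the existing `tables.json` (the output file name below is illustrative):
```
python process/get_tables.py database/ tables_new.json tables.json
```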
|
ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/README.md/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/README.md",
"repo_id": "ContextualSP",
"token_count": 238
}
| 289 |
import math
import sys
from typing import Iterable, Optional
from timm.utils.model import unwrap_model
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from lib import utils
import random
import time
def sample_configs(choices):
config = {}
dimensions = ['mlp_ratio', 'num_heads']
depth = random.choice(choices['depth'])
for dimension in dimensions:
config[dimension] = [random.choice(choices[dimension]) for _ in range(depth)]
config['embed_dim'] = [random.choice(choices['embed_dim'])]*depth
config['layer_num'] = depth
return config
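# Illustrative sketch of the expected `choices` search space and a possible sampled config
# (the concrete values below are made up, not the actual AutoFormer search space):
#   choices = {'depth': [12, 13], 'mlp_ratio': [3.5, 4.0],
#              'num_heads': [3, 4], 'embed_dim': [192, 216]}
#   sample_configs(choices) could then return, e.g. for a sampled depth of 12:
#   {'mlp_ratio': [...12 values...], 'num_heads': [...12 values...],
#    'embed_dim': [216] * 12, 'layer_num': 12}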
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,
amp: bool = True, teacher_model: torch.nn.Module = None,
teach_loss: torch.nn.Module = None, choices=None, mode='super', retrain_config=None):
model.train()
criterion.train()
# set random seed
random.seed(epoch)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
if mode == 'retrain':
config = retrain_config
model_module = unwrap_model(model)
print(config)
model_module.set_sample_config(config=config)
print(model_module.get_sampled_params_numel(config))
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
# sample random config
if mode == 'super':
config = sample_configs(choices=choices)
model_module = unwrap_model(model)
model_module.set_sample_config(config=config)
elif mode == 'retrain':
config = retrain_config
model_module = unwrap_model(model)
model_module.set_sample_config(config=config)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if amp:
with torch.cuda.amp.autocast():
if teacher_model:
with torch.no_grad():
teach_output = teacher_model(samples)
_, teacher_label = teach_output.topk(1, 1, True, True)
outputs = model(samples)
loss = 1/2 * criterion(outputs, targets) + 1/2 * teach_loss(outputs, teacher_label.squeeze())
else:
outputs = model(samples)
loss = criterion(outputs, targets)
else:
outputs = model(samples)
if teacher_model:
with torch.no_grad():
teach_output = teacher_model(samples)
_, teacher_label = teach_output.topk(1, 1, True, True)
loss = 1 / 2 * criterion(outputs, targets) + 1 / 2 * teach_loss(outputs, teacher_label.squeeze())
else:
loss = criterion(outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
if amp:
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
else:
loss.backward()
optimizer.step()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device, amp=True, choices=None, mode='super', retrain_config=None):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
if mode == 'super':
config = sample_configs(choices=choices)
model_module = unwrap_model(model)
model_module.set_sample_config(config=config)
else:
config = retrain_config
model_module = unwrap_model(model)
model_module.set_sample_config(config=config)
print("sampled model config: {}".format(config))
parameters = model_module.get_sampled_params_numel(config)
print("sampled model parameters: {}".format(parameters))
for images, target in metric_logger.log_every(data_loader, 10, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
if amp:
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
else:
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
Cream/AutoFormer/supernet_engine.py/0
|
{
"file_path": "Cream/AutoFormer/supernet_engine.py",
"repo_id": "Cream",
"token_count": 2771
}
| 290 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
lib_path = osp.join(this_dir, '..')
add_path(lib_path)
|
Cream/CDARTS/CDARTS/_init_paths.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS/_init_paths.py",
"repo_id": "Cream",
"token_count": 148
}
| 291 |
import numpy as np
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
"""Quantize an array of (-inf, inf) to [0, levels-1].
Args:
arr (ndarray): Input array.
min_val (scalar): Minimum value to be clipped.
max_val (scalar): Maximum value to be clipped.
levels (int): Quantization levels.
dtype (np.type): The type of the quantized array.
Returns:
        ndarray: Quantized array.
"""
if not (isinstance(levels, int) and levels > 1):
raise ValueError(
'levels must be a positive integer, but got {}'.format(levels))
if min_val >= max_val:
raise ValueError(
'min_val ({}) must be smaller than max_val ({})'.format(
min_val, max_val))
arr = np.clip(arr, min_val, max_val) - min_val
quantized_arr = np.minimum(
np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1)
return quantized_arr
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
"""Dequantize an array.
Args:
arr (ndarray): Input array.
min_val (scalar): Minimum value to be clipped.
max_val (scalar): Maximum value to be clipped.
levels (int): Quantization levels.
dtype (np.type): The type of the dequantized array.
Returns:
        ndarray: Dequantized array.
"""
if not (isinstance(levels, int) and levels > 1):
raise ValueError(
'levels must be a positive integer, but got {}'.format(levels))
if min_val >= max_val:
raise ValueError(
'min_val ({}) must be smaller than max_val ({})'.format(
min_val, max_val))
dequantized_arr = (arr + 0.5).astype(dtype) * (max_val -
min_val) / levels + min_val
return dequantized_arr
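# A quick worked example (doctest-style; values follow directly from the formulas above):
#   >>> quantize(np.array([-0.1, 0.0, 0.5, 1.2]), 0, 1, 4)
#   array([0, 0, 2, 3])
#   >>> dequantize(np.array([0, 0, 2, 3]), 0, 1, 4)
#   array([0.125, 0.125, 0.625, 0.875])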
|
Cream/CDARTS/CDARTS_detection/mmcv/arraymisc/quantization.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/arraymisc/quantization.py",
"repo_id": "Cream",
"token_count": 815
}
| 292 |
from .colorspace import (bgr2gray, gray2bgr, bgr2rgb, rgb2bgr, bgr2hsv,
hsv2bgr, bgr2hls, hls2bgr, iminvert)
from .geometry import imflip, imrotate, imcrop, impad, impad_to_multiple
from .normalize import imnormalize, imdenormalize
from .resize import imresize, imresize_like, imrescale
__all__ = [
'bgr2gray', 'gray2bgr', 'bgr2rgb', 'rgb2bgr', 'bgr2hsv', 'hsv2bgr',
'bgr2hls', 'hls2bgr', 'iminvert', 'imflip', 'imrotate', 'imcrop', 'impad',
'impad_to_multiple', 'imnormalize', 'imdenormalize', 'imresize',
'imresize_like', 'imrescale'
]
|
Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/image/transforms/__init__.py",
"repo_id": "Cream",
"token_count": 279
}
| 293 |
from .hook import Hook
from .checkpoint import CheckpointHook
from .closure import ClosureHook
from .lr_updater import LrUpdaterHook
from .optimizer import OptimizerHook, OptimizerArchHook
from .iter_timer import IterTimerHook
from .sampler_seed import DistSamplerSeedHook
from .memory import EmptyCacheHook
from .logger import (LoggerHook, TextLoggerHook, PaviLoggerHook,
TensorboardLoggerHook)
__all__ = [
'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', 'OptimizerHook', 'OptimizerArchHook',
'IterTimerHook', 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook',
'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook'
]
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/__init__.py",
"repo_id": "Cream",
"token_count": 257
}
| 294 |
from enum import Enum
class Priority(Enum):
"""Hook priority levels.
+------------+------------+
| Level | Value |
+============+============+
| HIGHEST | 0 |
+------------+------------+
| VERY_HIGH | 10 |
+------------+------------+
| HIGH | 30 |
+------------+------------+
| NORMAL | 50 |
+------------+------------+
| LOW | 70 |
+------------+------------+
| VERY_LOW | 90 |
+------------+------------+
| LOWEST | 100 |
+------------+------------+
"""
HIGHEST = 0
VERY_HIGH = 10
HIGH = 30
NORMAL = 50
LOW = 70
VERY_LOW = 90
LOWEST = 100
def get_priority(priority):
"""Get priority value.
Args:
priority (int or str or :obj:`Priority`): Priority.
Returns:
int: The priority value.
"""
if isinstance(priority, int):
if priority < 0 or priority > 100:
raise ValueError('priority must be between 0 and 100')
return priority
elif isinstance(priority, Priority):
return priority.value
elif isinstance(priority, str):
return Priority[priority.upper()].value
else:
        raise TypeError('priority must be an integer, a string, or a Priority enum value')
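# Usage sketch (the results follow directly from the table above):
#   get_priority(20) == 20
#   get_priority('NORMAL') == 50
#   get_priority(Priority.HIGH) == 30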
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/priority.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/priority.py",
"repo_id": "Cream",
"token_count": 562
}
| 295 |
/* Generated by Cython 0.27.3 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_27_3"
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef __cplusplus
#error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#else
#define CYTHON_INLINE inline
#endif
#endif
template<typename T>
void __Pyx_call_destructor(T& x) {
x.~T();
}
template<typename T>
class __Pyx_FakeReference {
public:
__Pyx_FakeReference() : ptr(NULL) { }
__Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
T *operator->() { return ptr; }
T *operator&() { return ptr; }
operator T&() { return *ptr; }
template<typename U> bool operator ==(U other) { return *ptr == other; }
template<typename U> bool operator !=(U other) { return *ptr != other; }
private:
T *ptr;
};
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__mmcv___ext
#define __PYX_HAVE_API__mmcv___ext
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "flow_warp.hpp"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c));
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"mmcv/video/optflow_warp/flow_warp_module.pyx",
"__init__.pxd",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* BufferGetAndValidate.proto */
#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\
((obj == Py_None || obj == NULL) ?\
(__Pyx_ZeroBuffer(buf), 0) :\
__Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static void __Pyx_ZeroBuffer(Py_buffer* buf);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args))
PyErr_SetObject(PyExc_KeyError, args);
Py_XDECREF(args);
}
return NULL;
}
Py_INCREF(value);
return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* PyIdentifierFromString.proto */
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/* ModuleImport.proto */
static PyObject *__Pyx_ImportModule(const char *name);
/* TypeImport.proto */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void); /*proto*/
/* Module declarations from 'mmcv._ext' */
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "mmcv._ext"
extern int __pyx_module_is_main_mmcv___ext;
int __pyx_module_is_main_mmcv___ext = 0;
/* Implementation of 'mmcv._ext' */
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_builtin_ImportError;
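/* The C string literals below are interned into Python objects at module
 * initialization (via __Pyx_InitStrings) and reused for attribute names,
 * keyword arguments, the module path and error messages. */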
static const char __pyx_k_Hi[] = "Hi";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_STUFF[] = "STUFF";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_img_array[] = "img_array";
static const char __pyx_k_mmcv__ext[] = "mmcv._ext";
static const char __pyx_k_out_array[] = "out_array";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_flow_array[] = "flow_array";
static const char __pyx_k_zeros_like[] = "zeros_like";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_flow_warp_c[] = "flow_warp_c";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_filling_value[] = "filling_value";
static const char __pyx_k_interpolate_mode[] = "interpolate_mode";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_mmcv_video_optflow_warp_flow_war[] = "mmcv/video/optflow_warp/flow_warp_module.pyx";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_n_s_Hi;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_STUFF;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_filling_value;
static PyObject *__pyx_n_s_flow_array;
static PyObject *__pyx_n_s_flow_warp_c;
static PyObject *__pyx_n_s_img_array;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_interpolate_mode;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_mmcv__ext;
static PyObject *__pyx_kp_s_mmcv_video_optflow_warp_flow_war;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_out_array;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_zeros_like;
static PyObject *__pyx_pf_4mmcv_4_ext_flow_warp_c(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_img_array, PyArrayObject *__pyx_v_flow_array, int __pyx_v_filling_value, int __pyx_v_interpolate_mode); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_codeobj__11;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":11
* void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode)
*
* def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<<
* np.ndarray[double, ndim=3, mode="c"] flow_array not None,
* int filling_value=0,
*/
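/* A minimal usage sketch of the Python-level API generated below, assuming the
 * extension has been built and is importable as mmcv._ext. The 2-channel flow
 * shape and the concrete sizes are illustrative assumptions; the signature only
 * requires C-contiguous float64 arrays with ndim=3.
 *
 *     import numpy as np
 *     from mmcv import _ext
 *
 *     img = np.random.rand(240, 320, 3)    # float64, C-contiguous, ndim=3
 *     flow = np.random.rand(240, 320, 2)   # assumed per-pixel (dx, dy) field
 *     warped = _ext.flow_warp_c(img, flow, filling_value=0, interpolate_mode=1)
 *     assert warped.shape == img.shape     # output allocated via np.zeros_like(img)
 */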
/* Python wrapper */
static PyObject *__pyx_pw_4mmcv_4_ext_1flow_warp_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_4mmcv_4_ext_1flow_warp_c = {"flow_warp_c", (PyCFunction)__pyx_pw_4mmcv_4_ext_1flow_warp_c, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_4mmcv_4_ext_1flow_warp_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_img_array = 0;
PyArrayObject *__pyx_v_flow_array = 0;
int __pyx_v_filling_value;
int __pyx_v_interpolate_mode;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("flow_warp_c (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_img_array,&__pyx_n_s_flow_array,&__pyx_n_s_filling_value,&__pyx_n_s_interpolate_mode,0};
PyObject* values[4] = {0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_img_array)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flow_array)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("flow_warp_c", 0, 2, 4, 1); __PYX_ERR(0, 11, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_filling_value);
if (value) { values[2] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_interpolate_mode);
if (value) { values[3] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flow_warp_c") < 0)) __PYX_ERR(0, 11, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_img_array = ((PyArrayObject *)values[0]);
__pyx_v_flow_array = ((PyArrayObject *)values[1]);
if (values[2]) {
__pyx_v_filling_value = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_filling_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13, __pyx_L3_error)
} else {
__pyx_v_filling_value = ((int)0);
}
if (values[3]) {
__pyx_v_interpolate_mode = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_interpolate_mode == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14, __pyx_L3_error)
} else {
__pyx_v_interpolate_mode = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("flow_warp_c", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 11, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("mmcv._ext.flow_warp_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_img_array), __pyx_ptype_5numpy_ndarray, 0, "img_array", 0))) __PYX_ERR(0, 11, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_flow_array), __pyx_ptype_5numpy_ndarray, 0, "flow_array", 0))) __PYX_ERR(0, 12, __pyx_L1_error)
__pyx_r = __pyx_pf_4mmcv_4_ext_flow_warp_c(__pyx_self, __pyx_v_img_array, __pyx_v_flow_array, __pyx_v_filling_value, __pyx_v_interpolate_mode);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_4mmcv_4_ext_flow_warp_c(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_img_array, PyArrayObject *__pyx_v_flow_array, int __pyx_v_filling_value, int __pyx_v_interpolate_mode) {
PyObject *__pyx_v_out_array = NULL;
__Pyx_LocalBuf_ND __pyx_pybuffernd_flow_array;
__Pyx_Buffer __pyx_pybuffer_flow_array;
__Pyx_LocalBuf_ND __pyx_pybuffernd_img_array;
__Pyx_Buffer __pyx_pybuffer_img_array;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
__Pyx_RefNannySetupContext("flow_warp_c", 0);
__pyx_pybuffer_img_array.pybuffer.buf = NULL;
__pyx_pybuffer_img_array.refcount = 0;
__pyx_pybuffernd_img_array.data = NULL;
__pyx_pybuffernd_img_array.rcbuffer = &__pyx_pybuffer_img_array;
__pyx_pybuffer_flow_array.pybuffer.buf = NULL;
__pyx_pybuffer_flow_array.refcount = 0;
__pyx_pybuffernd_flow_array.data = NULL;
__pyx_pybuffernd_flow_array.rcbuffer = &__pyx_pybuffer_flow_array;
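/* Acquire and validate Py_buffer views of both input arrays (float64, 3-D,
 * C-contiguous) and cache the per-dimension shape/stride information. */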
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_img_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_img_array, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 11, __pyx_L1_error)
}
__pyx_pybuffernd_img_array.diminfo[0].strides = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.strides[0];
__pyx_pybuffernd_img_array.diminfo[0].shape = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.shape[0];
__pyx_pybuffernd_img_array.diminfo[1].strides = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.strides[1];
__pyx_pybuffernd_img_array.diminfo[1].shape = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.shape[1];
__pyx_pybuffernd_img_array.diminfo[2].strides = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.strides[2];
__pyx_pybuffernd_img_array.diminfo[2].shape = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.shape[2];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_flow_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_flow_array, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 11, __pyx_L1_error)
}
__pyx_pybuffernd_flow_array.diminfo[0].strides = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.strides[0];
__pyx_pybuffernd_flow_array.diminfo[0].shape = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.shape[0];
__pyx_pybuffernd_flow_array.diminfo[1].strides = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.strides[1];
__pyx_pybuffernd_flow_array.diminfo[1].shape = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.shape[1];
__pyx_pybuffernd_flow_array.diminfo[2].strides = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.strides[2];
__pyx_pybuffernd_flow_array.diminfo[2].shape = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.shape[2];
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":16
* int interpolate_mode=1):
*
* out_array = np.zeros_like(img_array) # <<<<<<<<<<<<<<
*
* FlowWarp(<double*> np.PyArray_DATA(img_array),
*/
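/* np.zeros_like is looked up in the module globals and invoked through the
 * fastest available call path (PyFunction/PyCFunction fast call when enabled,
 * otherwise a packed argument tuple passed to __Pyx_PyObject_Call). */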
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, ((PyObject *)__pyx_v_img_array)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_img_array)};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_img_array)};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else
#endif
{
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL;
__Pyx_INCREF(((PyObject *)__pyx_v_img_array));
__Pyx_GIVEREF(((PyObject *)__pyx_v_img_array));
PyTuple_SET_ITEM(__pyx_t_4, 0+1, ((PyObject *)__pyx_v_img_array));
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_out_array = __pyx_t_1;
__pyx_t_1 = 0;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":20
* FlowWarp(<double*> np.PyArray_DATA(img_array),
* <double*> np.PyArray_DATA(flow_array),
* <double*> np.PyArray_DATA(out_array), # <<<<<<<<<<<<<<
* out_array.shape[0],
* out_array.shape[1],
*/
if (!(likely(((__pyx_v_out_array) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out_array, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 20, __pyx_L1_error)
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":21
* <double*> np.PyArray_DATA(flow_array),
* <double*> np.PyArray_DATA(out_array),
* out_array.shape[0], # <<<<<<<<<<<<<<
* out_array.shape[1],
* out_array.shape[2],
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_out_array, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":22
* <double*> np.PyArray_DATA(out_array),
* out_array.shape[0],
* out_array.shape[1], # <<<<<<<<<<<<<<
* out_array.shape[2],
* filling_value,
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_out_array, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_t_3, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":23
* out_array.shape[0],
* out_array.shape[1],
* out_array.shape[2], # <<<<<<<<<<<<<<
* filling_value,
* interpolate_mode)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_out_array, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":18
* out_array = np.zeros_like(img_array)
*
* FlowWarp(<double*> np.PyArray_DATA(img_array), # <<<<<<<<<<<<<<
* <double*> np.PyArray_DATA(flow_array),
* <double*> np.PyArray_DATA(out_array),
*/
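/* Invoke the external FlowWarp routine: image data, flow data, output buffer,
 * then the height/width/channels read from out_array.shape[0..2], followed by
 * filling_value and interpolate_mode. */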
FlowWarp(((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_img_array))), ((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_flow_array))), ((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_out_array))), __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_v_filling_value, __pyx_v_interpolate_mode);
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":27
* interpolate_mode)
*
* return out_array # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_out_array);
__pyx_r = __pyx_v_out_array;
goto __pyx_L0;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":11
* void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode)
*
* def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<<
* np.ndarray[double, ndim=3, mode="c"] flow_array not None,
* int filling_value=0,
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_flow_array.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_img_array.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("mmcv._ext.flow_warp_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_flow_array.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_img_array.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF(__pyx_v_out_array);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
*/
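/* Buffer-protocol support for numpy.ndarray: fills the Py_buffer fields
 * (buf, ndim, shape, strides, itemsize, readonly, format) and raises
 * ValueError when the requested flags demand a contiguity the array does not
 * have, or when the dtype uses a non-native byte order. */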
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
goto __pyx_L4;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
/*else*/ {
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 235, __pyx_L1_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 239, __pyx_L1_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim))));
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
goto __pyx_L11;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef int offset
*/
__pyx_v_f = NULL;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef int offset
*
*/
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
goto __pyx_L14;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
/*else*/ {
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 276, __pyx_L1_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
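/* Map the numpy type number to the matching buffer-format character
 * (e.g. NPY_INT -> "i", NPY_DOUBLE -> "d"; complex types use a "Z" prefix). */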
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
case NPY_UBYTE:
__pyx_v_f = ((char *)"B");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
case NPY_SHORT:
__pyx_v_f = ((char *)"h");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
case NPY_USHORT:
__pyx_v_f = ((char *)"H");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
case NPY_INT:
__pyx_v_f = ((char *)"i");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
case NPY_UINT:
__pyx_v_f = ((char *)"I");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
case NPY_LONG:
__pyx_v_f = ((char *)"l");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
case NPY_ULONG:
__pyx_v_f = ((char *)"L");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
case NPY_LONGLONG:
__pyx_v_f = ((char *)"q");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
case NPY_ULONGLONG:
__pyx_v_f = ((char *)"Q");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
case NPY_FLOAT:
__pyx_v_f = ((char *)"f");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
case NPY_DOUBLE:
__pyx_v_f = ((char *)"d");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
case NPY_LONGDOUBLE:
__pyx_v_f = ((char *)"g");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
case NPY_CFLOAT:
__pyx_v_f = ((char *)"Zf");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
case NPY_CDOUBLE:
__pyx_v_f = ((char *)"Zd");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
case NPY_CLONGDOUBLE:
__pyx_v_f = ((char *)"Zg");
break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
case NPY_OBJECT:
__pyx_v_f = ((char *)"O");
break;
default:
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 295, __pyx_L1_error)
break;
}
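/* The switch above is the generated form of the if/elif chain quoted in the
 * comments: it maps the NumPy scalar type number t to the corresponding
 * PEP 3118 / struct-module format code ("b", "B", ..., "Zf", "Zd", "Zg", "O"). */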
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299
* return
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
/*else*/ {
__pyx_v_info->format = ((char *)PyObject_Malloc(0xFF));
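/* 0xFF is the inlined value of _buffer_format_string_len (255) from numpy.pxd;
 * the buffer allocated here receives a manually built format string for
 * structured dtypes and is freed again in __releasebuffer__ below. */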
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error)
__pyx_v_f = __pyx_t_7;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
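/* End of the generated __getbuffer__. Its counterpart __releasebuffer__ below
 * frees the format string allocated above for structured dtypes and, when
 * npy_intp and Py_ssize_t differ in size, the strides/shape block that
 * __getbuffer__ allocated earlier. */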
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
*/
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* PyObject_Free(info.strides)
*/
PyObject_Free(__pyx_v_info->format);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* PyObject_Free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* PyObject_Free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
PyObject_Free(__pyx_v_info->strides);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* PyObject_Free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
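/* The PyArray_MultiIterNew1 .. PyArray_MultiIterNew5 helpers that follow are
 * thin inline wrappers which simply forward one to five operands to NumPy's
 * PyArray_MultiIterNew for broadcast iteration. */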
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
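/* PyDataType_SHAPE below returns the subarray shape tuple for subarray dtypes
 * (PyDataType_HASSUBARRAY) and an empty tuple for plain scalar dtypes. */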
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
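/* Endianness probe: an int set to 1 is reinterpreted as bytes; a non-zero
 * first byte means the platform is little-endian. */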
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 818, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 819, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 820, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 823, __pyx_L1_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_6) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 827, __pyx_L1_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
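/* The loop above emits 'x' (0x78) padding bytes until offset[0] reaches the
 * child field's starting offset, so the generated format string matches the
 * structured dtype's memory layout. */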
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 847, __pyx_L1_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 864, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 868, __pyx_L1_error)
}
__pyx_L15:;
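/* All branches of the type_num dispatch join here; f is advanced by one more
 * byte just below (the complex cases already advanced once to account for
 * their 'Z' prefix, giving a total advance of two). */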
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
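/* set_array_base / get_array_base below manage an ndarray's 'base' reference,
 * which keeps the owner of the array's memory alive: set_array_base takes its
 * own reference to the new base (Py_INCREF before the Py_XDECREF of the old
 * one), and get_array_base returns the stored base or None. */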
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993
* cdef PyObject* baseptr
* if base is None:
* baseptr = NULL # <<<<<<<<<<<<<<
* else:
* Py_INCREF(base) # important to do this before decref below!
*/
__pyx_v_baseptr = NULL;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
goto __pyx_L3;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995
* baseptr = NULL
* else:
* Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
*/
/*else*/ {
Py_INCREF(__pyx_v_base);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996
* else:
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base # <<<<<<<<<<<<<<
* Py_XDECREF(arr.base)
* arr.base = baseptr
*/
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
* arr.base = baseptr
*
*/
Py_XDECREF(__pyx_v_arr->base);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
* arr.base = baseptr # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
__pyx_v_arr->base = __pyx_v_baseptr;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
__pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL:
* return None # <<<<<<<<<<<<<<
* else:
* return <object>arr.base
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004
* return None
* else:
* return <object>arr.base # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
__pyx_r = ((PyObject *)__pyx_v_arr->base);
goto __pyx_L0;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011
* cdef inline int import_array() except -1:
* try:
* _import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012
* try:
* _import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1013, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1019, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error)
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1025, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
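/* Editor's note (added, not generated by Cython): the three inline helpers
   above -- import_array, import_umath and import_ufunc -- wrap numpy's C-API
   loaders _import_array() / _import_umath() and turn a failure into a Python
   ImportError, returning -1 so the caller can propagate the error. As
   generated here, import_ufunc() also calls _import_umath(), because the
   ufunc C API is exported by the umath module. The module init code further
   below invokes __pyx_f_5numpy_import_array() for the np.import_array()
   line of flow_warp_module.pyx and checks its return value the same way. */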
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec__ext(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec__ext},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"_ext",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_n_s_Hi, __pyx_k_Hi, sizeof(__pyx_k_Hi), 0, 0, 1, 1},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s_STUFF, __pyx_k_STUFF, sizeof(__pyx_k_STUFF), 0, 0, 1, 1},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_filling_value, __pyx_k_filling_value, sizeof(__pyx_k_filling_value), 0, 0, 1, 1},
{&__pyx_n_s_flow_array, __pyx_k_flow_array, sizeof(__pyx_k_flow_array), 0, 0, 1, 1},
{&__pyx_n_s_flow_warp_c, __pyx_k_flow_warp_c, sizeof(__pyx_k_flow_warp_c), 0, 0, 1, 1},
{&__pyx_n_s_img_array, __pyx_k_img_array, sizeof(__pyx_k_img_array), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_interpolate_mode, __pyx_k_interpolate_mode, sizeof(__pyx_k_interpolate_mode), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_mmcv__ext, __pyx_k_mmcv__ext, sizeof(__pyx_k_mmcv__ext), 0, 0, 1, 1},
{&__pyx_kp_s_mmcv_video_optflow_warp_flow_war, __pyx_k_mmcv_video_optflow_warp_flow_war, sizeof(__pyx_k_mmcv_video_optflow_warp_flow_war), 0, 0, 1, 0},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_out_array, __pyx_k_out_array, sizeof(__pyx_k_out_array), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_zeros_like, __pyx_k_zeros_like, sizeof(__pyx_k_zeros_like), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 235, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 248, __pyx_L1_error)
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 235, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 239, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 276, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 847, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1019, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1025, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":11
* void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode)
*
* def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<<
* np.ndarray[double, ndim=3, mode="c"] flow_array not None,
* int filling_value=0,
*/
__pyx_tuple__10 = PyTuple_Pack(5, __pyx_n_s_img_array, __pyx_n_s_flow_array, __pyx_n_s_filling_value, __pyx_n_s_interpolate_mode, __pyx_n_s_out_array); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
__pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_mmcv_video_optflow_warp_flow_war, __pyx_n_s_flow_warp_c, 11, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
return 0;
__pyx_L1_error:;
return -1;
}
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC init_ext(void); /*proto*/
PyMODINIT_FUNC init_ext(void)
#else
PyMODINIT_FUNC PyInit__ext(void); /*proto*/
PyMODINIT_FUNC PyInit__ext(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
result = PyDict_SetItemString(moddict, to_name, value);
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static int __pyx_pymod_exec__ext(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0;
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__ext(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("_ext", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_mmcv___ext) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "mmcv._ext")) {
if (unlikely(PyDict_SetItemString(modules, "mmcv._ext", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if CYTHON_COMPILING_IN_PYPY
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error)
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":1
* STUFF = "Hi" # <<<<<<<<<<<<<<
*
* import numpy as np
*/
if (PyDict_SetItem(__pyx_d, __pyx_n_s_STUFF, __pyx_n_s_Hi) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":3
* STUFF = "Hi"
*
* import numpy as np # <<<<<<<<<<<<<<
* cimport numpy as np
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":6
* cimport numpy as np
*
* np.import_array() # <<<<<<<<<<<<<<
*
* cdef extern from "flow_warp.hpp":
*/
__pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 6, __pyx_L1_error)
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":11
* void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode)
*
* def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<<
* np.ndarray[double, ndim=3, mode="c"] flow_array not None,
* int filling_value=0,
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4mmcv_4_ext_1flow_warp_c, NULL, __pyx_n_s_mmcv__ext); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_flow_warp_c, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "mmcv/video/optflow_warp/flow_warp_module.pyx":1
* STUFF = "Hi" # <<<<<<<<<<<<<<
*
* import numpy as np
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init mmcv._ext", 0, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init mmcv._ext");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
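/* Editor's note (added, not generated by Cython): the init function above
   performs the usual Cython module start-up: it creates (or, under PEP 489
   multi-phase init, receives) the mmcv._ext module object, interns the
   string table, caches builtins and constant tuples, imports the numpy type
   objects (dtype, flatiter, broadcast, ndarray, ufunc), and then executes
   the body of flow_warp_module.pyx -- setting STUFF = "Hi", importing numpy,
   calling np.import_array(), and registering flow_warp_c() in the module
   dict. After a successful import, Python code reaches it as
   mmcv._ext.flow_warp_c(img_array, flow_array, filling_value,
   interpolate_mode); the default argument values are defined in the .pyx
   and are not visible in this excerpt. */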
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
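/* Editor's note (added): the union trick above stores the 32-bit constant
   0x01020304 and inspects the first byte in memory; on a little-endian
   machine the least-significant byte (0x04) comes first, so the function
   returns 1 there and 0 on big-endian targets. */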
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') { /* include '9' itself; '< 9' would cut multi-digit counts such as "19" short */
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
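/* Editor's note (added): __Pyx_BufFmt_ParseNumber consumes a decimal repeat
   count from a PEP 3118 format string and advances *ts past it; e.g. for
   "12d" it returns 12 and leaves *ts pointing at 'd'. A return value of -1
   means the next character is not a digit. */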
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
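/* Editor's note (added, not part of the generated module): the __Pyx_st_* and
   __Pyx_pad_* helper structs above recover ABI alignment/padding for each
   scalar type purely from sizeof(), without offsetof(). A minimal sketch of
   the technique follows, under the assumption that CYTHON_UNUSED (defined
   earlier in this file and used above) expands to the compiler's "unused"
   attribute; the helper name is hypothetical and exists only to document the
   idea. */
static CYTHON_UNUSED size_t __pyx_editor_note_alignment_of_double(void) {
    /* {char; double}: the compiler pads between 'c' and 'x', so subtracting
       sizeof(double) leaves exactly the alignment requirement of double. */
    size_t from_leading_char = sizeof(__Pyx_st_double) - sizeof(double);
    /* {double; char}: the trailing char plus end-of-struct padding again adds
       up to one alignment unit on common ABIs, though (as the comment before
       the __Pyx_pad_* typedefs says) that is not guaranteed in general. */
    size_t from_trailing_char = sizeof(__Pyx_pad_double) - sizeof(double);
    return (from_leading_char > from_trailing_char) ? from_leading_char
                                                    : from_trailing_char;
}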
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue;
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
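/* Editor's note (added): __Pyx_BufFmt_CheckString walks a PEP 3118 buffer
   format string and checks it against the expected Cython dtype. The cases
   above cover byte-order / packing prefixes ('<', '>', '!', '=', '@', '^'),
   repeat counts (digits), padding ('x'), complex prefixes ('Z'), field names
   between ':' characters, sub-array dimensions in '(...)' and nested structs
   in 'T{...}'. For the flow_warp_c buffers declared as
   np.ndarray[double, ndim=3, mode="c"], the format string that has to match
   is essentially a plain 'd' (a native double). */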
/* BufferGetAndValidate */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (unlikely(info->buf == NULL)) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
static void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static int __Pyx__GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
buf->buf = NULL;
if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) {
__Pyx_ZeroBuffer(buf);
return -1;
}
if (unlikely(buf->ndim != nd)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if (unlikely((unsigned)buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_SafeReleaseBuffer(buf);
return -1;
}
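/* Editor's note (added): __Pyx__GetBufferAndValidate acquires the Py_buffer
   for an argument such as img_array, then rejects it unless the number of
   dimensions, the format string (via __Pyx_BufFmt_CheckString) and the item
   size all match the declared dtype; on any failure the buffer is released
   again and -1 is returned with a Python exception already set, which the
   generated calling code propagates. */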
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* GetModuleGlobalName */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
result = PyDict_GetItem(__pyx_d, name);
if (likely(result)) {
Py_INCREF(result);
} else {
#else
result = PyObject_GetItem(__pyx_d, name);
if (!result) {
PyErr_Clear();
#endif
result = __Pyx_GetBuiltinName(name);
}
return result;
}
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
    /* This helper (like CPython's _PyCFunction_FastCallDict()) must not be
       called with an exception set, because it may clear it (directly or
       indirectly) and so the caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
#include "frameobject.h"
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = f->f_localsplus;
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
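    /* Decref'ing the frame can run __del__ methods that call back into Python;
       the C stack for this call is still in use, so the recursion depth is
       bumped for the duration of the Py_DECREF below. */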
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
               a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
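/* Call 'func' with a single positional argument. On CPython, Python functions
   take the fast frame-based call path and C functions declared METH_O or
   METH_FASTCALL are invoked directly; everything else (and non-CPython builds)
   falls back to packing a 1-tuple and using the generic call protocol. */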
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* GetItemInt */
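/* Integer indexing helpers: exact lists and tuples are indexed through the
   PyList_GET_ITEM/PyTuple_GET_ITEM macros with optional wraparound and bounds
   checking, other sequences go through their sq_item slot, and the generic
   fallback boxes the index and calls PyObject_GetItem. */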
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if PY_VERSION_HEX >= 0x030700A2
*type = tstate->exc_state.exc_type;
*value = tstate->exc_state.exc_value;
*tb = tstate->exc_state.exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if PY_VERSION_HEX >= 0x030700A2
tmp_type = tstate->exc_state.exc_type;
tmp_value = tstate->exc_state.exc_value;
tmp_tb = tstate->exc_state.exc_traceback;
tstate->exc_state.exc_type = type;
tstate->exc_state.exc_value = value;
tstate->exc_state.exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
#endif
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if PY_VERSION_HEX >= 0x030700A2
tmp_type = tstate->exc_state.exc_type;
tmp_value = tstate->exc_state.exc_value;
tmp_tb = tstate->exc_state.exc_traceback;
tstate->exc_state.exc_type = local_type;
tstate->exc_state.exc_value = local_value;
tstate->exc_state.exc_traceback = local_tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback);
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (PyObject_Not(use_cline) != 0) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
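/* Cache of synthesized PyCodeObjects used for tracebacks, kept as an array
   sorted by line number so lookups and inserts can use binary search
   (__pyx_bisect_code_objects). AddTraceback stores C-line entries under the
   negated line number to keep them distinct from Python-line entries. */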
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
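/* Build a synthetic PyCodeObject and frame for a C-level call site and push it
   onto the current traceback, so errors raised from generated C code show up
   with a function name, file name and line number at the Python level. */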
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view);
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
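/* Convert a C 'long' to a Python integer, picking the narrowest suitable
   PyInt/PyLong constructor based on size and signedness; values that fit none
   of them are converted via _PyLong_FromByteArray using the platform's
   endianness. */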
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
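/* The enabled division helper below uses Smith's algorithm: the divisor is
   scaled by the ratio of its smaller to larger component to avoid overflow and
   underflow in the intermediate products; the disabled #else branch is the
   naive textbook formula. */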
#if 1
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = 1.0 / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = 1.0 / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
              return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0, -1);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = 1.0 / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = 1.0 / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
              return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0, -1);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(enum NPY_TYPES) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
little, !is_unsigned);
}
}
/* CIntFromPy */
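/* Convert a Python integer to a C 'int' with overflow checking. Small values
   are read digit-by-digit from the PyLong internals when available; larger
   values go through PyLong_AsLong(Long)/PyLong_AsUnsignedLong(Long), and the
   final fallback copies the raw bytes with _PyLong_AsByteArray. */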
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* FastTypeChecks */
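/* Exception-matching helpers that avoid the overhead of
   PyErr_GivenExceptionMatches in the common cases: a direct pointer comparison
   first, then a walk over the type's MRO (or its tp_base chain) to test the
   subclass relationship without raising. */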
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* ModuleImport */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
PyErr_Clear();
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(x);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
Cream/CDARTS/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.cpp",
"repo_id": "Cream",
"token_count": 166554
}
| 296 |
import warnings
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
def __call__(self, results):
if isinstance(results['img'], str):
results['filename'] = results['img']
else:
results['filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
# TODO: merge this method with the one in BaseDetector
def show_result(img,
result,
class_names,
score_thr=0.3,
wait_time=0,
show=True,
out_file=None):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
wait_time (int): Value of waitKey param.
show (bool, optional): Whether to show the image with opencv or not.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
Returns:
np.ndarray or None: If neither `show` nor `out_file` is specified, the
visualized image is returned, otherwise None is returned.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img,
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
def show_result_pyplot(img,
result,
class_names,
score_thr=0.3,
fig_size=(15, 10)):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
img = show_result(
img, result, class_names, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
|
Cream/CDARTS/CDARTS_detection/mmdet/apis/inference.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/apis/inference.py",
"repo_id": "Cream",
"token_count": 2561
}
| 297 |
from .base_sampler import BaseSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .combined_sampler import CombinedSampler
from .ohem_sampler import OHEMSampler
from .sampling_result import SamplingResult
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/__init__.py",
"repo_id": "Cream",
"token_count": 183
}
| 298 |
from collections.abc import Sequence
import numpy as np
from terminaltables import AsciiTable
from mmdet.utils import print_log
from .bbox_overlaps import bbox_overlaps
def _recalls(all_ious, proposal_nums, thrs):
img_num = all_ious.shape[0]
total_gt_num = sum([ious.shape[0] for ious in all_ious])
_ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
for k, proposal_num in enumerate(proposal_nums):
tmp_ious = np.zeros(0)
for i in range(img_num):
ious = all_ious[i][:, :proposal_num].copy()
gt_ious = np.zeros((ious.shape[0]))
if ious.size == 0:
tmp_ious = np.hstack((tmp_ious, gt_ious))
continue
for j in range(ious.shape[0]):
gt_max_overlaps = ious.argmax(axis=1)
max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
gt_idx = max_ious.argmax()
gt_ious[j] = max_ious[gt_idx]
box_idx = gt_max_overlaps[gt_idx]
ious[gt_idx, :] = -1
ious[:, box_idx] = -1
tmp_ious = np.hstack((tmp_ious, gt_ious))
_ious[k, :] = tmp_ious
_ious = np.fliplr(np.sort(_ious, axis=1))
recalls = np.zeros((proposal_nums.size, thrs.size))
for i, thr in enumerate(thrs):
recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)
return recalls
def set_recall_param(proposal_nums, iou_thrs):
"""Check proposal_nums and iou_thrs and set correct format.
"""
if isinstance(proposal_nums, Sequence):
_proposal_nums = np.array(proposal_nums)
elif isinstance(proposal_nums, int):
_proposal_nums = np.array([proposal_nums])
else:
_proposal_nums = proposal_nums
if iou_thrs is None:
_iou_thrs = np.array([0.5])
elif isinstance(iou_thrs, Sequence):
_iou_thrs = np.array(iou_thrs)
elif isinstance(iou_thrs, float):
_iou_thrs = np.array([iou_thrs])
else:
_iou_thrs = iou_thrs
return _proposal_nums, _iou_thrs
def eval_recalls(gts,
proposals,
proposal_nums=None,
iou_thrs=0.5,
logger=None):
"""Calculate recalls.
Args:
gts (list[ndarray]): a list of arrays of shape (n, 4)
proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)
proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.
iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.
logger (logging.Logger | str | None): The way to print the recall
summary. See `mmdet.utils.print_log()` for details. Default: None.
Returns:
ndarray: recalls of different ious and proposal nums
"""
img_num = len(gts)
assert img_num == len(proposals)
proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)
all_ious = []
for i in range(img_num):
if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
scores = proposals[i][:, 4]
sort_idx = np.argsort(scores)[::-1]
img_proposal = proposals[i][sort_idx, :]
else:
img_proposal = proposals[i]
prop_num = min(img_proposal.shape[0], proposal_nums[-1])
if gts[i] is None or gts[i].shape[0] == 0:
ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
else:
ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
all_ious.append(ious)
all_ious = np.array(all_ious)
recalls = _recalls(all_ious, proposal_nums, iou_thrs)
print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)
return recalls
def print_recall_summary(recalls,
proposal_nums,
iou_thrs,
row_idxs=None,
col_idxs=None,
logger=None):
"""Print recalls in a table.
Args:
recalls (ndarray): calculated from `bbox_recalls`
proposal_nums (ndarray or list): top N proposals
iou_thrs (ndarray or list): iou thresholds
row_idxs (ndarray): which rows(proposal nums) to print
col_idxs (ndarray): which cols(iou thresholds) to print
logger (logging.Logger | str | None): The way to print the recall
summary. See `mmdet.utils.print_log()` for details. Default: None.
"""
proposal_nums = np.array(proposal_nums, dtype=np.int32)
iou_thrs = np.array(iou_thrs)
if row_idxs is None:
row_idxs = np.arange(proposal_nums.size)
if col_idxs is None:
col_idxs = np.arange(iou_thrs.size)
row_header = [''] + iou_thrs[col_idxs].tolist()
table_data = [row_header]
for i, num in enumerate(proposal_nums[row_idxs]):
row = [
'{:.3f}'.format(val)
for val in recalls[row_idxs[i], col_idxs].tolist()
]
row.insert(0, num)
table_data.append(row)
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
def plot_num_recall(recalls, proposal_nums):
"""Plot Proposal_num-Recalls curve.
Args:
recalls(ndarray or list): shape (k,)
proposal_nums(ndarray or list): same shape as `recalls`
"""
if isinstance(proposal_nums, np.ndarray):
_proposal_nums = proposal_nums.tolist()
else:
_proposal_nums = proposal_nums
if isinstance(recalls, np.ndarray):
_recalls = recalls.tolist()
else:
_recalls = recalls
import matplotlib.pyplot as plt
f = plt.figure()
plt.plot([0] + _proposal_nums, [0] + _recalls)
plt.xlabel('Proposal num')
plt.ylabel('Recall')
plt.axis([0, proposal_nums.max(), 0, 1])
f.show()
def plot_iou_recall(recalls, iou_thrs):
"""Plot IoU-Recalls curve.
Args:
recalls(ndarray or list): shape (k,)
iou_thrs(ndarray or list): same shape as `recalls`
"""
if isinstance(iou_thrs, np.ndarray):
_iou_thrs = iou_thrs.tolist()
else:
_iou_thrs = iou_thrs
if isinstance(recalls, np.ndarray):
_recalls = recalls.tolist()
else:
_recalls = recalls
import matplotlib.pyplot as plt
f = plt.figure()
plt.plot(_iou_thrs + [1.0], _recalls + [0.])
plt.xlabel('IoU')
plt.ylabel('Recall')
plt.axis([iou_thrs.min(), 1, 0, 1])
f.show()
|
Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/recall.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/recall.py",
"repo_id": "Cream",
"token_count": 3195
}
| 299 |
from .coco import CocoDataset
from .registry import DATASETS
@DATASETS.register_module
class CityscapesDataset(CocoDataset):
CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle')
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/cityscapes.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/cityscapes.py",
"repo_id": "Cream",
"token_count": 96
}
| 300 |
from mmdet.core import eval_map, eval_recalls
from .registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError('metric {} is not supported'.format(metric))
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = {}
if metric == 'mAP':
assert isinstance(iou_thr, float)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.dataset.CLASSES
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger)
eval_results['mAP'] = mean_ap
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
if isinstance(iou_thr, float):
iou_thr = [iou_thr]
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
for i, num in enumerate(proposal_nums):
for j, iou in enumerate(iou_thr):
eval_results['recall@{}@{}'.format(num, iou)] = recalls[i,
j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results['AR@{}'.format(num)] = ar[i]
return eval_results
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/voc.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/voc.py",
"repo_id": "Cream",
"token_count": 1422
}
| 301 |
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class DropBlock2D(nn.Module):
r"""Randomly zeroes 2D spatial blocks of the input tensor.
As described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove semantic
information as compared to regular dropout.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, H, W)`
- Output: `(N, C, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock2D, self).__init__()
self.drop_prob = drop_prob
self.block_size = block_size
def forward(self, x):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# get gamma value
gamma = self._compute_gamma(x)
# sample mask and place on input device
mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).to(x)
# compute block mask
block_mask = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
block_mask = F.max_pool2d(input=mask[:, None, :, :],
kernel_size=(self.block_size, self.block_size),
stride=(1, 1),
padding=self.block_size // 2)
if self.block_size % 2 == 0:
block_mask = block_mask[:, :, :-1, :-1]
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x):
return self.drop_prob / (self.block_size ** 2)
class DropBlock3D(DropBlock2D):
r"""Randomly zeroes 3D spatial blocks of the input tensor.
An extension to the concept described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove semantic
information as compared to regular dropout.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, D, H, W)`
- Output: `(N, C, D, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock3D, self).__init__(drop_prob, block_size)
def forward(self, x):
# shape: (bsize, channels, depth, height, width)
assert x.dim() == 5, \
"Expected input with 5 dimensions (bsize, channels, depth, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# get gamma value
gamma = self._compute_gamma(x)
# sample mask and place on input device
mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).to(x)
# compute block mask
block_mask = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
block_mask = F.max_pool3d(input=mask[:, None, :, :, :],
kernel_size=(self.block_size, self.block_size, self.block_size),
stride=(1, 1, 1),
padding=self.block_size // 2)
if self.block_size % 2 == 0:
block_mask = block_mask[:, :, :-1, :-1, :-1]
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x):
return self.drop_prob / (self.block_size ** 3)
class DropBlockScheduled(nn.Module):
def __init__(self, dropblock, start_value, stop_value, nr_steps):
super(DropBlockScheduled, self).__init__()
self.dropblock = dropblock
self.i = 0
self.drop_values = np.linspace(start=start_value, stop=stop_value, num=nr_steps)
def forward(self, x):
if self.training:
self.step()
return self.dropblock(x)
def step(self):
if self.i < len(self.drop_values):
self.dropblock.drop_prob = self.drop_values[self.i]
self.i += 1
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/dropblock.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/dropblock.py",
"repo_id": "Cream",
"token_count": 2265
}
| 302 |
import torch
import torch.nn.functional as F
from .cascade_rcnn import CascadeRCNN
from .. import builder
from ..registry import DETECTORS
from mmdet.core import (bbox2roi, bbox2result, build_assigner, build_sampler,
merge_aug_masks)
@DETECTORS.register_module
class HybridTaskCascade(CascadeRCNN):
def __init__(self,
num_stages,
backbone,
semantic_roi_extractor=None,
semantic_head=None,
semantic_fusion=('bbox', 'mask'),
interleaved=True,
mask_info_flow=True,
**kwargs):
super(HybridTaskCascade, self).__init__(num_stages, backbone, **kwargs)
assert self.with_bbox and self.with_mask
assert not self.with_shared_head # shared head not supported
if semantic_head is not None:
self.semantic_roi_extractor = builder.build_roi_extractor(
semantic_roi_extractor)
self.semantic_head = builder.build_head(semantic_head)
self.semantic_fusion = semantic_fusion
self.interleaved = interleaved
self.mask_info_flow = mask_info_flow
@property
def with_semantic(self):
if hasattr(self, 'semantic_head') and self.semantic_head is not None:
return True
else:
return False
def _bbox_forward_train(self,
stage,
x,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
semantic_feat=None):
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
# semantic feature fusion
# element-wise sum for original features and pooled semantic features
if self.with_semantic and 'bbox' in self.semantic_fusion:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
bbox_semantic_feat = F.adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats += bbox_semantic_feat
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
return loss_bbox, rois, bbox_targets, bbox_pred
def _mask_forward_train(self,
stage,
x,
sampling_results,
gt_masks,
rcnn_train_cfg,
semantic_feat=None):
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
pos_rois)
# semantic feature fusion
# element-wise sum for original features and pooled semantic features
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
pos_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
# mask information flow
# forward all previous mask heads to obtain last_feat, and fuse it
# with the normal mask feature
if self.mask_info_flow:
last_feat = None
for i in range(stage):
last_feat = self.mask_head[i](
mask_feats, last_feat, return_logits=False)
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
else:
mask_pred = mask_head(mask_feats)
mask_targets = mask_head.get_target(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
return loss_mask
def _bbox_forward_test(self, stage, x, rois, semantic_feat=None):
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
if self.with_semantic and 'bbox' in self.semantic_fusion:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
bbox_semantic_feat = F.adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats += bbox_semantic_feat
cls_score, bbox_pred = bbox_head(bbox_feats)
return cls_score, bbox_pred
def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_rois = bbox2roi([bboxes])
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
mask_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
if self.mask_info_flow:
last_feat = None
last_pred = None
for i in range(stage):
mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)
if last_pred is not None:
mask_pred = mask_pred + last_pred
last_pred = mask_pred
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
if last_pred is not None:
mask_pred = mask_pred + last_pred
else:
mask_pred = mask_head(mask_feats)
return mask_pred
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_semantic_seg=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
# RPN part, the same as normal two-stage detectors
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
# semantic segmentation part
# 2 outputs: segmentation prediction and embedded features
if self.with_semantic:
semantic_pred, semantic_feat = self.semantic_head(x)
loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
losses['loss_semantic_seg'] = loss_seg
else:
semantic_feat = None
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg.rcnn[i]
lw = self.train_cfg.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
bbox_assigner = build_assigner(rcnn_train_cfg.assigner)
bbox_sampler = build_sampler(rcnn_train_cfg.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
loss_bbox, rois, bbox_targets, bbox_pred = \
self._bbox_forward_train(
i, x, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg, semantic_feat)
roi_labels = bbox_targets[0]
for name, value in loss_bbox.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
# interleaved execution: use regressed bboxes by the box branch
# to train the mask branch
if self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
rois, roi_labels, bbox_pred, pos_is_gts, img_meta)
# re-assign and sample 512 RoIs from 512 RoIs
sampling_results = []
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j],
gt_bboxes_ignore[j], gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
loss_mask = self._mask_forward_train(i, x, sampling_results,
gt_masks, rcnn_train_cfg,
semantic_feat)
for name, value in loss_mask.items():
losses['s{}.{}'.format(i, name)] = (
value * lw if 'loss' in name else value)
# refine bboxes (same as Cascade R-CNN)
if i < self.num_stages - 1 and not self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
rois, roi_labels, bbox_pred, pos_is_gts, img_meta)
return losses
def simple_test(self, img, img_meta, proposals=None, rescale=False):
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(
x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
img_shape = img_meta[0]['img_shape']
ori_shape = img_meta[0]['ori_shape']
scale_factor = img_meta[0]['scale_factor']
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg.rcnn
rois = bbox2roi(proposal_list)
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
cls_score, bbox_pred = self._bbox_forward_test(
i, x, rois, semantic_feat=semantic_feat)
ms_scores.append(cls_score)
if self.test_cfg.keep_all_stages:
det_bboxes, det_labels = bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
nms_cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels,
bbox_head.num_classes)
ms_bbox_result['stage{}'.format(i)] = bbox_result
if self.with_mask:
mask_head = self.mask_head[i]
if det_bboxes.shape[0] == 0:
segm_result = [
[] for _ in range(mask_head.num_classes - 1)
]
else:
_bboxes = (
det_bboxes[:, :4] * scale_factor
if rescale else det_bboxes)
mask_pred = self._mask_forward_test(
i, x, _bboxes, semantic_feat=semantic_feat)
segm_result = mask_head.get_seg_masks(
mask_pred, _bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale)
ms_segm_result['stage{}'.format(i)] = segm_result
if i < self.num_stages - 1:
bbox_label = cls_score.argmax(dim=1)
rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred,
img_meta[0])
cls_score = sum(ms_scores) / float(len(ms_scores))
det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if det_bboxes.shape[0] == 0:
segm_result = [
[] for _ in range(self.mask_head[-1].num_classes - 1)
]
else:
_bboxes = (
det_bboxes[:, :4] * scale_factor
if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
aug_masks = []
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats += mask_semantic_feat
last_feat = None
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks,
[img_meta] * self.num_stages,
self.test_cfg.rcnn)
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks, _bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale)
ms_segm_result['ensemble'] = segm_result
if not self.test_cfg.keep_all_stages:
if self.with_mask:
results = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
results = ms_bbox_result['ensemble']
else:
if self.with_mask:
results = {
stage: (ms_bbox_result[stage], ms_segm_result[stage])
for stage in ms_bbox_result
}
else:
results = ms_bbox_result
return results
def aug_test(self, img, img_meta, proposals=None, rescale=False):
raise NotImplementedError
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/htc.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/htc.py",
"repo_id": "Cream",
"token_count": 10086
}
| 303 |
import torch
import torch.nn as nn
from .utils import weighted_loss
from ..registry import LOSSES
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@LOSSES.register_module
class SmoothL1Loss(nn.Module):
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
|
Cream/CDARTS/CDARTS_detection/mmdet/models/losses/smooth_l1_loss.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/smooth_l1_loss.py",
"repo_id": "Cream",
"token_count": 625
}
| 304 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from mmcv.cnn.weight_init import caffe2_xavier_init
from ..utils import ConvModule
from ..registry import NECKS
@NECKS.register_module
class HRFPN(nn.Module):
"""HRFPN (High Resolution Feature Pyrmamids)
arXiv: https://arxiv.org/abs/1904.04514
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False):
super(HRFPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
activation=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
conv_cfg=self.conv_cfg,
activation=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
caffe2_xavier_init(m)
def forward(self, inputs):
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/necks/hrfpn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/necks/hrfpn.py",
"repo_id": "Cream",
"token_count": 1639
}
| 305 |
import torch
import torch.nn as nn
class Scale(nn.Module):
def __init__(self, scale=1.0):
super(Scale, self).__init__()
self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
def forward(self, x):
return x * self.scale
|
Cream/CDARTS/CDARTS_detection/mmdet/models/utils/scale.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/utils/scale.py",
"repo_id": "Cream",
"token_count": 113
}
| 306 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.