Dataset columns:
docstring: string, lengths 52-499
function: string, lengths 67-35.2k
__index_level_0__: int64, values 52.6k-1.16M
Average a series of datetime objects. .. note:: This function assumes all datetime objects are naive and in the same time zone (UTC). Args: dt_list (iterable): Datetime objects to average Returns: Average datetime as a datetime object
def average_datetimes(dt_list):
    if sys.version_info < (3, 3):
        # timestamp added in python 3.3
        import time

        def timestamp_func(dt):
            return time.mktime(dt.timetuple())
    else:
        timestamp_func = datetime.timestamp

    total = [timestamp_func(dt) for dt in dt_list]
    return datetime.fromtimestamp(sum(total) / len(total))
293,768
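A brief usage sketch for average_datetimes above, assuming the function is in scope with datetime (and sys) imported in its module; the two naive timestamps are made up for illustration.

from datetime import datetime

# Two naive datetimes one hour apart; the average should land on the half hour.
dts = [datetime(2020, 1, 1, 12, 0, 0), datetime(2020, 1, 1, 13, 0, 0)]
print(average_datetimes(dts))  # 2020-01-01 12:30:00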
Return whether two wavelengths are equal. Args: a (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl b (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl
def wavelength_match(a, b):
    # Equal types, or both plain numbers, compare directly
    if type(a) == type(b) or (isinstance(a, numbers.Number) and isinstance(b, numbers.Number)):
        return a == b
    elif a is None or b is None:
        return False
    elif isinstance(a, (list, tuple)) and len(a) == 3:
        return a[0] <= b <= a[2]
    elif isinstance(b, (list, tuple)) and len(b) == 3:
        return b[0] <= a <= b[2]
    else:
        raise ValueError("Can only compare wavelengths of length 1 or 3")
293,773
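A usage sketch for wavelength_match above, assuming it is in scope with the numbers module imported; the band limits are illustrative only.

print(wavelength_match((10.3, 10.8, 11.3), 10.8))  # True: 10.8 falls inside the (min, nominal, max) range
print(wavelength_match((10.3, 10.8, 11.3), 12.0))  # False: outside the range
print(wavelength_match(0.64, 0.64))                # True: plain scalar comparison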
Configure reader behavior. Args: mask_surface (boolean): mask anything below the surface pressure mask_quality (boolean): mask anything where the `Quality_Flag` metadata is ``!= 1``.
def __init__(self, config_files, mask_surface=True, mask_quality=True, **kwargs):
    self.pressure_dataset_names = defaultdict(list)
    super(NUCAPSReader, self).__init__(config_files, **kwargs)
    self.mask_surface = self.info.get('mask_surface', mask_surface)
    self.mask_quality = self.info.get('mask_quality', mask_quality)
293,808
Test calibration coefficients against NOAA reference pages Currently the reference pages are: ir_url = https://www.ospo.noaa.gov/Operations/GOES/calibration/gvar-conversion.html vis_url = https://www.ospo.noaa.gov/Operations/GOES/calibration/goes-vis-ch-calibration.html Args: ir_url: Path or URL to HTML page with IR coefficients vis_url: Path or URL to HTML page with VIS coefficients Raises: ValueError if coefficients don't match the reference
def test_coefs(ir_url, vis_url):
    reader = GOESCoefficientReader(ir_url=ir_url, vis_url=vis_url)
    for platform in CALIB_COEFS.keys():
        for channel, coefs in CALIB_COEFS[platform].items():
            coefs_expected = reader.get_coefs(platform=platform, channel=channel)
            for cname in coefs_expected.keys():
                if not np.allclose(coefs[cname], coefs_expected[cname]):
                    raise ValueError(
                        'Coefficient {} for {} channel {} does not match the '
                        'reference'.format(cname, platform, channel))
    logger.info('Coefficients OK')
    return True
293,924
Find the nadir pixel Args: earth_mask: Mask identifying earth and space pixels sector: Specifies the scanned sector Returns: nadir row, nadir column
def _get_nadir_pixel(earth_mask, sector):
    if sector == FULL_DISC:
        logger.debug('Computing nadir pixel')

        # The earth is not centered in the image, compute bounding box
        # of the earth disc first
        rmin, rmax, cmin, cmax = bbox(earth_mask)

        # The nadir pixel is approximately at the centre of the earth disk
        nadir_row = rmin + (rmax - rmin) // 2
        nadir_col = cmin + (cmax - cmin) // 2

        return nadir_row, nadir_col

    return None, None
293,929
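A self-contained sketch of the bounding-box arithmetic used by _get_nadir_pixel above; the bbox helper here is a hypothetical stand-in for the module's helper of the same name, and the mask is synthetic.

import numpy as np

def bbox(mask):
    # Hypothetical stand-in: row/column extent of the True pixels.
    rows = np.any(mask, axis=1)
    cols = np.any(mask, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    return rmin, rmax, cmin, cmax

earth_mask = np.zeros((10, 10), dtype=bool)
earth_mask[2:8, 3:9] = True  # a small "disc" offset from the image centre
rmin, rmax, cmin, cmax = bbox(earth_mask)
nadir_row = rmin + (rmax - rmin) // 2
nadir_col = cmin + (cmax - cmin) // 2
print(nadir_row, nadir_col)  # 4 5 -> roughly the centre of the masked region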
Convert IR counts to radiance Reference: [IR]. Args: counts: Raw detector counts scale: Scale [mW-1 m2 cm sr] offset: Offset [1] Returns: Radiance [mW m-2 cm-1 sr-1]
def _ircounts2radiance(counts, scale, offset):
    rad = (counts - offset) / scale
    return rad.clip(min=0)
293,937
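A worked numeric sketch for _ircounts2radiance above, assuming the function is in scope; the scale and offset are placeholder calibration values, not real coefficients.

import numpy as np

counts = np.array([0.0, 100.0, 500.0])
scale, offset = 5.2285, 15.6854  # hypothetical calibration coefficients
print(_ircounts2radiance(counts, scale, offset))
# [ 0.    16.13  92.63] (approx.) -- negative radiances are clipped to zero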
Convert VIS counts to radiance References: [VIS] Args: counts: Raw detector counts slope: Slope [W m-2 um-1 sr-1] offset: Offset [W m-2 um-1 sr-1] Returns: Radiance [W m-2 um-1 sr-1]
def _viscounts2radiance(counts, slope, offset):
    rad = counts * slope + offset
    return rad.clip(min=0)
293,939
Apply `func` to the provided data. Args: data (xarray.DataArray): Data to be modified inplace. func (callable): Function to be applied to an xarray exclude (iterable): Bands in the 'bands' dimension to not include in the calculations. separate (bool): Apply `func` one band at a time. Default is False. pass_dask (bool): Pass the underlying dask array instead of the xarray.DataArray.
def apply_enhancement(data, func, exclude=None, separate=False, pass_dask=False): attrs = data.attrs bands = data.coords['bands'].values if exclude is None: exclude = ['A'] if 'A' in bands else [] if separate: data_arrs = [] for idx, band_name in enumerate(bands): band_data = data.sel(bands=[band_name]) if band_name in exclude: # don't modify alpha data_arrs.append(band_data) continue if pass_dask: dims = band_data.dims coords = band_data.coords d_arr = func(band_data.data, index=idx) band_data = xr.DataArray(d_arr, dims=dims, coords=coords) else: band_data = func(band_data, index=idx) data_arrs.append(band_data) # we assume that the func can add attrs attrs.update(band_data.attrs) data.data = xr.concat(data_arrs, dim='bands').data data.attrs = attrs return data else: band_data = data.sel(bands=[b for b in bands if b not in exclude]) if pass_dask: dims = band_data.dims coords = band_data.coords d_arr = func(band_data.data) band_data = xr.DataArray(d_arr, dims=dims, coords=coords) else: band_data = func(band_data) attrs.update(band_data.attrs) # combine the new data with the excluded data new_data = xr.concat([band_data, data.sel(bands=exclude)], dim='bands') data.data = new_data.sel(bands=bands).data data.attrs = attrs return data
293,974
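A minimal usage sketch for apply_enhancement above, assuming the function and its xarray/numpy imports are in scope; the square-root stretch and the random band values are made up for illustration.

import numpy as np
import xarray as xr

def sqrt_stretch(band_data):
    # Simple brightening applied to every non-excluded band.
    return band_data ** 0.5

rgba = xr.DataArray(
    np.random.rand(4, 2, 2),
    dims=('bands', 'y', 'x'),
    coords={'bands': ['R', 'G', 'B', 'A']},
)
out = apply_enhancement(rgba, sqrt_stretch)  # the 'A' band is excluded by default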
Convert convolution layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_conv(params, w_name, scope_name, inputs, layers, weights, names): print('Converting convolution ...') if names == 'short': tf_name = 'C' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) bias_name = '{0}.bias'.format(w_name) weights_name = '{0}.weight'.format(w_name) input_name = inputs[0] if len(weights[weights_name].numpy().shape) == 5: # 3D conv W = weights[weights_name].numpy().transpose(2, 3, 4, 1, 0) height, width, channels, n_layers, n_filters = W.shape if bias_name in weights: biases = weights[bias_name].numpy() has_bias = True else: biases = None has_bias = False if params['pads'][0] > 0 or params['pads'][1] > 0: padding_name = tf_name + '_pad' padding_layer = keras.layers.ZeroPadding3D( padding=(params['pads'][0], params['pads'][1], params['pads'][2]), name=padding_name ) layers[padding_name] = padding_layer(layers[input_name]) input_name = padding_name if has_bias: weights = [W, biases] else: weights = [W] conv = keras.layers.Conv3D( filters=n_filters, kernel_size=(channels, height, width), strides=(params['strides'][0], params['strides'][1], params['strides'][2]), padding='valid', weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name ) layers[scope_name] = conv(layers[input_name]) elif len(weights[weights_name].numpy().shape) == 4: # 2D conv if params['pads'][0] > 0 or params['pads'][1] > 0: padding_name = tf_name + '_pad' padding_layer = keras.layers.ZeroPadding2D( padding=(params['pads'][0], params['pads'][1]), name=padding_name ) layers[padding_name] = padding_layer(layers[input_name]) input_name = padding_name W = weights[weights_name].numpy().transpose(2, 3, 1, 0) height, width, channels_per_group, out_channels = W.shape n_groups = params['group'] in_channels = channels_per_group * n_groups if n_groups == in_channels and n_groups != 1: if bias_name in weights: biases = weights[bias_name].numpy() has_bias = True else: biases = None has_bias = False W = W.transpose(0, 1, 3, 2) if has_bias: weights = [W, biases] else: weights = [W] conv = keras.layers.DepthwiseConv2D( kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', use_bias=has_bias, activation=None, depth_multiplier=1, weights = weights, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros' ) layers[scope_name] = conv(layers[input_name]) elif n_groups != 1: # Example from https://kratzert.github.io/2017/02/24/finetuning-alexnet-with-tensorflow.html # # Split input and weights and convolve them separately # input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x) # weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=weights) # output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)] # # Concat the convolved output together again # conv = tf.concat(axis=3, values=output_groups) def target_layer(x, groups=params['group'], stride_y=params['strides'][0], stride_x=params['strides'][1]): x = tf.transpose(x, [0, 2, 3, 1]) def convolve_lambda(i, k): return tf.nn.conv2d(i, k, strides=[1, stride_y, stride_x, 1], padding='VALID') input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x) weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=W.transpose(0, 1, 2, 3)) output_groups = [convolve_lambda(i, k) for i, k in zip(input_groups, weight_groups)] layer = tf.concat(axis=3, values=output_groups) layer = 
tf.transpose(layer, [0, 3, 1, 2]) return layer lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[input_name]) else: if bias_name in weights: biases = weights[bias_name].numpy() has_bias = True else: biases = None has_bias = False if has_bias: weights = [W, biases] else: weights = [W] conv = keras.layers.Conv2D( filters=out_channels, kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name ) layers[scope_name] = conv(layers[input_name]) else: # 1D conv W = weights[weights_name].numpy().transpose(2, 1, 0) width, channels, n_filters = W.shape n_groups = params['group'] if n_groups > 1: raise AssertionError('Cannot convert conv1d with groups != 1') if bias_name in weights: biases = weights[bias_name].numpy() has_bias = True else: biases = None has_bias = False padding_name = tf_name + '_pad' padding_layer = keras.layers.ZeroPadding1D( padding=params['pads'][0], name=padding_name ) layers[padding_name] = padding_layer(layers[inputs[0]]) input_name = padding_name if has_bias: weights = [W, biases] else: weights = [W] conv = keras.layers.Conv1D( filters=channels, kernel_size=width, strides=params['strides'], padding='valid', weights=weights, use_bias=has_bias, activation=None, data_format='channels_first', dilation_rate=params['dilations'], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name ) layers[scope_name] = conv(layers[input_name])
294,116
Convert transposed convolution layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_convtranspose(params, w_name, scope_name, inputs, layers, weights, names): print('Converting transposed convolution ...') if names == 'short': tf_name = 'C' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) bias_name = '{0}.bias'.format(w_name) weights_name = '{0}.weight'.format(w_name) if len(weights[weights_name].numpy().shape) == 4: W = weights[weights_name].numpy().transpose(2, 3, 1, 0) height, width, n_filters, channels = W.shape n_groups = params['group'] if n_groups > 1: raise AssertionError('Cannot convert conv1d with groups != 1') if params['dilations'][0] > 1: raise AssertionError('Cannot convert conv1d with dilation_rate != 1') if bias_name in weights: biases = weights[bias_name].numpy() has_bias = True else: biases = None has_bias = False input_name = inputs[0] if has_bias: weights = [W, biases] else: weights = [W] conv = keras.layers.Conv2DTranspose( filters=n_filters, kernel_size=(height, width), strides=(params['strides'][0], params['strides'][1]), padding='valid', output_padding=0, weights=weights, use_bias=has_bias, activation=None, dilation_rate=params['dilations'][0], bias_initializer='zeros', kernel_initializer='zeros', name=tf_name ) layers[scope_name] = conv(layers[input_name]) # Magic ad-hoc. # See the Keras issue: https://github.com/keras-team/keras/issues/6777 layers[scope_name].set_shape(layers[scope_name]._keras_shape) pads = params['pads'] if pads[0] > 0: assert(len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1])) crop = keras.layers.Cropping2D( pads[:2], name=tf_name + '_crop' ) layers[scope_name] = crop(layers[scope_name]) else: raise AssertionError('Layer is not supported for now')
294,117
Convert sum. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_sum( params, w_name, scope_name, inputs, layers, weights, names ): print('Converting Sum ...') def target_layer(x): import keras.backend as K return K.sum(x) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,118
Convert reduce_sum layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights, names): print('Converting reduce_sum ...') keepdims = params['keepdims'] > 0 axis = params['axes'] def target_layer(x, keepdims=keepdims, axis=axis): import keras.backend as K return K.sum(x, keepdims=keepdims, axis=axis) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,119
Convert concatenation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_concat(params, w_name, scope_name, inputs, layers, weights, names): print('Converting concat ...') concat_nodes = [layers[i] for i in inputs] if len(concat_nodes) == 1: # no-op layers[scope_name] = concat_nodes[0] return if names == 'short': tf_name = 'CAT' + random_string(5) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) cat = keras.layers.Concatenate(name=tf_name, axis=params['axis']) layers[scope_name] = cat(concat_nodes)
294,120
Convert slice operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_slice(params, w_name, scope_name, inputs, layers, weights, names): print('Converting slice ...') if len(params['axes']) > 1: raise AssertionError('Cannot convert slice by multiple dimensions') if params['axes'][0] not in [0, 1, 2, 3]: raise AssertionError('Slice by dimension more than 3 or less than 0 is not supported') def target_layer(x, axis=int(params['axes'][0]), start=int(params['starts'][0]), end=int(params['ends'][0])): if axis == 0: return x[start:end] elif axis == 1: return x[:, start:end] elif axis == 2: return x[:, :, start:end] elif axis == 3: return x[:, :, :, start:end] lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,121
Convert clip operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_clip(params, w_name, scope_name, inputs, layers, weights, names): print('Converting clip ...') if params['min'] == 0: print("using ReLU({0})".format(params['max'])) layer = keras.layers.ReLU(max_value=params['max']) else: def target_layer(x, vmin=params['min'], vmax=params['max']): import tensorflow as tf return tf.clip_by_value(x, vmin, vmax) layer = keras.layers.Lambda(target_layer) layers[scope_name] = layer(layers[inputs[0]])
294,122
Convert elementwise addition. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_elementwise_add( params, w_name, scope_name, inputs, layers, weights, names ): print('Converting elementwise_add ...') if 'broadcast' in params: model0 = layers[inputs[0]] model1 = layers[inputs[1]] if names == 'short': tf_name = 'A' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) def target_layer(x): layer = tf.add(x[0], x[1]) return layer lambda_layer = keras.layers.Lambda(target_layer, name=tf_name) layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]]) else: model0 = layers[inputs[0]] model1 = layers[inputs[1]] if names == 'short': tf_name = 'A' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) add = keras.layers.Add(name=tf_name) layers[scope_name] = add([model0, model1])
294,123
Convert elementwise multiplication. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_elementwise_mul( params, w_name, scope_name, inputs, layers, weights, names ): print('Converting elementwise_mul ...') model0 = layers[inputs[0]] model1 = layers[inputs[1]] if names == 'short': tf_name = 'M' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) def target_layer(x): layer = tf.multiply( x[0], x[1] ) return layer lambda_layer = keras.layers.Lambda(target_layer, name=tf_name) layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])
294,124
Convert elementwise division. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_elementwise_div( params, w_name, scope_name, inputs, layers, weights, names ): print('Converting elementwise_div ...') if names == 'short': tf_name = 'D' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) def target_layer(x): layer = tf.div( x[0], x[1] ) return layer lambda_layer = keras.layers.Lambda(target_layer, name=tf_name) layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])
294,125
Convert elementwise subtraction. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_elementwise_sub( params, w_name, scope_name, inputs, layers, weights, names ): print('Converting elementwise_sub ...') model0 = layers[inputs[0]] model1 = layers[inputs[1]] if names == 'short': tf_name = 'S' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) sub = keras.layers.Subtract(name=tf_name) layers[scope_name] = sub([model0, model1])
294,126
Convert Linear. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_gemm(params, w_name, scope_name, inputs, layers, weights, names): print('Converting Linear ...') if names == 'short': tf_name = 'FC' + random_string(6) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) bias_name = '{0}.bias'.format(w_name) weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy().transpose() input_channels, output_channels = W.shape keras_weights = [W] has_bias = False if bias_name in weights: bias = weights[bias_name].numpy() keras_weights = [W, bias] has_bias = True dense = keras.layers.Dense( output_channels, weights=keras_weights, use_bias=has_bias, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros', ) layers[scope_name] = dense(layers[inputs[0]])
294,127
Convert matmul layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_matmul(params, w_name, scope_name, inputs, layers, weights, names): print('Converting matmul ...') if names == 'short': tf_name = 'MMUL' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) if len(inputs) == 1: weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy().transpose() input_channels, output_channels = W.shape keras_weights = [W] dense = keras.layers.Dense( output_channels, weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros', ) layers[scope_name] = dense(layers[inputs[0]]) elif len(inputs) == 2: weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy().transpose() input_channels, output_channels = W.shape keras_weights = [W] dense = keras.layers.Dense( output_channels, weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros', ) layers[scope_name] = dense(layers[inputs[0]]) else: raise AssertionError('Cannot convert matmul layer')
294,128
Convert constant layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_constant(params, w_name, scope_name, inputs, layers, weights, names): print('Converting constant ...') params_list = params['value'].numpy() def target_layer(x, value=params_list): return tf.constant(value.tolist(), shape=value.shape) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name + '_np'] = params_list # ad-hoc layers[scope_name] = lambda_layer(layers[list(layers.keys())[0]])
294,129
Convert reshape(view). Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_flatten(params, w_name, scope_name, inputs, layers, weights, names): print('Converting flatten ...') if names == 'short': tf_name = 'R' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) reshape = keras.layers.Reshape([-1], name=tf_name) layers[scope_name] = reshape(layers[inputs[0]])
294,130
Convert transpose layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_transpose(params, w_name, scope_name, inputs, layers, weights, names): print('Converting transpose ...') if params['perm'][0] != 0: if inputs[0] in layers: print('!!! Cannot permute batch dimension. Result may be wrong !!!') layers[scope_name] = layers[inputs[0]] else: print('Skip weight matrix transpose, result may be wrong.') else: if names: tf_name = 'PERM' + random_string(4) else: tf_name = w_name + str(random.random()) permute = keras.layers.Permute(params['perm'][1:], name=tf_name) layers[scope_name] = permute(layers[inputs[0]])
294,131
Convert reshape layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_reshape(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting reshape ...')
    if names == 'short':
        tf_name = 'RESH' + random_string(4)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    if len(inputs) > 1:
        if layers[inputs[1]][0] == -1:
            print('Cannot deduce batch size! It will be omitted, but the result may be wrong.')
        reshape = keras.layers.Reshape(layers[inputs[1] + '_np'], name=tf_name)
        layers[scope_name] = reshape(layers[inputs[0]])
    else:
        if inputs[0] in layers:
            reshape = keras.layers.Reshape(params['shape'][1:], name=tf_name)
            layers[scope_name] = reshape(layers[inputs[0]])
        else:
            print('Skip weight matrix transpose, but result may be wrong.')
294,132
Convert squeeze operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_squeeze(params, w_name, scope_name, inputs, layers, weights, names): print('Converting squeeze ...') if len(params['axes']) > 1: raise AssertionError('Cannot convert squeeze by multiple dimensions') def target_layer(x, axis=int(params['axes'][0])): import tensorflow as tf return tf.squeeze(x, axis=axis) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,133
Convert unsqueeze operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_unsqueeze(params, w_name, scope_name, inputs, layers, weights, names): print('Converting unsqueeze ...') if names == 'short': tf_name = 'UNSQ' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) def target_layer(x): import keras return keras.backend.expand_dims(x) lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + 'E') layers[scope_name] = lambda_layer(layers[inputs[0]])
294,134
Convert shape operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_shape(params, w_name, scope_name, inputs, layers, weights, names): print('Converting shape ...') def target_layer(x): import tensorflow as tf return tf.shape(x) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,135
Convert Average pooling. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names): print('Converting pooling ...') if names == 'short': tf_name = 'P' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) if 'kernel_shape' in params: height, width = params['kernel_shape'] else: height, width = params['kernel_size'] if 'strides' in params: stride_height, stride_width = params['strides'] else: stride_height, stride_width = params['stride'] if 'pads' in params: padding_h, padding_w, _, _ = params['pads'] else: padding_h, padding_w = params['padding'] input_name = inputs[0] pad = 'valid' if height % 2 == 1 and width % 2 == 1 and \ height // 2 == padding_h and width // 2 == padding_w and \ stride_height == 1 and stride_width == 1: pad = 'same' else: padding_name = tf_name + '_pad' padding_layer = keras.layers.ZeroPadding2D( padding=(padding_h, padding_w), name=padding_name ) layers[padding_name] = padding_layer(layers[inputs[0]]) input_name = padding_name # Pooling type AveragePooling2D pooling = keras.layers.AveragePooling2D( pool_size=(height, width), strides=(stride_height, stride_width), padding=pad, name=tf_name, data_format='channels_first' ) layers[scope_name] = pooling(layers[input_name])
294,136
Convert 3d Max pooling. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_maxpool3(params, w_name, scope_name, inputs, layers, weights, names): print('Converting pooling ...') if names == 'short': tf_name = 'P' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) if 'kernel_shape' in params: height, width, depth = params['kernel_shape'] else: height, width, depth = params['kernel_size'] if 'strides' in params: stride_height, stride_width, stride_depth = params['strides'] else: stride_height, stride_width, stride_depth = params['stride'] if 'pads' in params: padding_h, padding_w, padding_d, _, _ = params['pads'] else: padding_h, padding_w, padding_d = params['padding'] input_name = inputs[0] if padding_h > 0 and padding_w > 0 and padding_d > 0: padding_name = tf_name + '_pad' padding_layer = keras.layers.ZeroPadding3D( padding=(padding_h, padding_w, padding_d), name=padding_name ) layers[padding_name] = padding_layer(layers[inputs[0]]) input_name = padding_name # Pooling type pooling = keras.layers.MaxPooling3D( pool_size=(height, width, depth), strides=(stride_height, stride_width, stride_depth), padding='valid', name=tf_name ) layers[scope_name] = pooling(layers[input_name])
294,137
Convert adaptive_max_pool2d layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_adaptive_max_pool2d(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting adaptive_max_pool2d ...')

    if names == 'short':
        tf_name = 'APOL' + random_string(4)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    global_pool = keras.layers.GlobalMaxPooling2D(data_format='channels_first', name=tf_name)
    layers[scope_name] = global_pool(layers[inputs[0]])

    def target_layer(x):
        import keras
        return keras.backend.expand_dims(x)

    lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + 'E')
    layers[scope_name] = lambda_layer(layers[scope_name])  # double expand dims
    layers[scope_name] = lambda_layer(layers[scope_name])
294,138
Convert padding layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_padding(params, w_name, scope_name, inputs, layers, weights, names): print('Converting padding...') if params['mode'] == 'constant': # raise AssertionError('Cannot convert non-constant padding') if params['value'] != 0.0: raise AssertionError('Cannot convert non-zero padding') if names: tf_name = 'PADD' + random_string(4) else: tf_name = w_name + str(random.random()) # Magic ordering padding_name = tf_name padding_layer = keras.layers.ZeroPadding2D( padding=((params['pads'][2], params['pads'][6]), (params['pads'][3], params['pads'][7])), name=padding_name ) layers[scope_name] = padding_layer(layers[inputs[0]]) elif params['mode'] == 'reflect': def target_layer(x, pads=params['pads']): # x = tf.transpose(x, [0, 2, 3, 1]) layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT') # layer = tf.transpose(layer, [0, 3, 1, 2]) return layer lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,139
Convert batch normalization layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_batchnorm(params, w_name, scope_name, inputs, layers, weights, names): print('Converting batchnorm ...') if names == 'short': tf_name = 'BN' + random_string(6) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) bias_name = '{0}.bias'.format(w_name) weights_name = '{0}.weight'.format(w_name) mean_name = '{0}.running_mean'.format(w_name) var_name = '{0}.running_var'.format(w_name) if bias_name in weights: beta = weights[bias_name].numpy() if weights_name in weights: gamma = weights[weights_name].numpy() mean = weights[mean_name].numpy() variance = weights[var_name].numpy() eps = params['epsilon'] momentum = params['momentum'] if weights_name not in weights: bn = keras.layers.BatchNormalization( axis=1, momentum=momentum, epsilon=eps, center=False, scale=False, weights=[mean, variance], name=tf_name ) else: bn = keras.layers.BatchNormalization( axis=1, momentum=momentum, epsilon=eps, weights=[gamma, beta, mean, variance], name=tf_name ) layers[scope_name] = bn(layers[inputs[0]])
294,140
Convert instance normalization layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_instancenorm(params, w_name, scope_name, inputs, layers, weights, names): print('Converting instancenorm ...') if names == 'short': tf_name = 'IN' + random_string(6) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) assert(len(inputs) == 3) bias_name = '{0}.bias'.format(w_name) weights_name = '{0}.weight'.format(w_name) # Use previously taken constants if inputs[-2] + '_np' in layers: gamma = layers[inputs[-2] + '_np'] else: gamma = weights[weights_name].numpy() if inputs[-1] + '_np' in layers: beta = layers[inputs[-1] + '_np'] else: beta = weights[bias_name].numpy() def target_layer(x, epsilon=params['epsilon'], gamma=gamma, beta=beta): layer = tf.contrib.layers.instance_norm( x, param_initializers={'beta': tf.constant_initializer(beta), 'gamma': tf.constant_initializer(gamma)}, epsilon=epsilon, data_format='NCHW', trainable=False ) return layer lambda_layer = keras.layers.Lambda(target_layer, name=tf_name) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,141
Convert dropout. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names): print('Converting dropout ...') if names == 'short': tf_name = 'DO' + random_string(6) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) dropout = keras.layers.Dropout(rate=params['ratio'], name=tf_name) layers[scope_name] = dropout(layers[inputs[0]])
294,142
Convert relu layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_relu(params, w_name, scope_name, inputs, layers, weights, names): print('Converting relu ...') if names == 'short': tf_name = 'RELU' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) relu = keras.layers.Activation('relu', name=tf_name) layers[scope_name] = relu(layers[inputs[0]])
294,143
Convert leaky relu layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, names): print('Converting lrelu ...') if names == 'short': tf_name = 'lRELU' + random_string(3) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) leakyrelu = \ keras.layers.LeakyReLU(alpha=params['alpha'], name=tf_name) layers[scope_name] = leakyrelu(layers[inputs[0]])
294,144
Convert sigmoid layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_sigmoid(params, w_name, scope_name, inputs, layers, weights, names): print('Converting sigmoid ...') if names == 'short': tf_name = 'SIGM' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) sigmoid = keras.layers.Activation('sigmoid', name=tf_name) layers[scope_name] = sigmoid(layers[inputs[0]])
294,145
Convert softmax layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_softmax(params, w_name, scope_name, inputs, layers, weights, names): print('Converting softmax ...') if names == 'short': tf_name = 'SMAX' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) def target_layer(x, dim=params['dim']): import keras return keras.activations.softmax(x, axis=dim) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,146
Convert tanh layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_tanh(params, w_name, scope_name, inputs, layers, weights, names): print('Converting tanh ...') if names == 'short': tf_name = 'TANH' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) tanh = keras.layers.Activation('tanh', name=tf_name) layers[scope_name] = tanh(layers[inputs[0]])
294,147
Convert hardtanh layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_hardtanh(params, w_name, scope_name, inputs, layers, weights, names): print('Converting hardtanh (clip) ...') def target_layer(x, max_val=float(params['max_val']), min_val=float(params['min_val'])): return tf.minimum(max_val, tf.maximum(min_val, x)) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,148
Convert selu layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_selu(params, w_name, scope_name, inputs, layers, weights, names): print('Converting selu ...') if names == 'short': tf_name = 'SELU' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) selu = keras.layers.Activation('selu', name=tf_name) layers[scope_name] = selu(layers[inputs[0]])
294,149
Convert upsample_bilinear2d layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_upsample_bilinear(params, w_name, scope_name, inputs, layers, weights, names): print('Converting upsample...') if names == 'short': tf_name = 'UPSL' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) output_size = params['output_size'] align_corners = params['align_corners'] > 0 def target_layer(x, size=output_size, align_corners=align_corners): import tensorflow as tf x = tf.transpose(x, [0, 2, 3, 1]) x = tf.image.resize_images(x, size, align_corners=align_corners) x = tf.transpose(x, [0, 3, 1, 2]) return x lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
294,150
Convert nearest upsampling layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_upsample(params, w_name, scope_name, inputs, layers, weights, names): print('Converting upsample...') if params['mode'] != 'nearest': raise AssertionError('Cannot convert non-nearest upsampling') if names == 'short': tf_name = 'UPSL' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) if 'height_scale' in params: scale = (params['height_scale'], params['width_scale']) elif len(inputs) == 2: scale = layers[inputs[-1] + '_np'][-2:] upsampling = keras.layers.UpSampling2D( size=scale, name=tf_name ) layers[scope_name] = upsampling(layers[inputs[0]])
294,151
Convert gather (embedding) layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def convert_gather(params, w_name, scope_name, inputs, layers, weights, names): print('Converting embedding ...') if names == 'short': tf_name = 'EMBD' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy() input_channels, output_channels = W.shape keras_weights = [W] dense = keras.layers.Embedding( input_channels, weights=keras_weights, output_dim=output_channels, name=tf_name ) layers[scope_name] = dense(layers[inputs[1]])
294,152
Convert a given PyTorch model to Keras using the registered layer converters. Args: model: pytorch model args: pytorch model arguments input_shapes: keras input shapes (one for each InputLayer) change_ordering: change CHW to HWC training: switch model to training mode verbose: verbose output names: use short names, use a random suffix, or keep original names for keras layers Returns: model: created keras model.
def pytorch_to_keras( model, args, input_shapes, change_ordering=False, training=False, verbose=False, names=False, ): # PyTorch JIT tracing if isinstance(args, torch.autograd.Variable): args = (args, ) # Workaround for previous versions if isinstance(input_shapes, tuple): input_shapes = [input_shapes] orig_state_dict_keys = _unique_state_dict(model).keys() with set_training(model, training): trace, torch_out = torch.jit.get_trace_graph(model, tuple(args)) if orig_state_dict_keys != _unique_state_dict(model).keys(): raise RuntimeError("state_dict changed after running the tracer; " "something weird is happening in your model!") # _optimize_trace(trace, False) if version.parse('0.4.0') < version.parse(torch.__version__): trace.set_graph(_optimize_graph(trace.graph(), OperatorExportTypes.ONNX)) else: trace.set_graph(_optimize_graph(trace.graph(), False)) trace.graph().lint() if verbose: print(trace.graph()) # Get all graph nodes nodes = list(trace.graph().nodes()) # Optimize Flatten: # When we have something loke that: # # %523 : Long() = onnx::Constant[value={0}](), scope: ResNet # %524 : Dynamic = onnx::Shape(%522), scope: ResNet # %526 : Long() = onnx::Gather[axis=0](%524, %523), scope: ResNet # %527 : Long() = onnx::Constant[value={-1}](), scope: ResNet # %534 : Dynamic = onnx::Unsqueeze[axes=[0]](%526) # %535 : Dynamic = onnx::Unsqueeze[axes=[0]](%527) # %536 : Dynamic = onnx::Concat[axis=0](%534, %535) # %529 : Float(1, 512) = onnx::Reshape(%522, %536), scope: ResNet # # It's better to replace it with onnx::Flatten if six.PY3: from types import SimpleNamespace seq_to_find = \ ['onnx::Constant', 'onnx::Shape', 'onnx::Gather', 'onnx::Constant', 'onnx::Unsqueeze', 'onnx::Unsqueeze', 'onnx::Concat', 'onnx::Reshape'] k = 0 s = 0 for i, node in enumerate(nodes): if node.kind() == seq_to_find[k]: if k == 0: s = i k += 1 if k == len(seq_to_find): reshape_op = nodes[s + k - 1] flatten_op = { 'kind': (lambda: 'onnx::Flatten'), 'attributeNames': (lambda: {}), 'outputs': (lambda: list(reshape_op.outputs())), 'scopeName': (lambda: reshape_op.scopeName()), 'inputs': (lambda: list(reshape_op.inputs())[:1]), '__str__': (lambda: reshape_op.__str__()), } nodes = nodes[:s] + [SimpleNamespace(**flatten_op)] + nodes[s+k:] break else: k = 0 s = -1 # Collect graph inputs and outputs graph_outputs = [get_leaf_id(n) for n in trace.graph().outputs()] graph_inputs = [get_leaf_id(n) for n in trace.graph().inputs()] # Collect model state dict state_dict = _unique_state_dict(model) if verbose: print('Graph inputs:', graph_inputs) print('Graph outputs:', graph_outputs) print('State dict:', list(state_dict)) import re import keras from keras import backend as K K.set_image_data_format('channels_first') layers = dict() keras_inputs = [] for i in range(len(args)): layers[graph_inputs[i]] = keras.layers.InputLayer( input_shape=input_shapes[i], name='input{0}'.format(i) ).output keras_inputs.append(layers[graph_inputs[i]]) outputs = [] group_indices = defaultdict(lambda: 0, {}) for node in nodes: node_inputs = list(node.inputs()) node_input_names = [] for node_input in node_inputs: node_input_names.append(get_leaf_id(node_input)) node_type = node.kind() node_scope_name = node.scopeName() node_id = get_node_id(node) node_name_regex = re.findall(r'\[([\w\d.\-\[\]\s]+)\]', node_scope_name) try: int(node_name_regex[-1]) node_weigth_group_name = '.'.join( node_name_regex[:-1] ) node_weights_name = node_weigth_group_name + '.' 
+ str(group_indices[node_weigth_group_name]) group_indices[node_weigth_group_name] += 1 except ValueError: node_weights_name = '.'.join( node_name_regex ) except IndexError: node_weights_name = '.'.join(node_input_names) node_attrs = {k: node[k] for k in node.attributeNames()} node_outputs = list(node.outputs()) node_outputs_names = [] for node_output in node_outputs: if node_output.node().scopeName(): node_outputs_names.append(node_output.node().scopeName()) if verbose: print(' ____ ') print('graph node:', node_scope_name) print('node id:', node_id) print('type:', node_type) print('inputs:', node_input_names) print('outputs:', node_outputs_names) print('name in state_dict:', node_weights_name) print('attrs:', node_attrs) print('is_terminal:', node_id in graph_outputs) AVAILABLE_CONVERTERS[node_type]( node_attrs, node_weights_name, node_id, node_input_names, layers, state_dict, names ) if node_id in graph_outputs: outputs.append(layers[node_id]) model = keras.models.Model(inputs=keras_inputs, outputs=outputs) if change_ordering: import numpy as np conf = model.get_config() for layer in conf['layers']: if layer['config'] and 'batch_input_shape' in layer['config']: layer['config']['batch_input_shape'] = \ tuple(np.reshape(np.array( [ [None] + list(layer['config']['batch_input_shape'][2:][:]) + [layer['config']['batch_input_shape'][1]] ]), -1 )) if layer['config'] and 'target_shape' in layer['config']: if len(list(layer['config']['target_shape'][1:][:])) > 0: layer['config']['target_shape'] = \ tuple(np.reshape(np.array( [ list(layer['config']['target_shape'][1:][:]), layer['config']['target_shape'][0] ]), -1 ),) if layer['config'] and 'data_format' in layer['config']: layer['config']['data_format'] = 'channels_last' if layer['config'] and 'axis' in layer['config']: layer['config']['axis'] = 3 K.set_image_data_format('channels_last') model_tf_ordering = keras.models.Model.from_config(conf) # from keras.utils.layer_utils import convert_all_kernels_in_model # convert_all_kernels_in_model(model) for dst_layer, src_layer in zip( model_tf_ordering.layers, model.layers ): dst_layer.set_weights(src_layer.get_weights()) model = model_tf_ordering print('Your model was (probably) successfully converted! ' 'Please, follow the repository https://github.com/nerox8664/pytorch2keras and give a star :)') return model
294,155
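A typical invocation sketch for pytorch_to_keras above, in the style of the pytorch2keras README; the torchvision model and input shape are placeholders, and the old library versions it targets (torch.jit tracing API, channels-first Keras) are assumed.

import numpy as np
import torch
from torch.autograd import Variable
from torchvision.models import resnet18

model = resnet18()
model.eval()

input_np = np.random.uniform(0, 1, (1, 3, 224, 224)).astype(np.float32)
input_var = Variable(torch.FloatTensor(input_np))

# One input shape per InputLayer, without the batch dimension.
k_model = pytorch_to_keras(model, input_var, [(3, 224, 224)], verbose=True)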
Compile by saving to file and importing that. Compiling the AST/source code this way ensures that the source code is readable by e.g. `pdb` or `inspect`. Args: source: The code to compile, either as a string or as an AST. globals_: A dictionary of variables that should be available as globals in the compiled module. They will be monkey patched after importing the module. Returns: A module object containing the compiled source code.
def compile_file(source, globals_=None):
    if isinstance(source, gast.AST):
        source = quoting.to_source(source)

    # Write source to temporary file
    tempdir = tempfile.mkdtemp()
    uuid = str(uuid4().hex[:4])
    tmpname = os.path.join(tempdir, 'tangent_%s.py' % uuid)
    with open(tmpname, 'w') as f:
        f.write(source)

    # Load the temporary file as a module
    module_name = 'tangent_%s' % uuid
    if six.PY3:
        spec = util.spec_from_file_location(module_name, tmpname)
        m = util.module_from_spec(spec)
        spec.loader.exec_module(m)
    else:
        m = imp.load_source(module_name, tmpname)

    # Update the module's namespace
    if globals_:
        m.__dict__.update(globals_)
    return m
297,393
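A usage sketch for compile_file above, assuming it is in scope with its tempfile/importlib imports; the source string and the SCALE global are hypothetical.

src = "def double(x):\n    return 2 * x\n"
mod = compile_file(src, globals_={'SCALE': 3})
print(mod.double(5))  # 10
print(mod.SCALE)      # 3 -- patched into the module namespace after import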
Perform AD on a single function and return the AST. Args: See `grad`. Returns: node: The AST of a module containing the adjoint and primal function definitions. required: A list of non-built in functions that this function called, and of which the primals and adjoints need to be made available in order for the returned function to run.
def autodiff_ast(func, wrt, motion, mode, preserve_result, check_dims, verbose): node = annotate.resolve_calls(func) node = desugar.explicit_loop_indexes(node) fence.validate(node, inspect.getsource(func)) node = anf_.anf(node) if verbose >= 2: print('ANF') print(quoting.to_source(node)) if mode == 'reverse': node, required, stack = reverse_ad.reverse_ad(node.body[0], wrt, preserve_result, check_dims) if verbose >= 2: print('RAW') print(quoting.to_source(node)) if motion == 'split': node = reverse_ad.split(node, stack) else: node = reverse_ad.joint(node) if verbose >= 2: print('MOTION') print(quoting.to_source(node)) elif mode == 'forward': node, required = forward_ad.forward_ad(node.body[0], wrt, preserve_result, check_dims) return node, required
297,396
Create a user-friendly forward function. Ensures that a single value instead of a tuple is returned if the user asked for the gradient with respect to only one input. Args: out_node: The function definition AST. Returns: The function definition with potentially changed return statement.
def _create_forward(out_node):
    retval = out_node.body[0].body[-1]
    if len(retval.value.elts) == 1:
        retval.value = retval.value.elts[0]
    return out_node
297,402
A decorator which removes the `with insert_grad_of` statement. This allows the function to be called as usual. Args: f: A function Returns: A function with any `with insert_grad_of` context managers removed.
def tangent(f): node = annotate.resolve_calls(f) RemoveWith().visit(node) wrapped = functools.wraps(f)(compile_.compile_function(node)) wrapped.tangent = f return wrapped
297,403
Build a CFG for a function. Args: node: A function definition the body of which to analyze. Returns: A CFG object. Raises: TypeError: If the input is not a function definition.
def build_cfg(cls, node):
    if not isinstance(node, gast.FunctionDef):
        raise TypeError('input must be a function definition')
    cfg = cls()
    cfg.entry = Node(node.args)
    cfg.head = [cfg.entry]
    cfg.visit_statements(node.body)
    cfg.exit = Node(None)
    cfg.set_head(cfg.exit)
    cfg.backlink(cfg.entry)
    return cfg
297,407
Perform a series of optimization passes. This function performs a series of optimizations (dead code elimination, constant folding, variable folding) on the given AST. It optimizes the code repeatedly until reaching a fixed point. The fixed point is determined roughly by checking whether the number of lines of generated source code changed after the latest pass. Args: node: The AST to optimize. Returns: The optimized AST.
def optimize(node):
    node = dead_code_elimination(node)
    node = constant_folding(node)
    node = assignment_propagation(node)
    return node
297,421
Check how many times a variable definition was used. Args: node: An AST to analyze. Returns: A dictionary from assignment nodes to the number of times the assigned to variable was used.
def read_counts(node):
    cfg.forward(node, cfg.ReachingDefinitions())
    rc = ReadCounts()
    rc.visit(node)
    return rc.n_read
297,423
Perform assignment propagation. Assignment propagation is not a compiler optimization as much as a readability optimization. If a variable name is used only once, it gets renamed when possible e.g. `y = x; z = y` will become `z = x`. Args: node: The AST to optimize. Returns: The optimized AST.
def assignment_propagation(node): n_reads = read_counts(node) to_remove = [] for succ in gast.walk(node): # We found an assignment of the form a = b # - Left-hand side is a Name, right-hand side is a Name. if (isinstance(succ, gast.Assign) and isinstance(succ.value, gast.Name) and len(succ.targets) == 1 and isinstance(succ.targets[0], gast.Name)): rhs_name = succ.value.id # We now find all the places that b was defined rhs_defs = [def_[1] for def_ in anno.getanno(succ, 'definitions_in') if def_[0] == rhs_name] # If b was defined in only one place (not an argument), and wasn't used # anywhere else but in a == b, and was defined as b = x, then we can fold # the statements if (len(rhs_defs) == 1 and isinstance(rhs_defs[0], gast.Assign) and n_reads[rhs_defs[0]] == 1 and isinstance(rhs_defs[0].value, gast.Name) and isinstance(rhs_defs[0].targets[0], gast.Name)): # Mark rhs_def for deletion to_remove.append(rhs_defs[0]) # Propagate the definition succ.value = rhs_defs[0].value # Remove the definitions we folded transformers.Remove(to_remove).visit(node) anno.clearanno(node) return node
297,424
Reverse the broadcasting operation. See utils.py. Args: tensor: A Tensor. shape: A shape that could have been broadcasted to the shape of tensor. Returns: Tensor with dimensions summed to match `shape`.
def unbroadcast_tfe_to(tensor, shape):
    axis = utils.create_unbroadcast_axis(shape, shape_as_list(tensor))
    return tf.reshape(tf.reduce_sum(tensor, axis=axis), shape)
297,430
Reverse summing over a dimension. See utils.py. Args: tensor: The tensor that was reduced. shape: A list, the original shape of the tensor before reduction. axis: The axis or axes that were summed. keepdims: Whether these axes were kept as singleton axes. Returns: A tensor with axes broadcast to match the shape of the original tensor.
def unreduce_tensor(tensor, shape, axis, keepdims):
    if not keepdims:
        if axis is None:
            axis = range(len(shape))
        elif isinstance(axis, int):
            axis = axis,
        for ax in sorted(axis):
            tensor = tf.expand_dims(tensor, ax)
    tile_shape = np.array(shape) / np.array(shape_as_list(tensor))
    return tf.tile(tensor, tile_shape)
297,431
Ensure a variable name is valid. Note: Assumes variable names are ASCII, which isn't necessarily true in Python 3. Args: name: A proposed variable name. Returns: A valid version of the name.
def valid(self, name):
    name = re.sub('[^0-9a-zA-Z_]', '', name)
    if re.match('[0-9]', name):
        name = '_' + name
    return name
297,476
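A quick illustration of the sanitization rules in valid above; since it is a method, a hypothetical stand-alone copy is used here purely for demonstration.

import re

def valid(name):
    # Stand-alone copy of the method above, for illustration only.
    name = re.sub('[^0-9a-zA-Z_]', '', name)
    if re.match('[0-9]', name):
        name = '_' + name
    return name

print(valid('d^2y/dx^2'))  # d2ydx2
print(valid('2nd_order'))  # _2nd_order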
Reverse the broadcasting operation. Args: array: An array. like: An array that could have been broadcasted to the shape of array. Returns: Tensor with certain dimensions summed to match the shape of `like`.
def unbroadcast(array, like):
    unbroadcaster = unbroadcasters[type(array)]
    return unbroadcaster(array, like)
297,496
Creates the reduction axis for unbroadcasting. Args: shape: A list. The shape after the broadcast operation. broadcast_shape: A list. The original shape the array being unbroadcast had. Returns: A list. The axes along which the array needs to be reduced. These axes will be distributed evenly into the original shape.
def create_unbroadcast_axis(shape, broadcast_shape): return tuple( -(1 + i) for i in range(len(broadcast_shape)) if i >= len(shape) or broadcast_shape[-(1 + i)] > shape[-(1 + i)])
297,497
Reverse the broadcasting operation. Args: array: An array. shape: A shape that could have been broadcasted to the shape of array. Returns: Array with dimensions summed to match `shape`.
def unbroadcast_numpy_to(array, shape): axis = create_unbroadcast_axis(shape, numpy.shape(array)) return numpy.reshape(numpy.sum(array, axis=axis), shape)
297,498
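A NumPy-only worked example of unbroadcasting, restating the two helpers above so the snippet runs standalone. If `b` with shape `(3,)` was broadcast against `a` with shape `(2, 3)`, the gradient arriving with shape `(2, 3)` has to be summed back down to `(3,)`.

```python
import numpy

def create_unbroadcast_axis(shape, broadcast_shape):
    return tuple(
        -(1 + i) for i in range(len(broadcast_shape))
        if i >= len(shape) or broadcast_shape[-(1 + i)] > shape[-(1 + i)])

def unbroadcast_numpy_to(array, shape):
    axis = create_unbroadcast_axis(shape, numpy.shape(array))
    return numpy.reshape(numpy.sum(array, axis=axis), shape)

grad = numpy.ones((2, 3))              # gradient w.r.t. the broadcast result
db = unbroadcast_numpy_to(grad, (3,))  # summed back to b's original shape
print(db)        # [2. 2. 2.]
print(db.shape)  # (3,)
```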
Reverse summing over a dimension. Args: array: The array that was reduced. shape: The original shape of the array before reduction. axis: The axis or axes that were summed. keepdims: Whether these axes were kept as singleton axes. Returns: An array with axes broadcast to match the shape of the original array.
def unreduce(array, shape, axis, keepdims): unreducer = unreducers[type(array)] return unreducer(array, shape, axis, keepdims)
297,499
Reverse summing over a dimension. Args: array: The array that was reduced. original_array: An array whose shape to unreduce to. axis: The axis or axes that were summed. keepdims: Whether these axes were kept as singleton axes. Returns: An array with axes broadcast to match the shape of the original array.
def unreduce_like(array, original_array, axis, keepdims): atype = type(array) unreducer = unreducers[atype] shape = shape_functions[atype] return unreducer(array, shape(original_array), axis, keepdims)
297,500
Reverse summing over a dimension, NumPy implementation. Args: array: The array that was reduced. shape: The original shape of the array before reduction. axis: The axis or axes that were summed. keepdims: Whether these axes were kept as singleton axes. Returns: An array with axes broadcast to match the shape of the original array.
def unreduce_array(array, shape, axis, keepdims): # NumPy uses a special default value for keepdims, which is equivalent to # False. if axis is not None and (not keepdims or keepdims is numpy._NoValue): # pylint: disable=protected-access if isinstance(axis, int): axis = axis, for ax in sorted(axis): array = numpy.expand_dims(array, ax) return numpy.broadcast_to(array, shape)
297,501
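A worked NumPy example of what unreducing does, with a slightly simplified version of the function above (the `numpy._NoValue` keepdims special case is dropped): the gradient of `x.sum(axis=0)` has shape `(3,)` and is expanded and broadcast back to `x`'s shape `(2, 3)`.

```python
import numpy

def unreduce_array(array, shape, axis, keepdims):
    # Re-insert the summed axes as singletons, then broadcast to the original shape.
    if axis is not None and not keepdims:
        if isinstance(axis, int):
            axis = (axis,)
        for ax in sorted(axis):
            array = numpy.expand_dims(array, ax)
    return numpy.broadcast_to(array, shape)

dy = numpy.array([1.0, 2.0, 3.0])          # gradient of x.sum(axis=0)
dx = unreduce_array(dy, (2, 3), 0, False)  # gradient w.r.t. x itself
print(dx)
# [[1. 2. 3.]
#  [1. 2. 3.]]
```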
A functional form of the `astype` method. Args: array: The array or number to cast. y: An array or number whose dtype `array` should be cast to. Returns: An array or number with the same dtype as `y`.
def astype(array, y): if isinstance(y, autograd.core.Node): return array.astype(numpy.array(y.value).dtype) return array.astype(numpy.array(y).dtype)
297,502
Initialize the gradient for an object. Args: obj: The object to initialize the gradient for, can be either a number, array, tuple, list, or dictionary. allow_lazy_initializer: Whether to allow using the ZeroGradient wrapper, for efficiency. Returns: An object of the same type, shape, etc. but with all numeric values set to zero. If the type is unknown, a zero is returned.
def init_grad(obj, allow_lazy_initializer=False): if obj is None: # TODO: fixes.py appears to pass None value and expect 0.0 back. Bug? return 0.0 initializer, supports_lazy_initializer = grad_initializers[type(obj)] if supports_lazy_initializer: if isinstance(obj, ZeroGradient): if allow_lazy_initializer: return ZeroGradient(obj.like) else: # TODO: Not sure this should normally be hit. In forward-over-reverse? return obj.instantiate() else: if allow_lazy_initializer: return ZeroGradient(obj) else: assert not isinstance(obj, ZeroGradient) return initializer(obj)
297,503
Recursively add the gradient of two objects. Args: left: The left value to add. Can be either an array, a number, list or dictionary. right: The right value. Must be of the same type (recursively) as the left. Returns: The sum of the two gradients, which will be of the same type.
def add_grad(left, right): # We assume that initial gradients are always identity WRT add_grad. # We also assume that only init_grad could have created None values. assert left is not None and right is not None left_type = type(left) right_type = type(right) if left_type is ZeroGradient: return right if right_type is ZeroGradient: return left return grad_adders[(left_type, right_type)](left, right)
297,510
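A pared-down sketch of the recursive addition for nested containers, leaving out the `ZeroGradient` fast path and the dispatch-table machinery; it only handles numbers, lists/tuples and dicts, which is enough to show the recursion.

```python
def add_grad(left, right):
    # Containers are added key-wise and element-wise; leaves are plain sums.
    if isinstance(left, dict):
        return {key: add_grad(left[key], right[key]) for key in left}
    if isinstance(left, (list, tuple)):
        return type(left)(add_grad(l, r) for l, r in zip(left, right))
    return left + right

print(add_grad({'w': [1.0, 2.0], 'b': 3.0},
               {'w': [0.5, 0.5], 'b': 1.0}))
# {'w': [1.5, 2.5], 'b': 4.0}
```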
Recursively check if shapes of object `a` and `b` match. Will walk lists, tuples and dicts. Args: a: object of type (numpy.ndarray, tf.Tensor, list, tuple, dict) to check for matching shapes against `b`. b: object to check for matching shape against `a`. Returns: A boolean indicating whether the shapes of `a` and `b` match.
def shapes_match(a, b): if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)): if len(a) != len(b): return False return all([shapes_match(ia, ib) for ia, ib in zip(a, b)]) elif isinstance(a, dict) and isinstance(b, dict): if len(a) != len(b): return False match = True for (ak, av), (bk, bv) in zip(a.items(), b.items()): match = match and all([ak == bk and shapes_match(av, bv)]) return match else: shape_checker = shape_checkers[(type(a), type(b))] return shape_checker(a, b)
297,513
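A NumPy-only version of the recursive shape check, without the `shape_checkers` dispatch table that the real function uses to also compare `tf.Tensor` shapes; scalars compare equal because `numpy.shape` returns `()` for both.

```python
import numpy

def shapes_match(a, b):
    if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
        return len(a) == len(b) and all(shapes_match(x, y) for x, y in zip(a, b))
    if isinstance(a, dict) and isinstance(b, dict):
        return a.keys() == b.keys() and all(shapes_match(a[k], b[k]) for k in a)
    return numpy.shape(a) == numpy.shape(b)

x = {'w': numpy.zeros((2, 3)), 'b': [numpy.zeros(3), 1.0]}
y = {'w': numpy.ones((2, 3)), 'b': [numpy.ones(3), 2.0]}
print(shapes_match(x, y))                                       # True
print(shapes_match(x, {'w': numpy.ones((3, 2)), 'b': y['b']}))  # False
```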
Push a value onto the stack (i.e. record it on the tape). Args: stack: The stack object, which must support appending values. x: The value to append. If it is a mutable object like an array or list, it will be copied before being added onto the stack. op_id: A unique variable that is also passed into the corresponding pop. Allows optimization passes to track pairs of pushes and pops.
def push(stack, x, op_id): if isinstance(x, numpy.ndarray): x = x.copy() elif isinstance(x, list): x = x[:] if __debug__: stack.append((x, op_id)) else: stack.append(x)
297,514
Pop a value from the stack (i.e. read it from the tape). Args: stack: The stack to pop from. op_id: A unique variable that is also passed into the matching push. Allows optimization passes to track pairs of pushes and pops. Returns: The last value.
def pop(stack, op_id): if __debug__: pushed_value, pushed_op_id = stack.pop() assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id) else: pushed_value = stack.pop() return pushed_value
297,515
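A toy run of the push/pop tape protocol with a plain list standing in for `tangent.Stack` and the `__debug__` branching removed: the primal pushes intermediate values with an operation id, and the adjoint pops them in reverse order while checking that the ids match.

```python
def push(stack, x, op_id):
    stack.append((x, op_id))

def pop(stack, op_id):
    value, pushed_op_id = stack.pop()
    assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id)
    return value

tape = []
push(tape, 2.0, '_x')            # primal: save x before it gets overwritten
push(tape, 4.0, '_y')            # primal: save y
assert pop(tape, '_y') == 4.0    # adjoint: restore in reverse order
assert pop(tape, '_x') == 2.0
```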
Proxy of pop, where we know we're popping a stack off of a stack. We know that we don't need to differentiate through this. See pop() for more. Args: stack: The stack to pop from. op_id: A unique variable that is also passed into the matching push. Allows optimization passes to track pairs of pushes and pops. Returns: The last value.
def pop_stack(stack, op_id): if __debug__: pushed_stack, pushed_op_id = stack.pop() assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id) else: pushed_stack = stack.pop() return pushed_stack
297,516
Proxy of push, where we know we're pushing a stack onto a stack. Used when differentiating call trees, where sub-functions get their own stack. See push() for more. Args: stack: The stack object, which must support appending values. substack: The stack to append. op_id: A unique variable that is also passed into the corresponding pop. Allows optimization passes to track pairs of pushes and pops. Raises: ValueError: If a non-stack value for `substack` is passed.
def push_stack(stack, substack, op_id): if substack is not None and not isinstance(substack, Stack): raise ValueError( 'Substack should be type tangent.Stack or None, instead found %s' % type(substack)) if __debug__: stack.append((substack, op_id)) else: stack.append(substack)
297,517
Gradient of NumPy dot product w.r.t. the left hand side. Args: dy: The gradient with respect to the output. x1: The left hand side of the `numpy.dot` function. x2: The right hand side of the `numpy.dot` function. Returns: The gradient with respect to `x1`, i.e. `dy.dot(x2.T)` with all the broadcasting involved.
def grad_dot(dy, x1, x2): if len(numpy.shape(x1)) == 1: dy = numpy.atleast_2d(dy) elif len(numpy.shape(x2)) == 1: dy = numpy.transpose(numpy.atleast_2d(dy)) x2 = numpy.transpose(numpy.atleast_2d(x2)) x2_t = numpy.transpose(numpy.atleast_2d( numpy.sum(x2, axis=tuple(numpy.arange(numpy.ndim(x2) - 2))))) dy_x2 = numpy.sum(dy, axis=tuple(-numpy.arange(numpy.ndim(x2) - 2) - 2)) return numpy.reshape(numpy.dot(dy_x2, x2_t), numpy.shape(x1))
297,518
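A quick finite-difference sanity check of the plain 2-D case, where the left-hand-side gradient reduces to `dy.dot(x2.T)` and none of the broadcasting handled by the function above is needed; the tolerance is deliberately loose since this is only a forward difference.

```python
import numpy

def grad_dot_2d(dy, x1, x2):
    # 2-D special case of the gradient w.r.t. the left operand of numpy.dot.
    return dy.dot(x2.T)

rng = numpy.random.RandomState(0)
x1, x2 = rng.randn(2, 3), rng.randn(3, 4)
dy = numpy.ones((2, 4))                 # gradient of numpy.sum(x1.dot(x2))
analytic = grad_dot_2d(dy, x1, x2)

eps, numeric = 1e-6, numpy.zeros_like(x1)
for i in range(x1.shape[0]):
    for j in range(x1.shape[1]):
        bumped = x1.copy()
        bumped[i, j] += eps
        numeric[i, j] = (bumped.dot(x2).sum() - x1.dot(x2).sum()) / eps

print(numpy.allclose(analytic, numeric, atol=1e-4))  # True
```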
Creates a LanguageFence. Args: source: String, the source code of the AST that will be verified. strict: Boolean, set to False to allow unsafe constructs. Raises: ValueError: if source code has not been supplied.
def __init__(self, source, strict=True): self._visited_top_module = False if not source: raise ValueError('The source code of the tree is required.') self._source = source self._strict = strict # Location information is used to locate the offending elements # in the source code. self._current_lineno = None # Only consistent during a visit. self._current_offset = None # Only consistent during a visit. super(LanguageFence, self).__init__()
297,566
Get the name of a variable. Args: node: A `Name`, `Subscript` or `Attribute` node. Returns: The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
def get_name(node): if isinstance(node, gast.Name): return node.id elif isinstance(node, (gast.Subscript, gast.Attribute)): return get_name(node.value) else: raise TypeError
297,600
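The same lookup written against the stdlib `ast` module (Tangent uses `gast`, whose node classes mirror these), just to show that `x`, `x.i` and `x[i]` all resolve to the base name by walking `.value`.

```python
import ast

def get_name(node):
    if isinstance(node, ast.Name):
        return node.id
    if isinstance(node, (ast.Subscript, ast.Attribute)):
        return get_name(node.value)
    raise TypeError(node)

for src in ('x', 'x.i', 'x[i]'):
    expr = ast.parse(src, mode='eval').body
    print(src, '->', get_name(expr))   # each prints '... -> x'
```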
Return the variable names created or mutated by this statement. This function considers assign statements, augmented assign statements, and the targets of for loops, as well as function arguments. For example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and `y`, `for i in range(x)` will return `i`, etc. Args: node: An AST node Returns: A set of variable names (strings) of all the variables created or mutated.
def get_updated(node): if isinstance(node, gast.Assign): return set.union(*(_get_target(target) for target in node.targets)) elif isinstance(node, (gast.For, gast.AugAssign)): return _get_target(node.target) elif isinstance(node, gast.arguments): targets = set(arg.id for arg in node.args + node.kwonlyargs) if node.vararg: targets.add(node.vararg.id) if node.kwarg: targets.add(node.kwarg.id) return targets else: return set()
297,602
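A rough stdlib-`ast` version covering the assignment, augmented-assignment and for-loop cases from the docstring (function arguments are left out); subscripts and attributes are unwrapped to their base name, mirroring what the `_get_target` helper used by the real code does.

```python
import ast

def _target_names(target):
    # x[0] and x.attr both update the base variable x.
    while isinstance(target, (ast.Subscript, ast.Attribute)):
        target = target.value
    if isinstance(target, ast.Name):
        return {target.id}
    if isinstance(target, (ast.Tuple, ast.List)):
        return set().union(*(_target_names(elt) for elt in target.elts))
    return set()

def get_updated(node):
    if isinstance(node, ast.Assign):
        return set().union(*(_target_names(t) for t in node.targets))
    if isinstance(node, (ast.For, ast.AugAssign)):
        return _target_names(node.target)
    return set()

print(get_updated(ast.parse('x[0] = 2').body[0]))                 # {'x'}
print(get_updated(ast.parse('x, y = 3, 4').body[0]))              # {'x', 'y'} (order may vary)
print(get_updated(ast.parse('for i in range(n): pass').body[0]))  # {'i'}
```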
Check whether a context manager calls `insert_grad_of`. Args: node: The context manager node. Returns: Whether or not this node contains `insert_grad_of` calls. Raises: ValueError: If the `insert_grad_of` calls are mixed with other calls.
def is_insert_grad_of_statement(node): tangent_calls = [anno.getanno(item.context_expr, 'func', None) is utils.insert_grad_of for item in node.items] if all(tangent_calls): return True elif any(tangent_calls): raise ValueError else: return False
297,605
Remove comments that repeat themselves. Multiple statements might be annotated with the same comment. This way if one of the statements is deleted during optimization passes, the comment won't be lost. This pass removes sequences of identical comments, leaving only the first one. Args: node: An AST Returns: An AST where comments are not repeated in sequence.
def remove_repeated_comments(node): last_comment = {'text': None} for _node in gast.walk(node): if anno.hasanno(_node, 'comment'): comment = anno.getanno(_node, 'comment') if comment['text'] == last_comment['text']: anno.delanno(_node, 'comment') last_comment = comment return node
297,608
Create a variable to store partial gradients. Args: node: See `create_grad`. namer: See `create_grad`. tangent: See `create_grad`. Returns: node: See `create_grad`. Returns a node representing the partial gradient. Note that this is always a simple variable e.g. the temporary partial of `x[i]` can be something like `_dxi`. Nodes are given an annotation `temp_adjoint_var`.
def create_temp_grad(node, namer, tangent=False): if not isinstance(node, (gast.Subscript, gast.Name)): raise TypeError def _name_temp_grad(node): name = namer.temp_grad(node.id, tangent) temp_node = gast.Name(id=name, annotation=None, ctx=None) return temp_node if isinstance(node, gast.Subscript): temp_node = _name_temp_grad(node.value) else: temp_node = _name_temp_grad(node) anno.setanno(temp_node, 'temp_adjoint_var', node) return temp_node
297,624
Create a temporary variable. Args: node: Create a temporary variable to store this variable in. namer: A naming object that guarantees the names are unique. Returns: node: See `create_grad`. Returns a temporary variable, which is always a simple variable annotated with `temp_var`.
def create_temp(node, namer): if isinstance(node, gast.Name): name = node.id elif isinstance(node, (gast.Attribute, gast.Subscript)): name = node.value.id else: raise TypeError temp_node = gast.Name(id=namer.temp(name), annotation=None, ctx=None) anno.setanno(temp_node, 'temp_var', node) return temp_node
297,625
Go from source code to AST nodes. This function returns a tree without enclosing `Module` or `Expr` nodes. Args: src_string: The source code to parse. return_expr: Whether or not to return a containing expression. This can be set to `True` if the result is to be part of a series of statements. Returns: An AST of the given source code.
def quote(src_string, return_expr=False): node = parse_string(src_string) body = node.body if len(body) == 1: if isinstance(body[0], gast.Expr) and not return_expr: out = body[0].value else: out = body[0] else: out = node return out
297,647
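The same unwrapping logic with the stdlib `ast` module instead of Tangent's `parse_string`: a lone expression statement is unwrapped to its `.value`, a lone statement is returned bare, and longer sources keep their `Module` wrapper.

```python
import ast

def quote(src_string, return_expr=False):
    tree = ast.parse(src_string)
    body = tree.body
    if len(body) == 1:
        if isinstance(body[0], ast.Expr) and not return_expr:
            return body[0].value
        return body[0]
    return tree

print(type(quote('a + b')).__name__)          # BinOp
print(type(quote('x = a + b')).__name__)      # Assign
print(type(quote('x = 1\ny = 2')).__name__)   # Module
```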
Carry over the state from the primal to the adjoint. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. stack: The stack node to use for storing and restoring state. Returns: func: A `Module` node with two function definitions containing the primal and adjoint respectively.
def split(node, stack): node, defined, reaching = _fix(node) # Store and restore the state node = store_state(node, reaching, defined, stack) # Clean up anno.clearanno(node) return node
297,655
Merge the bodies of primal and adjoint into a single function. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: func: A `Module` node with a single function definition containing the combined primal and adjoint.
def joint(node): node, _, _ = _fix(node) body = node.body[0].body[:-1] + node.body[1].body func = gast.Module(body=[gast.FunctionDef( name=node.body[0].name, args=node.body[1].args, body=body, decorator_list=[], returns=None)]) # Clean up anno.clearanno(func) return func
297,656
Visit a node. This method is largely modelled after the ast.NodeTransformer class. Args: node: The node to visit. Returns: A tuple of the primal and adjoint, each of which is a node or a list of nodes.
def visit(self, node): method = 'visit_' + node.__class__.__name__ if not hasattr(self, method): raise ValueError('Unknown node type: %s' % node.__class__.__name__) visitor = getattr(self, method) # If this node is a statement, inform all child nodes what the active # variables in this statement are if anno.hasanno(node, 'active_in'): self.active_variables = anno.getanno(node, 'active_in') pri, adj = visitor(node) # Annotate primal and adjoint statements if isinstance(pri, gast.AST): anno.setdefaultanno(pri, 'adj', adj) else: for node in pri: anno.setdefaultanno(node, 'adj', adj) if isinstance(adj, gast.AST): anno.setdefaultanno(adj, 'pri', pri) else: for node in adj: anno.setdefaultanno(node, 'pri', pri) return pri, adj
297,659
Checks whether a statement is active. An assignment is active when its right hand side contains active variables. Args: node: an instance of gast.Assign Returns: Whether the statement is active.
def is_active(self, node): # Special case: If the right hand side is a pop statement, we want to # process it if (isinstance(node.value, gast.Call) and anno.getanno(node.value, 'func', False) == utils.pop): return True for succ in gast.walk(node.value): if (isinstance(succ, gast.Name) and isinstance(succ.ctx, gast.Load) and succ.id in self.active_variables): return True return False
297,662
Build the primal and adjoint of a traceable function. Args: node: ast.Call node of a function we wish to trace, instead of transform Returns: primal: new ast.Assign node to replace the original primal call adjoint: new ast.Assign node using the VJP generated in primal to calculate the adjoint.
def primal_and_adjoint_for_tracing(self, node): primal_template = grads.primals[tracing.Traceable] adjoint_template = grads.adjoints[tracing.Traceable] # Prep to_pack = node.args target = ast_.copy_node(self.orig_target) vjp = quoting.quote(self.namer.unique('%s_grad' % node.func.id)) tmp = create.create_temp(quoting.quote('tmp'), self.namer) assert len(node.keywords) == 0 # Full replacement of primal # TODO: do we need to set 'pri_call' on this? primal = template.replace( primal_template, namer=self.namer, result=target, fn=node.func, tmp=tmp, vjp=vjp, args=gast.Tuple(elts=to_pack, ctx=gast.Load())) # Building adjoint using the vjp generated with the primal dto_pack = gast.Tuple( elts=[create.create_temp_grad(arg, self.namer) for arg in to_pack], ctx=gast.Store()) adjoint = template.replace( adjoint_template, namer=self.namer, result=target, vjp=vjp, dargs=dto_pack) return primal, adjoint
297,675
Prepend a statement to the current statement. Note that with multiple calls to prepend, the statement prepended last ends up at the top. Args: node: The statement to prepend. Raises: ValueError: If the given node is not a statement.
def prepend(self, node): if not isinstance(node, grammar.STATEMENTS): raise ValueError self.to_prepend[-1].appendleft(node)
297,679
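Why `prepend` uses `appendleft` on a deque: with repeated calls the statement from the last call ends up first, which is exactly the ordering the note above describes. Plain strings stand in for the AST statements here.

```python
from collections import deque

to_prepend = deque()
for stmt in ('stmt_1', 'stmt_2', 'stmt_3'):
    to_prepend.appendleft(stmt)

print(list(to_prepend))   # ['stmt_3', 'stmt_2', 'stmt_1']
```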
Append a statement to the current statement. Note that with multiple calls to append, the statement appended last ends up at the bottom. Args: node: The statement to append. Raises: ValueError: If the given node is not a statement.
def append(self, node): if not isinstance(node, grammar.STATEMENTS): raise ValueError self.to_append[-1].append(node)
297,680
Insert statements at the top of the function body. Note that multiple calls to `insert_top` will result in the statements being prepended in that order; this is different behavior from `prepend`. Args: node: The statement to prepend. Raises: ValueError: If the given node is not a statement.
def insert_top(self, node): if not isinstance(node, grammar.STATEMENTS): raise ValueError self.to_insert_top.append(node)
297,681
Prepend a statement to the current block. Args: node: The statement to prepend. reverse: When called multiple times, this flag determines whether the statement should be prepended or appended to the already inserted statements. Raises: ValueError: If the given node is not a statement.
def prepend_block(self, node, reverse=False): if not isinstance(node, grammar.STATEMENTS): raise ValueError if reverse: self.to_prepend_block[-1].appendleft(node) else: self.to_prepend_block[-1].append(node)
297,682
Append a statement to the current block. Args: node: The statement to append. reverse: When called multiple times, this flag determines whether the statement should be prepended or appended to the already inserted statements. Raises: ValueError: If the given node is not a statement.
def append_block(self, node, reverse=False): if not isinstance(node, grammar.STATEMENTS): raise ValueError if reverse: self.to_append_block[-1].appendleft(node) else: self.to_append_block[-1].append(node)
297,683
Visit a series of nodes in a node body. This function is factored out so that it can be called recursively on statements that are appended or prepended. This allows e.g. a nested expression to prepend a statement, and that statement can prepend a statement again, etc. Args: nodes: A list of statements. Returns: A list of transformed statements.
def visit_statements(self, nodes): for node in nodes: if isinstance(node, gast.AST): self.to_prepend.append(deque()) self.to_append.append(deque()) node = self.visit(node) self.visit_statements(self.to_prepend.pop()) if isinstance(node, gast.AST): self.to_insert[-1].append(node) elif node: self.to_insert[-1].extend(node) self.visit_statements(self.to_append.pop()) else: self.to_insert[-1].append(node) return self.to_insert[-1]
297,684
Find pushes and pops to the stack and annotate them as such. Args: node: An AST node that might contain stack pushes and pops. strict: A boolean indicating whether to stringently test whether each push and pop are matched. This is not always possible when taking higher-order derivatives of code generated in split-motion. Returns: node: The node passed in, but with pushes and pops annotated in AST nodes.
def find_stacks(node, strict=False): # First, find all stack operation IDs. fso = FindStackOps() fso.visit(node) # Using those IDs, make annotations onto the push and pop nodes. AnnotateStacks(fso.push_pop_pairs, strict).visit(node) return node
297,697
Find unused definitions that can be removed. This runs reaching definitions analysis followed by a walk over the AST to find all variable definitions that are not used later on. Args: node: The AST of e.g. a function body to find unused variable definitions in. Returns: unused: After visiting all the nodes, this attribute contains a set of definitions in the form of `(variable_name, node)` pairs which are unused in this AST.
def unused(node): cfg.forward(node, cfg.ReachingDefinitions()) unused_obj = Unused() unused_obj.visit(node) return unused_obj.unused
297,698
Initializes logging. Prints logs to the console with the level defined by loglevel. Also prints a verbose log to the multiqc data directory if available (multiqc_data/multiqc.log). Args: loglevel (str): Determines the level of the log output.
def init_log(logger, loglevel=0): # File for logging global log_tmp_dir, log_tmp_fn log_tmp_dir = tempfile.mkdtemp() log_tmp_fn = os.path.join(log_tmp_dir, 'multiqc.log') # Logging templates debug_template = '[%(asctime)s] %(name)-50s [%(levelname)-7s] %(message)s' info_template = '[%(levelname)-7s] %(module)15s : %(message)s' # Base level setup logger.setLevel(getattr(logging, 'DEBUG')) # Set up the console logging stream console = logging.StreamHandler() console.setLevel(getattr(logging, loglevel)) if loglevel == 'DEBUG': console.setFormatter(logging.Formatter(debug_template)) else: console.setFormatter(logging.Formatter(info_template)) logger.addHandler(console) # Now set up the file logging stream if we have a data directory file_handler = logging.FileHandler(log_tmp_fn, encoding='utf-8') file_handler.setLevel(getattr(logging, 'DEBUG')) # always DEBUG for the file file_handler.setFormatter(logging.Formatter(debug_template)) logger.addHandler(file_handler)
298,367
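A minimal sketch of the same console/file split using only the standard `logging` module; the logger name, file path and console level below are placeholders (the real function writes to a temporary directory and hands the log over to MultiQC's data directory later).

```python
import logging

logger = logging.getLogger('example')
logger.setLevel(logging.DEBUG)

# Console handler: level chosen by the caller, terse format.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('[%(levelname)-7s] %(module)15s : %(message)s'))
logger.addHandler(console)

# File handler: always DEBUG, verbose format.
file_handler = logging.FileHandler('example.log', encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
    '[%(asctime)s] %(name)-50s [%(levelname)-7s] %(message)s'))
logger.addHandler(file_handler)

logger.info('visible on the console and in example.log')
logger.debug('only written to example.log')
```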
Make an HTTP connection using connection_factory. This is an internal method that should be called from open_http() or open_https(). Arguments: - connection_factory should take a host name and return an HTTPConnection instance. - url is the URL to retrieve or a (host, relative-path) pair. - data is the payload for a POST request or None.
def _open_generic_http(self, connection_factory, url, data): user_passwd = None proxy_passwd= None if isinstance(url, str): host, selector = splithost(url) if host: user_passwd, host = splituser(host) host = unquote(host) realhost = host else: host, selector = url # check whether the proxy contains authorization information proxy_passwd, host = splituser(host) # now we proceed with the url we want to obtain urltype, rest = splittype(selector) url = rest user_passwd = None if urltype.lower() != 'http': realhost = None else: realhost, rest = splithost(rest) if realhost: user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) if proxy_bypass(realhost): host = realhost if not host: raise IOError('http error', 'no host given') if proxy_passwd: proxy_passwd = unquote(proxy_passwd) proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') else: proxy_auth = None if user_passwd: user_passwd = unquote(user_passwd) auth = base64.b64encode(user_passwd.encode()).decode('ascii') else: auth = None http_conn = connection_factory(host) headers = {} if proxy_auth: headers["Proxy-Authorization"] = "Basic %s" % proxy_auth if auth: headers["Authorization"] = "Basic %s" % auth if realhost: headers["Host"] = realhost # Add Connection:close as we don't support persistent connections yet. # This helps in closing the socket and avoiding ResourceWarning headers["Connection"] = "close" for header, value in self.addheaders: headers[header] = value if data is not None: headers["Content-Type"] = "application/x-www-form-urlencoded" http_conn.request("POST", selector, data, headers) else: http_conn.request("GET", selector, headers=headers) try: response = http_conn.getresponse() except http_client.BadStatusLine: # something went wrong with the HTTP status line raise URLError("http protocol error: bad status line") # According to RFC 2616, "2xx" code indicates that the client's # request was successfully received, understood, and accepted. if 200 <= response.status < 300: return addinfourl(response, response.msg, "http:" + url, response.status) else: return self.http_error( url, response.fp, response.status, response.reason, response.msg, data)
298,987
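The host/credential parsing that this method performs with the legacy `splithost`/`splituser` helpers can be illustrated with `urllib.parse.urlsplit`, which exposes the same pieces directly; the URL below is a made-up example.

```python
from urllib.parse import urlsplit, unquote

parts = urlsplit('http://user:s3cret@proxy.example.com:8080/path?q=1')
print(parts.hostname, parts.port)                # proxy.example.com 8080
print(unquote(parts.username), parts.password)   # user s3cret
```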
Constructor. Arguments: year, month, day (required, base 1)
def __new__(cls, year, month=None, day=None): if (isinstance(year, bytes) and len(year) == 4 and 1 <= year[2] <= 12 and month is None): # Month is sane # Pickle support self = object.__new__(cls) self.__setstate(year) return self _check_date_fields(year, month, day) self = object.__new__(cls) self._year = year self._month = month self._day = day return self
299,252
Constructor. Arguments: hour, minute (required) second, microsecond (default to zero) tzinfo (default to None)
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): self = object.__new__(cls) if isinstance(hour, bytes) and len(hour) == 6: # Pickle support self.__setstate(hour, minute or None) return self _check_tzinfo_arg(tzinfo) _check_time_fields(hour, minute, second, microsecond) self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo return self
299,267
Create a new profile looter. Arguments: username (str): the username of the profile. See `InstaLooter.__init__` for more details about accepted keyword arguments.
def __init__(self, username, **kwargs): # type: (str, **Any) -> None super(ProfileLooter, self).__init__(**kwargs) self._username = username self._owner_id = None
299,773
Create a new hashtag looter. Arguments: hashtag (str): the hashtag to search for. See `InstaLooter.__init__` for more details about accepted keyword arguments.
def __init__(self, hashtag, **kwargs): # type: (str, **Any) -> None super(HashtagLooter, self).__init__(**kwargs) self._hashtag = hashtag
299,775
Create a new post looter. Arguments: code (str): the code of the post to get. See `InstaLooter.__init__` for more details about accepted keyword arguments.
def __init__(self, code, **kwargs): # type: (str, **Any) -> None super(PostLooter, self).__init__(**kwargs) self._info = None # type: Optional[dict] match = self._RX_URL.match(code) if match is not None: self.code = match.group(1) elif self._RX_CODE.match(code) is None: raise ValueError("invalid post code: '{}'".format(code)) else: self.code = code
299,776
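Constructor-level usage only, matching the three signatures shown in this section; the import path `instalooter.looters` follows the project's documented layout and the argument values are placeholders, so treat this as an assumption rather than a verified recipe. Download and iteration methods are outside this excerpt and are not shown.

```python
# Assumed import path and placeholder arguments; only the constructors shown
# in the docstrings above are exercised here.
from instalooter.looters import ProfileLooter, HashtagLooter, PostLooter

profile = ProfileLooter('instagram')   # posts of a profile
tag = HashtagLooter('sunset')          # posts under a hashtag
post = PostLooter('BFB6znLg5s1')       # a single post, by shortcode (placeholder value)
```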