Dataset columns: function (string, length 11–56k), repo_name (string, length 5–60), features (sequence)
def test_none_str(self): assert convert_vector_catch('none') == 0
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_none_b(self): assert convert_vector_catch(b'none') == 0
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_vc_str(self, vc, msk): assert convert_vector_catch(vc) == msk
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_vc_b(self, vc, msk): assert convert_vector_catch(vc) == msk
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_empty(self): assert convert_session_options([]) == {}
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_unknown_option(self): assert convert_session_options(['dumkopf']) == {}
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_bool(self):
    assert convert_session_options(['auto_unlock']) == {'auto_unlock': True}
    assert convert_session_options(['no-auto_unlock']) == {'auto_unlock': False}
    assert convert_session_options(['auto_unlock=1']) == {'auto_unlock': True}
    assert convert_session_options(['auto_unlock=true']) == {'auto_unlock': True}
    assert convert_session_options(['auto_unlock=yes']) == {'auto_unlock': True}
    assert convert_session_options(['auto_unlock=on']) == {'auto_unlock': True}
    assert convert_session_options(['auto_unlock=0']) == {'auto_unlock': False}
    assert convert_session_options(['auto_unlock=false']) == {'auto_unlock': False}
    assert convert_session_options(['auto_unlock=anything-goes-here']) == {'auto_unlock': False}
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_noncasesense(self):
    # Test separate paths for with and without a value.
    assert convert_session_options(['AUTO_Unlock']) == {'auto_unlock': True}
    assert convert_session_options(['AUTO_Unlock=0']) == {'auto_unlock': False}
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_int(self):
    # Non-bool with no value is ignored (and logged).
    assert convert_session_options(['frequency']) == {}
    # Invalid int value is ignored and logged.
    assert convert_session_options(['frequency=abc']) == {}
    # Ignored with a no- prefix.
    assert convert_session_options(['no-frequency']) == {}
    # Valid decimal int.
    assert convert_session_options(['frequency=1000']) == {'frequency': 1000}
    # Valid hex int.
    assert convert_session_options(['frequency=0x40']) == {'frequency': 64}
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
def test_str(self):
    # Ignored with no value.
    assert convert_session_options(['test_binary']) == {}
    # Ignored with a no- prefix.
    assert convert_session_options(['no-test_binary']) == {}
    # Valid.
    assert convert_session_options(['test_binary=abc']) == {'test_binary': 'abc'}
mbedmicro/pyOCD
[ 883, 424, 883, 228, 1382710205 ]
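For context, a minimal sketch of a convert_session_options that satisfies the tests above. The real pyOCD implementation is more involved (it consults the full option registry and logs ignored options); OPTION_TYPES here is a hypothetical stand-in:

OPTION_TYPES = {'auto_unlock': bool, 'frequency': int, 'test_binary': str}

def convert_session_options(option_list):
    options = {}
    for opt in option_list:
        if '=' in opt:
            name, value = opt.split('=', 1)
            name = name.lower()
            opt_type = OPTION_TYPES.get(name)
            if opt_type is bool:
                # Only a fixed set of strings count as true.
                options[name] = value.lower() in ('1', 'true', 'yes', 'on')
            elif opt_type is int:
                try:
                    options[name] = int(value, base=0)  # base 0 accepts 0x-prefixed hex
                except ValueError:
                    pass  # invalid ints are dropped (the real code logs this)
            elif opt_type is str:
                options[name] = value
        else:
            name = opt.lower()
            if name.startswith('no-'):
                # Only boolean options accept the no- prefix.
                name = name[3:]
                if OPTION_TYPES.get(name) is bool:
                    options[name] = False
            elif OPTION_TYPES.get(name) is bool:
                options[name] = True
    return options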
def __init__(self, args):
    """
    Args:
        args: parameters of the model
    """
    self.args = args
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def init_state(self):
    """ Return the initial cell state """
    return None
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def get_module_id(): return 'rnn'
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def build(self):
    """ Initialize the weights of the model """
    self.rnn_cell = tfutils.get_rnn_cell(self.args, "deco_cell")
    self.project_key = tfutils.single_layer_perceptron(
        [self.args.hidden_size, 1], 'project_key')
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def get_cell(self, prev_keyboard, prev_state_enco):
    """ a RNN decoder
    See parent class for arguments details
    """
    axis = 1  # The first dimension is the batch, we split the keys
    assert prev_keyboard.get_shape()[axis].value == music.NB_NOTES
    # Note: tf.split/tf.concat here use the old (axis-first) TF <= 1.0 order.
    inputs = tf.split(axis, music.NB_NOTES, prev_keyboard)

    outputs, final_state = tf.nn.seq2seq.rnn_decoder(
        decoder_inputs=inputs,
        initial_state=prev_state_enco,
        cell=self.rnn_cell
        # TODO: Which loop function (should use prediction)? Should take the
        # previous generated input/ground truth (as the global model loop_fct).
        # Need to add a new bool placeholder.
    )

    # Is it better to do the projection before or after the packing?
    next_keys = []
    for output in outputs:
        next_keys.append(self.project_key(output))

    next_keyboard = tf.concat(axis, next_keys)
    return next_keyboard, final_state
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def get_module_id(): return 'perceptron'
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def build(self):
    """ Initialize the weights of the model """
    # For projecting on the hidden space
    self.project_hidden = tfutils.single_layer_perceptron(
        [music.NB_NOTES, self.args.hidden_size], 'project_hidden')
    # For projecting on the keyboard space
    self.project_keyboard = tfutils.single_layer_perceptron(
        [self.args.hidden_size, music.NB_NOTES],
        'project_keyboard')  # Should we do the activation sigmoid here?
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def get_module_id(): return 'lstm'
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def build(self):
    """ Initialize the weights of the model """
    # TODO: Control over the Cell using module arguments instead of global
    # arguments (hidden_size and num_layers)!

    # RNN network
    rnn_cell = tf.nn.rnn_cell.BasicLSTMCell(
        self.args.hidden_size,
        state_is_tuple=True)  # Or GRUCell, LSTMCell(args.hidden_size)
    if not self.args.test:  # TODO: Should use a placeholder instead
        rnn_cell = tf.nn.rnn_cell.DropoutWrapper(
            rnn_cell,
            input_keep_prob=1.0,
            output_keep_prob=0.9)  # TODO: Custom values
    rnn_cell = tf.nn.rnn_cell.MultiRNNCell(
        [rnn_cell] * self.args.num_layers, state_is_tuple=True)
    self.rnn_cell = rnn_cell

    # For projecting on the keyboard space
    self.project_output = tfutils.single_layer_perceptron(
        [self.args.hidden_size, 12 + 1],  # TODO: HACK: Input/output space hardcoded!
        'project_output')  # Should we do the activation sigmoid here?
Conchylicultor/MusicGenerator
[ 295, 76, 295, 8, 1472483765 ]
def __init__(self, *args, **kwargs): pass
cuemacro/chartpy
[ 510, 98, 510, 7, 1470261601 ]
def auto_set_key(self):
    self.twitter = Twython(cc.TWITTER_APP_KEY, cc.TWITTER_APP_SECRET,
                           cc.TWITTER_OAUTH_TOKEN, cc.TWITTER_OAUTH_TOKEN_SECRET)
cuemacro/chartpy
[ 510, 98, 510, 7, 1470261601 ]
def dict(self):
    """
    Return a python dictionary which could be jsonified.
    """
    return {key: value.dict for key, value in self.items()}
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
def __init__(self, websocket, **details):
    """
    Create a connection object provided:
    - websocket: tornado.websocket.WebSocketHandler instance
    - details: dictionary of metadata associated to the connection
    """
    self.id = create_global_id()
    # set connection attributes, if any is given
    for name, value in details.items():
        setattr(self, name, value)
    # meta-data
    # TODO: update this
    self.last_update = datetime.now().isoformat()
    # communication-related
    self._websocket = websocket
    self.topics = {
        "subscriber": {},
        "publisher": {}
    }
    # when connection should be closed but something is left
    self.zombie = False
    self.zombification_datetime = None
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
def peer(self):
    try:
        ip, port = self._websocket.ws_connection.stream.socket.getpeername()
    except (AttributeError, OSError, socket.error) as error:
        if not hasattr(error, 'errno') or error.errno in (errno.EBADF, errno.ENOTCONN):
            # Expected errnos:
            # - EBADF: bad file descriptor (connection was closed)
            # - ENOTCONN: not connected (connection was never open)
            ip = self._websocket.request.remote_ip
            name = u"{0}:HACK|{1}".format(ip, self.id)
        else:
            # Rethrow exception in case of unknown errno
            raise
    else:
        forwarded_ip = self._websocket.request.headers.get("X-Forwarded-For")
        if forwarded_ip:
            ip = forwarded_ip
        name = u"{0}:{1}|{2}".format(ip, port, self.id)
    return name
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
def add_subscription_channel(self, subscription_id, topic_name):
    """
    Add topic as a subscriber.
    """
    self.topics["subscriber"][topic_name] = subscription_id
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
def add_publishing_channel(self, subscription_id, topic_name):
    """
    Add topic as a publisher.
    """
    self.topics["publisher"][topic_name] = subscription_id
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
def get_publisher_topics(self):
    """
    Return the list of topics to which this connection publishes.
    """
    return list(self.topics["publisher"])
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
def topics_by_subscription_id(self):
    return {subscription_id: topic
            for topic, subscription_id in self.get_topics().items()}
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
def dict(self):
    """
    Return dict representation of the current Connection, keeping only
    data that could be exported to JSON (convention: attributes which do
    not start with _).
    """
    return {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
ef-ctx/tornwamp
[ 7, 3, 7, 1, 1415799047 ]
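The underscore convention used by dict() above is easy to see in isolation; a minimal sketch (class and values are made up):

class Example:
    def __init__(self):
        self.id = 1                  # exported
        self.zombie = False          # exported
        self._websocket = object()   # private: filtered out

e = Example()
public = {k: v for k, v in e.__dict__.items() if not k.startswith('_')}
assert public == {'id': 1, 'zombie': False}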
def __init__(self): pass
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
def map_apply(func, scalararray):
    """Return a xarray dataarray with value func(scalararray.data)

    Parameters
    ----------
    func : function
        Any function that works on numpy arrays such that input and output
        arrays have the same shape.
    scalararray : xarray.DataArray

    Returns
    -------
    out : xarray.DataArray

    Methods
    -------
    uses dask map_blocks without ghost cells (no map_overlap)
    """
    data = scalararray.data
    coords = scalararray.coords
    dims = scalararray.dims
    if is_daskarray(data):
        _out = data.map_blocks(func)  # dask's method is map_blocks, not map_block
    else:
        _out = func(data)
    out = xr.DataArray(_out, coords, dims)
    return out
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
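For illustration, a hedged usage sketch of map_apply (assumes the helpers in this module are importable; np.square is shape-preserving, so it qualifies):

import numpy as np
import xarray as xr

arr = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=('y', 'x'))
arr_chunked = arr.chunk({'y': 1})            # backs the DataArray with dask
squared = map_apply(np.square, arr_chunked)  # lazy, via dask map_blocks
print(squared.compute())                     # evaluates the dask graph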
def is_numpy(array):
    """Return True if array is a numpy array

    Parameters
    ----------
    array : array-like
        array is either a numpy array, a masked array, a dask array or a xarray.

    Returns
    -------
    test : bool
    """
    test = isinstance(array, np.ndarray) or isinstance(array, np.ma.masked_array)
    return test
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
def is_daskarray(array):
    """Return True if array is a dask array

    Parameters
    ----------
    array : array-like

    Returns
    -------
    test : bool
    """
    return isinstance(array, da.core.Array)
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
def _append_dataarray_extra_attrs(xarr, **extra_kwargs):
    """Update the dictionary of attributes of a xarray dataarray (xarr.attrs).

    Parameters
    ----------
    xarr : xarray.DataArray
        The function will add extra arguments to xarr.attrs
    **extra_kwargs
        key/value pairs to be added to xarr.attrs

    Returns
    -------
    da : xarray.DataArray
    """
    if not is_xarray(xarr):
        raise TypeError('expected a xarray.DataArray')
    for kwarg in extra_kwargs:
        xarr.attrs[kwarg] = extra_kwargs[kwarg]
    return xarr
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
def _chunks_are_compatible(chunks1=None, chunks2=None, ndims=None):
    """Return True when two chunks are aligned over their common dimensions.

    Parameters
    ----------
    chunks1 : list-like of list-like object
        chunks associated to a xarray data array
    chunks2 : list-like of list-like object
        chunks associated to a xarray data array
    ndims : int
        number of dimensions over which chunks should be compared.

    Returns
    -------
    test : bool
        boolean value of the test.
    """
    # TODO : not clear whether to compare a priori description of chunks
    # (dictionaries) or a posteriori values (tuple of tuples).
    if (chunks1 is None) or (chunks2 is None):
        return (chunks1 is None) and (chunks2 is None)
    test = True
    for idim in range(ndims):
        test = test and (chunks1[-idim - 1] == chunks2[-idim - 1])
    return test
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
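A small worked example of the chunk comparison (chunks are written as dask reports them, a tuple of block sizes per dimension; this uses the private helper above, not a public API):

chunks_a = ((10, 10), (5, 5, 5), (4, 4))   # dims: (t, y, x)
chunks_b = ((5, 5, 5), (4, 4))             # dims: (y, x)

# The trailing two dimensions (y, x) align, so the chunks are compatible.
assert _chunks_are_compatible(chunks_a, chunks_b, ndims=2)
# Changing the x blocks breaks alignment.
assert not _chunks_are_compatible(chunks_a, ((5, 5, 5), (8,)), ndims=2)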
def _assert_and_set_grid_location_attribute(xarr, grid_location=None):
    """Assert whether xarr holds an extra attribute 'grid_location' that
    equals grid_location. If xarr does not have such extra-attribute, create
    one with value grid_location.

    Parameters
    ----------
    xarr : xarray.DataArray
        xarray dataarray that should be associated with a grid location
    grid_location : str
        string describing the grid location : eg 'u','v','t','f'...
    """
    if 'grid_location' in xarr.attrs:  # dict.has_key() is Python 2 only
        assert xarr.attrs['grid_location'] == grid_location
    else:
        xarr.attrs['grid_location'] = grid_location
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
def __init__(self): Exception.__init__(self,"incompatible chunk size")
lesommer/oocgcm
[ 36, 12, 36, 27, 1458817850 ]
def __init__(self, ctx: Context, client: BaseHTTPClientConnection):
    self.ctx = ctx
    self._client = client
    self._ws = WSConnection(ConnectionType.SERVER)
asphalt-framework/asphalt-web
[ 7, 1, 7, 1, 1430740193 ]
def begin_request(self, request: HTTPRequest):
    trailing_data = self._client.upgrade()
    self._ws.receive_bytes(trailing_data)
    self._process_ws_events()
asphalt-framework/asphalt-web
[ 7, 1, 7, 1, 1430740193 ]
def send_message(self, payload: Union[str, bytes]) -> None:
    """
    Send a message to the client.

    :param payload: either a unicode string or a bytestring
    """
    self._ws.send_data(payload)
    bytes_to_send = self._ws.bytes_to_send()
    self._client.write(bytes_to_send)
asphalt-framework/asphalt-web
[ 7, 1, 7, 1, 1430740193 ]
def on_connect(self) -> None: """Called when the websocket handshake has been done."""
asphalt-framework/asphalt-web
[ 7, 1, 7, 1, 1430740193 ]
def PlotGene(label, X, Y, s=3, alpha=1.0, ax=None):
    fig = None
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    for li in np.unique(label):
        idxN = (label == li).flatten()
        ax.scatter(X[idxN], Y[idxN], s=s, alpha=alpha, label=int(np.round(li)))
    return fig, ax
ManchesterBioinference/BranchedGP
[ 25, 7, 25, 17, 1474552399 ]
def FitGene(g, ns=20):  # for quick results subsample data
    t = time.time()
    Bsearch = list(np.linspace(0.05, 0.95, 5)) + [1.1]  # set of candidate branching points
    GPy = (Y[g].iloc[::ns].values - Y[g].iloc[::ns].values.mean())[:, None]  # remove mean from gene expression data
    GPt = monocle["StretchedPseudotime"].values[::ns]
    globalBranching = monocle["State"].values[::ns].astype(int)
    d = BranchedGP.FitBranchingModel.FitModel(Bsearch, GPt, GPy, globalBranching)
    print(g, "BGP inference completed in %.1f seconds." % (time.time() - t))
    # plot BGP
    fig, ax = BranchedGP.VBHelperFunctions.PlotBGPFit(
        GPy, GPt, Bsearch, d, figsize=(10, 10))
    # overplot data
    f, a = PlotGene(
        monocle["State"].values,
        monocle["StretchedPseudotime"].values,
        Y[g].values - Y[g].iloc[::ns].values.mean(),
        ax=ax[0],
        s=10,
        alpha=0.5,
    )
    # Calculate Bayes factor of branching vs non-branching
    bf = BranchedGP.VBHelperFunctions.CalculateBranchingEvidence(d)["logBayesFactor"]
    fig.suptitle("%s log Bayes factor of branching %.1f" % (g, bf))
    return d, fig, ax
ManchesterBioinference/BranchedGP
[ 25, 7, 25, 17, 1474552399 ]
def _div_maybe_zero(total_loss, num_present):
  """Normalizes the total loss with the number of present pixels."""
  return tf.cast(num_present > 0, tf.float32) * tf.math.divide(
      total_loss, tf.maximum(1e-5, num_present))
googleinterns/wss
[ 142, 21, 142, 9, 1597440534 ]
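A quick eager-mode check of the zero guard (a sketch; assumes TF 2.x and _div_maybe_zero as defined above):

import tensorflow as tf

# num_present == 0: the cast zeroes the result instead of dividing by zero.
print(_div_maybe_zero(tf.constant(4.0), tf.constant(0.0)))  # -> 0.0
# num_present > 0: behaves like an ordinary normalization.
print(_div_maybe_zero(tf.constant(4.0), tf.constant(2.0)))  # -> 2.0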
def compute_cam_v2(
    end_points,
    logits,
    cls_label,
    num_class=21,
    use_attention=True,
    attention_dim=128,
    strides=(15, 16),
    is_training=True,
    valid_mask=None,
    net='xception_65',
googleinterns/wss
[ 142, 21, 142, 9, 1597440534 ]
def compute_self_att_v2(
    end_points,
    logits,
    num_class=21,
    attention_dim=128,
    strides=(15, 16),
    is_training=True,
    linformer=True,
    valid_mask=None,
    factor=8,
    downsample_type='nearest',
    net='xception_65'):
  """Compute self-attention for segmentation head.

  Args:
    end_points: Network end_points (dict).
    logits: The input seed for refinement. Used as ``value'' in
      self-attention. Can be either logits, probability, or score map.
    num_class: Number of classes including background.
    attention_dim: Embedding space dimension for key and query used in the
      self-attention module.
    strides: Use feature maps from which stride to compute pixel similarity.
    is_training: Indicate training or inference mode.
    linformer: Adopt the idea from https://arxiv.org/abs/2006.04768 to reduce
      memory usage in self-attention computation. But instead of learning the
      downsample function, we use deterministic image downsample functions.
    valid_mask: To identify valid region of the input. It is used to avoid
      attending to padding regions.
    factor: Downsample factor used in linformer mode.
    downsample_type: Use which downsample method to reduce the memory usage.
      Can be either 'nearest' or 'bilinear'. Default: 'nearest'.
    net: Specify which network is used.

  Returns:
    A list of computed Grad-CAMs or refined ones.
  """
  # Sanity check: Make sure strides are sorted.
  strides = sorted(list(strides))[::-1]
  conv_layer_list = []
  for stride in strides:
    conv_layer = end_points[net_to_stride_to_endpoints_name[net][stride]]
    conv_layer_list.append(conv_layer)

  # Resize to seed resolution first.
  h, w = preprocess_utils.resolve_shape(logits, 4)[1:3]
  conv_layer_list = [
      tf.compat.v1.image.resize_bilinear(conv, (h, w), align_corners=True)
      for conv in conv_layer_list
  ]
  conv_layer_merged = tf.concat(conv_layer_list, axis=-1)
  conv_layer_merged = tf.stop_gradient(conv_layer_merged)
  score = tf.stop_gradient(logits)
  # This tells us what input it is (decoder logits or Grad-CAM).
  value_dim = tf.shape(score)[-1]

  # Only valid when we use Linformer style to reduce size for key and value.
  if downsample_type == 'bilinear':
    resize_fn = tf.compat.v1.image.resize_bilinear
  else:
    resize_fn = tf.compat.v1.image.resize_nearest_neighbor

  scope = 'hyper_column'
  with tf.variable_scope(scope):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=None,
                        normalizer_fn=None,
                        biases_initializer=None,
                        reuse=tf.AUTO_REUSE):
      k = slim.conv2d(conv_layer_merged, attention_dim, [1, 1], scope='key')
      q = slim.conv2d(conv_layer_merged, attention_dim, [1, 1], scope='query')
  q = tf.reshape(q, [-1, h * w, attention_dim])
  if valid_mask is not None:
    valid_mask_q = tf.reshape(valid_mask, [-1, h * w, 1])

  # Adopt idea from Linformer (https://arxiv.org/abs/2006.04768) to reduce
  # the memory usage. Instead of learning a downsample function, we use
  # deterministic image downsample methods (nearest neighbor or bilinear) to
  # reduce the size of key and value.
  if linformer:
    k = resize_fn(
        k, ((h // factor + 1), (w // factor + 1)), align_corners=True)
    k = tf.reshape(
        k, [-1, (h // factor + 1) * (w // factor + 1), attention_dim])
    if valid_mask is not None:
      valid_mask_k = tf.compat.v1.image.resize_nearest_neighbor(
          valid_mask, ((h // factor + 1), (w // factor + 1)))
      valid_mask_k = tf.reshape(
          tf.cast(valid_mask_k, tf.float32),
          [-1, (h // factor + 1) * (w // factor + 1), 1])
  else:
    k = tf.reshape(k, [-1, h * w, attention_dim])
    valid_mask_k = tf.reshape(valid_mask, [-1, h * w, 1])

  matmul_qk = tf.matmul(q, k, transpose_b=True)
  scaled_att_logits = matmul_qk / math.sqrt(attention_dim)

  # Masking.
  if valid_mask is not None:
    final_mask = tf.matmul(valid_mask_q, valid_mask_k, transpose_b=True)
    scaled_att_logits += (1 - final_mask) * -1e9
  att_weights = tf.nn.softmax(scaled_att_logits, axis=-1)

  if linformer:
    value = resize_fn(
        score, ((h // factor + 1), (w // factor + 1)), align_corners=True)
    value = tf.reshape(
        value, [-1, (h // factor + 1) * (w // factor + 1), value_dim])
  else:
    value = tf.reshape(score, [-1, h * w, value_dim])
  att_score = tf.matmul(att_weights, value)
  att_score = tf.reshape(att_score, tf.shape(score))

  # Add skip-connection and 1x1 conv to convert score back to logit.
  att_score += score
  if value_dim != num_class:
    # Set an initial score for the background class. Since the score range of
    # a class is [0, 2] after skip-connection, we use 2 minus the max class
    # probability to set the initial background score for each pixel.
    bg = 2 - tf.reduce_max(att_score, axis=3, keepdims=True)
    att_score = tf.concat([bg, att_score], axis=-1)
  out_att_logits = slim.conv2d(
      att_score,
      num_class, [1, 1],
      scope='pixel_normalization',
      activation_fn=None,
      normalizer_fn=slim.batch_norm,
      normalizer_params={'is_training': is_training},
      reuse=tf.AUTO_REUSE)
  return out_att_logits
googleinterns/wss
[ 142, 21, 142, 9, 1597440534 ]
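The Linformer-style trick in compute_self_att_v2 is easier to see stripped of TF plumbing: downsampling key and value shrinks the attention matrix from N x N to N x M. A minimal NumPy sketch with made-up shapes (stride-slicing stands in for the repo's image resize); not the repo's code:

import numpy as np

def downsampled_attention(q, k, v, factor=8):
    """Scaled dot-product attention with deterministically downsampled k/v."""
    k, v = k[::factor], v[::factor]            # shrink keys/values to M = N/factor rows
    logits = q @ k.T / np.sqrt(q.shape[-1])    # (N, M) instead of (N, N)
    weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)   # softmax over keys
    return weights @ v

n, d = 1024, 128
q = np.random.randn(n, d)
k = np.random.randn(n, d)
v = np.random.randn(n, 21)
out = downsampled_attention(q, k, v)  # attention matrix is 1024 x 128, not 1024 x 1024
assert out.shape == (n, 21)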
def prepend_ctes(self, prepended_ctes: List[InjectedCTE]):
    self.extra_ctes_injected = True
    self.extra_ctes = prepended_ctes
    if self.compiled_sql is None:
        raise RuntimeException(
            'Cannot prepend ctes to an unparsed node', self
        )
    self.injected_sql = _inject_ctes_into_sql(
        self.compiled_sql,
        prepended_ctes,
    )
    self.validate(self.to_dict())
fishtown-analytics/dbt
[ 6645, 1178, 6645, 457, 1457577480 ]
def empty(self):
    """Seeds are never empty"""
    return False
fishtown-analytics/dbt
[ 6645, 1178, 6645, 457, 1457577480 ]
def _inject_ctes_into_sql(sql: str, ctes: List[InjectedCTE]) -> str:
    """
    `ctes` is a list of InjectedCTEs like:

        [
            InjectedCTE(
                id="cte_id_1",
                sql="__dbt__CTE__ephemeral as (select * from table)",
            ),
            InjectedCTE(
                id="cte_id_2",
                sql="__dbt__CTE__events as (select id, type from events)",
            ),
        ]

    Given `sql` like:

        "with internal_cte as (select * from sessions)
        select * from internal_cte"

    This will spit out:

        "with __dbt__CTE__ephemeral as (select * from table),
        __dbt__CTE__events as (select id, type from events),
        internal_cte as (select * from sessions)
        select * from internal_cte"

    (Whitespace enhanced for readability.)
    """
    if len(ctes) == 0:
        return sql

    parsed_stmts = sqlparse.parse(sql)
    parsed = parsed_stmts[0]

    with_stmt = None
    for token in parsed.tokens:
        if token.is_keyword and token.normalized == 'WITH':
            with_stmt = token
            break

    if with_stmt is None:
        # no with stmt, add one, and inject CTEs right at the beginning
        first_token = parsed.token_first()
        with_stmt = sqlparse.sql.Token(sqlparse.tokens.Keyword, 'with')
        parsed.insert_before(first_token, with_stmt)
    else:
        # stmt exists, add a comma (which will come after injected CTEs)
        trailing_comma = sqlparse.sql.Token(sqlparse.tokens.Punctuation, ',')
        parsed.insert_after(with_stmt, trailing_comma)

    token = sqlparse.sql.Token(
        sqlparse.tokens.Keyword, ", ".join(c.sql for c in ctes)
    )
    parsed.insert_after(with_stmt, token)

    return str(parsed)
fishtown-analytics/dbt
[ 6645, 1178, 6645, 457, 1457577480 ]
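A hedged usage sketch of _inject_ctes_into_sql. InjectedCTE is a dbt-internal type, so a namedtuple stands in here purely to drive the function; exact output whitespace depends on sqlparse:

import sqlparse
from collections import namedtuple

# Hypothetical stand-in for dbt's internal InjectedCTE type.
InjectedCTE = namedtuple('InjectedCTE', ['id', 'sql'])

ctes = [InjectedCTE(id="cte_id_1",
                    sql="__dbt__CTE__ephemeral as (select * from table)")]
sql = "select * from __dbt__CTE__ephemeral"

print(_inject_ctes_into_sql(sql, ctes))
# -> roughly: with __dbt__CTE__ephemeral as (select * from table)
#    select * from __dbt__CTE__ephemeral   (modulo whitespace)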
def compiled_type_for(parsed: ParsedNode) -> CompiledType:
    if type(parsed) in COMPILED_TYPES:
        return COMPILED_TYPES[type(parsed)]
    else:
        return type(parsed)
fishtown-analytics/dbt
[ 6645, 1178, 6645, 457, 1457577480 ]
def smart_concat(v1, v2):
  if isinstance(v1, tf.Tensor) or isinstance(v2, tf.Tensor):
    return tf.concat([v1, v2], 0)
  else:
    return v1 + v2
deepmind/sonnet
[ 9523, 1351, 9523, 33, 1491219275 ]
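A quick illustration of the dispatch in smart_concat (assumes TF 2.x eager execution):

import tensorflow as tf

# Python lists fall through to plain list concatenation...
assert smart_concat([1, 2], [3]) == [1, 2, 3]
# ...while tensors are joined along axis 0 with tf.concat.
t = smart_concat(tf.constant([1, 2]), tf.constant([3]))
print(t.numpy())  # [1 2 3]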
def __init__(self,
             num_spatial_dims: int,
             output_channels: int,
             kernel_shape: Union[int, Sequence[int]],
             output_shape: Optional[types.ShapeLike] = None,
             stride: Union[int, Sequence[int]] = 1,
             rate: Union[int, Sequence[int]] = 1,
             padding: str = "SAME",
             with_bias: bool = True,
             w_init: Optional[initializers.Initializer] = None,
             b_init: Optional[initializers.Initializer] = None,
             data_format: Optional[str] = None,
             name: Optional[str] = None):
  """Constructs a `ConvNDTranspose` module.

  Args:
    num_spatial_dims: Number of spatial dimensions of the input.
    output_channels: Number of output channels.
    kernel_shape: Sequence of integers (of length num_spatial_dims), or an
      integer representing kernel shape. `kernel_shape` will be expanded to
      define a kernel size in all dimensions.
    output_shape: Output shape of the spatial dimensions of a transpose
      convolution. Can be either an iterable of integers or a `TensorShape`
      of length `num_spatial_dims`. If a `None` value is given, a default
      shape is automatically calculated.
    stride: Sequence of integers (of length num_spatial_dims), or an integer.
      `stride` will be expanded to define stride in all dimensions.
    rate: Sequence of integers (of length num_spatial_dims), or integer that
      is used to define dilation rate in all dimensions. 1 corresponds to
      standard ND convolution, `rate > 1` corresponds to dilated convolution.
    padding: Padding algorithm, either "SAME" or "VALID".
    with_bias: Boolean, whether to include bias parameters. Default `True`.
    w_init: Optional initializer for the weights. By default the weights are
      initialized truncated random normal values with a standard deviation
      of `1 / sqrt(input_feature_size)`, which is commonly used when the
      inputs are zero centered (see https://arxiv.org/abs/1502.03167v3).
    b_init: Optional initializer for the bias. By default the bias is
      initialized to zero.
    data_format: The data format of the input.
    name: Name of the module.
  """
  super().__init__(name=name)
  if not 1 <= num_spatial_dims <= 3:
    raise ValueError(
        "We only support transpose convolution operations for "
        "num_spatial_dims=1, 2 or 3, received num_spatial_dims={}.".format(
            num_spatial_dims))
  self._num_spatial_dims = num_spatial_dims
  self._output_channels = output_channels
  self._kernel_shape = kernel_shape
  self._output_shape = output_shape
  self._stride = stride
  self._rate = rate
  if padding == "SAME" or padding == "VALID":
    self._padding = padding
  else:
    raise TypeError("ConvNDTranspose only takes string padding, please "
                    "provide either `SAME` or `VALID`.")
  self._data_format = data_format
  self._channel_index = utils.get_channel_index(data_format)
  self._with_bias = with_bias
  self._w_init = w_init
  if with_bias:
    self._b_init = b_init if b_init is not None else initializers.Zeros()
  elif b_init is not None:
    raise ValueError("When not using a bias the b_init must be None.")
deepmind/sonnet
[ 9523, 1351, 9523, 33, 1491219275 ]
def _initialize(self, inputs):
  utils.assert_rank(inputs, self._num_spatial_dims + 2)

  self.input_channels = inputs.shape[self._channel_index]
  if self.input_channels is None:
    raise ValueError("The number of input channels must be known")
  self._dtype = inputs.dtype

  if self._output_shape is not None:
    if len(self._output_shape) != self._num_spatial_dims:
      raise ValueError(
          "The output_shape must be of length {} but instead was {}.".format(
              self._num_spatial_dims, len(self._output_shape)))
    if self._channel_index == 1:
      self._output_shape = [self._output_channels] + list(self._output_shape)
    else:
      self._output_shape = list(self._output_shape) + [self._output_channels]

  self.w = self._make_w()
  if self._with_bias:
    self.b = tf.Variable(
        self._b_init((self._output_channels,), self._dtype), name="b")
deepmind/sonnet
[ 9523, 1351, 9523, 33, 1491219275 ]
def _get_output_shape(self, inputs):
  input_shape = inputs.shape if inputs.shape.is_fully_defined() else tf.shape(
      inputs)
  if self._channel_index == 1:
    input_size = input_shape[2:]
  else:
    input_size = input_shape[1:-1]
  stride = utils.replicate(self._stride, self._num_spatial_dims, "stride")

  output_shape = smart_lambda(lambda x, y: x * y, input_size, stride)

  if self._padding == "VALID":
    kernel_shape = utils.replicate(self._kernel_shape, self._num_spatial_dims,
                                   "kernel_shape")
    rate = utils.replicate(self._rate, self._num_spatial_dims, "rate")
    effective_kernel_shape = [
        (shape - 1) * rate + 1 for (shape, rate) in zip(kernel_shape, rate)
    ]
    output_shape = smart_lambda(lambda x, y: x + y - 1, output_shape,
                                effective_kernel_shape)
  return output_shape
deepmind/sonnet
[ 9523, 1351, 9523, 33, 1491219275 ]
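The default-shape arithmetic in _get_output_shape can be checked by hand; the numbers below are illustrative and mirror its formulas:

# SAME padding: each spatial dim is simply input_size * stride.
#   input 28, stride 2 -> 28 * 2 = 56
# VALID padding adds (effective_kernel - 1), where
#   effective_kernel = (kernel - 1) * rate + 1:
#   input 28, stride 2, kernel 3, rate 1 -> 56 + (3 - 1) * 1 + 1 - 1 = 58
input_size, stride, kernel, rate = 28, 2, 3, 1
same_out = input_size * stride
valid_out = same_out + ((kernel - 1) * rate + 1) - 1
assert (same_out, valid_out) == (56, 58)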
def __init__(self,
             output_channels: int,
             kernel_shape: Union[int, Sequence[int]],
             output_shape: Optional[types.ShapeLike] = None,
             stride: Union[int, Sequence[int]] = 1,
             rate: Union[int, Sequence[int]] = 1,
             padding: str = "SAME",
             with_bias: bool = True,
             w_init: Optional[initializers.Initializer] = None,
             b_init: Optional[initializers.Initializer] = None,
             data_format: str = "NWC",
             name: Optional[str] = None):
  """Constructs a `Conv1DTranspose` module.

  Args:
    output_channels: Number of output channels.
    kernel_shape: Sequence of integers (of length 1), or an integer
      representing kernel shape. `kernel_shape` will be expanded to define a
      kernel size in all dimensions.
    output_shape: Output shape of the spatial dimensions of a transpose
      convolution. Can be either an integer or an iterable of integers or
      `Dimension`s, or a `TensorShape` (of length 1). If a `None` value is
      given, a default shape is automatically calculated.
    stride: Sequence of integers (of length 1), or an integer. `stride` will
      be expanded to define stride in all dimensions.
    rate: Sequence of integers (of length 1), or integer that is used to
      define dilation rate in all dimensions. 1 corresponds to standard 1D
      convolution, `rate > 1` corresponds to dilated convolution.
    padding: Padding algorithm, either "SAME" or "VALID".
    with_bias: Boolean, whether to include bias parameters. Default `True`.
    w_init: Optional initializer for the weights. By default the weights are
      initialized truncated random normal values with a standard deviation
      of `1 / sqrt(input_feature_size)`, which is commonly used when the
      inputs are zero centered (see https://arxiv.org/abs/1502.03167v3).
    b_init: Optional initializer for the bias. By default the bias is
      initialized to zero.
    data_format: The data format of the input.
    name: Name of the module.
  """
  super().__init__(
      num_spatial_dims=1,
      output_channels=output_channels,
      kernel_shape=kernel_shape,
      output_shape=output_shape,
      stride=stride,
      rate=rate,
      padding=padding,
      with_bias=with_bias,
      w_init=w_init,
      b_init=b_init,
      data_format=data_format,
      name=name)
deepmind/sonnet
[ 9523, 1351, 9523, 33, 1491219275 ]
def __init__(self,
             output_channels: int,
             kernel_shape: Union[int, Sequence[int]],
             output_shape: Optional[types.ShapeLike] = None,
             stride: Union[int, Sequence[int]] = 1,
             rate: Union[int, Sequence[int]] = 1,
             padding: str = "SAME",
             with_bias: bool = True,
             w_init: Optional[initializers.Initializer] = None,
             b_init: Optional[initializers.Initializer] = None,
             data_format: str = "NHWC",
             name: Optional[str] = None):
  """Constructs a `Conv2DTranspose` module.

  Args:
    output_channels: An integer, the number of output channels.
    kernel_shape: Sequence of integers (of length 2), or an integer
      representing kernel shape. `kernel_shape` will be expanded to define a
      kernel size in all dimensions.
    output_shape: Output shape of the spatial dimensions of a transpose
      convolution. Can be either an integer or an iterable of integers or
      `Dimension`s, or a `TensorShape` (of length 2). If a `None` value is
      given, a default shape is automatically calculated.
    stride: Sequence of integers (of length 2), or an integer. `stride` will
      be expanded to define stride in all dimensions.
    rate: Sequence of integers (of length 2), or integer that is used to
      define dilation rate in all dimensions. 1 corresponds to standard 2D
      convolution, `rate > 1` corresponds to dilated convolution.
    padding: Padding algorithm, either "SAME" or "VALID".
    with_bias: Boolean, whether to include bias parameters. Default `True`.
    w_init: Optional initializer for the weights. By default the weights are
      initialized truncated random normal values with a standard deviation
      of `1 / sqrt(input_feature_size)`, which is commonly used when the
      inputs are zero centered (see https://arxiv.org/abs/1502.03167v3).
    b_init: Optional initializer for the bias. By default the bias is
      initialized to zero.
    data_format: The data format of the input.
    name: Name of the module.
  """
  super().__init__(
      num_spatial_dims=2,
      output_channels=output_channels,
      kernel_shape=kernel_shape,
      output_shape=output_shape,
      stride=stride,
      rate=rate,
      padding=padding,
      with_bias=with_bias,
      w_init=w_init,
      b_init=b_init,
      data_format=data_format,
      name=name)
deepmind/sonnet
[ 9523, 1351, 9523, 33, 1491219275 ]
def setup_loader_modules(self): return {x509: {"__opts__": {"fips_mode": False}}}
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def setup_loader_modules(self):
    self.file_managed_mock = MagicMock()
    self.file_managed_mock.return_value = {"changes": True}
    return {
        x509: {
            "__opts__": {"fips_mode": True},
            "__salt__": {
                "x509.get_pem_entry": x509_mod.get_pem_entry,
                "x509.get_private_key_size": x509_mod.get_private_key_size,
            },
            "__states__": {"file.managed": self.file_managed_mock},
        }
    }
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def test_private_key_fips_mode(self):
    """
    Test that private_key_managed passes an existing key through unchanged
    when FIPS mode is enabled.
    """
    test_key = dedent(
        """
        -----BEGIN PRIVATE KEY-----
        MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDx7UUt0cPi5G51
        FmRBhAZtZb5x6P0PFn7GwnLmSvLNhCsOcD/vq/yBUU62pknzmOjM5pgWTACZj66O
        GOFmWBg06v8+sqUbaF9PZ/CxQD5MogmQhYNgfyuopHWWgLXMub2hlP+15qGohkzg
        Tr/mXp2ohVAb6ihjqb7XV9MiZaLNVX+XWauM8SlhqXMiJyDUopEGbg2pLsHhIMcX
        1twLlyDja+uDbCMZ4jDNB+wsWxTaPRH8KizfEabB1Cl+fdyD10pSAYcodOAnlkW+
        G/DX2hwb/ZAM9B1SXTfZ3gzaIIbqXBEHcZQNXxHL7szBTVcOmfx/RPfOeRncytb9
        Mit7RIBxAgMBAAECggEAD4Pi+uRIBsYVm2a7OURpURzEUPPbPtt3d/HCgqht1+ZR
        CJUEVK+X+wcm4Cnb9kZpL7LeMBfhtfdz/2LzGagurT4g7nlwg0h3TFVjJ0ryc+G0
        cVNOsKKXPzKE5AkPH7kNw04V9Cl9Vpx+U6hZQEHzJHqgP5oNyw540cCtJriT700b
        fG1q3PYKWSkDwTiUnJTnVLybFIKQC6urxTeT2UWeiBadfDY7DjI4USfrQsqCfGMO
        uWPpOOJk5RIvw5r0Of2xvxV76xCgzVTkgtWjBRMTEkfeYx3019xKlQtAKoGbZd1T
        tF8DH0cDlnri4nG7YT8yYvx/LWVDg12E6IZij1X60QKBgQD7062JuQGEmTd99a7o
        5TcgWYqDrmE9AEgJZjN+gnEPcsxc50HJaTQgrkV0oKrS8CMbStIymbzMKWifOj7o
        gvQBVecydq1AaXePt3gRe8vBFiP4cHjFcSegs9FDvdfJR36iHOBIgEp4DWvV1vgs
        +z82LT6Qy5kxUQvnlQ4dEaGdrQKBgQD175f0H4enRJ3BoWTrqt2mTAwtJcPsKmGD
        9YfFB3H4+O2rEKP4FpBO5PFXZ0dqm54hDtxqyC/lSXorFCUjVUBero1ECGt6Gnn2
        TSnhgk0VMxvhnc0GReIt4K9WrXGd0CMUDwIhFHj8kbb1X1yqt2hwyw7b10xFVStl
        sGv8CQB+VQKBgAF9q1VZZwzl61Ivli2CzeS/IvbMnX7C9ao4lK13EDxLLbKPG/CZ
        UtmurnKWUOyWx15t/viVuGxtAlWO/rhZriAj5g6CbVwoQ7DyIR/ZX8dw3h2mbNCe
        buGgruh7wz9J0RIcoadMOySiz7SgZS++/QzRD8HDstB77loco8zAQfixAoGBALDO
        FbTocfKbjrpkmBQg24YxR9OxQb/n3AEtI/VO2+38r4h6xxaUyhwd1S9bzWjkBXOI
        poeR8XTqNQ0BR422PTeUT3SohPPcUu/yG3jG3zmta47wjjPDS85lqEgtGvA0cPN7
        srErcatJ6nlOnGUSw9/K65y6lFeH2lIZ2hfwNM2dAoGBAMVCc7i3AIhLp6UrGzjP
        0ioCHCakpxfl8s1VQp55lhHlP6Y4RfqT72Zq7ScteTrisIAQyI9ot0gsuct2miQM
        nyDdyKGki/MPduGTzzWlBA7GZEHnxbAILH8kWJ7eE/Nh7zdF1CRts8utEO9L9S+0
        lVz1j/xGOseQk4cVos681Wpw
        -----END PRIVATE KEY-----"""
    )
    test_cert = dedent(
        """
        -----BEGIN CERTIFICATE-----
        MIIDazCCAlOgAwIBAgIUAfATs1aodKw11Varh55msmU0LoowDQYJKoZIhvcNAQEL
        BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
        GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMTAzMjMwMTM4MzdaFw0yMjAz
        MjMwMTM4MzdaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
        HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
        AQUAA4IBDwAwggEKAoIBAQDx7UUt0cPi5G51FmRBhAZtZb5x6P0PFn7GwnLmSvLN
        hCsOcD/vq/yBUU62pknzmOjM5pgWTACZj66OGOFmWBg06v8+sqUbaF9PZ/CxQD5M
        ogmQhYNgfyuopHWWgLXMub2hlP+15qGohkzgTr/mXp2ohVAb6ihjqb7XV9MiZaLN
        VX+XWauM8SlhqXMiJyDUopEGbg2pLsHhIMcX1twLlyDja+uDbCMZ4jDNB+wsWxTa
        PRH8KizfEabB1Cl+fdyD10pSAYcodOAnlkW+G/DX2hwb/ZAM9B1SXTfZ3gzaIIbq
        XBEHcZQNXxHL7szBTVcOmfx/RPfOeRncytb9Mit7RIBxAgMBAAGjUzBRMB0GA1Ud
        DgQWBBT0qx4KLhozvuWAI9peT/utYV9FITAfBgNVHSMEGDAWgBT0qx4KLhozvuWA
        I9peT/utYV9FITAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQDx
        tWvUyGfEwJJg1ViBa10nVhg5sEc6KfqcPzc2GvatIGJlAbc3b1AYu6677X04SQNA
        dYRA2jcZcKudy6eolPJow6SDpkt66IqciZYdbQE5h9elnwpZxmXlJTQTB9cEwyIk
        2em5DKpdIwa9rRDlbAjAVJb3015MtpKRu2gsQ7gl5X2U3K+DFsWtBPf+0xiJqUiq
        rd7tiHF/zylubSyH/LVONJZ6+/oT/qzJfxfpvygtQWcu4b2zzME/FPenMA8W6Rau
        ZYycQfpMVc7KwqF5/wfjnkmfxoFKnkD7WQ3qFCJ/xULk/Yn1hrvNeIr+khX3qKQi
        Y3BMA5m+J+PZrNy7EQSa
        -----END CERTIFICATE-----
        """
    )
    fp, name = tempfile.mkstemp()
    with salt.utils.files.fopen(name, "w") as fd:
        fd.write(test_key)
        fd.write(test_cert)
    ret = x509.private_key_managed(name)
    self.file_managed_mock.assert_called_once()
    assert (
        self.file_managed_mock.call_args.kwargs["contents"].strip()
        == test_key.strip()
    )
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def execute(self, src_proto_path, import_proto_path, organization_name):
    self._organization_name = organization_name

    # Treat google.protobuf, google.iam as a common proto package, even
    # though they are not included in the common-protos we generate.
    #
    # TODO (geigerj): remove 'google.iam' when it is included in the common
    # protos package.
    common_protos = [
        'google.protobuf',
        'google.iam',
        'google.api',
        'google.longrunning',
        'google.rpc',
        'google.type',
        'google.logging.type',
    ]
    tmpdir = os.path.join(
        tempfile.gettempdir(), 'artman-python', str(int(time.time())))
    new_proto_dir = os.path.join(tmpdir, 'proto')
    new_src_path = set()
    new_import_path = [new_proto_dir]

    self._copy_and_transform_directories(
        src_proto_path, new_proto_dir, common_protos, paths=new_src_path)
    self._copy_and_transform_directories(
        import_proto_path, new_proto_dir, common_protos)

    # Update src_proto_path, import_proto_path
    return list(new_src_path), new_import_path
googleapis/artman
[ 133, 85, 133, 12, 1459375772 ]
def _transform(self, pkg, sep, common_protos):
    """Transform to the appropriate proto package layout.

    Works with arbitrary separator (e.g., '/' for import statements,
    '.' for proto package statements, os.path.sep for filenames)
    """
    if sep != '.' and pkg.endswith('.proto'):
        dotted = pkg[:-6].replace(sep, '.')
        suffix = '.proto'
    else:
        dotted = pkg.replace(sep, '.')
        suffix = ''

    # Sanity check: Do not transform common protos.
    for common_pkg in common_protos:
        if dotted.startswith(common_pkg):
            return pkg

    # Special case: If the organization name is "google-cloud", then we
    # have to ensure that "cloud" exists in the path. The protos
    # themselves may not follow this.
    if 'cloud' not in dotted and self._organization_name == 'google-cloud':
        dotted = dotted.replace('google.', 'google.cloud.', 1)

    # Transform into the ideal proto path.
    # What essentially should happen here is that "{api}.{vN}" should
    # change to "{api}_{vN}".
    dotted = re.sub(r'\.v([\da-z_]*)([\d]+)\b', r'_v\1\2.proto', dotted)

    # Edge case: Some internal customers use "vNalpha" and "vNbeta".
    # Rather than make the regular expression more complicated, catch
    # this as a one-off.
    if re.search(r'\.v[\d]+alpha\b', dotted):
        dotted = re.sub(r'\.v([\d]+)alpha\b', r'_v\1alpha.proto', dotted)
    if re.search(r'\.v[\d]+beta\b', dotted):
        dotted = re.sub(r'\.v([\d]+)beta\b', r'_v\1beta.proto', dotted)
    if re.search(r'\.v[\d]+eap\b', dotted):
        dotted = re.sub(r'\.v([\d]+)eap\b', r'_v\1eap.proto', dotted)

    # Done; return with the appropriate separator.
    return dotted.replace('.', sep) + suffix
googleapis/artman
[ 133, 85, 133, 12, 1459375772 ]
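To make _transform concrete, here is the version-suffix rewrite in isolation, using the same regex on an illustrative dotted path (not taken from the repo's tests):

import re

dotted = 'google.cloud.speech.v1.cloud_speech'
out = re.sub(r'\.v([\da-z_]*)([\d]+)\b', r'_v\1\2.proto', dotted)
print(out)  # google.cloud.speech_v1.proto.cloud_speech
# With sep=os.path.sep the trailing replace('.', sep) then yields
# google/cloud/speech_v1/proto/cloud_speech, i.e. a "proto" subdirectory.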
def _copy_and_transform_directories(
        self, src_directories, destination_directory, common_protos,
        paths=None):
    for path in src_directories:
        protos = list(protoc_utils.find_protos([path], []))
        for proto in protos:
            src_base_dirs = self._extract_base_dirs(proto)
            sub_new_src = os.path.join(
                destination_directory,
                self._transform(src_base_dirs, os.path.sep, common_protos))
            if paths is not None:
                paths.add(sub_new_src)

            dest = os.path.join(sub_new_src, os.path.basename(proto))
            if not os.path.exists(dest):
                self.exec_command(['mkdir', '-p', sub_new_src])
            self._copy_proto(
                proto, os.path.join(sub_new_src, dest), common_protos)
googleapis/artman
[ 133, 85, 133, 12, 1459375772 ]
def execute(self, grpc_code_dir, gapic_code_dir):
    """Move the protos into the GAPIC structure.

    This copies the ``x/y/z/proto/`` directory over to be a sibling
    of ``x/y/z/gapic/`` in the GAPIC code directory. In the event of
    an inconsistency on the prefix, the GAPIC wins.

    Args:
        grpc_code_dir (str): The location where the GRPC code was generated.
        gapic_code_dir (str): The location where the GAPIC code was
            generated.
    """
    # Determine the appropriate source and target directory.
    # We can get this by drilling in to the GAPIC artifact until we get to
    # a "gapic" directory that is outside "docs" and "tests".
    src = self._get_proto_path(grpc_code_dir)
    target = self._get_gapic_subdir_path(gapic_code_dir)

    # Move the contents into the GAPIC directory.
    self.exec_command(['mv', src, os.path.join(target, 'proto')])

    # Create an __init__.py file in the proto directory.
    # This is necessary for Python 2.7 compatibility.
    self.exec_command([
        'touch', os.path.join(target, 'proto', '__init__.py'),
    ])

    # Remove the grpc directory.
    self.exec_command(['rm', '-rf', grpc_code_dir])

    # Clear out the grpc_code_dir, so future tasks perceive it as
    # not being a thing anymore.
    return {'grpc_code_dir': None}
googleapis/artman
[ 133, 85, 133, 12, 1459375772 ]
def main():  # pragma: no cover
    execute(os.environ, sys.argv[1:], sys.stdout)
google/gif-for-cli
[ 2858, 162, 2858, 18, 1528989105 ]
def __init__(
    self,
    address: Address,
    kwargs: Optional[Dict[str, Any]] = None,
    msg_id: Optional[UID] = None,
    reply_to: Optional[Address] = None,
    reply: bool = False,
OpenMined/PySyft
[ 8617, 1908, 8617, 143, 1500410476 ]
def payload(self) -> Payload:
    kwargs_dict = {}
    if hasattr(self.kwargs, "upcast"):
        kwargs_dict = self.kwargs.upcast()  # type: ignore
    else:
        kwargs_dict = self.kwargs  # type: ignore

    try:
        # If it's not a reply message then load kwargs as a proper request payload.
        if not self.reply:
            return self.request_payload_type(**kwargs_dict)
        # If it's a reply message, then load kwargs as a proper reply payload.
        else:
            return self.reply_payload_type(**kwargs_dict)
    except PydanticValidationError:
        raise BadPayloadException
OpenMined/PySyft
[ 8617, 1908, 8617, 143, 1500410476 ]
def get_permissions(self) -> List:
    """Returns the list of permission classes applicable to the given message."""
    raise NotImplementedError
OpenMined/PySyft
[ 8617, 1908, 8617, 143, 1500410476 ]
def __init__(self, local_hostname, logger):
    logger.debug(
        "Creating CommandRunner with Args - local_hostname: {local_hostname}, "
        "logger: {logger}".format(**locals()))
    self.local_hostname = local_hostname
    self.logger = logger
teamclairvoyant/airflow-scheduler-failover-controller
[ 220, 63, 220, 7, 1475000288 ]
def run_command(self, host, base_command):
    self.logger.debug("Running Command: " + str(base_command))
    if host == self.local_hostname or host in self.HOST_LIST_TO_RUN_LOCAL:
        return self._run_local_command(base_command)
    else:
        return self._run_ssh_command(host, base_command)
teamclairvoyant/airflow-scheduler-failover-controller
[ 220, 63, 220, 7, 1475000288 ]
def _run_local_command(self, base_command):
    self.logger.debug("Running command as Local command")
    output = os.popen(base_command).read()
    if output:
        output = output.split("\n")
    self.logger.debug("Run Command output: " + str(output))
    return True, output
teamclairvoyant/airflow-scheduler-failover-controller
[ 220, 63, 220, 7, 1475000288 ]
def __init__(self, jvalue=None, **kwargs): super(Sequential, self).__init__(jvalue, **kwargs)
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def is_built(self):
    try:
        self.get_output_shape()
        return True
    except Exception:  # a bare except would also swallow KeyboardInterrupt
        return False
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def from_jvalue(jvalue, bigdl_type="float"):
    """
    Create a Python Model based on the given java value

    :param jvalue: Java object created by Py4j
    :return: A Python Model
    """
    model = Sequential(jvalue=jvalue)
    model.value = jvalue
    return model
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def __init__(self, input, output, jvalue=None, **kwargs): super(Model, self).__init__(jvalue, to_list(input), to_list(output), **kwargs)
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def new_graph(self, outputs):
    value = callZooFunc(self.bigdl_type, "newGraph", self.value, outputs)
    return self.from_jvalue(value)
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def unfreeze(self, names): callZooFunc(self.bigdl_type, "unFreeze", self.value, names)
intel-analytics/analytics-zoo
[ 2553, 722, 2553, 534, 1493951250 ]
def env(tmpdir, redis_cache):
    basedir = tmpdir
    conffile = tmpdir.join('flask-resize-conf.py')
    conffile.write(
        """
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def run(env, *args): return subprocess.check_output(args, env=env).decode().splitlines()
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_usage(env): assert 'usage: flask-resize' in run(env, 'flask-resize', '--help')[0]
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_list_images_empty(env): assert run(env, 'flask-resize', 'list', 'images') == []
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_list_has_images( env, resizetarget_opts, image1_name, image1_data, image1_key
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_list_cache_empty(env, redis_cache): assert run(env, 'flask-resize', 'list', 'cache') == []
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_list_has_cache(env, redis_cache):
    redis_cache.add('hello')
    redis_cache.add('buh-bye')
    assert set(run(env, 'flask-resize', 'list', 'cache')) == \
        {'hello', 'buh-bye'}
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_clear_images( env, resizetarget_opts, image1_name, image1_data
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_clear_cache(env, redis_cache):
    redis_cache.add('foo bar')
    assert run(env, 'flask-resize', 'clear', 'cache') == []
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def test_bin_sync_cache( env, resizetarget_opts, image1_name, image1_data, image1_key, redis_cache
jmagnusson/Flask-Resize
[ 47, 11, 47, 6, 1384049068 ]
def dict_for_mongo_without_userform_id(parsed_instance):
    d = parsed_instance.to_dict_for_mongo()
    # remove _userform_id since it's not returned by the API
    d.pop(ParsedInstance.USERFORM_ID)
    return d
makinacorpus/formhub
[ 1, 3, 1, 1, 1415368669 ]
def setUp(self):
    MainTestCase.setUp(self)
    self._create_user_and_login()
    self._publish_transportation_form_and_submit_instance()
    self.api_url = reverse(api, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
makinacorpus/formhub
[ 1, 3, 1, 1, 1415368669 ]
def test_api_with_query(self):
    # query string
    query = '{"transport/available_transportation_types_to_referral_facility":"none"}'
    data = {'query': query}
    response = self.client.get(self.api_url, data)
    self.assertEqual(response.status_code, 200)
    d = dict_for_mongo_without_userform_id(
        self.xform.surveys.all()[0].parsed_instance)
    find_d = json.loads(response.content)[0]
    self.assertEqual(sorted(find_d, key=find_d.get), sorted(d, key=d.get))
makinacorpus/formhub
[ 1, 3, 1, 1, 1415368669 ]
def test_handle_bad_json(self):
    response = self.client.get(self.api_url, {'query': 'bad'})
    self.assertEqual(response.status_code, 400)
    self.assertEqual(True, 'JSON' in response.content)
makinacorpus/formhub
[ 1, 3, 1, 1, 1415368669 ]
def test_api_with_query_start_limit(self):
    # query string
    query = '{"transport/available_transportation_types_to_referral_facility":"none"}'
    data = {'query': query, 'start': 0, 'limit': 10}
    response = self.client.get(self.api_url, data)
    self.assertEqual(response.status_code, 200)
    d = dict_for_mongo_without_userform_id(
        self.xform.surveys.all()[0].parsed_instance)
    find_d = json.loads(response.content)[0]
    self.assertEqual(sorted(find_d, key=find_d.get), sorted(d, key=d.get))
makinacorpus/formhub
[ 1, 3, 1, 1, 1415368669 ]
def test_api_count(self):
    # query string
    query = '{"transport/available_transportation_types_to_referral_facility":"none"}'
    data = {'query': query, 'count': 1}
    response = self.client.get(self.api_url, data)
    self.assertEqual(response.status_code, 200)
    find_d = json.loads(response.content)[0]
    self.assertIn('count', find_d)  # dict.has_key() is Python 2 only
    self.assertEqual(find_d.get('count'), 1)
makinacorpus/formhub
[ 1, 3, 1, 1, 1415368669 ]
def test_api_decode_from_mongo(self):
    field = "$section1.group01.question1"
    encoded = _encode_for_mongo(field)
    self.assertEqual(
        encoded,
        "%(dollar)ssection1%(dot)sgroup01%(dot)squestion1" % {
            "dollar": base64.b64encode("$"),
            "dot": base64.b64encode("."),
        })
    decoded = _decode_from_mongo(encoded)
    self.assertEqual(field, decoded)
makinacorpus/formhub
[ 1, 3, 1, 1, 1415368669 ]
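MongoDB forbids '$' and '.' in document keys, which is why _encode_for_mongo escapes them above. The substitution is easy to reproduce (Python 3's b64encode takes bytes, so this differs slightly from the Python 2-era test):

import base64

dollar = base64.b64encode(b"$").decode()  # 'JA=='
dot = base64.b64encode(b".").decode()     # 'Lg=='
field = "$section1.group01.question1"
encoded = field.replace("$", dollar).replace(".", dot)
print(encoded)  # JA==section1Lg==group01Lg==question1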
def testIncomplete(self): self.fail("Add header tests for <CoreGraphics/CGPDFDictionary.h>")
albertz/music-player
[ 483, 61, 483, 16, 1345772141 ]
def __init__(self, con, query_templates, schema, table, name, dtype,
             keys_per_column):
    self._con = con
    self._query_templates = query_templates
    self.schema = schema
    self.table = table
    self.name = name
    self.type = dtype
    self.keys_per_column = keys_per_column
    self.foreign_keys = []
    self.ref_keys = []
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def __str__(self): return "Column({0})<{1}>".format(self.name, self.__hash__())
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def _str_foreign_keys(self):
    keys = []
    for col in self.foreign_keys:
        keys.append("%s.%s" % (col.table, col.name))
    if self.keys_per_column is not None and len(keys) > self.keys_per_column:
        keys = keys[0:self.keys_per_column] + \
            ['(+ {0} more)'.format(len(keys) - self.keys_per_column)]
    return ", ".join(keys)
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def head(self, n=6):
    """
    Returns the first n values of your column as a Series. This is executing:

        SELECT
            <name_of_the_column>
        FROM
            <name_of_the_table>
        LIMIT <n>

    Parameters
    ----------
    n: int
        number of rows to return

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.tables.Customer.City.head()
    0    Sao Jose dos Campos
    1              Stuttgart
    2               Montreal
    3                   Oslo
    4                 Prague
    5                 Prague
    Name: City, dtype: object
    >>> db.tables.Customer.City.head(2)
    0    Sao Jose dos Campos
    1              Stuttgart
    Name: City, dtype: object
    """
    q = self._query_templates['column']['head'].format(
        column=self.name, schema=self.schema, table=self.table, n=n)
    return pd.read_sql(q, self._con)[self.name]
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def unique(self):
    """
    Returns all unique values as a Series. This is executing:

        SELECT DISTINCT
            <name_of_the_column>
        FROM
            <name_of_the_table>

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.tables.Customer.FirstName.unique().head(10)
    0         Luis
    1       Leonie
    2     Francois
    3        Bjorn
    4    František
    5       Helena
    6       Astrid
    7         Daan
    8         Kara
    9      Eduardo
    Name: FirstName, dtype: object
    >>> len(db.tables.Customer.LastName.unique())
    59
    """
    q = self._query_templates['column']['unique'].format(
        column=self.name, schema=self.schema, table=self.table)
    return pd.read_sql(q, self._con)[self.name]
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def to_dict(self):
    """Serialize representation of the column for local caching."""
    return {'schema': self.schema, 'table': self.table, 'name': self.name,
            'type': self.type}
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]