Dataset columns (type and observed value-length range):

    nwo                string, lengths 5–106
    sha                string, lengths 40–40
    path               string, lengths 4–174
    language           string, 1 distinct value
    identifier         string, lengths 1–140
    parameters         string, lengths 0–87.7k
    argument_list      string, 1 distinct value
    return_statement   string, lengths 0–426k
    docstring          string, lengths 0–64.3k
    docstring_summary  string, lengths 0–26.3k
    docstring_tokens   list
    function           string, lengths 18–4.83M
    function_tokens    list
    url                string, lengths 83–304
EdwardTyantov/ultrasound-nerve-segmentation
84a614009cdce6426628b7dbf159fc5a445fe302
train_generator.py
python
Learner.split_train_and_valid_by_patient
(cls, data, mask, validation_split, shuffle=False)
return (x_train, y_train), (x_valid, y_valid)
def split_train_and_valid_by_patient(cls, data, mask, validation_split, shuffle=False):
    print('Shuffle & split...')
    patient_nums = load_patient_num()
    patient_dict = count_enum(patient_nums)
    pnum = len(patient_dict)
    val_num = int(pnum * validation_split)
    patients = patient_dict.keys()
    if shuffle:
        random.shuffle(patients)
    val_p, train_p = patients[:val_num], patients[val_num:]
    train_indexes = [i for i, c in enumerate(patient_nums) if c in set(train_p)]
    val_indexes = [i for i, c in enumerate(patient_nums) if c in set(val_p)]
    x_train, y_train = data[train_indexes], mask[train_indexes]
    x_valid, y_valid = data[val_indexes], mask[val_indexes]
    cls.save_valid_idx(val_indexes)
    print('val patients:', len(x_valid), val_p)
    print('train patients:', len(x_train), train_p)
    return (x_train, y_train), (x_valid, y_valid)
[ "def", "split_train_and_valid_by_patient", "(", "cls", ",", "data", ",", "mask", ",", "validation_split", ",", "shuffle", "=", "False", ")", ":", "print", "(", "'Shuffle & split...'", ")", "patient_nums", "=", "load_patient_num", "(", ")", "patient_dict", "=", "count_enum", "(", "patient_nums", ")", "pnum", "=", "len", "(", "patient_dict", ")", "val_num", "=", "int", "(", "pnum", "*", "validation_split", ")", "patients", "=", "patient_dict", ".", "keys", "(", ")", "if", "shuffle", ":", "random", ".", "shuffle", "(", "patients", ")", "val_p", ",", "train_p", "=", "patients", "[", ":", "val_num", "]", ",", "patients", "[", "val_num", ":", "]", "train_indexes", "=", "[", "i", "for", "i", ",", "c", "in", "enumerate", "(", "patient_nums", ")", "if", "c", "in", "set", "(", "train_p", ")", "]", "val_indexes", "=", "[", "i", "for", "i", ",", "c", "in", "enumerate", "(", "patient_nums", ")", "if", "c", "in", "set", "(", "val_p", ")", "]", "x_train", ",", "y_train", "=", "data", "[", "train_indexes", "]", ",", "mask", "[", "train_indexes", "]", "x_valid", ",", "y_valid", "=", "data", "[", "val_indexes", "]", ",", "mask", "[", "val_indexes", "]", "cls", ".", "save_valid_idx", "(", "val_indexes", ")", "print", "(", "'val patients:'", ",", "len", "(", "x_valid", ")", ",", "val_p", ")", "print", "(", "'train patients:'", ",", "len", "(", "x_train", ")", ",", "train_p", ")", "return", "(", "x_train", ",", "y_train", ")", ",", "(", "x_valid", ",", "y_valid", ")" ]
https://github.com/EdwardTyantov/ultrasound-nerve-segmentation/blob/84a614009cdce6426628b7dbf159fc5a445fe302/train_generator.py#L105-L122
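The same leakage-free split (every image of a patient lands on one side) can be sketched with scikit-learn's GroupShuffleSplit; this is an illustrative alternative, not the repository's code, and assumes data, mask, and per-sample patient_nums as above:

from sklearn.model_selection import GroupShuffleSplit

def split_by_patient(data, mask, patient_nums, validation_split=0.2, seed=0):
    # groups=patient_nums keeps all samples from one patient in one split
    splitter = GroupShuffleSplit(n_splits=1, test_size=validation_split,
                                 random_state=seed)
    train_idx, val_idx = next(splitter.split(data, groups=patient_nums))
    return (data[train_idx], mask[train_idx]), (data[val_idx], mask[val_idx])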
JinpengLI/deep_ocr
450148c0c51b3565a96ac2f3c94ee33022e55307
deep_ocr/ocrolib/toplevel.py
python
checks
(*types,**ktypes)
return argument_check_decorator
Check argument and return types against type specs at runtime.
def checks(*types, **ktypes):
    """Check argument and return types against type specs at runtime."""
    def argument_check_decorator(f):
        @functools.wraps(f)
        def argument_checks(*args, **kw):
            # print("@@@", f, "decl", types, ktypes, "call",
            #       [strc(x) for x in args], kw)
            name = f.func_name
            argnames = f.func_code.co_varnames[:f.func_code.co_argcount]
            kw3 = [(var, value, ktypes.get(var, True)) for var, value in kw.items()]
            for var, value, type_ in zip(argnames, args, types) + kw3:
                try:
                    checktype(value, type_)
                except AssertionError as e:
                    raise CheckError(e.message, *e.args, var=var, fun=f)
                except CheckError as e:
                    e.fun = f
                    e.var = var
                    raise e
                except:
                    print("unknown exception while checking function:", name)
                    raise
            result = f(*args, **kw)
            checktype(result, kw.get("_", True))
            return result
        return argument_checks
    return argument_check_decorator
[ "def", "checks", "(", "*", "types", ",", "*", "*", "ktypes", ")", ":", "def", "argument_check_decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "argument_checks", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "# print(\"@@@\", f, \"decl\", types, ktypes, \"call\",", "# [strc(x) for x in args], kw)", "name", "=", "f", ".", "func_name", "argnames", "=", "f", ".", "func_code", ".", "co_varnames", "[", ":", "f", ".", "func_code", ".", "co_argcount", "]", "kw3", "=", "[", "(", "var", ",", "value", ",", "ktypes", ".", "get", "(", "var", ",", "True", ")", ")", "for", "var", ",", "value", "in", "kw", ".", "items", "(", ")", "]", "for", "var", ",", "value", ",", "type_", "in", "zip", "(", "argnames", ",", "args", ",", "types", ")", "+", "kw3", ":", "try", ":", "checktype", "(", "value", ",", "type_", ")", "except", "AssertionError", "as", "e", ":", "raise", "CheckError", "(", "e", ".", "message", ",", "*", "e", ".", "args", ",", "var", "=", "var", ",", "fun", "=", "f", ")", "except", "CheckError", "as", "e", ":", "e", ".", "fun", "=", "f", "e", ".", "var", "=", "var", "raise", "e", "except", ":", "print", "(", "\"unknown exception while checking function:\"", ",", "name", ")", "raise", "result", "=", "f", "(", "*", "args", ",", "*", "*", "kw", ")", "checktype", "(", "result", ",", "kw", ".", "get", "(", "\"_\"", ",", "True", ")", ")", "return", "result", "return", "argument_checks", "return", "argument_check_decorator" ]
https://github.com/JinpengLI/deep_ocr/blob/450148c0c51b3565a96ac2f3c94ee33022e55307/deep_ocr/ocrolib/toplevel.py#L195-L221
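A minimal usage sketch of the decorator; Python 2 is assumed (the wrapper reads f.func_name and f.func_code), and checktype is assumed to validate a value against a type spec:

# Hypothetical usage: declare per-argument type specs positionally.
@checks(int, int)
def add(a, b):
    return a + b

add(2, 3)     # passes: both arguments check out as ints
add(2, "3")   # checktype fails and a CheckError is raised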
libtcod/python-tcod
e12c4172baa9efdfd74aff6ee9bab8454a835248
tcod/libtcodpy.py
python
map_get_width
(map: tcod.map.Map)
return map.width
Return the width of a map. .. deprecated:: 4.5 Check the :any:`tcod.map.Map.width` attribute instead.
Return the width of a map.
[ "Return", "the", "width", "of", "a", "map", "." ]
def map_get_width(map: tcod.map.Map) -> int:
    """Return the width of a map.

    .. deprecated:: 4.5
        Check the :any:`tcod.map.Map.width` attribute instead.
    """
    return map.width
[ "def", "map_get_width", "(", "map", ":", "tcod", ".", "map", ".", "Map", ")", "->", "int", ":", "return", "map", ".", "width" ]
https://github.com/libtcod/python-tcod/blob/e12c4172baa9efdfd74aff6ee9bab8454a835248/tcod/libtcodpy.py#L3329-L3335
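Since this is a deprecated shim, the attribute access it forwards to is preferred; a small sketch:

import tcod.map

m = tcod.map.Map(10, 5)              # width=10, height=5
assert map_get_width(m) == m.width   # new code should read m.width directly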
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.py
python
Marker.__str__
(self)
return _format_marker(self._markers)
def __str__(self):
    return _format_marker(self._markers)
[ "def", "__str__", "(", "self", ")", ":", "return", "_format_marker", "(", "self", ".", "_markers", ")" ]
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/setuptools/_vendor/packaging/markers.py#L282-L283
GalSim-developers/GalSim
a05d4ec3b8d8574f99d3b0606ad882cbba53f345
galsim/correlatednoise.py
python
BaseCorrelatedNoise.drawImage
(self, image=None, scale=None, wcs=None, dtype=None, add_to_image=False)
return self._profile.drawImage( image=image, wcs=wcs, dtype=dtype, method='sb', gain=1., add_to_image=add_to_image, use_true_center=False)
A method for drawing profiles storing correlation functions. This is a mild reimplementation of the `GSObject.drawImage` method. The ``method`` is automatically set to 'sb' and cannot be changed, and the ``gain`` is set to unity. Also, not all the normal parameters of the `GSObject` method are available. If ``scale`` and ``wcs`` are not set, and the ``image`` has no ``wcs`` attribute, then this will use the wcs of the `BaseCorrelatedNoise` object. Parameters: image: If provided, this will be the image on which to draw the profile. If ``image`` is None, then an automatically-sized `Image` will be created. If ``image`` is given, but its bounds are undefined (e.g. if it was constructed with ``image = galsim.Image()``), then it will be resized appropriately based on the profile's size [default: None]. scale: If provided, use this as the pixel scale for the image. [default: None] wcs: If provided, use this as the wcs for the image (possibly overriding any existing ``image.wcs``). At most one of ``scale`` or ``wcs`` may be provided. [default: None] Note: If no WCS is provided either via ``scale``, ``wcs`` or ``image.wcs``, then the noise object's wcs will be used. dtype: The data type to use for an automatically constructed image. Only valid if ``image`` is None. [default: None, which means to use numpy.float32] add_to_image: Whether to add flux to the existing image rather than clear out anything in the image before drawing. Note: This requires that ``image`` be provided and that it have defined bounds. [default: False] Returns: an `Image` of the correlation function.
A method for drawing profiles storing correlation functions.
[ "A", "method", "for", "drawing", "profiles", "storing", "correlation", "functions", "." ]
def drawImage(self, image=None, scale=None, wcs=None, dtype=None, add_to_image=False):
    """A method for drawing profiles storing correlation functions.

    This is a mild reimplementation of the `GSObject.drawImage` method.  The ``method`` is
    automatically set to 'sb' and cannot be changed, and the ``gain`` is set to unity.
    Also, not all the normal parameters of the `GSObject` method are available.

    If ``scale`` and ``wcs`` are not set, and the ``image`` has no ``wcs`` attribute, then
    this will use the wcs of the `BaseCorrelatedNoise` object.

    Parameters:
        image:          If provided, this will be the image on which to draw the profile.
                        If ``image`` is None, then an automatically-sized `Image` will be
                        created.  If ``image`` is given, but its bounds are undefined (e.g.
                        if it was constructed with ``image = galsim.Image()``), then it will
                        be resized appropriately based on the profile's size [default: None].
        scale:          If provided, use this as the pixel scale for the image.
                        [default: None]
        wcs:            If provided, use this as the wcs for the image (possibly overriding
                        any existing ``image.wcs``).  At most one of ``scale`` or ``wcs``
                        may be provided.  [default: None]  Note: If no WCS is provided
                        either via ``scale``, ``wcs`` or ``image.wcs``, then the noise
                        object's wcs will be used.
        dtype:          The data type to use for an automatically constructed image.  Only
                        valid if ``image`` is None.  [default: None, which means to use
                        numpy.float32]
        add_to_image:   Whether to add flux to the existing image rather than clear out
                        anything in the image before drawing.  Note: This requires that
                        ``image`` be provided and that it have defined bounds.
                        [default: False]

    Returns:
        an `Image` of the correlation function.
    """
    wcs = self._profile._determine_wcs(scale, wcs, image, self.wcs)
    return self._profile.drawImage(
        image=image, wcs=wcs, dtype=dtype, method='sb', gain=1.,
        add_to_image=add_to_image, use_true_center=False)
[ "def", "drawImage", "(", "self", ",", "image", "=", "None", ",", "scale", "=", "None", ",", "wcs", "=", "None", ",", "dtype", "=", "None", ",", "add_to_image", "=", "False", ")", ":", "wcs", "=", "self", ".", "_profile", ".", "_determine_wcs", "(", "scale", ",", "wcs", ",", "image", ",", "self", ".", "wcs", ")", "return", "self", ".", "_profile", ".", "drawImage", "(", "image", "=", "image", ",", "wcs", "=", "wcs", ",", "dtype", "=", "dtype", ",", "method", "=", "'sb'", ",", "gain", "=", "1.", ",", "add_to_image", "=", "add_to_image", ",", "use_true_center", "=", "False", ")" ]
https://github.com/GalSim-developers/GalSim/blob/a05d4ec3b8d8574f99d3b0606ad882cbba53f345/galsim/correlatednoise.py#L658-L695
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
chap19/monitor/monitor/monitor/api/xmlutil.py
python
SlaveTemplate.__repr__
(self)
return ("<%s.%s object versions %s-%s at %#x>" % (self.__class__.__module__, self.__class__.__name__, self.min_vers, self.max_vers, id(self)))
Return string representation of the template.
def __repr__(self):
    """Return string representation of the template."""
    return ("<%s.%s object versions %s-%s at %#x>" %
            (self.__class__.__module__, self.__class__.__name__,
             self.min_vers, self.max_vers, id(self)))
[ "def", "__repr__", "(", "self", ")", ":", "return", "(", "\"<%s.%s object versions %s-%s at %#x>\"", "%", "(", "self", ".", "__class__", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "min_vers", ",", "self", ".", "max_vers", ",", "id", "(", "self", ")", ")", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/chap19/monitor/monitor/monitor/api/xmlutil.py#L789-L794
msg-systems/holmes-extractor
fc536f32a5cd02a53d1c32f771adc14227d09f38
holmes_extractor/parsing.py
python
HolmesDocumentInfo.deserialize_obj
(obj, chain=None)
return obj if chain is None else chain(obj)
def deserialize_obj(obj, chain=None):
    if '__holmes_document_info_holder__' in obj:
        return pickle.loads(obj['__holmes_document_info_holder__'])
    return obj if chain is None else chain(obj)
[ "def", "deserialize_obj", "(", "obj", ",", "chain", "=", "None", ")", ":", "if", "'__holmes_document_info_holder__'", "in", "obj", ":", "return", "pickle", ".", "loads", "(", "obj", "[", "'__holmes_document_info_holder__'", "]", ")", "return", "obj", "if", "chain", "is", "None", "else", "chain", "(", "obj", ")" ]
https://github.com/msg-systems/holmes-extractor/blob/fc536f32a5cd02a53d1c32f771adc14227d09f38/holmes_extractor/parsing.py#L245-L248
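The matching serializer is not shown in this record; a hypothetical counterpart implied by the hook above (the serialize_obj name and the HolmesDocumentInfo check are assumptions) would look like:

def serialize_obj(obj, chain=None):
    # Assumed counterpart: wrap the object in the marker dict that
    # deserialize_obj above unpacks; otherwise defer to the next hook.
    if isinstance(obj, HolmesDocumentInfo):
        return {'__holmes_document_info_holder__': pickle.dumps(obj)}
    return obj if chain is None else chain(obj)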
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/stat.py
python
S_ISDOOR
(mode)
return False
Return True if mode is from a door.
def S_ISDOOR(mode):
    """Return True if mode is from a door."""
    return False
[ "def", "S_ISDOOR", "(", "mode", ")", ":", "return", "False" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/stat.py#L78-L80
facebookresearch/hydra
9b2f4d54b328d1551aa70a241a1d638cbe046367
hydra/_internal/hydra.py
python
Hydra.get_sanitized_cfg
(self, cfg: DictConfig, cfg_type: str)
return cfg
def get_sanitized_cfg(self, cfg: DictConfig, cfg_type: str) -> DictConfig:
    assert cfg_type in ["job", "hydra", "all"]
    if cfg_type == "job":
        with flag_override(cfg, ["struct", "readonly"], [False, False]):
            del cfg["hydra"]
    elif cfg_type == "hydra":
        cfg = self.get_sanitized_hydra_cfg(cfg)
    return cfg
[ "def", "get_sanitized_cfg", "(", "self", ",", "cfg", ":", "DictConfig", ",", "cfg_type", ":", "str", ")", "->", "DictConfig", ":", "assert", "cfg_type", "in", "[", "\"job\"", ",", "\"hydra\"", ",", "\"all\"", "]", "if", "cfg_type", "==", "\"job\"", ":", "with", "flag_override", "(", "cfg", ",", "[", "\"struct\"", ",", "\"readonly\"", "]", ",", "[", "False", ",", "False", "]", ")", ":", "del", "cfg", "[", "\"hydra\"", "]", "elif", "cfg_type", "==", "\"hydra\"", ":", "cfg", "=", "self", ".", "get_sanitized_hydra_cfg", "(", "cfg", ")", "return", "cfg" ]
https://github.com/facebookresearch/hydra/blob/9b2f4d54b328d1551aa70a241a1d638cbe046367/hydra/_internal/hydra.py#L156-L163
nficano/tangerine
80a1498a3a33625a7a59e5b74da025fdb994b23b
tangerine/bot.py
python
Tangerine.listen_for
(self, rule, **options)
return decorator
Decorator for adding a Rule. See guidelines for rules.
def listen_for(self, rule, **options):
    """Decorator for adding a Rule. See guidelines for rules."""
    trigger = None
    if isinstance(rule, six.string_types):
        trigger = rule
        rule = self._verify_rule(rule)

    def decorator(f):
        self.add_listener(rule, f, trigger, f.__doc__, **options)
        return f
    return decorator
[ "def", "listen_for", "(", "self", ",", "rule", ",", "*", "*", "options", ")", ":", "trigger", "=", "None", "if", "isinstance", "(", "rule", ",", "six", ".", "string_types", ")", ":", "trigger", "=", "rule", "rule", "=", "self", ".", "_verify_rule", "(", "rule", ")", "def", "decorator", "(", "f", ")", ":", "self", ".", "add_listener", "(", "rule", ",", "f", ",", "trigger", ",", "f", ".", "__doc__", ",", "*", "*", "options", ")", "return", "f", "return", "decorator" ]
https://github.com/nficano/tangerine/blob/80a1498a3a33625a7a59e5b74da025fdb994b23b/tangerine/bot.py#L83-L94
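A hedged usage sketch; the handler signature and the Tangerine construction are assumptions, only the decorator itself comes from the record above:

bot = Tangerine('xoxb-hypothetical-token')   # hypothetical construction

@bot.listen_for('ping')
def ping(user, message):
    """Reply to a ping."""
    return 'pong'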
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/docutils-0.14/docutils/utils/math/math2html.py
python
MacroFunction.parseoptional
(self, pos, defaults)
Parse optional parameters.
def parseoptional(self, pos, defaults):
    "Parse optional parameters."
    optional = []
    while self.factory.detecttype(SquareBracket, pos):
        optional.append(self.parsesquare(pos))
        if len(optional) > len(defaults):
            break
    for value in optional:
        default = defaults.pop()
        if len(value.contents) > 0:
            self.values.append(value)
        else:
            self.values.append(default)
    self.values += defaults
[ "def", "parseoptional", "(", "self", ",", "pos", ",", "defaults", ")", ":", "optional", "=", "[", "]", "while", "self", ".", "factory", ".", "detecttype", "(", "SquareBracket", ",", "pos", ")", ":", "optional", ".", "append", "(", "self", ".", "parsesquare", "(", "pos", ")", ")", "if", "len", "(", "optional", ")", ">", "len", "(", "defaults", ")", ":", "break", "for", "value", "in", "optional", ":", "default", "=", "defaults", ".", "pop", "(", ")", "if", "len", "(", "value", ".", "contents", ")", ">", "0", ":", "self", ".", "values", ".", "append", "(", "value", ")", "else", ":", "self", ".", "values", ".", "append", "(", "default", ")", "self", ".", "values", "+=", "defaults" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/docutils-0.14/docutils/utils/math/math2html.py#L5262-L5275
brightmart/multi-label_classification
b5febe17eaf9d937d71cabab56c5da48ee68f7b5
bert/modeling.py
python
BertModel.__init__
(self, config, is_training, input_ids, input_mask=None, token_type_ids=None, use_one_hot_embeddings=False, scope=None)
Constructor for BertModel. Args: config: `BertConfig` instance. is_training: bool. true for training model, false for eval model. Controls whether dropout will be applied. input_ids: int32 Tensor of shape [batch_size, seq_length]. input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. use_one_hot_embeddings: (optional) bool. Whether to use one-hot word embeddings or tf.embedding_lookup() for the word embeddings. scope: (optional) variable scope. Defaults to "bert". Raises: ValueError: The config is invalid or one of the input tensor shapes is invalid.
Constructor for BertModel.
[ "Constructor", "for", "BertModel", "." ]
def __init__(self,
             config,
             is_training,
             input_ids,
             input_mask=None,
             token_type_ids=None,
             use_one_hot_embeddings=False,
             scope=None):
  """Constructor for BertModel.

  Args:
    config: `BertConfig` instance.
    is_training: bool. true for training model, false for eval model. Controls
      whether dropout will be applied.
    input_ids: int32 Tensor of shape [batch_size, seq_length].
    input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
    use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
      embeddings or tf.embedding_lookup() for the word embeddings.
    scope: (optional) variable scope. Defaults to "bert".

  Raises:
    ValueError: The config is invalid or one of the input tensor shapes
      is invalid.
  """
  config = copy.deepcopy(config)
  if not is_training:
    config.hidden_dropout_prob = 0.0
    config.attention_probs_dropout_prob = 0.0

  input_shape = get_shape_list(input_ids, expected_rank=2)
  batch_size = input_shape[0]
  seq_length = input_shape[1]

  if input_mask is None:
    input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)

  if token_type_ids is None:
    token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)

  with tf.variable_scope(scope, default_name="bert"):
    with tf.variable_scope("embeddings"):
      # Perform embedding lookup on the word ids.
      (self.embedding_output, self.embedding_table) = embedding_lookup(
          input_ids=input_ids,
          vocab_size=config.vocab_size,
          embedding_size=config.hidden_size,
          initializer_range=config.initializer_range,
          word_embedding_name="word_embeddings",
          use_one_hot_embeddings=use_one_hot_embeddings)

      # Add positional embeddings and token type embeddings, then layer
      # normalize and perform dropout.
      self.embedding_output = embedding_postprocessor(
          input_tensor=self.embedding_output,
          use_token_type=True,
          token_type_ids=token_type_ids,
          token_type_vocab_size=config.type_vocab_size,
          token_type_embedding_name="token_type_embeddings",
          use_position_embeddings=True,
          position_embedding_name="position_embeddings",
          initializer_range=config.initializer_range,
          max_position_embeddings=config.max_position_embeddings,
          dropout_prob=config.hidden_dropout_prob)

    with tf.variable_scope("encoder"):
      # This converts a 2D mask of shape [batch_size, seq_length] to a 3D
      # mask of shape [batch_size, seq_length, seq_length] which is used
      # for the attention scores.
      attention_mask = create_attention_mask_from_input_mask(
          input_ids, input_mask)

      # Run the stacked transformer.
      # `sequence_output` shape = [batch_size, seq_length, hidden_size].
      self.all_encoder_layers = transformer_model(
          input_tensor=self.embedding_output,
          attention_mask=attention_mask,
          hidden_size=config.hidden_size,
          num_hidden_layers=config.num_hidden_layers,
          num_attention_heads=config.num_attention_heads,
          intermediate_size=config.intermediate_size,
          intermediate_act_fn=get_activation(config.hidden_act),
          hidden_dropout_prob=config.hidden_dropout_prob,
          attention_probs_dropout_prob=config.attention_probs_dropout_prob,
          initializer_range=config.initializer_range,
          do_return_all_layers=True)

    self.sequence_output = self.all_encoder_layers[-1]  # [batch_size, seq_length, hidden_size]

    # The "pooler" converts the encoded sequence tensor of shape
    # [batch_size, seq_length, hidden_size] to a tensor of shape
    # [batch_size, hidden_size]. This is necessary for segment-level
    # (or segment-pair-level) classification tasks where we need a fixed
    # dimensional representation of the segment.
    with tf.variable_scope("pooler"):
      # We "pool" the model by simply taking the hidden state corresponding
      # to the first token. We assume that this has been pre-trained
      first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
      self.pooled_output = tf.layers.dense(
          first_token_tensor,
          config.hidden_size,
          activation=tf.tanh,
          kernel_initializer=create_initializer(config.initializer_range))
[ "def", "__init__", "(", "self", ",", "config", ",", "is_training", ",", "input_ids", ",", "input_mask", "=", "None", ",", "token_type_ids", "=", "None", ",", "use_one_hot_embeddings", "=", "False", ",", "scope", "=", "None", ")", ":", "config", "=", "copy", ".", "deepcopy", "(", "config", ")", "if", "not", "is_training", ":", "config", ".", "hidden_dropout_prob", "=", "0.0", "config", ".", "attention_probs_dropout_prob", "=", "0.0", "input_shape", "=", "get_shape_list", "(", "input_ids", ",", "expected_rank", "=", "2", ")", "batch_size", "=", "input_shape", "[", "0", "]", "seq_length", "=", "input_shape", "[", "1", "]", "if", "input_mask", "is", "None", ":", "input_mask", "=", "tf", ".", "ones", "(", "shape", "=", "[", "batch_size", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", "if", "token_type_ids", "is", "None", ":", "token_type_ids", "=", "tf", ".", "zeros", "(", "shape", "=", "[", "batch_size", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", "with", "tf", ".", "variable_scope", "(", "scope", ",", "default_name", "=", "\"bert\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"embeddings\"", ")", ":", "# Perform embedding lookup on the word ids.", "(", "self", ".", "embedding_output", ",", "self", ".", "embedding_table", ")", "=", "embedding_lookup", "(", "input_ids", "=", "input_ids", ",", "vocab_size", "=", "config", ".", "vocab_size", ",", "embedding_size", "=", "config", ".", "hidden_size", ",", "initializer_range", "=", "config", ".", "initializer_range", ",", "word_embedding_name", "=", "\"word_embeddings\"", ",", "use_one_hot_embeddings", "=", "use_one_hot_embeddings", ")", "# Add positional embeddings and token type embeddings, then layer", "# normalize and perform dropout.", "self", ".", "embedding_output", "=", "embedding_postprocessor", "(", "input_tensor", "=", "self", ".", "embedding_output", ",", "use_token_type", "=", "True", ",", "token_type_ids", "=", "token_type_ids", ",", "token_type_vocab_size", "=", "config", ".", "type_vocab_size", ",", "token_type_embedding_name", "=", "\"token_type_embeddings\"", ",", "use_position_embeddings", "=", "True", ",", "position_embedding_name", "=", "\"position_embeddings\"", ",", "initializer_range", "=", "config", ".", "initializer_range", ",", "max_position_embeddings", "=", "config", ".", "max_position_embeddings", ",", "dropout_prob", "=", "config", ".", "hidden_dropout_prob", ")", "with", "tf", ".", "variable_scope", "(", "\"encoder\"", ")", ":", "# This converts a 2D mask of shape [batch_size, seq_length] to a 3D", "# mask of shape [batch_size, seq_length, seq_length] which is used", "# for the attention scores.", "attention_mask", "=", "create_attention_mask_from_input_mask", "(", "input_ids", ",", "input_mask", ")", "# Run the stacked transformer.", "# `sequence_output` shape = [batch_size, seq_length, hidden_size].", "self", ".", "all_encoder_layers", "=", "transformer_model", "(", "input_tensor", "=", "self", ".", "embedding_output", ",", "attention_mask", "=", "attention_mask", ",", "hidden_size", "=", "config", ".", "hidden_size", ",", "num_hidden_layers", "=", "config", ".", "num_hidden_layers", ",", "num_attention_heads", "=", "config", ".", "num_attention_heads", ",", "intermediate_size", "=", "config", ".", "intermediate_size", ",", "intermediate_act_fn", "=", "get_activation", "(", "config", ".", "hidden_act", ")", ",", "hidden_dropout_prob", "=", "config", ".", "hidden_dropout_prob", ",", "attention_probs_dropout_prob", "=", "config", ".", 
"attention_probs_dropout_prob", ",", "initializer_range", "=", "config", ".", "initializer_range", ",", "do_return_all_layers", "=", "True", ")", "self", ".", "sequence_output", "=", "self", ".", "all_encoder_layers", "[", "-", "1", "]", "# [batch_size, seq_length, hidden_size]", "# The \"pooler\" converts the encoded sequence tensor of shape", "# [batch_size, seq_length, hidden_size] to a tensor of shape", "# [batch_size, hidden_size]. This is necessary for segment-level", "# (or segment-pair-level) classification tasks where we need a fixed", "# dimensional representation of the segment.", "with", "tf", ".", "variable_scope", "(", "\"pooler\"", ")", ":", "# We \"pool\" the model by simply taking the hidden state corresponding", "# to the first token. We assume that this has been pre-trained", "first_token_tensor", "=", "tf", ".", "squeeze", "(", "self", ".", "sequence_output", "[", ":", ",", "0", ":", "1", ",", ":", "]", ",", "axis", "=", "1", ")", "self", ".", "pooled_output", "=", "tf", ".", "layers", ".", "dense", "(", "first_token_tensor", ",", "config", ".", "hidden_size", ",", "activation", "=", "tf", ".", "tanh", ",", "kernel_initializer", "=", "create_initializer", "(", "config", ".", "initializer_range", ")", ")" ]
https://github.com/brightmart/multi-label_classification/blob/b5febe17eaf9d937d71cabab56c5da48ee68f7b5/bert/modeling.py#L131-L232
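A usage sketch adapted from the upstream class docstring (TF1-style API, as in the code above); the token ids, masks, and config values are illustrative placeholders:

# Inputs already converted into WordPiece token ids (placeholder values).
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])

config = BertConfig(vocab_size=32000, hidden_size=512,
                    num_hidden_layers=8, num_attention_heads=8,
                    intermediate_size=1024)

model = BertModel(config=config, is_training=True, input_ids=input_ids,
                  input_mask=input_mask, token_type_ids=token_type_ids)

pooled_output = model.get_pooled_output()  # [batch_size, hidden_size]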
s3prl/s3prl
185e4b060cd96ce5911e258c2fde74a2e8246308
s3prl/downstream/voxceleb2_amsoftmax_segment_eval/model.py
python
Mean.forward
(self, feature, att_mask)
return torch.stack(agg_vec_list)
Arguments feature - [BxTxD] Acoustic feature with shape att_mask - [BxTx1] Attention Mask logits
def forward(self, feature, att_mask):
    '''
    Arguments
        feature  - [BxTxD]  Acoustic feature with shape
        att_mask - [BxTx1]  Attention Mask logits
    '''
    feature = self.linear(self.act_fn(feature))
    agg_vec_list = []
    for i in range(len(feature)):
        if torch.nonzero(att_mask[i] < 0, as_tuple=False).size(0) == 0:
            length = len(feature[i])
        else:
            length = torch.nonzero(att_mask[i] < 0, as_tuple=False)[0] + 1
        agg_vec = torch.mean(feature[i][:length], dim=0)
        agg_vec_list.append(agg_vec)
    return torch.stack(agg_vec_list)
[ "def", "forward", "(", "self", ",", "feature", ",", "att_mask", ")", ":", "feature", "=", "self", ".", "linear", "(", "self", ".", "act_fn", "(", "feature", ")", ")", "agg_vec_list", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "feature", ")", ")", ":", "if", "torch", ".", "nonzero", "(", "att_mask", "[", "i", "]", "<", "0", ",", "as_tuple", "=", "False", ")", ".", "size", "(", "0", ")", "==", "0", ":", "length", "=", "len", "(", "feature", "[", "i", "]", ")", "else", ":", "length", "=", "torch", ".", "nonzero", "(", "att_mask", "[", "i", "]", "<", "0", ",", "as_tuple", "=", "False", ")", "[", "0", "]", "+", "1", "agg_vec", "=", "torch", ".", "mean", "(", "feature", "[", "i", "]", "[", ":", "length", "]", ",", "dim", "=", "0", ")", "agg_vec_list", ".", "append", "(", "agg_vec", ")", "return", "torch", ".", "stack", "(", "agg_vec_list", ")" ]
https://github.com/s3prl/s3prl/blob/185e4b060cd96ce5911e258c2fde74a2e8246308/s3prl/downstream/voxceleb2_amsoftmax_segment_eval/model.py#L41-L57
usb-tools/Facedancer
e688fe61dc34087db333432394e1f90e52ac3794
facedancer/backends/greatdancer.py
python
GreatDancerApp._is_control_endpoint
(self, endpoint_number)
return endpoint_number == 0
Returns true iff the given endpoint number corresponds to a control endpoint.
def _is_control_endpoint(self, endpoint_number):
    """ Returns true iff the given endpoint number corresponds to a control endpoint. """

    # FIXME: Support control endpoints other than EP0.
    return endpoint_number == 0
[ "def", "_is_control_endpoint", "(", "self", ",", "endpoint_number", ")", ":", "# FIXME: Support control endpoints other than EP0.", "return", "endpoint_number", "==", "0" ]
https://github.com/usb-tools/Facedancer/blob/e688fe61dc34087db333432394e1f90e52ac3794/facedancer/backends/greatdancer.py#L515-L521
lahwaacz/arch-wiki-docs
216a2170262f5b1ee4af3ac9b565fd3be2752df9
ArchWiki/ArchWiki.py
python
ArchWiki.query_continue
(self, query)
Generator for MediaWiki's query-continue feature. ref: https://www.mediawiki.org/wiki/API:Query#Continuing_queries
def query_continue(self, query):
    """ Generator for MediaWiki's query-continue feature.
        ref: https://www.mediawiki.org/wiki/API:Query#Continuing_queries
    """
    last_continue = {"continue": ""}
    while True:
        # clone the original params to clean up old continue params
        query_copy = query.copy()
        # and update with the last continue -- it may involve multiple params,
        # hence the clean up with params.copy()
        query_copy.update(last_continue)
        # call the API and handle the result
        result = self.call(query_copy)
        if "error" in result:
            raise Exception(result["error"])
        if "warnings" in result:
            print(result["warnings"])
        if "query" in result:
            yield result["query"]
        if "continue" not in result:
            break
        last_continue = result["continue"]
[ "def", "query_continue", "(", "self", ",", "query", ")", ":", "last_continue", "=", "{", "\"continue\"", ":", "\"\"", "}", "while", "True", ":", "# clone the original params to clean up old continue params", "query_copy", "=", "query", ".", "copy", "(", ")", "# and update with the last continue -- it may involve multiple params,", "# hence the clean up with params.copy()", "query_copy", ".", "update", "(", "last_continue", ")", "# call the API and handle the result", "result", "=", "self", ".", "call", "(", "query_copy", ")", "if", "\"error\"", "in", "result", ":", "raise", "Exception", "(", "result", "[", "\"error\"", "]", ")", "if", "\"warnings\"", "in", "result", ":", "print", "(", "result", "[", "\"warnings\"", "]", ")", "if", "\"query\"", "in", "result", ":", "yield", "result", "[", "\"query\"", "]", "if", "\"continue\"", "not", "in", "result", ":", "break", "last_continue", "=", "result", "[", "\"continue\"", "]" ]
https://github.com/lahwaacz/arch-wiki-docs/blob/216a2170262f5b1ee4af3ac9b565fd3be2752df9/ArchWiki/ArchWiki.py#L111-L133
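A usage sketch of the generator; the "allpages" layout is the standard MediaWiki API shape, but the ArchWiki construction and its call() plumbing are assumed:

wiki = ArchWiki()   # hypothetical construction
query = {"action": "query", "format": "json",
         "list": "allpages", "aplimit": "max"}
for chunk in wiki.query_continue(query):
    for page in chunk["allpages"]:   # each yielded chunk is result["query"]
        print(page["title"])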
mahmoud/boltons
270e974975984f662f998c8f6eb0ebebd964de82
boltons/cacheutils.py
python
ThresholdCounter.elements
(self)
return itertools.chain.from_iterable(repeaters)
Return an iterator of all the common elements tracked by the counter. Yields each key as many times as it has been seen.
def elements(self):
    """Return an iterator of all the common elements tracked by the
    counter. Yields each key as many times as it has been seen.
    """
    repeaters = itertools.starmap(itertools.repeat, self.iteritems())
    return itertools.chain.from_iterable(repeaters)
[ "def", "elements", "(", "self", ")", ":", "repeaters", "=", "itertools", ".", "starmap", "(", "itertools", ".", "repeat", ",", "self", ".", "iteritems", "(", ")", ")", "return", "itertools", ".", "chain", ".", "from_iterable", "(", "repeaters", ")" ]
https://github.com/mahmoud/boltons/blob/270e974975984f662f998c8f6eb0ebebd964de82/boltons/cacheutils.py#L726-L731
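A small illustration of the documented behavior (counts are made up; ThresholdCounter prunes rare keys over time, so with only a handful of adds both keys still survive):

from boltons.cacheutils import ThresholdCounter

tc = ThresholdCounter(threshold=0.1)
for token in ['a', 'a', 'a', 'b']:
    tc.add(token)
print(sorted(tc.elements()))   # ['a', 'a', 'a', 'b']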
angr/angr
4b04d56ace135018083d36d9083805be8146688b
angr/keyed_region.py
python
KeyedRegion._canonicalize_size
(self, size: Union[int,'UnknownSize'])
return size
def _canonicalize_size(self, size: Union[int, 'UnknownSize']) -> int:
    # delayed import
    from .knowledge_plugins.key_definitions.unknown_size import UnknownSize  # pylint:disable=import-outside-toplevel

    if isinstance(size, UnknownSize):
        return self._canonical_size
    return size
[ "def", "_canonicalize_size", "(", "self", ",", "size", ":", "Union", "[", "int", ",", "'UnknownSize'", "]", ")", "->", "int", ":", "# delayed import", "from", ".", "knowledge_plugins", ".", "key_definitions", ".", "unknown_size", "import", "UnknownSize", "# pylint:disable=import-outside-toplevel", "if", "isinstance", "(", "size", ",", "UnknownSize", ")", ":", "return", "self", ".", "_canonical_size", "return", "size" ]
https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/keyed_region.py#L364-L371
vrnetlab/vrnetlab
28c144042efbf59ae500da6eac3f983ea346ffda
vmx/docker/launch.py
python
VMX_vcp.gen_mgmt
(self)
return res
Generate mgmt interface(s) We override the default function since we want a virtio NIC to the vFPC
Generate mgmt interface(s)
[ "Generate", "mgmt", "interface", "(", "s", ")" ]
def gen_mgmt(self):
    """ Generate mgmt interface(s)

        We override the default function since we want a virtio NIC to the
        vFPC
    """
    # call parent function to generate first mgmt interface (e1000)
    res = super(VMX_vcp, self).gen_mgmt()
    # install mode doesn't need host port forwarding rules. if running in
    # dual-re mode, replace host port forwarding rules for the backup
    # routing engine
    if self.install_mode:
        res[-1] = re.sub(r',hostfwd.*', '', res[-1])
    elif self.dual_re and self.num == 1:
        res[-1] = re.sub(r',hostfwd.*',
                         self.gen_host_forwards(mgmt_ip='10.0.0.16', offset=3000),
                         res[-1])
    if not self.install_mode:
        # add virtio NIC for internal control plane interface to vFPC
        res.append("-device")
        res.append("virtio-net-pci,netdev=%s,mac=%s" %
                   (self._vcp_int, vrnetlab.gen_mac(1)))
        res.append("-netdev")
        res.append("tap,ifname=%(_vcp_int)s,id=%(_vcp_int)s,script=no,downscript=no" %
                   {'_vcp_int': self._vcp_int})
    return res
[ "def", "gen_mgmt", "(", "self", ")", ":", "# call parent function to generate first mgmt interface (e1000)", "res", "=", "super", "(", "VMX_vcp", ",", "self", ")", ".", "gen_mgmt", "(", ")", "# install mode doesn't need host port forwarding rules. if running in", "# dual-re mode, replace host port forwarding rules for the backup", "# routing engine", "if", "self", ".", "install_mode", ":", "res", "[", "-", "1", "]", "=", "re", ".", "sub", "(", "r',hostfwd.*'", ",", "''", ",", "res", "[", "-", "1", "]", ")", "elif", "self", ".", "dual_re", "and", "self", ".", "num", "==", "1", ":", "res", "[", "-", "1", "]", "=", "re", ".", "sub", "(", "r',hostfwd.*'", ",", "self", ".", "gen_host_forwards", "(", "mgmt_ip", "=", "'10.0.0.16'", ",", "offset", "=", "3000", ")", ",", "res", "[", "-", "1", "]", ")", "if", "not", "self", ".", "install_mode", ":", "# add virtio NIC for internal control plane interface to vFPC", "res", ".", "append", "(", "\"-device\"", ")", "res", ".", "append", "(", "\"virtio-net-pci,netdev=%s,mac=%s\"", "%", "(", "self", ".", "_vcp_int", ",", "vrnetlab", ".", "gen_mac", "(", "1", ")", ")", ")", "res", ".", "append", "(", "\"-netdev\"", ")", "res", ".", "append", "(", "\"tap,ifname=%(_vcp_int)s,id=%(_vcp_int)s,script=no,downscript=no\"", "%", "{", "'_vcp_int'", ":", "self", ".", "_vcp_int", "}", ")", "return", "res" ]
https://github.com/vrnetlab/vrnetlab/blob/28c144042efbf59ae500da6eac3f983ea346ffda/vmx/docker/launch.py#L81-L103
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/xapi_virt.py
python
_get_metrics_record
(xapi, rectype, record)
return getattr(xapi, "{}_metrics".format(rectype)).get_record(metrics_id)
Internal, returns metrics record for a rectype
def _get_metrics_record(xapi, rectype, record):
    """
    Internal, returns metrics record for a rectype
    """
    metrics_id = record["metrics"]
    return getattr(xapi, "{}_metrics".format(rectype)).get_record(metrics_id)
[ "def", "_get_metrics_record", "(", "xapi", ",", "rectype", ",", "record", ")", ":", "metrics_id", "=", "record", "[", "\"metrics\"", "]", "return", "getattr", "(", "xapi", ",", "\"{}_metrics\"", ".", "format", "(", "rectype", ")", ")", ".", "get_record", "(", "metrics_id", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/xapi_virt.py#L154-L159
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/mimify.py
python
mimify
(infile, outfile)
Convert 8bit parts of a MIME mail message to quoted-printable.
def mimify(infile, outfile):
    """Convert 8bit parts of a MIME mail message to quoted-printable."""
    if type(infile) == type(''):
        ifile = open(infile)
        if type(outfile) == type('') and infile == outfile:
            import os
            d, f = os.path.split(infile)
            os.rename(infile, os.path.join(d, ',' + f))
    else:
        ifile = infile
    if type(outfile) == type(''):
        ofile = open(outfile, 'w')
    else:
        ofile = outfile
    nifile = File(ifile, None)
    mimify_part(nifile, ofile, 0)
    ofile.flush()
[ "def", "mimify", "(", "infile", ",", "outfile", ")", ":", "if", "type", "(", "infile", ")", "==", "type", "(", "''", ")", ":", "ifile", "=", "open", "(", "infile", ")", "if", "type", "(", "outfile", ")", "==", "type", "(", "''", ")", "and", "infile", "==", "outfile", ":", "import", "os", "d", ",", "f", "=", "os", ".", "path", ".", "split", "(", "infile", ")", "os", ".", "rename", "(", "infile", ",", "os", ".", "path", ".", "join", "(", "d", ",", "','", "+", "f", ")", ")", "else", ":", "ifile", "=", "infile", "if", "type", "(", "outfile", ")", "==", "type", "(", "''", ")", ":", "ofile", "=", "open", "(", "outfile", ",", "'w'", ")", "else", ":", "ofile", "=", "outfile", "nifile", "=", "File", "(", "ifile", ",", "None", ")", "mimify_part", "(", "nifile", ",", "ofile", ",", "0", ")", "ofile", ".", "flush", "(", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/mimify.py#L415-L431
accelero-cloud/appkernel
1be8707f535e9f8ad78ef944f2631b15ce03e8f3
appkernel/reflection.py
python
is_set
(obj)
return type(obj) is set
Helper method to see if the object is a Python set. >>> is_set(set()) True
def is_set(obj):
    """Helper method to see if the object is a Python set.
    >>> is_set(set())
    True
    """
    return type(obj) is set
[ "def", "is_set", "(", "obj", ")", ":", "return", "type", "(", "obj", ")", "is", "set" ]
https://github.com/accelero-cloud/appkernel/blob/1be8707f535e9f8ad78ef944f2631b15ce03e8f3/appkernel/reflection.py#L119-L124
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/numpy-1.10.0.dev0_046311a-py3.3-win-amd64.egg/numpy/core/fromnumeric.py
python
amin
(a, axis=None, out=None, keepdims=False)
Return the minimum of an array or minimum along an axis. Parameters ---------- a : array_like Input data. axis : None or int or tuple of ints, optional Axis or axes along which to operate. By default, flattened input is used. .. versionadded: 1.7.0 If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. Returns ------- amin : ndarray or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``. See Also -------- amax : The maximum value of an array along a given axis, propagating any NaNs. nanmin : The minimum value of an array along a given axis, ignoring any NaNs. minimum : Element-wise minimum of two arrays, propagating any NaNs. fmin : Element-wise minimum of two arrays, ignoring any NaNs. argmin : Return the indices of the minimum values. nanmax, maximum, fmax Notes ----- NaN values are propagated, that is if at least one item is NaN, the corresponding min value will be NaN as well. To ignore NaN values (MATLAB behavior), please use nanmin. Don't use `amin` for element-wise comparison of 2 arrays; when ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than ``amin(a, axis=0)``. Examples -------- >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], [2, 3]]) >>> np.amin(a) # Minimum of the flattened array 0 >>> np.amin(a, axis=0) # Minima along the first axis array([0, 1]) >>> np.amin(a, axis=1) # Minima along the second axis array([0, 2]) >>> b = np.arange(5, dtype=np.float) >>> b[2] = np.NaN >>> np.amin(b) nan >>> np.nanmin(b) 0.0
Return the minimum of an array or minimum along an axis.
[ "Return", "the", "minimum", "of", "an", "array", "or", "minimum", "along", "an", "axis", "." ]
def amin(a, axis=None, out=None, keepdims=False):
    """
    Return the minimum of an array or minimum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to operate.  By default, flattened input is
        used.

        .. versionadded: 1.7.0

        If this is a tuple of ints, the minimum is selected over multiple axes,
        instead of a single axis or all the axes as before.
    out : ndarray, optional
        Alternative output array in which to place the result.  Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    amin : ndarray or scalar
        Minimum of `a`. If `axis` is None, the result is a scalar value.
        If `axis` is given, the result is an array of dimension
        ``a.ndim - 1``.

    See Also
    --------
    amax :
        The maximum value of an array along a given axis, propagating any NaNs.
    nanmin :
        The minimum value of an array along a given axis, ignoring any NaNs.
    minimum :
        Element-wise minimum of two arrays, propagating any NaNs.
    fmin :
        Element-wise minimum of two arrays, ignoring any NaNs.
    argmin :
        Return the indices of the minimum values.

    nanmax, maximum, fmax

    Notes
    -----
    NaN values are propagated, that is if at least one item is NaN, the
    corresponding min value will be NaN as well. To ignore NaN values
    (MATLAB behavior), please use nanmin.

    Don't use `amin` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
    ``amin(a, axis=0)``.

    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> a
    array([[0, 1],
           [2, 3]])
    >>> np.amin(a)           # Minimum of the flattened array
    0
    >>> np.amin(a, axis=0)   # Minima along the first axis
    array([0, 1])
    >>> np.amin(a, axis=1)   # Minima along the second axis
    array([0, 2])

    >>> b = np.arange(5, dtype=np.float)
    >>> b[2] = np.NaN
    >>> np.amin(b)
    nan
    >>> np.nanmin(b)
    0.0

    """
    if type(a) is not mu.ndarray:
        try:
            amin = a.min
        except AttributeError:
            return _methods._amin(a, axis=axis, out=out, keepdims=keepdims)
        # NOTE: Dropping the keepdims parameter
        return amin(axis=axis, out=out)
    else:
        return _methods._amin(a, axis=axis, out=out, keepdims=keepdims)
[ "def", "amin", "(", "a", ",", "axis", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "False", ")", ":", "if", "type", "(", "a", ")", "is", "not", "mu", ".", "ndarray", ":", "try", ":", "amin", "=", "a", ".", "min", "except", "AttributeError", ":", "return", "_methods", ".", "_amin", "(", "a", ",", "axis", "=", "axis", ",", "out", "=", "out", ",", "keepdims", "=", "keepdims", ")", "# NOTE: Dropping the keepdims parameter", "return", "amin", "(", "axis", "=", "axis", ",", "out", "=", "out", ")", "else", ":", "return", "_methods", ".", "_amin", "(", "a", ",", "axis", "=", "axis", ",", "out", "=", "out", ",", "keepdims", "=", "keepdims", ")" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/numpy-1.10.0.dev0_046311a-py3.3-win-amd64.egg/numpy/core/fromnumeric.py#L2252-L2340
mit-han-lab/amc
040d83fe9f0288556c8afe363c190e763425a6fe
lib/utils.py
python
accuracy
(output, target, topk=(1,))
return res + appendices
Computes the precision@k for the specified values of k
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    batch_size = target.size(0)
    num = output.size(1)
    target_topk = []
    appendices = []
    for k in topk:
        if k <= num:
            target_topk.append(k)
        else:
            appendices.append([0.0])
    topk = target_topk
    maxk = max(topk)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res + appendices
[ "def", "accuracy", "(", "output", ",", "target", ",", "topk", "=", "(", "1", ",", ")", ")", ":", "batch_size", "=", "target", ".", "size", "(", "0", ")", "num", "=", "output", ".", "size", "(", "1", ")", "target_topk", "=", "[", "]", "appendices", "=", "[", "]", "for", "k", "in", "topk", ":", "if", "k", "<=", "num", ":", "target_topk", ".", "append", "(", "k", ")", "else", ":", "appendices", ".", "append", "(", "[", "0.0", "]", ")", "topk", "=", "target_topk", "maxk", "=", "max", "(", "topk", ")", "_", ",", "pred", "=", "output", ".", "topk", "(", "maxk", ",", "1", ",", "True", ",", "True", ")", "pred", "=", "pred", ".", "t", "(", ")", "correct", "=", "pred", ".", "eq", "(", "target", ".", "view", "(", "1", ",", "-", "1", ")", ".", "expand_as", "(", "pred", ")", ")", "res", "=", "[", "]", "for", "k", "in", "topk", ":", "correct_k", "=", "correct", "[", ":", "k", "]", ".", "view", "(", "-", "1", ")", ".", "float", "(", ")", ".", "sum", "(", "0", ")", "res", ".", "append", "(", "correct_k", ".", "mul_", "(", "100.0", "/", "batch_size", ")", ")", "return", "res", "+", "appendices" ]
https://github.com/mit-han-lab/amc/blob/040d83fe9f0288556c8afe363c190e763425a6fe/lib/utils.py#L59-L80
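A quick check of the padding behavior when some k exceeds the number of classes (PyTorch assumed; values are illustrative):

import torch

output = torch.tensor([[2.0, 1.0, 0.1],    # 2 samples, 3 classes
                       [0.1, 2.0, 1.0]])
target = torch.tensor([0, 1])
print(accuracy(output, target, topk=(1, 5)))
# k=1 is evaluated normally -> tensor(100.); k=5 > 3 classes, so the
# result is padded with [0.0]: [tensor(100.), [0.0]]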
IronLanguages/ironpython2
51fdedeeda15727717fb8268a805f71b06c0b9f1
Src/StdLib/Lib/decimal.py
python
Decimal.is_subnormal
(self, context=None)
return self.adjusted() < context.Emin
Return True if self is subnormal; otherwise return False.
def is_subnormal(self, context=None):
    """Return True if self is subnormal; otherwise return False."""
    if self._is_special or not self:
        return False
    if context is None:
        context = getcontext()
    return self.adjusted() < context.Emin
[ "def", "is_subnormal", "(", "self", ",", "context", "=", "None", ")", ":", "if", "self", ".", "_is_special", "or", "not", "self", ":", "return", "False", "if", "context", "is", "None", ":", "context", "=", "getcontext", "(", ")", "return", "self", ".", "adjusted", "(", ")", "<", "context", ".", "Emin" ]
https://github.com/IronLanguages/ironpython2/blob/51fdedeeda15727717fb8268a805f71b06c0b9f1/Src/StdLib/Lib/decimal.py#L3055-L3061
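A quick example with an explicit context, since subnormality depends on the context's Emin:

from decimal import Decimal, Context

ctx = Context(Emin=-999, Emax=999)
print(Decimal('1E-1005').is_subnormal(ctx))   # True:  adjusted() = -1005 < Emin
print(Decimal('1E-5').is_subnormal(ctx))      # False: adjusted() = -5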
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/alembic/util/pyfiles.py
python
load_python_file
(dir_, filename)
return module
Load a file from the given path as a Python module.
def load_python_file(dir_, filename):
    """Load a file from the given path as a Python module."""
    module_id = re.sub(r'\W', "_", filename)
    path = os.path.join(dir_, filename)
    _, ext = os.path.splitext(filename)
    if ext == ".py":
        if os.path.exists(path):
            module = load_module_py(module_id, path)
        else:
            pyc_path = pyc_file_from_path(path)
            if pyc_path is None:
                raise ImportError("Can't find Python file %s" % path)
            else:
                module = load_module_pyc(module_id, pyc_path)
    elif ext in (".pyc", ".pyo"):
        module = load_module_pyc(module_id, path)
    return module
[ "def", "load_python_file", "(", "dir_", ",", "filename", ")", ":", "module_id", "=", "re", ".", "sub", "(", "r'\\W'", ",", "\"_\"", ",", "filename", ")", "path", "=", "os", ".", "path", ".", "join", "(", "dir_", ",", "filename", ")", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "ext", "==", "\".py\"", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "module", "=", "load_module_py", "(", "module_id", ",", "path", ")", "else", ":", "pyc_path", "=", "pyc_file_from_path", "(", "path", ")", "if", "pyc_path", "is", "None", ":", "raise", "ImportError", "(", "\"Can't find Python file %s\"", "%", "path", ")", "else", ":", "module", "=", "load_module_pyc", "(", "module_id", ",", "pyc_path", ")", "elif", "ext", "in", "(", "\".pyc\"", ",", "\".pyo\"", ")", ":", "module", "=", "load_module_pyc", "(", "module_id", ",", "path", ")", "return", "module" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/alembic/util/pyfiles.py#L73-L90
carmaa/inception
6c09195f1318ae66010d629b1a86c10524251e26
inception/modules/dump.py
python
calculate
(address, size)
Calculate the start and end memory addresses of the dump
def calculate(address, size):
    '''Calculate the start and end memory addresses of the dump'''
    try:
        # Fix address
        if isinstance(address, int):
            pass
        elif address.startswith('0x'):
            address = int(address, 0) & 0xfffff000  # Address
        elif address.startswith('p'):
            address = int(address[1:]) * cfg.PAGESIZE  # Page number
        else:
            address = int(address)  # Integer

        # Fix size
        try:
            size = util.parse_unit(size)
        except ValueError as e:
            raise InceptionException('Could not parse "{0}" to a valid data '
                                     'size: {1}'.format(size, e))
        if size < cfg.PAGESIZE:
            term.warn('Minimum dump size is a page, {0} KiB'
                      .format(cfg.PAGESIZE // cfg.KiB))
        end = address + size
        return address, end
    except Exception as e:
        raise InceptionException('Could not calculate start and end memory '
                                 'address', e)
[ "def", "calculate", "(", "address", ",", "size", ")", ":", "try", ":", "# Fix address", "if", "isinstance", "(", "address", ",", "int", ")", ":", "pass", "elif", "address", ".", "startswith", "(", "'0x'", ")", ":", "address", "=", "int", "(", "address", ",", "0", ")", "&", "0xfffff000", "# Address", "elif", "address", ".", "startswith", "(", "'p'", ")", ":", "address", "=", "int", "(", "address", "[", "1", ":", "]", ")", "*", "cfg", ".", "PAGESIZE", "# Page number", "else", ":", "address", "=", "int", "(", "address", ")", "# Integer", "# Fix size", "try", ":", "size", "=", "util", ".", "parse_unit", "(", "size", ")", "except", "ValueError", "as", "e", ":", "raise", "InceptionException", "(", "'Could not parse \"{0}\" to a valid data '", "'size: {1}'", ".", "format", "(", "size", ",", "e", ")", ")", "if", "size", "<", "cfg", ".", "PAGESIZE", ":", "term", ".", "warn", "(", "'Minimum dump size is a page, {0} KiB'", ".", "format", "(", "cfg", ".", "PAGESIZE", "//", "cfg", ".", "KiB", ")", ")", "end", "=", "address", "+", "size", "return", "address", ",", "end", "except", "Exception", "as", "e", ":", "raise", "InceptionException", "(", "'Could not calculate start and end memory '", "'address'", ",", "e", ")" ]
https://github.com/carmaa/inception/blob/6c09195f1318ae66010d629b1a86c10524251e26/inception/modules/dump.py#L65-L91
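An illustration of the three accepted address forms, assuming cfg.PAGESIZE == 4096 and util.parse_unit('4KiB') == 4096 (both assumptions about this codebase's config):

calculate(0x2000, '4KiB')     # -> (0x2000, 0x3000): int passes through
calculate('0x2abc', '4KiB')   # -> (0x2000, 0x3000): '0x...' is page-aligned
calculate('p2', '4KiB')       # -> (0x2000, 0x3000): 'pN' means page number N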
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v7/services/services/conversion_adjustment_upload_service/transports/grpc.py
python
ConversionAdjustmentUploadServiceGrpcTransport.create_channel
( cls, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, )
return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, )
Create and return a gRPC channel object. Args: address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): An optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object.
Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object.
[ "Create", "and", "return", "a", "gRPC", "channel", "object", ".", "Args", ":", "address", "(", "Optionsl", "[", "str", "]", ")", ":", "The", "host", "for", "the", "channel", "to", "use", ".", "credentials", "(", "Optional", "[", "~", ".", "Credentials", "]", ")", ":", "The", "authorization", "credentials", "to", "attach", "to", "requests", ".", "These", "credentials", "identify", "this", "application", "to", "the", "service", ".", "If", "none", "are", "specified", "the", "client", "will", "attempt", "to", "ascertain", "the", "credentials", "from", "the", "environment", ".", "scopes", "(", "Optional", "[", "Sequence", "[", "str", "]]", ")", ":", "A", "optional", "list", "of", "scopes", "needed", "for", "this", "service", ".", "These", "are", "only", "used", "when", "credentials", "are", "not", "specified", "and", "are", "passed", "to", ":", "func", ":", "google", ".", "auth", ".", "default", ".", "kwargs", "(", "Optional", "[", "dict", "]", ")", ":", "Keyword", "arguments", "which", "are", "passed", "to", "the", "channel", "creation", ".", "Returns", ":", "grpc", ".", "Channel", ":", "A", "gRPC", "channel", "object", "." ]
def create_channel( cls, host: str = "googleads.googleapis.com", credentials: credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): An optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, )
[ "def", "create_channel", "(", "cls", ",", "host", ":", "str", "=", "\"googleads.googleapis.com\"", ",", "credentials", ":", "credentials", ".", "Credentials", "=", "None", ",", "scopes", ":", "Optional", "[", "Sequence", "[", "str", "]", "]", "=", "None", ",", "*", "*", "kwargs", ",", ")", "->", "grpc", ".", "Channel", ":", "return", "grpc_helpers", ".", "create_channel", "(", "host", ",", "credentials", "=", "credentials", ",", "scopes", "=", "scopes", "or", "cls", ".", "AUTH_SCOPES", ",", "*", "*", "kwargs", ",", ")" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v7/services/services/conversion_adjustment_upload_service/transports/grpc.py#L182-L210
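A hedged usage sketch of the classmethod above. The import path is an assumption derived from the file path in this record, and the call relies on Application Default Credentials being available in the environment:

from google.ads.googleads.v7.services.services.conversion_adjustment_upload_service.transports.grpc import (
    ConversionAdjustmentUploadServiceGrpcTransport,
)

# With no credentials or scopes passed, the defaults above kick in:
# ambient credentials plus the transport's own AUTH_SCOPES.
channel = ConversionAdjustmentUploadServiceGrpcTransport.create_channel(
    host="googleads.googleapis.com",
)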
4shadoww/hakkuframework
409a11fc3819d251f86faa3473439f8c19066a21
lib/future/backports/http/cookiejar.py
python
CookieJar.clear
(self, domain=None, path=None, name=None)
Clear some cookies. Invoking this method without arguments will clear all cookies. If given a single argument, only cookies belonging to that domain will be removed. If given two arguments, cookies belonging to the specified path within that domain are removed. If given three arguments, then the cookie with the specified name, path and domain is removed. Raises KeyError if no matching cookie exists.
Clear some cookies.
[ "Clear", "some", "cookies", "." ]
def clear(self, domain=None, path=None, name=None): """Clear some cookies. Invoking this method without arguments will clear all cookies. If given a single argument, only cookies belonging to that domain will be removed. If given two arguments, cookies belonging to the specified path within that domain are removed. If given three arguments, then the cookie with the specified name, path and domain is removed. Raises KeyError if no matching cookie exists. """ if name is not None: if (domain is None) or (path is None): raise ValueError( "domain and path must be given to remove a cookie by name") del self._cookies[domain][path][name] elif path is not None: if domain is None: raise ValueError( "domain must be given to remove cookies by path") del self._cookies[domain][path] elif domain is not None: del self._cookies[domain] else: self._cookies = {}
[ "def", "clear", "(", "self", ",", "domain", "=", "None", ",", "path", "=", "None", ",", "name", "=", "None", ")", ":", "if", "name", "is", "not", "None", ":", "if", "(", "domain", "is", "None", ")", "or", "(", "path", "is", "None", ")", ":", "raise", "ValueError", "(", "\"domain and path must be given to remove a cookie by name\"", ")", "del", "self", ".", "_cookies", "[", "domain", "]", "[", "path", "]", "[", "name", "]", "elif", "path", "is", "not", "None", ":", "if", "domain", "is", "None", ":", "raise", "ValueError", "(", "\"domain must be given to remove cookies by path\"", ")", "del", "self", ".", "_cookies", "[", "domain", "]", "[", "path", "]", "elif", "domain", "is", "not", "None", ":", "del", "self", ".", "_cookies", "[", "domain", "]", "else", ":", "self", ".", "_cookies", "=", "{", "}" ]
https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/future/backports/http/cookiejar.py#L1671-L1696
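The three calling conventions the docstring describes, sketched against the stdlib http.cookiejar class that this module backports:

from http.cookiejar import CookieJar

jar = CookieJar()
jar.clear()                                   # no args: drop every cookie
# jar.clear("example.com")                    # one domain (KeyError if absent)
# jar.clear("example.com", "/", "sessionid")  # one cookie by domain/path/name
# jar.clear(name="sessionid")                 # ValueError: domain+path required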
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/python-social-auth/social/backends/vk.py
python
VKOAuth2.user_data
(self, access_token, *args, **kwargs)
return data
Loads user data from service
Loads user data from service
[ "Loads", "user", "data", "from", "service" ]
def user_data(self, access_token, *args, **kwargs): """Loads user data from service""" request_data = ['first_name', 'last_name', 'screen_name', 'nickname', 'photo'] + self.setting('EXTRA_DATA', []) fields = ','.join(set(request_data)) data = vk_api(self, 'users.get', { 'access_token': access_token, 'fields': fields, }) if data.get('error'): error = data['error'] msg = error.get('error_msg', 'Unknown error') if error.get('error_code') == 5: raise AuthTokenRevoked(self, msg) else: raise AuthException(self, msg) if data: data = data.get('response')[0] data['user_photo'] = data.get('photo') # Backward compatibility return data
[ "def", "user_data", "(", "self", ",", "access_token", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "request_data", "=", "[", "'first_name'", ",", "'last_name'", ",", "'screen_name'", ",", "'nickname'", ",", "'photo'", "]", "+", "self", ".", "setting", "(", "'EXTRA_DATA'", ",", "[", "]", ")", "fields", "=", "','", ".", "join", "(", "set", "(", "request_data", ")", ")", "data", "=", "vk_api", "(", "self", ",", "'users.get'", ",", "{", "'access_token'", ":", "access_token", ",", "'fields'", ":", "fields", ",", "}", ")", "if", "data", ".", "get", "(", "'error'", ")", ":", "error", "=", "data", "[", "'error'", "]", "msg", "=", "error", ".", "get", "(", "'error_msg'", ",", "'Unknown error'", ")", "if", "error", ".", "get", "(", "'error_code'", ")", "==", "5", ":", "raise", "AuthTokenRevoked", "(", "self", ",", "msg", ")", "else", ":", "raise", "AuthException", "(", "self", ",", "msg", ")", "if", "data", ":", "data", "=", "data", ".", "get", "(", "'response'", ")", "[", "0", "]", "data", "[", "'user_photo'", "]", "=", "data", ".", "get", "(", "'photo'", ")", "# Backward compatibility", "return", "data" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/python-social-auth/social/backends/vk.py#L103-L125
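The error handling above singles out VK's error_code 5 (a revoked or expired token) from every other API error. The same branching in isolation, with stand-in exceptions since AuthTokenRevoked and AuthException belong to the surrounding framework:

def check_vk_response(data):
    error = data.get('error')
    if error:
        msg = error.get('error_msg', 'Unknown error')
        if error.get('error_code') == 5:
            raise PermissionError(msg)  # stand-in for AuthTokenRevoked
        raise RuntimeError(msg)         # stand-in for AuthException
    return data['response'][0]

profile = check_vk_response({'response': [{'first_name': 'Ada'}]})
assert profile['first_name'] == 'Ada'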
naftaliharris/tauthon
5587ceec329b75f7caf6d65a036db61ac1bae214
Lib/lib-tk/turtle.py
python
RawTurtle.end_fill
(self)
Fill the shape drawn after the call begin_fill(). No argument. Example (for a Turtle instance named turtle): >>> turtle.begin_fill() >>> turtle.forward(100) >>> turtle.left(90) >>> turtle.forward(100) >>> turtle.left(90) >>> turtle.forward(100) >>> turtle.left(90) >>> turtle.forward(100) >>> turtle.end_fill()
Fill the shape drawn after the call begin_fill().
[ "Fill", "the", "shape", "drawn", "after", "the", "call", "begin_fill", "()", "." ]
def end_fill(self): """Fill the shape drawn after the call begin_fill(). No argument. Example (for a Turtle instance named turtle): >>> turtle.begin_fill() >>> turtle.forward(100) >>> turtle.left(90) >>> turtle.forward(100) >>> turtle.left(90) >>> turtle.forward(100) >>> turtle.left(90) >>> turtle.forward(100) >>> turtle.end_fill() """ self.fill(False)
[ "def", "end_fill", "(", "self", ")", ":", "self", ".", "fill", "(", "False", ")" ]
https://github.com/naftaliharris/tauthon/blob/5587ceec329b75f7caf6d65a036db61ac1bae214/Lib/lib-tk/turtle.py#L3198-L3214
typemytype/drawbot
b64569bfb352acf3ac54d2a91f0a987985685466
drawBot/drawBotDrawingTools.py
python
DrawBotDrawingTool.cmykStroke
(self, c, m=None, y=None, k=None, alpha=1)
Set a stroke using a CMYK color before drawing a shape. This is handy if the file is intended for print. Sets the CMYK stroke color. Each value must be a float between 0.0 and 1.0. .. downloadcode:: cmykStroke.py # define x, y and the amount of lines needed x, y = 20, 20 lines = 49 # calculate the smallest step colorStep = 1.00 / lines # set stroke width strokeWidth(10) # start a loop for i in range(lines): # set a cmyk color # the magenta value is calculated cmykStroke(0, i * colorStep, 1, 0) # draw a line line((x, y), (x, y + 960)) # translate the canvas translate(20, 0)
Set a stroke using a CMYK color before drawing a shape. This is handy if the file is intended for print.
[ "Set", "a", "stroke", "using", "a", "CMYK", "color", "before", "drawing", "a", "shape", ".", "This", "is", "handy", "if", "the", "file", "is", "intended", "for", "print", "." ]
def cmykStroke(self, c, m=None, y=None, k=None, alpha=1): """ Set a stroke using a CMYK color before drawing a shape. This is handy if the file is intended for print. Sets the CMYK stroke color. Each value must be a float between 0.0 and 1.0. .. downloadcode:: cmykStroke.py # define x, y and the amount of lines needed x, y = 20, 20 lines = 49 # calculate the smallest step colorStep = 1.00 / lines # set stroke width strokeWidth(10) # start a loop for i in range(lines): # set a cmyk color # the magenta value is calculated cmykStroke(0, i * colorStep, 1, 0) # draw a line line((x, y), (x, y + 960)) # translate the canvas translate(20, 0) """ self._requiresNewFirstPage = True self._addInstruction("cmykStroke", c, m, y, k, alpha)
[ "def", "cmykStroke", "(", "self", ",", "c", ",", "m", "=", "None", ",", "y", "=", "None", ",", "k", "=", "None", ",", "alpha", "=", "1", ")", ":", "self", ".", "_requiresNewFirstPage", "=", "True", "self", ".", "_addInstruction", "(", "\"cmykStroke\"", ",", "c", ",", "m", ",", "y", ",", "k", ",", "alpha", ")" ]
https://github.com/typemytype/drawbot/blob/b64569bfb352acf3ac54d2a91f0a987985685466/drawBot/drawBotDrawingTools.py#L897-L923
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/core/data/url/extended_urllib.py
python
ExtendedUrllib._increase_worker_pool_size
(self)
[]
def _increase_worker_pool_size(self): w3af_core = self.get_w3af_core() worker_pool = w3af_core.worker_pool max_workers = w3af_core.MAX_WORKER_THREADS error_rate = self.get_error_rate() # Note that we increase by one here, and decrease by two above new_worker_count = worker_pool.get_worker_count() + 1 new_worker_count = min(new_worker_count, max_workers) if new_worker_count <= max_workers: worker_pool.set_worker_count(new_worker_count) msg = 'Increased the worker pool size to %s (error rate: %i%%)' om.out.debug(msg % (new_worker_count, error_rate)) else: msg = 'Not increasing the worker pool size since it exceeds the max: %s' om.out.debug(msg % max_workers)
[ "def", "_increase_worker_pool_size", "(", "self", ")", ":", "w3af_core", "=", "self", ".", "get_w3af_core", "(", ")", "worker_pool", "=", "w3af_core", ".", "worker_pool", "max_workers", "=", "w3af_core", ".", "MAX_WORKER_THREADS", "error_rate", "=", "self", ".", "get_error_rate", "(", ")", "# Note that we increase by one here, and decrease by two above", "new_worker_count", "=", "worker_pool", ".", "get_worker_count", "(", ")", "+", "1", "new_worker_count", "=", "min", "(", "new_worker_count", ",", "max_workers", ")", "if", "new_worker_count", "<=", "max_workers", ":", "worker_pool", ".", "set_worker_count", "(", "new_worker_count", ")", "msg", "=", "'Increased the worker pool size to %s (error rate: %i%%)'", "om", ".", "out", ".", "debug", "(", "msg", "%", "(", "new_worker_count", ",", "error_rate", ")", ")", "else", ":", "msg", "=", "'Not increasing the worker pool size since it exceeds the max: %s'", "om", ".", "out", ".", "debug", "(", "msg", "%", "max_workers", ")" ]
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/core/data/url/extended_urllib.py#L1003-L1020
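Because min() already caps the value, the else branch above can never run; growth is always by one, clamped at the pool maximum. The growth rule in isolation:

def next_worker_count(current, max_workers):
    # Grow by one, never past the cap (shrinks elsewhere go two at a time).
    return min(current + 1, max_workers)

assert next_worker_count(5, 10) == 6
assert next_worker_count(10, 10) == 10  # already at the cap: unchanged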
inducer/loopy
55143b21711a534c07bbb14aaa63ff3879a93433
loopy/kernel/__init__.py
python
LoopKernel.all_inames
(self)
return frozenset(self.inames.keys())
Returns a :class:`frozenset` of the names of all the inames in the kernel.
Returns a :class:`frozenset` of the names of all the inames in the kernel.
[ "Returns", "a", ":", "class", ":", "frozenset", "of", "the", "names", "of", "all", "the", "inames", "in", "the", "kernel", "." ]
def all_inames(self): """ Returns a :class:`frozenset` of the names of all the inames in the kernel. """ return frozenset(self.inames.keys())
[ "def", "all_inames", "(", "self", ")", ":", "return", "frozenset", "(", "self", ".", "inames", ".", "keys", "(", ")", ")" ]
https://github.com/inducer/loopy/blob/55143b21711a534c07bbb14aaa63ff3879a93433/loopy/kernel/__init__.py#L747-L751
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/nuki/lock.py
python
NukiOpenerEntity.lock
(self, **kwargs)
Disable ring-to-open.
Disable ring-to-open.
[ "Disable", "ring", "-", "to", "-", "open", "." ]
def lock(self, **kwargs): """Disable ring-to-open.""" self._nuki_device.deactivate_rto()
[ "def", "lock", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_nuki_device", ".", "deactivate_rto", "(", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/nuki/lock.py#L148-L150
mahmoud/glom
67cd5a4ed7b21607dfefbafb86d9f93314afd6e1
glom/core.py
python
glom
(target, spec, **kwargs)
return ret
Access or construct a value from a given *target* based on the specification declared by *spec*. Accessing nested data, aka deep-get: >>> target = {'a': {'b': 'c'}} >>> glom(target, 'a.b') 'c' Here the *spec* was just a string denoting a path, ``'a.b'``. As simple as it should be. The next example shows how to use nested data to access many fields at once, and make a new nested structure. Constructing, or restructuring more-complicated nested data: >>> target = {'a': {'b': 'c', 'd': 'e'}, 'f': 'g', 'h': [0, 1, 2]} >>> spec = {'a': 'a.b', 'd': 'a.d', 'h': ('h', [lambda x: x * 2])} >>> output = glom(target, spec) >>> pprint(output) {'a': 'c', 'd': 'e', 'h': [0, 2, 4]} ``glom`` also takes a keyword-argument, *default*. When set, if a ``glom`` operation fails with a :exc:`GlomError`, the *default* will be returned, very much like :meth:`dict.get()`: >>> glom(target, 'a.xx', default='nada') 'nada' The *skip_exc* keyword argument controls which errors should be ignored. >>> glom({}, lambda x: 100.0 / len(x), default=0.0, skip_exc=ZeroDivisionError) 0.0 Args: target (object): the object on which the glom will operate. spec (object): Specification of the output object in the form of a dict, list, tuple, string, other glom construct, or any composition of these. default (object): An optional default to return in the case an exception, specified by *skip_exc*, is raised. skip_exc (Exception): An optional exception or tuple of exceptions to ignore and return *default* (None if omitted). If *skip_exc* and *default* are both not set, glom raises errors through. scope (dict): Additional data that can be accessed via S inside the glom-spec. Read more: :ref:`scope`. It's a small API with big functionality, and glom's power is only surpassed by its intuitiveness. Give it a whirl!
Access or construct a value from a given *target* based on the specification declared by *spec*.
[ "Access", "or", "construct", "a", "value", "from", "a", "given", "*", "target", "*", "based", "on", "the", "specification", "declared", "by", "*", "spec", "*", "." ]
def glom(target, spec, **kwargs): """Access or construct a value from a given *target* based on the specification declared by *spec*. Accessing nested data, aka deep-get: >>> target = {'a': {'b': 'c'}} >>> glom(target, 'a.b') 'c' Here the *spec* was just a string denoting a path, ``'a.b'``. As simple as it should be. The next example shows how to use nested data to access many fields at once, and make a new nested structure. Constructing, or restructuring more-complicated nested data: >>> target = {'a': {'b': 'c', 'd': 'e'}, 'f': 'g', 'h': [0, 1, 2]} >>> spec = {'a': 'a.b', 'd': 'a.d', 'h': ('h', [lambda x: x * 2])} >>> output = glom(target, spec) >>> pprint(output) {'a': 'c', 'd': 'e', 'h': [0, 2, 4]} ``glom`` also takes a keyword-argument, *default*. When set, if a ``glom`` operation fails with a :exc:`GlomError`, the *default* will be returned, very much like :meth:`dict.get()`: >>> glom(target, 'a.xx', default='nada') 'nada' The *skip_exc* keyword argument controls which errors should be ignored. >>> glom({}, lambda x: 100.0 / len(x), default=0.0, skip_exc=ZeroDivisionError) 0.0 Args: target (object): the object on which the glom will operate. spec (object): Specification of the output object in the form of a dict, list, tuple, string, other glom construct, or any composition of these. default (object): An optional default to return in the case an exception, specified by *skip_exc*, is raised. skip_exc (Exception): An optional exception or tuple of exceptions to ignore and return *default* (None if omitted). If *skip_exc* and *default* are both not set, glom raises errors through. scope (dict): Additional data that can be accessed via S inside the glom-spec. Read more: :ref:`scope`. It's a small API with big functionality, and glom's power is only surpassed by its intuitiveness. Give it a whirl! """ # TODO: check spec up front default = kwargs.pop('default', None if 'skip_exc' in kwargs else _MISSING) skip_exc = kwargs.pop('skip_exc', () if default is _MISSING else GlomError) glom_debug = kwargs.pop('glom_debug', GLOM_DEBUG) scope = _DEFAULT_SCOPE.new_child({ Path: kwargs.pop('path', []), Inspect: kwargs.pop('inspector', None), MODE: AUTO, CHILD_ERRORS: [], 'globals': ScopeVars({}, {}), }) scope[UP] = scope scope[ROOT] = scope scope[T] = target scope.update(kwargs.pop('scope', {})) err = None if kwargs: raise TypeError('unexpected keyword args: %r' % sorted(kwargs.keys())) try: try: ret = _glom(target, spec, scope) except skip_exc: if default is _MISSING: raise ret = default except Exception as e: if glom_debug: raise if isinstance(e, GlomError): # need to change id or else py3 seems to not let us truncate the # stack trace with the explicit "raise err" below err = copy.copy(e) err._set_wrapped(e) else: err = GlomError.wrap(e) if isinstance(err, GlomError): err._finalize(scope[LAST_CHILD_SCOPE]) else: # wrapping failed, fall back to default behavior raise if err: raise err return ret
[ "def", "glom", "(", "target", ",", "spec", ",", "*", "*", "kwargs", ")", ":", "# TODO: check spec up front", "default", "=", "kwargs", ".", "pop", "(", "'default'", ",", "None", "if", "'skip_exc'", "in", "kwargs", "else", "_MISSING", ")", "skip_exc", "=", "kwargs", ".", "pop", "(", "'skip_exc'", ",", "(", ")", "if", "default", "is", "_MISSING", "else", "GlomError", ")", "glom_debug", "=", "kwargs", ".", "pop", "(", "'glom_debug'", ",", "GLOM_DEBUG", ")", "scope", "=", "_DEFAULT_SCOPE", ".", "new_child", "(", "{", "Path", ":", "kwargs", ".", "pop", "(", "'path'", ",", "[", "]", ")", ",", "Inspect", ":", "kwargs", ".", "pop", "(", "'inspector'", ",", "None", ")", ",", "MODE", ":", "AUTO", ",", "CHILD_ERRORS", ":", "[", "]", ",", "'globals'", ":", "ScopeVars", "(", "{", "}", ",", "{", "}", ")", ",", "}", ")", "scope", "[", "UP", "]", "=", "scope", "scope", "[", "ROOT", "]", "=", "scope", "scope", "[", "T", "]", "=", "target", "scope", ".", "update", "(", "kwargs", ".", "pop", "(", "'scope'", ",", "{", "}", ")", ")", "err", "=", "None", "if", "kwargs", ":", "raise", "TypeError", "(", "'unexpected keyword args: %r'", "%", "sorted", "(", "kwargs", ".", "keys", "(", ")", ")", ")", "try", ":", "try", ":", "ret", "=", "_glom", "(", "target", ",", "spec", ",", "scope", ")", "except", "skip_exc", ":", "if", "default", "is", "_MISSING", ":", "raise", "ret", "=", "default", "except", "Exception", "as", "e", ":", "if", "glom_debug", ":", "raise", "if", "isinstance", "(", "e", ",", "GlomError", ")", ":", "# need to change id or else py3 seems to not let us truncate the", "# stack trace with the explicit \"raise err\" below", "err", "=", "copy", ".", "copy", "(", "e", ")", "err", ".", "_set_wrapped", "(", "e", ")", "else", ":", "err", "=", "GlomError", ".", "wrap", "(", "e", ")", "if", "isinstance", "(", "err", ",", "GlomError", ")", ":", "err", ".", "_finalize", "(", "scope", "[", "LAST_CHILD_SCOPE", "]", ")", "else", ":", "# wrapping failed, fall back to default behavior", "raise", "if", "err", ":", "raise", "err", "return", "ret" ]
https://github.com/mahmoud/glom/blob/67cd5a4ed7b21607dfefbafb86d9f93314afd6e1/glom/core.py#L2085-L2182
stanford-futuredata/noscope
6c6aa72e09280530dfdcf87871c1ac43df3b6cc3
optimizer/noscope_accuracy.py
python
window_yolo
(frames)
return smooth_indicator(true_indicator)
[]
def window_yolo(frames): true_indicator = np.asarray( map(lambda x: int(x.confidence > YOLO_CONFIDENCE), frames) ) # smooth and window the yolo labels return smooth_indicator(true_indicator)
[ "def", "window_yolo", "(", "frames", ")", ":", "true_indicator", "=", "np", ".", "asarray", "(", "map", "(", "lambda", "x", ":", "int", "(", "x", ".", "confidence", ">", "YOLO_CONFIDENCE", ")", ",", "frames", ")", ")", "# smooth and window the yolo labels", "return", "smooth_indicator", "(", "true_indicator", ")" ]
https://github.com/stanford-futuredata/noscope/blob/6c6aa72e09280530dfdcf87871c1ac43df3b6cc3/optimizer/noscope_accuracy.py#L150-L154
criteo/biggraphite
1f647ada6b3f2b2f3fb4e59d326f73a2c891fc30
biggraphite/utils.py
python
FeatureSwitch.enabled
(self)
Check if the feature is enabled or not.
Check if the feature is enabled or not.
[ "Check", "if", "the", "feature", "is", "enabled", "or", "not", "." ]
def enabled(self): """Check if the feature is enabled or not.""" if self._default_value: return not self._flag.is_set() else: return self._flag.is_set()
[ "def", "enabled", "(", "self", ")", ":", "if", "self", ".", "_default_value", ":", "return", "not", "self", ".", "_flag", ".", "is_set", "(", ")", "else", ":", "return", "self", ".", "_flag", ".", "is_set", "(", ")" ]
https://github.com/criteo/biggraphite/blob/1f647ada6b3f2b2f3fb4e59d326f73a2c891fc30/biggraphite/utils.py#L75-L80
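The flag therefore always means "toggled away from the default", whichever way the default points. A self-contained sketch of that semantics; modelling _flag as a threading.Event is an assumption consistent with the is_set() calls above:

import threading

class FeatureSwitchSketch:
    def __init__(self, default_value):
        self._default_value = default_value
        self._flag = threading.Event()  # set == inverted from the default

    def enabled(self):
        if self._default_value:
            return not self._flag.is_set()
        return self._flag.is_set()

feature = FeatureSwitchSketch(default_value=True)
assert feature.enabled()      # default on
feature._flag.set()           # toggle
assert not feature.enabled()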
awslabs/dgl-ke
e9d4f4916f570d2c9f2e1aa3bdec9196c68120e5
python/dglke/dataloader/sampler.py
python
EvalDataset.get_edges
(self, eval_type)
Get all edges in this dataset Parameters ---------- eval_type : str Sampling type, 'valid' for validation and 'test' for testing Returns ------- np.array Edges
Get all edges in this dataset
[ "Get", "all", "edges", "in", "this", "dataset" ]
def get_edges(self, eval_type): """ Get all edges in this dataset Parameters ---------- eval_type : str Sampling type, 'valid' for validation and 'test' for testing Returns ------- np.array Edges """ if eval_type == 'valid': return self.valid elif eval_type == 'test': return self.test else: raise Exception('get invalid type: ' + eval_type)
[ "def", "get_edges", "(", "self", ",", "eval_type", ")", ":", "if", "eval_type", "==", "'valid'", ":", "return", "self", ".", "valid", "elif", "eval_type", "==", "'test'", ":", "return", "self", ".", "test", "else", ":", "raise", "Exception", "(", "'get invalid type: '", "+", "eval_type", ")" ]
https://github.com/awslabs/dgl-ke/blob/e9d4f4916f570d2c9f2e1aa3bdec9196c68120e5/python/dglke/dataloader/sampler.py#L701-L719
LinkedInAttic/indextank-service
880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e
api/restapi.py
python
__validate_docid
(docid)
Validates that a document id is a string, a unicode, or an int (for backwards compatibility). It can't be empty, nor longer than 1024 bytes. Valid inputs >>> __validate_docid("a") >>> __validate_docid("\xc3\xb1") >>> __validate_docid(u"\xc3\xb1") >>> # for backwards compatibility >>> __validate_docid(123) >>> __validate_docid(0) >>> __validate_docid(-1) Validate length >>> __validate_docid("a"*1024) >>> __validate_docid(u"a"*1024) >>> # 512 2-byte chars are ok >>> __validate_docid("\xc3\xb1"*512) >>> e = __validate_docid("a"*1025) >>> isinstance(e, HttpResponse) True >>> e = __validate_docid(u"\xc3"*1025) >>> isinstance(e, HttpResponse) True >>> # 512 2-byte chars are not ok >>> e = __validate_docid("\xc3\xb1"*513) >>> isinstance(e, HttpResponse) True Validate emptiness >>> e = __validate_docid(" ") >>> isinstance(e, HttpResponse) True >>> e = __validate_docid("") >>> isinstance(e, HttpResponse) True >>> e = __validate_docid(" "*80) >>> isinstance(e, HttpResponse) True Validate not supported types >>> e = __validate_docid(80.0) >>> isinstance(e, HttpResponse) True >>> e = __validate_docid([1,2,3]) >>> isinstance(e, HttpResponse) True >>> e = __validate_docid({"a":"b"}) >>> isinstance(e, HttpResponse) True Validate None >>> e = __validate_docid(None) >>> isinstance(e, HttpResponse) True
Validates that a document id is a string, a unicode, or an int (for backwards compatibility). It can't be empty, nor longer than 1024 bytes. Valid inputs >>> __validate_docid("a") >>> __validate_docid("\xc3\xb1") >>> __validate_docid(u"\xc3\xb1") >>> # for backwards compatibility >>> __validate_docid(123) >>> __validate_docid(0) >>> __validate_docid(-1)
[ "Validates", "that", "a", "document", "id", "is", "a", "string", "a", "unicode", "or", "an", "int", "(", "for", "backwards", "compatibility", ")", ".", "It", "can", "t", "be", "empty", "nor", "longer", "than", "1024", "bytes", ".", "Valid", "inputs", ">>>", "__validate_docid", "(", "a", ")", ">>>", "__validate_docid", "(", "\\", "xc3", "\\", "xb1", ")", ">>>", "__validate_docid", "(", "u", "\\", "xc3", "\\", "xb1", ")", ">>>", "#", "for", "backwards", "compatibility", ">>>", "__validate_docid", "(", "123", ")", ">>>", "__validate_docid", "(", "0", ")", ">>>", "__validate_docid", "(", "-", "1", ")" ]
def __validate_docid(docid): """ Validates that a document id is a string, a unicode, or an int (for backwards compatibility). It can't be empty, nor longer than 1024 bytes. Valid inputs >>> __validate_docid("a") >>> __validate_docid("\xc3\xb1") >>> __validate_docid(u"\xc3\xb1") >>> # for backwards compatibility >>> __validate_docid(123) >>> __validate_docid(0) >>> __validate_docid(-1) Validate length >>> __validate_docid("a"*1024) >>> __validate_docid(u"a"*1024) >>> # 512 2-byte chars are ok >>> __validate_docid("\xc3\xb1"*512) >>> e = __validate_docid("a"*1025) >>> isinstance(e, HttpResponse) True >>> e = __validate_docid(u"\xc3"*1025) >>> isinstance(e, HttpResponse) True >>> # 512 2-byte chars are not ok >>> e = __validate_docid("\xc3\xb1"*513) >>> isinstance(e, HttpResponse) True Validate emptiness >>> e = __validate_docid(" ") >>> isinstance(e, HttpResponse) True >>> e = __validate_docid("") >>> isinstance(e, HttpResponse) True >>> e = __validate_docid(" "*80) >>> isinstance(e, HttpResponse) True Validate not supported types >>> e = __validate_docid(80.0) >>> isinstance(e, HttpResponse) True >>> e = __validate_docid([1,2,3]) >>> isinstance(e, HttpResponse) True >>> e = __validate_docid({"a":"b"}) >>> isinstance(e, HttpResponse) True Validate None >>> e = __validate_docid(None) >>> isinstance(e, HttpResponse) True """ if type(docid) in [int, long]: docid = str(docid) if not type(docid) in [str,unicode]: return HttpResponse('"Invalid docid, it should be a String."', status=400) if docid.strip() == '': return HttpResponse('"Invalid docid, it shouldnt be empty."', status=400) udocid = _encode_utf8(docid) if len(udocid) > 1024: return HttpResponse('"Invalid docid, it shouldnt be longer than 1024 bytes. It was %d"'%len(udocid), status=400)
[ "def", "__validate_docid", "(", "docid", ")", ":", "if", "type", "(", "docid", ")", "in", "[", "int", ",", "long", "]", ":", "docid", "=", "str", "(", "docid", ")", "if", "not", "type", "(", "docid", ")", "in", "[", "str", ",", "unicode", "]", ":", "return", "HttpResponse", "(", "'\"Invalid docid, it should be a String.\"'", ",", "status", "=", "400", ")", "if", "docid", ".", "strip", "(", ")", "==", "''", ":", "return", "HttpResponse", "(", "'\"Invalid docid, it shouldnt be empty.\"'", ",", "status", "=", "400", ")", "udocid", "=", "_encode_utf8", "(", "docid", ")", "if", "len", "(", "udocid", ")", ">", "1024", ":", "return", "HttpResponse", "(", "'\"Invalid docid, it shouldnt be longer than 1024 bytes. It was %d\"'", "%", "len", "(", "udocid", ")", ",", "status", "=", "400", ")" ]
https://github.com/LinkedInAttic/indextank-service/blob/880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e/api/restapi.py#L45-L114
shaohua0116/Group-Normalization-Tensorflow
551511c939fc5733a61e2505cc03774a6d224547
datasets/fashion_mnist.py
python
Dataset.ids
(self)
return self._ids
[]
def ids(self): return self._ids
[ "def", "ids", "(", "self", ")", ":", "return", "self", ".", "_ids" ]
https://github.com/shaohua0116/Group-Normalization-Tensorflow/blob/551511c939fc5733a61e2505cc03774a6d224547/datasets/fashion_mnist.py#L45-L46
goace/personal-file-sharing-center
4a5b903b003f2db1306e77c5e51b6660fc5dbc6a
web/browser.py
python
Browser.get_forms
(self)
return self._forms
Returns all forms in the current document. The returned form objects implement the ClientForm.HTMLForm interface.
Returns all forms in the current document. The returned form objects implement the ClientForm.HTMLForm interface.
[ "Returns", "all", "forms", "in", "the", "current", "document", ".", "The", "returned", "form", "objects", "implement", "the", "ClientForm", ".", "HTMLForm", "interface", "." ]
def get_forms(self): """Returns all forms in the current document. The returned form objects implement the ClientForm.HTMLForm interface. """ if self._forms is None: import ClientForm self._forms = ClientForm.ParseResponse(self.get_response(), backwards_compat=False) return self._forms
[ "def", "get_forms", "(", "self", ")", ":", "if", "self", ".", "_forms", "is", "None", ":", "import", "ClientForm", "self", ".", "_forms", "=", "ClientForm", ".", "ParseResponse", "(", "self", ".", "get_response", "(", ")", ",", "backwards_compat", "=", "False", ")", "return", "self", ".", "_forms" ]
https://github.com/goace/personal-file-sharing-center/blob/4a5b903b003f2db1306e77c5e51b6660fc5dbc6a/web/browser.py#L144-L151
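A hedged usage sketch from the Python 2-era stack this module targets; Browser and its open() method are assumed to come from the same web.browser module, and the returned objects follow the ClientForm.HTMLForm interface named in the docstring:

b = Browser()
b.open('http://example.com/login')
form = b.get_forms()[0]       # a ClientForm.HTMLForm
form['username'] = 'alice'    # HTMLForm fields are addressable by name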
XX-net/XX-Net
a9898cfcf0084195fb7e69b6bc834e59aecdf14f
python3.8.2/Lib/site-packages/pip/_vendor/requests/models.py
python
Response.raise_for_status
(self)
Raises stored :class:`HTTPError`, if one occurred.
Raises stored :class:`HTTPError`, if one occurred.
[ "Raises", "stored", ":", "class", ":", "HTTPError", "if", "one", "occurred", "." ]
def raise_for_status(self): """Raises stored :class:`HTTPError`, if one occurred.""" http_error_msg = '' if isinstance(self.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. (See PR #3538) try: reason = self.reason.decode('utf-8') except UnicodeDecodeError: reason = self.reason.decode('iso-8859-1') else: reason = self.reason if 400 <= self.status_code < 500: http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) elif 500 <= self.status_code < 600: http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) if http_error_msg: raise HTTPError(http_error_msg, response=self)
[ "def", "raise_for_status", "(", "self", ")", ":", "http_error_msg", "=", "''", "if", "isinstance", "(", "self", ".", "reason", ",", "bytes", ")", ":", "# We attempt to decode utf-8 first because some servers", "# choose to localize their reason strings. If the string", "# isn't utf-8, we fall back to iso-8859-1 for all other", "# encodings. (See PR #3538)", "try", ":", "reason", "=", "self", ".", "reason", ".", "decode", "(", "'utf-8'", ")", "except", "UnicodeDecodeError", ":", "reason", "=", "self", ".", "reason", ".", "decode", "(", "'iso-8859-1'", ")", "else", ":", "reason", "=", "self", ".", "reason", "if", "400", "<=", "self", ".", "status_code", "<", "500", ":", "http_error_msg", "=", "u'%s Client Error: %s for url: %s'", "%", "(", "self", ".", "status_code", ",", "reason", ",", "self", ".", "url", ")", "elif", "500", "<=", "self", ".", "status_code", "<", "600", ":", "http_error_msg", "=", "u'%s Server Error: %s for url: %s'", "%", "(", "self", ".", "status_code", ",", "reason", ",", "self", ".", "url", ")", "if", "http_error_msg", ":", "raise", "HTTPError", "(", "http_error_msg", ",", "response", "=", "self", ")" ]
https://github.com/XX-net/XX-Net/blob/a9898cfcf0084195fb7e69b6bc834e59aecdf14f/python3.8.2/Lib/site-packages/pip/_vendor/requests/models.py#L917-L940
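Typical use through the public requests API; the URL is purely illustrative:

import requests

resp = requests.get("https://example.com/missing-page")
try:
    resp.raise_for_status()
except requests.HTTPError as exc:
    print(exc)  # e.g. "404 Client Error: Not Found for url: ..."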
inasafe/inasafe
355eb2ce63f516b9c26af0c86a24f99e53f63f87
safe/gui/tools/peta_bencana_dialog.py
python
PetaBencanaDialog.accept
(self)
Do PetaBencana download and display it in QGIS. .. versionadded: 3.3
Do PetaBencana download and display it in QGIS.
[ "Do", "PetaBencana", "download", "and", "display", "it", "in", "QGIS", "." ]
def accept(self): """Do PetaBencana download and display it in QGIS. .. versionadded: 3.3 """ self.save_state() try: self.require_directory() except CanceledImportDialogError: return QgsApplication.instance().setOverrideCursor( QtGui.QCursor(QtCore.Qt.WaitCursor) ) source = self.define_url() # save the file as json first name = 'jakarta_flood.json' output_directory = self.output_directory.text() output_prefix = self.filename_prefix.text() overwrite = self.overwrite_flag.isChecked() date_stamp_flag = self.include_date_flag.isChecked() output_base_file_path = self.get_output_base_path( output_directory, output_prefix, date_stamp_flag, name, overwrite) title = self.tr("Can't access API") try: self.download(source, output_base_file_path) # Open downloaded file as QgsMapLayer options = QgsVectorLayer.LayerOptions(False) layer = QgsVectorLayer( output_base_file_path, 'flood', 'ogr', options) except Exception as e: disable_busy_cursor() QMessageBox.critical(self, title, str(e)) return self.time_stamp = time.strftime('%d-%b-%Y %H:%M:%S') # Now save as shp name = 'jakarta_flood.shp' output_base_file_path = self.get_output_base_path( output_directory, output_prefix, date_stamp_flag, name, overwrite) QgsVectorFileWriter.writeAsVectorFormat( layer, output_base_file_path, 'CP1250', QgsCoordinateTransform(), 'ESRI Shapefile') # Get rid of the GeoJSON layer and rather use local shp del layer self.copy_style(output_base_file_path) self.copy_keywords(output_base_file_path) layer = self.add_flooded_field(output_base_file_path) # check if the layer has feature or not if layer.featureCount() <= 0: city = self.city_combo_box.currentText() message = self.tr( 'There are no floods data available on {city} ' 'at this time.').format(city=city) display_warning_message_box( self, self.tr('No data'), message) disable_busy_cursor() else: # add the layer to the map project = QgsProject.instance() project.addMapLayer(layer) disable_busy_cursor() self.done(QDialog.Accepted)
[ "def", "accept", "(", "self", ")", ":", "self", ".", "save_state", "(", ")", "try", ":", "self", ".", "require_directory", "(", ")", "except", "CanceledImportDialogError", ":", "return", "QgsApplication", ".", "instance", "(", ")", ".", "setOverrideCursor", "(", "QtGui", ".", "QCursor", "(", "QtCore", ".", "Qt", ".", "WaitCursor", ")", ")", "source", "=", "self", ".", "define_url", "(", ")", "# save the file as json first", "name", "=", "'jakarta_flood.json'", "output_directory", "=", "self", ".", "output_directory", ".", "text", "(", ")", "output_prefix", "=", "self", ".", "filename_prefix", ".", "text", "(", ")", "overwrite", "=", "self", ".", "overwrite_flag", ".", "isChecked", "(", ")", "date_stamp_flag", "=", "self", ".", "include_date_flag", ".", "isChecked", "(", ")", "output_base_file_path", "=", "self", ".", "get_output_base_path", "(", "output_directory", ",", "output_prefix", ",", "date_stamp_flag", ",", "name", ",", "overwrite", ")", "title", "=", "self", ".", "tr", "(", "\"Can't access API\"", ")", "try", ":", "self", ".", "download", "(", "source", ",", "output_base_file_path", ")", "# Open downloaded file as QgsMapLayer", "options", "=", "QgsVectorLayer", ".", "LayerOptions", "(", "False", ")", "layer", "=", "QgsVectorLayer", "(", "output_base_file_path", ",", "'flood'", ",", "'ogr'", ",", "options", ")", "except", "Exception", "as", "e", ":", "disable_busy_cursor", "(", ")", "QMessageBox", ".", "critical", "(", "self", ",", "title", ",", "str", "(", "e", ")", ")", "return", "self", ".", "time_stamp", "=", "time", ".", "strftime", "(", "'%d-%b-%Y %H:%M:%S'", ")", "# Now save as shp", "name", "=", "'jakarta_flood.shp'", "output_base_file_path", "=", "self", ".", "get_output_base_path", "(", "output_directory", ",", "output_prefix", ",", "date_stamp_flag", ",", "name", ",", "overwrite", ")", "QgsVectorFileWriter", ".", "writeAsVectorFormat", "(", "layer", ",", "output_base_file_path", ",", "'CP1250'", ",", "QgsCoordinateTransform", "(", ")", ",", "'ESRI Shapefile'", ")", "# Get rid of the GeoJSON layer and rather use local shp", "del", "layer", "self", ".", "copy_style", "(", "output_base_file_path", ")", "self", ".", "copy_keywords", "(", "output_base_file_path", ")", "layer", "=", "self", ".", "add_flooded_field", "(", "output_base_file_path", ")", "# check if the layer has feature or not", "if", "layer", ".", "featureCount", "(", ")", "<=", "0", ":", "city", "=", "self", ".", "city_combo_box", ".", "currentText", "(", ")", "message", "=", "self", ".", "tr", "(", "'There are no floods data available on {city} '", "'at this time.'", ")", ".", "format", "(", "city", "=", "city", ")", "display_warning_message_box", "(", "self", ",", "self", ".", "tr", "(", "'No data'", ")", ",", "message", ")", "disable_busy_cursor", "(", ")", "else", ":", "# add the layer to the map", "project", "=", "QgsProject", ".", "instance", "(", ")", "project", ".", "addMapLayer", "(", "layer", ")", "disable_busy_cursor", "(", ")", "self", ".", "done", "(", "QDialog", ".", "Accepted", ")" ]
https://github.com/inasafe/inasafe/blob/355eb2ce63f516b9c26af0c86a24f99e53f63f87/safe/gui/tools/peta_bencana_dialog.py#L205-L288
duerrp/pyexperiment
c426565d870d944bd5b9712629d8f1ba2527c67f
pyexperiment/Logger.py
python
MPRotLogHandler.__init__
(self, filename, level=logging.DEBUG, no_backups=0)
Initializer
Initializer
[ "Initializer" ]
def __init__(self, filename, level=logging.DEBUG, no_backups=0): """Initializer """ # Init base class super(MPRotLogHandler, self).__init__(level=level) # Check if we need to roll_over later roll_over_file = os.path.isfile(filename) # Prepare the formatter file_formatter = ColorFormatter( FILE_FORMAT, False) # Setup the actual handler for the log files self._file_handler = logging.handlers.RotatingFileHandler( filename=filename, backupCount=no_backups) self._file_handler.setLevel(level) self.setFormatter(file_formatter) if roll_over_file: self._file_handler.doRollover() # Emit messages in the main process self._delegate_emit = DelegateCall(self._file_handler.emit)
[ "def", "__init__", "(", "self", ",", "filename", ",", "level", "=", "logging", ".", "DEBUG", ",", "no_backups", "=", "0", ")", ":", "# Init base class", "super", "(", "MPRotLogHandler", ",", "self", ")", ".", "__init__", "(", "level", "=", "level", ")", "# Check if we need to roll_over later", "roll_over_file", "=", "os", ".", "path", ".", "isfile", "(", "filename", ")", "# Prepare the formatter", "file_formatter", "=", "ColorFormatter", "(", "FILE_FORMAT", ",", "False", ")", "# Setup the actual handler for the log files", "self", ".", "_file_handler", "=", "logging", ".", "handlers", ".", "RotatingFileHandler", "(", "filename", "=", "filename", ",", "backupCount", "=", "no_backups", ")", "self", ".", "_file_handler", ".", "setLevel", "(", "level", ")", "self", ".", "setFormatter", "(", "file_formatter", ")", "if", "roll_over_file", ":", "self", ".", "_file_handler", ".", "doRollover", "(", ")", "# Emit messages in the main process", "self", ".", "_delegate_emit", "=", "DelegateCall", "(", "self", ".", "_file_handler", ".", "emit", ")" ]
https://github.com/duerrp/pyexperiment/blob/c426565d870d944bd5b9712629d8f1ba2527c67f/pyexperiment/Logger.py#L110-L136
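The roll-over-if-the-file-already-exists trick works with the stdlib handler on its own; a minimal sketch without the multiprocessing delegation or color formatting:

import logging.handlers
import os

filename = "experiment.log"            # illustrative path
had_previous_run = os.path.isfile(filename)
handler = logging.handlers.RotatingFileHandler(filename, backupCount=3)
if had_previous_run:
    handler.doRollover()               # keep the old log as a numbered backup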
rhinstaller/anaconda
63edc8680f1b05cbfe11bef28703acba808c5174
pyanaconda/ui/tui/spokes/language_support.py
python
LangSpoke.refresh
(self, args=None)
args is None if we want a list of languages; or, it is a list of all locales for a language.
args is None if we want a list of languages; or, it is a list of all locales for a language.
[ "args", "is", "None", "if", "we", "want", "a", "list", "of", "languages", ";", "or", "it", "is", "a", "list", "of", "all", "locales", "for", "a", "language", "." ]
def refresh(self, args=None): """ args is None if we want a list of languages; or, it is a list of all locales for a language. """ NormalTUISpoke.refresh(self, args) self._container = ListColumnContainer(3) if args: self.window.add(TextWidget(_("Available locales"))) for locale in args: widget = TextWidget(localization.get_english_name(locale)) self._container.add(widget, self._set_locales_callback, locale) else: self.window.add(TextWidget(_("Available languages"))) for lang in self._langs: langs_and_locales = self._langs_and_locales[lang] locales = self._locales[langs_and_locales] self._container.add(TextWidget(lang), self._show_locales_callback, locales) self.window.add_with_separator(self._container)
[ "def", "refresh", "(", "self", ",", "args", "=", "None", ")", ":", "NormalTUISpoke", ".", "refresh", "(", "self", ",", "args", ")", "self", ".", "_container", "=", "ListColumnContainer", "(", "3", ")", "if", "args", ":", "self", ".", "window", ".", "add", "(", "TextWidget", "(", "_", "(", "\"Available locales\"", ")", ")", ")", "for", "locale", "in", "args", ":", "widget", "=", "TextWidget", "(", "localization", ".", "get_english_name", "(", "locale", ")", ")", "self", ".", "_container", ".", "add", "(", "widget", ",", "self", ".", "_set_locales_callback", ",", "locale", ")", "else", ":", "self", ".", "window", ".", "add", "(", "TextWidget", "(", "_", "(", "\"Available languages\"", ")", ")", ")", "for", "lang", "in", "self", ".", "_langs", ":", "langs_and_locales", "=", "self", ".", "_langs_and_locales", "[", "lang", "]", "locales", "=", "self", ".", "_locales", "[", "langs_and_locales", "]", "self", ".", "_container", ".", "add", "(", "TextWidget", "(", "lang", ")", ",", "self", ".", "_show_locales_callback", ",", "locales", ")", "self", ".", "window", ".", "add_with_separator", "(", "self", ".", "_container", ")" ]
https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/ui/tui/spokes/language_support.py#L98-L119
leancloud/satori
701caccbd4fe45765001ca60435c0cb499477c03
satori-rules/plugin/libs/requests/packages/urllib3/util/connection.py
python
create_connection
(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None)
Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default.
Connect to *address* and return the socket object.
[ "Connect", "to", "*", "address", "*", "and", "return", "the", "socket", "object", "." ]
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. """ host, port = address if host.startswith('['): host = host.strip('[]') err = None for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) # If provided, set socket level options before connecting. # This is the only addition urllib3 makes to this function. _set_socket_options(sock, socket_options) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except socket.error as e: err = e if sock is not None: sock.close() sock = None if err is not None: raise err raise socket.error("getaddrinfo returns an empty list")
[ "def", "create_connection", "(", "address", ",", "timeout", "=", "socket", ".", "_GLOBAL_DEFAULT_TIMEOUT", ",", "source_address", "=", "None", ",", "socket_options", "=", "None", ")", ":", "host", ",", "port", "=", "address", "if", "host", ".", "startswith", "(", "'['", ")", ":", "host", "=", "host", ".", "strip", "(", "'[]'", ")", "err", "=", "None", "for", "res", "in", "socket", ".", "getaddrinfo", "(", "host", ",", "port", ",", "0", ",", "socket", ".", "SOCK_STREAM", ")", ":", "af", ",", "socktype", ",", "proto", ",", "canonname", ",", "sa", "=", "res", "sock", "=", "None", "try", ":", "sock", "=", "socket", ".", "socket", "(", "af", ",", "socktype", ",", "proto", ")", "# If provided, set socket level options before connecting.", "# This is the only addition urllib3 makes to this function.", "_set_socket_options", "(", "sock", ",", "socket_options", ")", "if", "timeout", "is", "not", "socket", ".", "_GLOBAL_DEFAULT_TIMEOUT", ":", "sock", ".", "settimeout", "(", "timeout", ")", "if", "source_address", ":", "sock", ".", "bind", "(", "source_address", ")", "sock", ".", "connect", "(", "sa", ")", "return", "sock", "except", "socket", ".", "error", "as", "e", ":", "err", "=", "e", "if", "sock", "is", "not", "None", ":", "sock", ".", "close", "(", ")", "sock", "=", "None", "if", "err", "is", "not", "None", ":", "raise", "err", "raise", "socket", ".", "error", "(", "\"getaddrinfo returns an empty list\"", ")" ]
https://github.com/leancloud/satori/blob/701caccbd4fe45765001ca60435c0cb499477c03/satori-rules/plugin/libs/requests/packages/urllib3/util/connection.py#L49-L93
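A hedged usage sketch of urllib3's public helper, showing socket_options applied before connect (the one addition the code comment above calls out):

import socket
from urllib3.util.connection import create_connection

sock = create_connection(
    ("example.com", 80),
    timeout=5.0,
    socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
)
sock.close()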
hhursev/recipe-scrapers
478b9ddb0dda02b17b14f299eea729bef8131aa9
recipe_scrapers/hostthetoast.py
python
Hostthetoast.yields
(self)
return self.schema.yields()
[]
def yields(self): return self.schema.yields()
[ "def", "yields", "(", "self", ")", ":", "return", "self", ".", "schema", ".", "yields", "(", ")" ]
https://github.com/hhursev/recipe-scrapers/blob/478b9ddb0dda02b17b14f299eea729bef8131aa9/recipe_scrapers/hostthetoast.py#L15-L16
pazz/alot
52f11f089df19cf336ad0983368e880dc5364149
alot/account.py
python
Address.__cmp
(self, other, comparitor)
return (comparitor(username, ouser) and comparitor(self.domainname.lower(), odomain.lower()))
Shared helper for rich comparison operators. This allows the comparison operators to be relatively simple and share the complex logic. If the username is not considered case sensitive then lower the username of both self and the other, and handle that the other can be either another :class:`~alot.account.Address`, or a `str` instance. :param other: The other address to compare against :type other: str or ~alot.account.Address :param callable comparitor: A function with a signature (str, str) -> bool that will compare the two instances. The intention is to use functions from the operator module.
Shared helper for rich comparison operators.
[ "Shared", "helper", "for", "rich", "comparison", "operators", "." ]
def __cmp(self, other, comparitor): """Shared helper for rich comparison operators. This allows the comparison operators to be relatively simple and share the complex logic. If the username is not considered case sensitive then lower the username of both self and the other, and handle that the other can be either another :class:`~alot.account.Address`, or a `str` instance. :param other: The other address to compare against :type other: str or ~alot.account.Address :param callable comparitor: A function with a signature (str, str) -> bool that will compare the two instances. The intention is to use functions from the operator module. """ if isinstance(other, str): try: ouser, odomain = other.split('@') except ValueError: ouser, odomain = '', '' else: ouser = other.username odomain = other.domainname if not self.case_sensitive: ouser = ouser.lower() username = self.username.lower() else: username = self.username return (comparitor(username, ouser) and comparitor(self.domainname.lower(), odomain.lower()))
[ "def", "__cmp", "(", "self", ",", "other", ",", "comparitor", ")", ":", "if", "isinstance", "(", "other", ",", "str", ")", ":", "try", ":", "ouser", ",", "odomain", "=", "other", ".", "split", "(", "'@'", ")", "except", "ValueError", ":", "ouser", ",", "odomain", "=", "''", ",", "''", "else", ":", "ouser", "=", "other", ".", "username", "odomain", "=", "other", ".", "domainname", "if", "not", "self", ".", "case_sensitive", ":", "ouser", "=", "ouser", ".", "lower", "(", ")", "username", "=", "self", ".", "username", ".", "lower", "(", ")", "else", ":", "username", "=", "self", ".", "username", "return", "(", "comparitor", "(", "username", ",", "ouser", ")", "and", "comparitor", "(", "self", ".", "domainname", ".", "lower", "(", ")", ",", "odomain", ".", "lower", "(", ")", ")", ")" ]
https://github.com/pazz/alot/blob/52f11f089df19cf336ad0983368e880dc5364149/alot/account.py#L104-L136
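The rule in isolation: the domain part always compares case-insensitively, the local part only when the address is not marked case sensitive. A standalone sketch of the equality case (plain == standing in for the comparitor argument):

def addresses_equal(a, b, case_sensitive=False):
    auser, adomain = a.split('@')
    buser, bdomain = b.split('@')
    if not case_sensitive:
        auser, buser = auser.lower(), buser.lower()
    return auser == buser and adomain.lower() == bdomain.lower()

assert addresses_equal('Foo@Example.COM', 'foo@example.com')
assert not addresses_equal('Foo@example.com', 'foo@example.com',
                           case_sensitive=True)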
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/flux_led/switch.py
python
async_setup_entry
( hass: HomeAssistant, entry: config_entries.ConfigEntry, async_add_entities: AddEntitiesCallback, )
Set up the Flux lights.
Set up the Flux lights.
[ "Set", "up", "the", "Flux", "lights", "." ]
async def async_setup_entry( hass: HomeAssistant, entry: config_entries.ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the Flux lights.""" coordinator: FluxLedUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] entities: list[FluxSwitch | FluxRemoteAccessSwitch | FluxMusicSwitch] = [] unique_id = entry.unique_id name = entry.data[CONF_NAME] if coordinator.device.device_type == DeviceType.Switch: entities.append(FluxSwitch(coordinator, unique_id, name, None)) if entry.data.get(CONF_REMOTE_ACCESS_HOST): entities.append(FluxRemoteAccessSwitch(coordinator.device, entry)) if coordinator.device.microphone: entities.append( FluxMusicSwitch(coordinator, unique_id, f"{name} Music", "music") ) if entities: async_add_entities(entities)
[ "async", "def", "async_setup_entry", "(", "hass", ":", "HomeAssistant", ",", "entry", ":", "config_entries", ".", "ConfigEntry", ",", "async_add_entities", ":", "AddEntitiesCallback", ",", ")", "->", "None", ":", "coordinator", ":", "FluxLedUpdateCoordinator", "=", "hass", ".", "data", "[", "DOMAIN", "]", "[", "entry", ".", "entry_id", "]", "entities", ":", "list", "[", "FluxSwitch", "|", "FluxRemoteAccessSwitch", "|", "FluxMusicSwitch", "]", "=", "[", "]", "unique_id", "=", "entry", ".", "unique_id", "name", "=", "entry", ".", "data", "[", "CONF_NAME", "]", "if", "coordinator", ".", "device", ".", "device_type", "==", "DeviceType", ".", "Switch", ":", "entities", ".", "append", "(", "FluxSwitch", "(", "coordinator", ",", "unique_id", ",", "name", ",", "None", ")", ")", "if", "entry", ".", "data", ".", "get", "(", "CONF_REMOTE_ACCESS_HOST", ")", ":", "entities", ".", "append", "(", "FluxRemoteAccessSwitch", "(", "coordinator", ".", "device", ",", "entry", ")", ")", "if", "coordinator", ".", "device", ".", "microphone", ":", "entities", ".", "append", "(", "FluxMusicSwitch", "(", "coordinator", ",", "unique_id", ",", "f\"{name} Music\"", ",", "\"music\"", ")", ")", "if", "entities", ":", "async_add_entities", "(", "entities", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/flux_led/switch.py#L29-L52
glumpy/glumpy
46a7635c08d3a200478397edbe0371a6c59cd9d7
glumpy/transforms/quantitative_scale.py
python
QuantitativeScale.__init__
(self, code, *args, **kwargs)
Initialize the transform.
Initialize the transform.
[ "Initialize", "the", "transform", "." ]
def __init__(self, code, *args, **kwargs): """ Initialize the transform. """ self._clamp = False self._discard = True self._domain = np.array([-1,+1], dtype=np.float32) self._range = np.array([-1,+1], dtype=np.float32) self.process_kwargs(**kwargs) Transform.__init__(self, code, *args, **kwargs)
[ "def", "__init__", "(", "self", ",", "code", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_clamp", "=", "False", "self", ".", "_discard", "=", "True", "self", ".", "_domain", "=", "np", ".", "array", "(", "[", "-", "1", ",", "+", "1", "]", ",", "dtype", "=", "np", ".", "float32", ")", "self", ".", "_range", "=", "np", ".", "array", "(", "[", "-", "1", ",", "+", "1", "]", ",", "dtype", "=", "np", ".", "float32", ")", "self", ".", "process_kwargs", "(", "*", "*", "kwargs", ")", "Transform", ".", "__init__", "(", "self", ",", "code", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/glumpy/glumpy/blob/46a7635c08d3a200478397edbe0371a6c59cd9d7/glumpy/transforms/quantitative_scale.py#L30-L41
fcwu/desktop-mirror
f81c6bff0d022ee404c7adfacc9cbb2e1db788b5
lib/pybonjour.py
python
DNSServiceConstructFullName
( service = None, regtype = _NO_DEFAULT, domain = _NO_DEFAULT, )
return fullName.value.decode('utf-8')
Concatenate a three-part domain name (as returned by a callback function) into a properly-escaped full domain name. Note that callback functions already escape strings where necessary. service: The service name; any dots or backslashes must NOT be escaped. May be None (to construct a PTR record name, e.g. "_ftp._tcp.apple.com."). regtype: The service type followed by the protocol, separated by a dot (e.g. "_ftp._tcp"). domain: The domain name, e.g. "apple.com.". Literal dots or backslashes, if any, must be escaped, e.g. "1st\. Floor.apple.com." return value: The resulting full domain name.
[]
def DNSServiceConstructFullName( service = None, regtype = _NO_DEFAULT, domain = _NO_DEFAULT, ): """ Concatenate a three-part domain name (as returned by a callback function) into a properly-escaped full domain name. Note that callback functions already escape strings where necessary. service: The service name; any dots or backslashes must NOT be escaped. May be None (to construct a PTR record name, e.g. "_ftp._tcp.apple.com."). regtype: The service type followed by the protocol, separated by a dot (e.g. "_ftp._tcp"). domain: The domain name, e.g. "apple.com.". Literal dots or backslashes, if any, must be escaped, e.g. "1st\. Floor.apple.com." return value: The resulting full domain name. """ _NO_DEFAULT.check(regtype) _NO_DEFAULT.check(domain) _global_lock.acquire() try: fullName = _DNSServiceConstructFullName(service, regtype, domain) finally: _global_lock.release() return fullName.value.decode('utf-8')
[ "def", "DNSServiceConstructFullName", "(", "service", "=", "None", ",", "regtype", "=", "_NO_DEFAULT", ",", "domain", "=", "_NO_DEFAULT", ",", ")", ":", "_NO_DEFAULT", ".", "check", "(", "regtype", ")", "_NO_DEFAULT", ".", "check", "(", "domain", ")", "_global_lock", ".", "acquire", "(", ")", "try", ":", "fullName", "=", "_DNSServiceConstructFullName", "(", "service", ",", "regtype", ",", "domain", ")", "finally", ":", "_global_lock", ".", "release", "(", ")", "return", "fullName", ".", "value", ".", "decode", "(", "'utf-8'", ")" ]
https://github.com/fcwu/desktop-mirror/blob/f81c6bff0d022ee404c7adfacc9cbb2e1db788b5/lib/pybonjour.py#L1857-L1897
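A hedged usage sketch; it needs pybonjour plus a running Bonjour/Avahi daemon, and the output in the comment mirrors the escaping example from the docstring itself:

full = DNSServiceConstructFullName(
    service='1st. Floor',   # literal dot: must NOT be pre-escaped
    regtype='_ftp._tcp',
    domain='local.',
)
# e.g. '1st\. Floor._ftp._tcp.local.' -- the helper escapes the dot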
maraoz/proofofexistence
10703675824e989f59a8d36fd8c06394e71a2c25
babel/core.py
python
Locale.languages
(self)
return self._data['languages']
Mapping of language codes to translated language names. >>> Locale('de', 'DE').languages['ja'] u'Japanisch' See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for more information.
Mapping of language codes to translated language names.
[ "Mapping", "of", "language", "codes", "to", "translated", "language", "names", "." ]
def languages(self): """Mapping of language codes to translated language names. >>> Locale('de', 'DE').languages['ja'] u'Japanisch' See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for more information. """ return self._data['languages']
[ "def", "languages", "(", "self", ")", ":", "return", "self", ".", "_data", "[", "'languages'", "]" ]
https://github.com/maraoz/proofofexistence/blob/10703675824e989f59a8d36fd8c06394e71a2c25/babel/core.py#L454-L463
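A minimal sketch of the mapping described above, assuming Babel's CLDR locale data is available:

from babel import Locale

de = Locale('de', 'DE')
# display names of other languages, keyed by ISO 639 language code
print(de.languages['ja'])  # Japanisch
print(de.languages['en'])  # Englisch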
MegEngine/Models
4c55d28bad03652a4e352bf5e736a75df041d84a
official/vision/detection/layers/basic/functional.py
python
safelog
(x, eps=None)
return F.log(F.maximum(x, eps))
[]
def safelog(x, eps=None): if eps is None: eps = np.finfo(x.dtype).eps return F.log(F.maximum(x, eps))
[ "def", "safelog", "(", "x", ",", "eps", "=", "None", ")", ":", "if", "eps", "is", "None", ":", "eps", "=", "np", ".", "finfo", "(", "x", ".", "dtype", ")", ".", "eps", "return", "F", ".", "log", "(", "F", ".", "maximum", "(", "x", ",", "eps", ")", ")" ]
https://github.com/MegEngine/Models/blob/4c55d28bad03652a4e352bf5e736a75df041d84a/official/vision/detection/layers/basic/functional.py#L53-L56
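The clamping idea above, shown as an illustrative NumPy analogue (a sketch of the same logic, not MegEngine code):

import numpy as np

def safelog_np(x, eps=None):
    # Clamp to the dtype's machine epsilon so log never sees zero
    x = np.asarray(x, dtype=np.float32)
    if eps is None:
        eps = np.finfo(x.dtype).eps
    return np.log(np.maximum(x, eps))

print(safelog_np([0.0, 1.0]))  # finite result instead of -inf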
PatrickLib/captcha_recognize
e4ed7a1513ddcf3b2aec7620f4d68e7c90ddc5a4
captcha_model.py
python
inputs
(train, batch_size)
return captcha_input.inputs(train, batch_size=batch_size)
[]
def inputs(train, batch_size): return captcha_input.inputs(train, batch_size=batch_size)
[ "def", "inputs", "(", "train", ",", "batch_size", ")", ":", "return", "captcha_input", ".", "inputs", "(", "train", ",", "batch_size", "=", "batch_size", ")" ]
https://github.com/PatrickLib/captcha_recognize/blob/e4ed7a1513ddcf3b2aec7620f4d68e7c90ddc5a4/captcha_model.py#L14-L15
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/modular/modsym/boundary.py
python
BoundarySpaceElement._sub_
(self, other)
return BoundarySpaceElement(self.parent(), z)
Return self - other. Assumes that other is a BoundarySpaceElement. EXAMPLES:: sage: B = ModularSymbols(Gamma1(16), 4).boundary_space() sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16)) sage: x - y # indirect doctest [2/7] - [13/16] sage: x - x # indirect doctest 0
Return self - other. Assumes that other is a BoundarySpaceElement.
[ "Return", "self", "-", "other", ".", "Assumes", "that", "other", "is", "a", "BoundarySpaceElement", "." ]
def _sub_(self, other): """ Return self - other. Assumes that other is a BoundarySpaceElement. EXAMPLES:: sage: B = ModularSymbols(Gamma1(16), 4).boundary_space() sage: x = B(Cusp(2/7)) ; y = B(Cusp(13/16)) sage: x - y # indirect doctest [2/7] - [13/16] sage: x - x # indirect doctest 0 """ z = dict(self.__x) for i, c in other.__x.items(): if i in z: z[i] -= c else: z[i] = -c return BoundarySpaceElement(self.parent(), z)
[ "def", "_sub_", "(", "self", ",", "other", ")", ":", "z", "=", "dict", "(", "self", ".", "__x", ")", "for", "i", ",", "c", "in", "other", ".", "__x", ".", "items", "(", ")", ":", "if", "i", "in", "z", ":", "z", "[", "i", "]", "-=", "c", "else", ":", "z", "[", "i", "]", "=", "-", "c", "return", "BoundarySpaceElement", "(", "self", ".", "parent", "(", ")", ",", "z", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/modular/modsym/boundary.py#L201-L220
NervanaSystems/ngraph-python
ac032c83c7152b615a9ad129d54d350f9d6a2986
ngraph/op_graph/op_graph.py
python
InputOp.__init__
(self, axes, aeon_cfg, label, session_id, *args, **kwargs)
Arguments: axes : of fake data to generate aeon_cfg : aeon configuration passed in as a string Return:
Arguments: axes : of fake data to generate aeon_cfg : aeon configuration passed in as a string Return:
[ "Arguments", ":", "axes", ":", "of", "fake", "data", "to", "generate", "aeon_cfg", ":", "aeon", "configuration", "passed", "in", "as", "a", "string", "Return", ":" ]
def __init__(self, axes, aeon_cfg, label, session_id, *args, **kwargs): """ Arguments: axes : of fake data to generate aeon_cfg : aeon configuration passed in as a string Return: """ super(InputOp, self).__init__( axes=axes, *args, **kwargs ) self._is_input = True self.aeon_cfg = aeon_cfg self.label = label self.session_id = session_id
[ "def", "__init__", "(", "self", ",", "axes", ",", "aeon_cfg", ",", "label", ",", "session_id", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "InputOp", ",", "self", ")", ".", "__init__", "(", "axes", "=", "axes", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_is_input", "=", "True", "self", ".", "aeon_cfg", "=", "aeon_cfg", "self", ".", "label", "=", "label", "self", ".", "session_id", "=", "session_id" ]
https://github.com/NervanaSystems/ngraph-python/blob/ac032c83c7152b615a9ad129d54d350f9d6a2986/ngraph/op_graph/op_graph.py#L2822-L2836
rowliny/DiffHelper
ab3a96f58f9579d0023aed9ebd785f4edf26f8af
Tool/SitePackages/nltk/tag/sequential.py
python
ContextTagger.size
(self)
return len(self._context_to_tag)
:return: The number of entries in the table used by this tagger to map from contexts to tags.
:return: The number of entries in the table used by this tagger to map from contexts to tags.
[ ":", "return", ":", "The", "number", "of", "entries", "in", "the", "table", "used", "by", "this", "tagger", "to", "map", "from", "contexts", "to", "tags", "." ]
def size(self): """ :return: The number of entries in the table used by this tagger to map from contexts to tags. """ return len(self._context_to_tag)
[ "def", "size", "(", "self", ")", ":", "return", "len", "(", "self", ".", "_context_to_tag", ")" ]
https://github.com/rowliny/DiffHelper/blob/ab3a96f58f9579d0023aed9ebd785f4edf26f8af/Tool/SitePackages/nltk/tag/sequential.py#L142-L147
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/extern/graph_editor/subgraph.py
python
make_view
(*args, **kwargs)
return _check_graph(sgv, graph)
Create a SubGraphView from selected operations and passthrough tensors. Args: *args: list of 1) regular expressions (compiled or not) or 2) (array of) `tf.Operation` 3) (array of) `tf.Tensor`. Those objects will be converted into a list of operations and a list of candidates for passthrough tensors. **kwargs: keyword graph is used 1) to check that the ops and ts are from the correct graph 2) for regular expression query Returns: A subgraph view. Raises: TypeError: if the optional keyword argument graph is not a `tf.Graph` or if an argument in args is not an (array of) `tf.Tensor` or an (array of) `tf.Operation` or a string or a regular expression. ValueError: if one of the keyword arguments is unexpected.
Create a SubGraphView from selected operations and passthrough tensors.
[ "Create", "a", "SubGraphView", "from", "selected", "operations", "and", "passthrough", "tensors", "." ]
def make_view(*args, **kwargs): """Create a SubGraphView from selected operations and passthrough tensors. Args: *args: list of 1) regular expressions (compiled or not) or 2) (array of) `tf.Operation` 3) (array of) `tf.Tensor`. Those objects will be converted into a list of operations and a list of candidates for passthrough tensors. **kwargs: keyword graph is used 1) to check that the ops and ts are from the correct graph 2) for regular expression query Returns: A subgraph view. Raises: TypeError: if the optional keyword argument graph is not a `tf.Graph` or if an argument in args is not an (array of) `tf.Tensor` or an (array of) `tf.Operation` or a string or a regular expression. ValueError: if one of the keyword arguments is unexpected. """ # get keywords arguments graph = kwargs["graph"] if "graph" in kwargs else None # already a view? if len(args) == 1 and isinstance(args[0], SubGraphView): return _check_graph(args[0], graph) ops, ts = select.select_ops_and_ts(*args, **kwargs) sgv = SubGraphView(ops, ts) return _check_graph(sgv, graph)
[ "def", "make_view", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# get keywords arguments", "graph", "=", "kwargs", "[", "\"graph\"", "]", "if", "\"graph\"", "in", "kwargs", "else", "None", "# already a view?", "if", "len", "(", "args", ")", "==", "1", "and", "isinstance", "(", "args", "[", "0", "]", ",", "SubGraphView", ")", ":", "return", "_check_graph", "(", "args", "[", "0", "]", ",", "graph", ")", "ops", ",", "ts", "=", "select", ".", "select_ops_and_ts", "(", "*", "args", ",", "*", "*", "kwargs", ")", "sgv", "=", "SubGraphView", "(", "ops", ",", "ts", ")", "return", "_check_graph", "(", "sgv", ",", "graph", ")" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/extern/graph_editor/subgraph.py#L629-L655
etetoolkit/ete
2b207357dc2a40ccad7bfd8f54964472c72e4726
ete3/phylomedb/phylomeDB3.py
python
PhylomeDB3Connector.get_external_ids
(self, ids)
return external_ids
Returns all the external IDs registered in the 'external_id' table that are associated to the input phylomeDB IDs
Returns all the external IDs registered in the 'external_id' table that are associated to the input phylomeDB IDs
[ "Returns", "all", "the", "external", "IDs", "registered", "in", "the", "external_id", "table", "that", "are", "associated", "to", "the", "input", "phylomeDB", "IDs" ]
def get_external_ids(self, ids): """ Returns all the external IDs registered in the 'external_id' table that are associated to the input phylomeDB IDs """ ids = self.__parser_ids__(ids) cmd = 'SELECT DISTINCT CONCAT("Phy", p.protid, "_", s.code) as protid, ' cmd += 'external_db AS db, external_id AS id FROM protein AS p, species AS ' cmd += 's, external_id AS ex WHERE p.protid IN (%s) AND p.taxid = ' % (ids) cmd += 's.taxid AND p.protid = ex.protid' external_ids = {} if self.__execute__(cmd): for row in self._SQL.fetchall(): external_ids.setdefault(row["protid"], {}) external_ids[row["protid"]].setdefault(row["db"], set()).add(row["id"]) for protid in external_ids: for key in external_ids[protid]: external_ids[protid][key] = list(external_ids[protid][key]) return external_ids
[ "def", "get_external_ids", "(", "self", ",", "ids", ")", ":", "ids", "=", "self", ".", "__parser_ids__", "(", "ids", ")", "cmd", "=", "'SELECT DISTINCT CONCAT(\"Phy\", p.protid, \"_\", s.code) as protid, '", "cmd", "+=", "'external_db AS db, external_id AS id FROM protein AS p, species AS '", "cmd", "+=", "'s, external_id AS ex WHERE p.protid IN (%s) AND p.taxid = '", "%", "(", "ids", ")", "cmd", "+=", "'s.taxid AND p.protid = ex.protid'", "external_ids", "=", "{", "}", "if", "self", ".", "__execute__", "(", "cmd", ")", ":", "for", "row", "in", "self", ".", "_SQL", ".", "fetchall", "(", ")", ":", "external_ids", ".", "setdefault", "(", "row", "[", "\"protid\"", "]", ",", "{", "}", ")", "external_ids", "[", "row", "[", "\"protid\"", "]", "]", ".", "setdefault", "(", "row", "[", "\"db\"", "]", ",", "set", "(", ")", ")", ".", "add", "(", "row", "[", "\"id\"", "]", ")", "for", "protid", "in", "external_ids", ":", "for", "key", "in", "external_ids", "[", "protid", "]", ":", "external_ids", "[", "protid", "]", "[", "key", "]", "=", "list", "(", "external_ids", "[", "protid", "]", "[", "key", "]", ")", "return", "external_ids" ]
https://github.com/etetoolkit/ete/blob/2b207357dc2a40ccad7bfd8f54964472c72e4726/ete3/phylomedb/phylomeDB3.py#L401-L422
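A sketch of the return shape described above; the protein and database IDs below are invented:

# result: {phylomeDB protein id: {external db name: [external ids]}}
# e.g. connector.get_external_ids('Phy0000001_HUMAN') might return
# {'Phy0000001_HUMAN': {'Ensembl': ['ENSP00000354687'], 'UniProt': ['P12345']}}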
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/artist.py
python
Artist.set_clip_on
(self, b)
Set whether artist uses clipping. When False artists will be visible outside of the axes which can lead to unexpected results. Parameters ---------- b : bool
Set whether artist uses clipping.
[ "Set", "whether", "artist", "uses", "clipping", "." ]
def set_clip_on(self, b): """ Set whether artist uses clipping. When False artists will be visible outside of the axes which can lead to unexpected results. Parameters ---------- b : bool """ self._clipon = b # This may result in the callbacks being hit twice, but ensures they # are hit at least once self.pchanged() self.stale = True
[ "def", "set_clip_on", "(", "self", ",", "b", ")", ":", "self", ".", "_clipon", "=", "b", "# This may result in the callbacks being hit twice, but ensures they", "# are hit at least once", "self", ".", "pchanged", "(", ")", "self", ".", "stale", "=", "True" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/artist.py#L765-L780
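What disabling clipping looks like in practice (standard Matplotlib usage, arbitrary data):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line, = ax.plot([0, 2], [0, 2])
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
line.set_clip_on(False)  # the line now draws past the axes bounds
fig.savefig('unclipped.png')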
openvinotoolkit/training_extensions
e7aa33af94a1f8004d3ea2df259d99234dfca046
ote_sdk/ote_sdk/usecases/tasks/interfaces/optimization_interface.py
python
IOptimizationTask.optimize
( self, optimization_type: OptimizationType, dataset: DatasetEntity, output_model: ModelEntity, optimization_parameters: Optional[OptimizationParameters], )
This method defines the interface for optimization. :param optimization_type: The type of optimization :param dataset: Optional dataset which may be used as part of the optimization process :param output_model: Output model :param optimization_parameters: Additional optimization parameters
This method defines the interface for optimization.
[ "This", "method", "defines", "the", "interface", "for", "optimization", "." ]
def optimize( self, optimization_type: OptimizationType, dataset: DatasetEntity, output_model: ModelEntity, optimization_parameters: Optional[OptimizationParameters], ): """ This method defines the interface for optimization. :param optimization_type: The type of optimization :param dataset: Optional dataset which may be used as part of the optimization process :param output_model: Output model :param optimization_parameters: Additional optimization parameters """ raise NotImplementedError
[ "def", "optimize", "(", "self", ",", "optimization_type", ":", "OptimizationType", ",", "dataset", ":", "DatasetEntity", ",", "output_model", ":", "ModelEntity", ",", "optimization_parameters", ":", "Optional", "[", "OptimizationParameters", "]", ",", ")", ":", "raise", "NotImplementedError" ]
https://github.com/openvinotoolkit/training_extensions/blob/e7aa33af94a1f8004d3ea2df259d99234dfca046/ote_sdk/ote_sdk/usecases/tasks/interfaces/optimization_interface.py#L42-L57
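A minimal sketch of implementing the interface above; the subclass, its body, and the OptimizationType.POT check are assumptions for illustration, not OTE code:

class PotOptimizationTask(IOptimizationTask):
    def optimize(self, optimization_type, dataset, output_model, optimization_parameters):
        if optimization_type != OptimizationType.POT:  # hypothetical enum member check
            raise RuntimeError('only POT optimization is supported here')
        # ... quantize using `dataset`, then write weights into `output_model` ...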
stanfordnlp/stanza-old
920c55d8eaa1e7105971059c66eb448a74c100d6
stanza/nlp/data.py
python
Entity.sentence
(self)
Returns the referring sentence
Returns the referring sentence
[ "Returns", "the", "referring", "sentence" ]
def sentence(self): """Returns the referring sentence""" pass
[ "def", "sentence", "(", "self", ")", ":", "pass" ]
https://github.com/stanfordnlp/stanza-old/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/nlp/data.py#L28-L30
perseas/Pyrseas
957860839c3b0047293a7a4fa982b1a2b85c7eb4
pyrseas/dbobject/constraint.py
python
PrimaryKey.to_map
(self, db, dbcols)
return {self.name: dct}
Convert a primary key definition to a YAML-suitable format :param dbcols: dictionary of dbobject columns :return: dictionary
Convert a primary key definition to a YAML-suitable format
[ "Convert", "a", "primary", "key", "definition", "to", "a", "YAML", "-", "suitable", "format" ]
def to_map(self, db, dbcols): """Convert a primary key definition to a YAML-suitable format :param dbcols: dictionary of dbobject columns :return: dictionary """ dct = super(PrimaryKey, self).to_map(db) if self.access_method == 'btree': dct.pop('access_method') for attr in ('inherited', 'deferrable', 'deferred', 'cluster'): if getattr(self, attr) is False: dct.pop(attr) if self.tablespace is None: dct.pop('tablespace') if '_table' in dct: del dct['_table'] dct['columns'] = [dbcols[k - 1] for k in self.columns] return {self.name: dct}
[ "def", "to_map", "(", "self", ",", "db", ",", "dbcols", ")", ":", "dct", "=", "super", "(", "PrimaryKey", ",", "self", ")", ".", "to_map", "(", "db", ")", "if", "self", ".", "access_method", "==", "'btree'", ":", "dct", ".", "pop", "(", "'access_method'", ")", "for", "attr", "in", "(", "'inherited'", ",", "'deferrable'", ",", "'deferred'", ",", "'cluster'", ")", ":", "if", "getattr", "(", "self", ",", "attr", ")", "is", "False", ":", "dct", ".", "pop", "(", "attr", ")", "if", "self", ".", "tablespace", "is", "None", ":", "dct", ".", "pop", "(", "'tablespace'", ")", "if", "'_table'", "in", "dct", ":", "del", "dct", "[", "'_table'", "]", "dct", "[", "'columns'", "]", "=", "[", "dbcols", "[", "k", "-", "1", "]", "for", "k", "in", "self", ".", "columns", "]", "return", "{", "self", ".", "name", ":", "dct", "}" ]
https://github.com/perseas/Pyrseas/blob/957860839c3b0047293a7a4fa982b1a2b85c7eb4/pyrseas/dbobject/constraint.py#L306-L323
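A worked sketch of the column lookup above: self.columns stores 1-based attribute numbers, so dbcols[k - 1] maps them back to names (data invented):

dbcols = ['id', 'name', 'created_at']
columns = [1, 3]  # 1-based attnums, as in the constraint definition
print([dbcols[k - 1] for k in columns])  # ['id', 'created_at']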
inpanel/inpanel
be53d86a72e30dd5476780ed5ba334315a23004b
lib/pxssh.py
python
pxssh.synch_original_prompt
(self)
return False
This attempts to find the prompt. Basically, press enter and record the response; press enter again and record the response; if the two responses are similar then assume we are at the original prompt.
This attempts to find the prompt. Basically, press enter and record the response; press enter again and record the response; if the two responses are similar then assume we are at the original prompt.
[ "This", "attempts", "to", "find", "the", "prompt", ".", "Basically", "press", "enter", "and", "record", "the", "response", ";", "press", "enter", "again", "and", "record", "the", "response", ";", "if", "the", "two", "responses", "are", "similar", "then", "assume", "we", "are", "at", "the", "original", "prompt", "." ]
def synch_original_prompt (self): """This attempts to find the prompt. Basically, press enter and record the response; press enter again and record the response; if the two responses are similar then assume we are at the original prompt. """ # All of these timing pace values are magic. # I came up with these based on what seemed reliable for # connecting to a heavily loaded machine I have. # If latency is worse than these values then this will fail. # bug fix REF: http://python.6.n6.nabble.com/read-nonblocking-error-in-pxssh-td1330216.html self.sendline() time.sleep(0.5) self.read_nonblocking(size=10000,timeout=1) # GAS: Clear out the cache before getting the prompt time.sleep(0.1) self.sendline() time.sleep(0.5) x = self.read_nonblocking(size=1000,timeout=1) time.sleep(0.1) self.sendline() time.sleep(0.5) a = self.read_nonblocking(size=1000,timeout=1) time.sleep(0.1) self.sendline() time.sleep(0.5) b = self.read_nonblocking(size=1000,timeout=1) ld = self.levenshtein_distance(a,b) len_a = len(a) if len_a == 0: return False if float(ld)/len_a < 0.4: return True return False
[ "def", "synch_original_prompt", "(", "self", ")", ":", "# All of these timing pace values are magic.", "# I came up with these based on what seemed reliable for", "# connecting to a heavily loaded machine I have.", "# If latency is worse than these values then this will fail.", "# bug fix REF: http://python.6.n6.nabble.com/read-nonblocking-error-in-pxssh-td1330216.html", "self", ".", "sendline", "(", ")", "time", ".", "sleep", "(", "0.5", ")", "self", ".", "read_nonblocking", "(", "size", "=", "10000", ",", "timeout", "=", "1", ")", "# GAS: Clear out the cache before getting the prompt", "time", ".", "sleep", "(", "0.1", ")", "self", ".", "sendline", "(", ")", "time", ".", "sleep", "(", "0.5", ")", "x", "=", "self", ".", "read_nonblocking", "(", "size", "=", "1000", ",", "timeout", "=", "1", ")", "time", ".", "sleep", "(", "0.1", ")", "self", ".", "sendline", "(", ")", "time", ".", "sleep", "(", "0.5", ")", "a", "=", "self", ".", "read_nonblocking", "(", "size", "=", "1000", ",", "timeout", "=", "1", ")", "time", ".", "sleep", "(", "0.1", ")", "self", ".", "sendline", "(", ")", "time", ".", "sleep", "(", "0.5", ")", "b", "=", "self", ".", "read_nonblocking", "(", "size", "=", "1000", ",", "timeout", "=", "1", ")", "ld", "=", "self", ".", "levenshtein_distance", "(", "a", ",", "b", ")", "len_a", "=", "len", "(", "a", ")", "if", "len_a", "==", "0", ":", "return", "False", "if", "float", "(", "ld", ")", "/", "len_a", "<", "0.4", ":", "return", "True", "return", "False" ]
https://github.com/inpanel/inpanel/blob/be53d86a72e30dd5476780ed5ba334315a23004b/lib/pxssh.py#L123-L156
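The decision rule at the end of the function above, isolated as a sketch (pxssh's levenshtein_distance helper is re-implemented here for illustration):

def levenshtein(a, b):
    # Classic Wagner-Fischer edit distance
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

a, b = 'user@host:~$ ', 'user@host:~$  '
print(levenshtein(a, b) / len(a) < 0.4)  # True -> treat both reads as the prompt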
XingangPan/Switchable-Whitening
dc8a9947ee27285ab123db1f152e18959e0e0861
utils/distributed_utils.py
python
average_gradients
(model)
average gradients
average gradients
[ "average", "gradients" ]
def average_gradients(model): """ average gradients """ for param in model.parameters(): if param.requires_grad: dist.all_reduce(param.grad.data)
[ "def", "average_gradients", "(", "model", ")", ":", "for", "param", "in", "model", ".", "parameters", "(", ")", ":", "if", "param", ".", "requires_grad", ":", "dist", ".", "all_reduce", "(", "param", ".", "grad", ".", "data", ")" ]
https://github.com/XingangPan/Switchable-Whitening/blob/dc8a9947ee27285ab123db1f152e18959e0e0861/utils/distributed_utils.py#L22-L26
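Note that dist.all_reduce defaults to a sum, so the function above sums gradients across ranks; a common variant divides by the world size to get a true average. A hedged sketch of that variant:

import torch.distributed as dist

def average_gradients_mean(model):
    world_size = dist.get_world_size()
    for param in model.parameters():
        if param.requires_grad and param.grad is not None:
            dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
            param.grad.data /= world_size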
kirthevasank/nasbot
3c745dc986be30e3721087c8fa768099032a0802
opt/domains.py
python
NNDomain._rand_ga_maximise
(self, obj, num_evals)
Maximise over the space of neural networks via rand_ga.
Maximise over the space of neural networks via rand_ga.
[ "Maximise", "over", "the", "space", "of", "neural", "networks", "via", "rand_ga", "." ]
def _rand_ga_maximise(self, obj, num_evals): """ Maximise over the space of neural networks via rand_ga. """ raise NotImplementedError('Not implemented rand_ga for NNDomain yet.')
[ "def", "_rand_ga_maximise", "(", "self", ",", "obj", ",", "num_evals", ")", ":", "raise", "NotImplementedError", "(", "'Not implemented rand_ga for NNDomain yet.'", ")" ]
https://github.com/kirthevasank/nasbot/blob/3c745dc986be30e3721087c8fa768099032a0802/opt/domains.py#L165-L167
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/cinder/cinder/quota.py
python
DbQuotaDriver.destroy_all_by_project
(self, context, project_id)
Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted.
Destroy all quotas, usages, and reservations associated with a project.
[ "Destroy", "all", "quotas", "usages", "and", "reservations", "associated", "with", "a", "project", "." ]
def destroy_all_by_project(self, context, project_id): """ Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ db.quota_destroy_all_by_project(context, project_id)
[ "def", "destroy_all_by_project", "(", "self", ",", "context", ",", "project_id", ")", ":", "db", ".", "quota_destroy_all_by_project", "(", "context", ",", "project_id", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/cinder/cinder/quota.py#L359-L368
apple/ccs-calendarserver
13c706b985fb728b9aab42dc0fef85aae21921c3
twistedcaldav/directory/addressbook.py
python
DirectoryAddressBookHomeTypeProvisioningResource.principalForRecord
(self, record)
return self._parent.principalForRecord(record)
[]
def principalForRecord(self, record): return self._parent.principalForRecord(record)
[ "def", "principalForRecord", "(", "self", ",", "record", ")", ":", "return", "self", ".", "_parent", ".", "principalForRecord", "(", "record", ")" ]
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/twistedcaldav/directory/addressbook.py#L227-L228
programa-stic/barf-project
9547ef843b8eb021c2c32c140e36173c0b4eafa3
barf/arch/arm/parser.py
python
parse_instruction
(string, location, tokens)
return instr
Parse an ARM instruction.
Parse an ARM instruction.
[ "Parse", "an", "ARM", "instruction", "." ]
def parse_instruction(string, location, tokens): """Parse an ARM instruction. """ mnemonic_str = tokens.get("mnemonic") operands = [op for op in tokens.get("operands", [])] instr = ArmInstruction( string, mnemonic_str["ins"], operands, arch_info.architecture_mode ) if "cc" in mnemonic_str: instr.condition_code = cc_mapper[mnemonic_str["cc"]] if "uf" in mnemonic_str: instr.update_flags = True if "ldm_stm_addr_mode" in mnemonic_str: instr.ldm_stm_addr_mode = ldm_stm_am_mapper[mnemonic_str["ldm_stm_addr_mode"]] return instr
[ "def", "parse_instruction", "(", "string", ",", "location", ",", "tokens", ")", ":", "mnemonic_str", "=", "tokens", ".", "get", "(", "\"mnemonic\"", ")", "operands", "=", "[", "op", "for", "op", "in", "tokens", ".", "get", "(", "\"operands\"", ",", "[", "]", ")", "]", "instr", "=", "ArmInstruction", "(", "string", ",", "mnemonic_str", "[", "\"ins\"", "]", ",", "operands", ",", "arch_info", ".", "architecture_mode", ")", "if", "\"cc\"", "in", "mnemonic_str", ":", "instr", ".", "condition_code", "=", "cc_mapper", "[", "mnemonic_str", "[", "\"cc\"", "]", "]", "if", "\"uf\"", "in", "mnemonic_str", ":", "instr", ".", "update_flags", "=", "True", "if", "\"ldm_stm_addr_mode\"", "in", "mnemonic_str", ":", "instr", ".", "ldm_stm_addr_mode", "=", "ldm_stm_am_mapper", "[", "mnemonic_str", "[", "\"ldm_stm_addr_mode\"", "]", "]", "return", "instr" ]
https://github.com/programa-stic/barf-project/blob/9547ef843b8eb021c2c32c140e36173c0b4eafa3/barf/arch/arm/parser.py#L163-L185
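How a parse action with this (string, location, tokens) signature is typically attached in pyparsing; the grammar below is a toy, not BARF's ARM grammar:

from pyparsing import Word, alphas, nums

def action(s, loc, toks):
    # Same shape as parse_instruction above: inspect tokens, return a value
    return (toks[0], int(toks[1]))

instr = (Word(alphas) + Word(nums)).setParseAction(action)
print(instr.parseString('mov 42')[0])  # ('mov', 42)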
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit - MAC OSX/tools/sqli/thirdparty/xdot/xdot.py
python
DotWindow.set_xdotcode
(self, xdotcode, filename='<stdin>')
[]
def set_xdotcode(self, xdotcode, filename='<stdin>'): if self.widget.set_xdotcode(xdotcode): self.set_title(os.path.basename(filename) + ' - Dot Viewer') self.widget.zoom_to_fit()
[ "def", "set_xdotcode", "(", "self", ",", "xdotcode", ",", "filename", "=", "'<stdin>'", ")", ":", "if", "self", ".", "widget", ".", "set_xdotcode", "(", "xdotcode", ")", ":", "self", ".", "set_title", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", "+", "' - Dot Viewer'", ")", "self", ".", "widget", ".", "zoom_to_fit", "(", ")" ]
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/tools/sqli/thirdparty/xdot/xdot.py#L1778-L1781
spectacles/CodeComplice
8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62
libs/codeintel2/lang_python.py
python
PythonImportsEvaluator.eval
(self, mgr)
[]
def eval(self, mgr): try: imp_prefix = tuple(self.trg.extra["imp_prefix"]) if imp_prefix: libs = self.buf.libs if not imp_prefix[0]: if not imp_prefix[-1]: # Deal with last item being empty, i.e. "from ." imp_prefix = imp_prefix[:-1] lookuppath = self.buf.path while imp_prefix and not imp_prefix[0]: lookuppath = dirname(lookuppath) imp_prefix = imp_prefix[1:] libs = [mgr.db.get_lang_lib(self.lang, "curdirlib", [lookuppath])] else: # We use a special lib generator - that will lazily load # additional directory libs when there are no matches found. # This is a smart import facility - to detect imports from # a parent directory when they are not explicitly on the # included path list, quite common for Django and other # Python frameworks that mangle the sys.path at runtime. libs = PythonImportLibGenerator(mgr, self.lang, self.buf.path, imp_prefix, libs) self.ctlr.set_desc("subimports of '%s'" % '.'.join(imp_prefix)) cplns = [] for lib in libs: imports = lib.get_blob_imports(imp_prefix) if imports: cplns.extend( ((is_dir_import and "directory" or "module"), name) for name, is_dir_import in imports ) if self.trg.type == "module-members": # Also add top-level members of the specified module. dotted_prefix = '.'.join(imp_prefix) if lib.has_blob(dotted_prefix): blob = lib.get_blob(dotted_prefix) for name in blob.names: elem = blob.names[name] cplns.append((elem.get( "ilk") or elem.tag, name)) # TODO: Consider using the value of __all__ # if defined. for e in blob: attrs = e.get("attributes", "").split() if "__hidden__" not in attrs: try: cplns += self._members_from_elem( e, mgr) except CodeIntelError as ex: log.warn( "%s (skipping members for %s)", ex, e) if cplns: break if cplns: cplns = list(set(cplns)) # remove duplicates else: self.ctlr.set_desc("available imports") all_imports = set() for lib in self.buf.libs: all_imports.update(lib.get_blob_imports(imp_prefix)) cplns = [((is_dir_import and "directory" or "module"), name) for name, is_dir_import in all_imports] if cplns: cplns.sort(key=lambda i: i[1].upper()) self.ctlr.set_cplns(cplns) finally: self.ctlr.done("success")
[ "def", "eval", "(", "self", ",", "mgr", ")", ":", "try", ":", "imp_prefix", "=", "tuple", "(", "self", ".", "trg", ".", "extra", "[", "\"imp_prefix\"", "]", ")", "if", "imp_prefix", ":", "libs", "=", "self", ".", "buf", ".", "libs", "if", "not", "imp_prefix", "[", "0", "]", ":", "if", "not", "imp_prefix", "[", "-", "1", "]", ":", "# Deal with last item being empty, i.e. \"from .\"", "imp_prefix", "=", "imp_prefix", "[", ":", "-", "1", "]", "lookuppath", "=", "self", ".", "buf", ".", "path", "while", "imp_prefix", "and", "not", "imp_prefix", "[", "0", "]", ":", "lookuppath", "=", "dirname", "(", "lookuppath", ")", "imp_prefix", "=", "imp_prefix", "[", "1", ":", "]", "libs", "=", "[", "mgr", ".", "db", ".", "get_lang_lib", "(", "self", ".", "lang", ",", "\"curdirlib\"", ",", "[", "lookuppath", "]", ")", "]", "else", ":", "# We use a special lib generator - that will lazily load", "# additional directory libs when there are no matches found.", "# This is a smart import facility - to detect imports from", "# a parent directory when they are not explicitly on the", "# included path list, quite common for Django and other", "# Python frameworks that mangle the sys.path at runtime.", "libs", "=", "PythonImportLibGenerator", "(", "mgr", ",", "self", ".", "lang", ",", "self", ".", "buf", ".", "path", ",", "imp_prefix", ",", "libs", ")", "self", ".", "ctlr", ".", "set_desc", "(", "\"subimports of '%s'\"", "%", "'.'", ".", "join", "(", "imp_prefix", ")", ")", "cplns", "=", "[", "]", "for", "lib", "in", "libs", ":", "imports", "=", "lib", ".", "get_blob_imports", "(", "imp_prefix", ")", "if", "imports", ":", "cplns", ".", "extend", "(", "(", "(", "is_dir_import", "and", "\"directory\"", "or", "\"module\"", ")", ",", "name", ")", "for", "name", ",", "is_dir_import", "in", "imports", ")", "if", "self", ".", "trg", ".", "type", "==", "\"module-members\"", ":", "# Also add top-level members of the specified module.", "dotted_prefix", "=", "'.'", ".", "join", "(", "imp_prefix", ")", "if", "lib", ".", "has_blob", "(", "dotted_prefix", ")", ":", "blob", "=", "lib", ".", "get_blob", "(", "dotted_prefix", ")", "for", "name", "in", "blob", ".", "names", ":", "elem", "=", "blob", ".", "names", "[", "name", "]", "cplns", ".", "append", "(", "(", "elem", ".", "get", "(", "\"ilk\"", ")", "or", "elem", ".", "tag", ",", "name", ")", ")", "# TODO: Consider using the value of __all__", "# if defined.", "for", "e", "in", "blob", ":", "attrs", "=", "e", ".", "get", "(", "\"attributes\"", ",", "\"\"", ")", ".", "split", "(", ")", "if", "\"__hidden__\"", "not", "in", "attrs", ":", "try", ":", "cplns", "+=", "self", ".", "_members_from_elem", "(", "e", ",", "mgr", ")", "except", "CodeIntelError", "as", "ex", ":", "log", ".", "warn", "(", "\"%s (skipping members for %s)\"", ",", "ex", ",", "e", ")", "if", "cplns", ":", "break", "if", "cplns", ":", "cplns", "=", "list", "(", "set", "(", "cplns", ")", ")", "# remove duplicates", "else", ":", "self", ".", "ctlr", ".", "set_desc", "(", "\"available imports\"", ")", "all_imports", "=", "set", "(", ")", "for", "lib", "in", "self", ".", "buf", ".", "libs", ":", "all_imports", ".", "update", "(", "lib", ".", "get_blob_imports", "(", "imp_prefix", ")", ")", "cplns", "=", "[", "(", "(", "is_dir_import", "and", "\"directory\"", "or", "\"module\"", ")", ",", "name", ")", "for", "name", ",", "is_dir_import", "in", "all_imports", "]", "if", "cplns", ":", "cplns", ".", "sort", "(", "key", "=", "lambda", "i", ":", "i", "[", "1", "]", ".", "upper", "(", ")", ")", "self", ".", "ctlr", ".", 
"set_cplns", "(", "cplns", ")", "finally", ":", "self", ".", "ctlr", ".", "done", "(", "\"success\"", ")" ]
https://github.com/spectacles/CodeComplice/blob/8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62/libs/codeintel2/lang_python.py#L208-L280
mozilla/foundation.mozilla.org
6757f8d4a56ea97800d304dd0b43ee23dac6593d
network-api/networkapi/wagtailpages/pagemodels/index.py
python
IndexPage.generate_entries_set_html
(self, request, *args, **kwargs)
return JsonResponse({ 'entries_html': html, 'has_next': has_next, })
JSON endpoint for getting a set of (pre-rendered) entries
JSON endpoint for getting a set of (pre-rendered) entries
[ "JSON", "endpoint", "for", "getting", "a", "set", "of", "(", "pre", "-", "rendered", ")", "entries" ]
def generate_entries_set_html(self, request, *args, **kwargs): """ JSON endpoint for getting a set of (pre-rendered) entries """ page = 1 if 'page' in request.GET: try: page = int(request.GET['page']) except ValueError: pass page_size = self.page_size if 'page_size' in request.GET: try: page_size = int(request.GET['page_size']) except ValueError: pass start = page * page_size end = start + page_size entries = self.get_entries({ 'request': request }) # Exclude model types if data-exclude="" has a value in the template if 'exclude' in request.GET: try: # Try to get the content type. Then get the model_class. # This allows us to say "exclude 'publicationpage'" and get the model # by it's sting name without the AppName. ct = ContentType.objects.get(model=request.GET.get("exclude").lower()) not_model = ct.model_class() entries = entries.not_type(not_model) except ContentType.DoesNotExist: pass has_next = end < len(entries) hide_classifiers = False if hasattr(self, 'filtered'): if self.filtered.get('type') == 'category': hide_classifiers = True html = loader.render_to_string( 'wagtailpages/fragments/entry_cards.html', context={ 'entries': entries[start:end], 'hide_classifiers': hide_classifiers }, request=request ) return JsonResponse({ 'entries_html': html, 'has_next': has_next, })
[ "def", "generate_entries_set_html", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "page", "=", "1", "if", "'page'", "in", "request", ".", "GET", ":", "try", ":", "page", "=", "int", "(", "request", ".", "GET", "[", "'page'", "]", ")", "except", "ValueError", ":", "pass", "page_size", "=", "self", ".", "page_size", "if", "'page_size'", "in", "request", ".", "GET", ":", "try", ":", "page_size", "=", "int", "(", "request", ".", "GET", "[", "'page_size'", "]", ")", "except", "ValueError", ":", "pass", "start", "=", "page", "*", "page_size", "end", "=", "start", "+", "page_size", "entries", "=", "self", ".", "get_entries", "(", "{", "'request'", ":", "request", "}", ")", "# Exclude model types if data-exclude=\"\" has a value in the template", "if", "'exclude'", "in", "request", ".", "GET", ":", "try", ":", "# Try to get the content type. Then get the model_class.", "# This allows us to say \"exclude 'publicationpage'\" and get the model", "# by it's sting name without the AppName.", "ct", "=", "ContentType", ".", "objects", ".", "get", "(", "model", "=", "request", ".", "GET", ".", "get", "(", "\"exclude\"", ")", ".", "lower", "(", ")", ")", "not_model", "=", "ct", ".", "model_class", "(", ")", "entries", "=", "entries", ".", "not_type", "(", "not_model", ")", "except", "ContentType", ".", "DoesNotExist", ":", "pass", "has_next", "=", "end", "<", "len", "(", "entries", ")", "hide_classifiers", "=", "False", "if", "hasattr", "(", "self", ",", "'filtered'", ")", ":", "if", "self", ".", "filtered", ".", "get", "(", "'type'", ")", "==", "'category'", ":", "hide_classifiers", "=", "True", "html", "=", "loader", ".", "render_to_string", "(", "'wagtailpages/fragments/entry_cards.html'", ",", "context", "=", "{", "'entries'", ":", "entries", "[", "start", ":", "end", "]", ",", "'hide_classifiers'", ":", "hide_classifiers", "}", ",", "request", "=", "request", ")", "return", "JsonResponse", "(", "{", "'entries_html'", ":", "html", ",", "'has_next'", ":", "has_next", ",", "}", ")" ]
https://github.com/mozilla/foundation.mozilla.org/blob/6757f8d4a56ea97800d304dd0b43ee23dac6593d/network-api/networkapi/wagtailpages/pagemodels/index.py#L185-L241
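The slice arithmetic above, isolated (numbers invented): with the default page = 1, start = page * page_size means the endpoint serves the second page_size-sized chunk, presumably because the first chunk is rendered with the initial page load:

page, page_size = 1, 12
start, end = page * page_size, page * page_size + page_size
print(start, end)  # 12 24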
enthought/traitsui
b7c38c7a47bf6ae7971f9ddab70c8a358647dd25
traitsui/wx/text_editor.py
python
ReadonlyEditor.dispose
(self)
Disposes of the contents of an editor.
Disposes of the contents of an editor.
[ "Disposes", "of", "the", "contents", "of", "an", "editor", "." ]
def dispose(self): """Disposes of the contents of an editor.""" if self.factory.view is not None: control = self.control control.Unbind(wx.EVT_ENTER_WINDOW) control.Unbind(wx.EVT_LEAVE_WINDOW) control.Unbind(wx.EVT_LEFT_DOWN) control.Unbind(wx.EVT_LEFT_UP) super().dispose()
[ "def", "dispose", "(", "self", ")", ":", "if", "self", ".", "factory", ".", "view", "is", "not", "None", ":", "control", "=", "self", ".", "control", "control", ".", "Unbind", "(", "wx", ".", "EVT_ENTER_WINDOW", ")", "control", ".", "Unbind", "(", "wx", ".", "EVT_LEAVE_WINDOW", ")", "control", ".", "Unbind", "(", "wx", ".", "EVT_LEFT_DOWN", ")", "control", ".", "Unbind", "(", "wx", ".", "EVT_LEFT_UP", ")", "super", "(", ")", ".", "dispose", "(", ")" ]
https://github.com/enthought/traitsui/blob/b7c38c7a47bf6ae7971f9ddab70c8a358647dd25/traitsui/wx/text_editor.py#L200-L209
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/taskrouter/v1/workspace/workflow/__init__.py
python
WorkflowPage.__repr__
(self)
return '<Twilio.Taskrouter.V1.WorkflowPage>'
Provide a friendly representation :returns: Machine friendly representation :rtype: str
Provide a friendly representation
[ "Provide", "a", "friendly", "representation" ]
def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Taskrouter.V1.WorkflowPage>'
[ "def", "__repr__", "(", "self", ")", ":", "return", "'<Twilio.Taskrouter.V1.WorkflowPage>'" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/taskrouter/v1/workspace/workflow/__init__.py#L212-L219
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/s3/codecs/card.py
python
S3PDFCardTemplate.__init__
(self, pagesize, cardsize, margins = None, spacing = None, title = None, )
Args: pagesize: the page size, tuple (w, h) cardsize: the card size, tuple (w, h) margins: the page margins, tuple (N, E, S, W) spacing: the spacing between cards, tuple (H, V) title: the document title Note: - all sizes in points (72 points per inch)
Args: pagesize: the page size, tuple (w, h) cardsize: the card size, tuple (w, h) margins: the page margins, tuple (N, E, S, W) spacing: the spacing between cards, tuple (H, V) title: the document title
[ "Args", ":", "pagesize", ":", "the", "page", "size", "tuple", "(", "w", "h", ")", "cardsize", ":", "the", "card", "size", "tuple", "(", "w", "h", ")", "margins", ":", "the", "page", "margins", "tuple", "(", "N", "E", "S", "W", ")", "spacing", ":", "the", "spacing", "between", "cards", "tuple", "(", "H", "V", ")", "title", ":", "the", "document", "title" ]
def __init__(self, pagesize, cardsize, margins = None, spacing = None, title = None, ): """ Args: pagesize: the page size, tuple (w, h) cardsize: the card size, tuple (w, h) margins: the page margins, tuple (N, E, S, W) spacing: the spacing between cards, tuple (H, V) title: the document title Note: - all sizes in points (72 points per inch) """ # Spacing between cards if spacing is None: spacing = (18, 18) elif not isinstance(spacing, (tuple, list)): spacing = (spacing, spacing) # Page margins if margins is None: margins = self.compute_margins(pagesize, cardsize, spacing) elif not isinstance(margins, (tuple, list)): margins = (margins, margins, margins, margins) # Cards per row, rows per page and cards per page pagewidth, pageheight = pagesize cardwidth, cardheight = cardsize number_of_cards = self.number_of_cards cards_per_row = number_of_cards(pagewidth, cardwidth, (margins[1], margins[3]), spacing[0], ) rows_per_page = number_of_cards(pageheight, cardheight, (margins[0], margins[2]), spacing[1], ) self.cards_per_row = cards_per_row self.rows_per_page = rows_per_page self.cards_per_page = rows_per_page * cards_per_row # Generate page templates pages = self.page_layouts(pagesize, cardsize, margins, spacing) if title is None: title = current.T("Items") # Call super-constructor BaseDocTemplate.__init__(self, None, pagesize = pagesize, pageTemplates = pages, topMargin = margins[0], rightMargin = margins[1], bottomMargin = margins[2], leftMargin = margins[3], title = s3_str(title), )
[ "def", "__init__", "(", "self", ",", "pagesize", ",", "cardsize", ",", "margins", "=", "None", ",", "spacing", "=", "None", ",", "title", "=", "None", ",", ")", ":", "# Spacing between cards", "if", "spacing", "is", "None", ":", "spacing", "=", "(", "18", ",", "18", ")", "elif", "not", "isinstance", "(", "spacing", ",", "(", "tuple", ",", "list", ")", ")", ":", "spacing", "=", "(", "spacing", ",", "spacing", ")", "# Page margins", "if", "margins", "is", "None", ":", "margins", "=", "self", ".", "compute_margins", "(", "pagesize", ",", "cardsize", ",", "spacing", ")", "elif", "not", "isinstance", "(", "margins", ",", "(", "tuple", ",", "list", ")", ")", ":", "margins", "=", "(", "margins", ",", "margins", ",", "margins", ",", "margins", ")", "# Cards per row, rows per page and cards per page", "pagewidth", ",", "pageheight", "=", "pagesize", "cardwidth", ",", "cardheight", "=", "cardsize", "number_of_cards", "=", "self", ".", "number_of_cards", "cards_per_row", "=", "number_of_cards", "(", "pagewidth", ",", "cardwidth", ",", "(", "margins", "[", "1", "]", ",", "margins", "[", "3", "]", ")", ",", "spacing", "[", "0", "]", ",", ")", "rows_per_page", "=", "number_of_cards", "(", "pageheight", ",", "cardheight", ",", "(", "margins", "[", "0", "]", ",", "margins", "[", "2", "]", ")", ",", "spacing", "[", "1", "]", ",", ")", "self", ".", "cards_per_row", "=", "cards_per_row", "self", ".", "rows_per_page", "=", "rows_per_page", "self", ".", "cards_per_page", "=", "rows_per_page", "*", "cards_per_row", "# Generate page templates", "pages", "=", "self", ".", "page_layouts", "(", "pagesize", ",", "cardsize", ",", "margins", ",", "spacing", ")", "if", "title", "is", "None", ":", "title", "=", "current", ".", "T", "(", "\"Items\"", ")", "# Call super-constructor", "BaseDocTemplate", ".", "__init__", "(", "self", ",", "None", ",", "pagesize", "=", "pagesize", ",", "pageTemplates", "=", "pages", ",", "topMargin", "=", "margins", "[", "0", "]", ",", "rightMargin", "=", "margins", "[", "1", "]", ",", "bottomMargin", "=", "margins", "[", "2", "]", ",", "leftMargin", "=", "margins", "[", "3", "]", ",", "title", "=", "s3_str", "(", "title", ")", ",", ")" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3/codecs/card.py#L294-L363
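number_of_cards itself is not shown in this record; below is a hypothetical reconstruction consistent with the call sites above (cards that fit along one dimension, given two margins and the inter-card gap):

def number_of_cards(page_len, card_len, margins, spacing):
    # Hypothetical: n cards need n-1 gaps, hence one gap is added back
    usable = page_len - margins[0] - margins[1]
    return max(int((usable + spacing) // (card_len + spacing)), 0)

print(number_of_cards(595, 153, (18, 18), 18))  # 3 cards across an A4 width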
napari/napari
dbf4158e801fa7a429de8ef1cdee73bf6d64c61e
napari/utils/events/containers/_evented_list.py
python
EventedList.move
(self, src_index: int, dest_index: int = 0)
return True
Insert object at ``src_index`` before ``dest_index``. Both indices refer to the list prior to any object removal (pre-move space).
Insert object at ``src_index`` before ``dest_index``.
[ "Insert", "object", "at", "src_index", "before", "dest_index", "." ]
def move(self, src_index: int, dest_index: int = 0) -> bool: """Insert object at ``src_index`` before ``dest_index``. Both indices refer to the list prior to any object removal (pre-move space). """ if dest_index < 0: dest_index += len(self) + 1 if dest_index in (src_index, src_index + 1): # this is a no-op return False self.events.moving(index=src_index, new_index=dest_index) item = self._list.pop(src_index) if dest_index > src_index: dest_index -= 1 self._list.insert(dest_index, item) self.events.moved(index=src_index, new_index=dest_index, value=item) self.events.reordered(value=self) return True
[ "def", "move", "(", "self", ",", "src_index", ":", "int", ",", "dest_index", ":", "int", "=", "0", ")", "->", "bool", ":", "if", "dest_index", "<", "0", ":", "dest_index", "+=", "len", "(", "self", ")", "+", "1", "if", "dest_index", "in", "(", "src_index", ",", "src_index", "+", "1", ")", ":", "# this is a no-op", "return", "False", "self", ".", "events", ".", "moving", "(", "index", "=", "src_index", ",", "new_index", "=", "dest_index", ")", "item", "=", "self", ".", "_list", ".", "pop", "(", "src_index", ")", "if", "dest_index", ">", "src_index", ":", "dest_index", "-=", "1", "self", ".", "_list", ".", "insert", "(", "dest_index", ",", "item", ")", "self", ".", "events", ".", "moved", "(", "index", "=", "src_index", ",", "new_index", "=", "dest_index", ",", "value", "=", "item", ")", "self", ".", "events", ".", "reordered", "(", "value", "=", "self", ")", "return", "True" ]
https://github.com/napari/napari/blob/dbf4158e801fa7a429de8ef1cdee73bf6d64c61e/napari/utils/events/containers/_evented_list.py#L206-L225
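A worked example of the pre-move index semantics above (list contents are arbitrary):

# Backing list: ['a', 'b', 'c']; call move(src_index=0, dest_index=2)
#   dest_index not in (0, 1)   -> not a no-op
#   pop index 0                -> item 'a', list becomes ['b', 'c']
#   dest_index > src_index     -> dest_index = 1
#   insert at 1                -> ['b', 'a', 'c']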
hvac/hvac
ec048ded30d21c13c21cfa950d148c8bfc1467b0
hvac/api/secrets_engines/identity.py
python
Identity.read_tokens_backend_configuration
(self, mount_point=DEFAULT_MOUNT_POINT)
return self._adapter.get( url=api_path, )
Query vault identity tokens configurations. Supported methods: GET: {mount_point}/oidc/config. :return: The response of the read_tokens_backend_configuration request. :rtype: dict
Query vault identity tokens configurations.
[ "Query", "vault", "identity", "tokens", "configurations", "." ]
def read_tokens_backend_configuration(self, mount_point=DEFAULT_MOUNT_POINT): """Query vault identity tokens configurations. Supported methods: GET: {mount_point}/oidc/config. :return: The response of the read_tokens_backend_configuration request. :rtype: dict """ api_path = utils.format_url( "/v1/{mount_point}/oidc/config", mount_point=mount_point, ) return self._adapter.get( url=api_path, )
[ "def", "read_tokens_backend_configuration", "(", "self", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "utils", ".", "format_url", "(", "\"/v1/{mount_point}/oidc/config\"", ",", "mount_point", "=", "mount_point", ",", ")", "return", "self", ".", "_adapter", ".", "get", "(", "url", "=", "api_path", ",", ")" ]
https://github.com/hvac/hvac/blob/ec048ded30d21c13c21cfa950d148c8bfc1467b0/hvac/api/secrets_engines/identity.py#L1261-L1276
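Typical call, assuming an authenticated hvac client (standard hvac usage; the URL and token are placeholders):

import hvac

client = hvac.Client(url='http://localhost:8200', token='...')
config = client.secrets.identity.read_tokens_backend_configuration()
print(config['data']['issuer'])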
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/s3db/org.py
python
organisation_update_affiliations
(record)
Update affiliations for a branch organisation Args: record: the org_organisation_branch record
Update affiliations for a branch organisation
[ "Update", "affiliations", "for", "a", "branch", "organisation" ]
def organisation_update_affiliations(record): """ Update affiliations for a branch organisation Args: record: the org_organisation_branch record """ if record.deleted and record.deleted_fk: try: fk = json.loads(record.deleted_fk) branch_id = fk["branch_id"] except: return else: branch_id = record.branch_id from .pr import OU BRANCHES = "Branches" db = current.db s3db = current.s3db otable = s3db.org_organisation btable = otable.with_alias("branch") ltable = db.org_organisation_branch etable = s3db.pr_pentity rtable = db.pr_role atable = db.pr_affiliation o = otable._tablename b = btable._tablename r = rtable._tablename # Get current memberships query = (ltable.branch_id == branch_id) & \ (ltable.deleted != True) left = [otable.on(ltable.organisation_id == otable.id), btable.on(ltable.branch_id == btable.id)] rows = db(query).select(otable.pe_id, btable.pe_id, left=left) current_memberships = [(row[o].pe_id, row[b].pe_id) for row in rows] # Get current affiliations query = (rtable.deleted != True) & \ (rtable.role == BRANCHES) & \ (rtable.pe_id == etable.pe_id) & \ (etable.instance_type == o) & \ (atable.deleted != True) & \ (atable.role_id == rtable.id) & \ (atable.pe_id == btable.pe_id) & \ (btable.id == branch_id) rows = db(query).select(rtable.pe_id, btable.pe_id) current_affiliations = [(row[r].pe_id, row[b].pe_id) for row in rows] # Remove all affiliations which are not current memberships remove_affiliation = s3db.pr_remove_affiliation for a in current_affiliations: org, branch = a if a not in current_memberships: remove_affiliation(org, branch, role=BRANCHES) else: current_memberships.remove(a) # Add affiliations for all new memberships add_affiliation = s3db.pr_add_affiliation for m in current_memberships: org, branch = m add_affiliation(org, branch, role=BRANCHES, role_type=OU)
[ "def", "organisation_update_affiliations", "(", "record", ")", ":", "if", "record", ".", "deleted", "and", "record", ".", "deleted_fk", ":", "try", ":", "fk", "=", "json", ".", "loads", "(", "record", ".", "deleted_fk", ")", "branch_id", "=", "fk", "[", "\"branch_id\"", "]", "except", ":", "return", "else", ":", "branch_id", "=", "record", ".", "branch_id", "from", ".", "pr", "import", "OU", "BRANCHES", "=", "\"Branches\"", "db", "=", "current", ".", "db", "s3db", "=", "current", ".", "s3db", "otable", "=", "s3db", ".", "org_organisation", "btable", "=", "otable", ".", "with_alias", "(", "\"branch\"", ")", "ltable", "=", "db", ".", "org_organisation_branch", "etable", "=", "s3db", ".", "pr_pentity", "rtable", "=", "db", ".", "pr_role", "atable", "=", "db", ".", "pr_affiliation", "o", "=", "otable", ".", "_tablename", "b", "=", "btable", ".", "_tablename", "r", "=", "rtable", ".", "_tablename", "# Get current memberships", "query", "=", "(", "ltable", ".", "branch_id", "==", "branch_id", ")", "&", "(", "ltable", ".", "deleted", "!=", "True", ")", "left", "=", "[", "otable", ".", "on", "(", "ltable", ".", "organisation_id", "==", "otable", ".", "id", ")", ",", "btable", ".", "on", "(", "ltable", ".", "branch_id", "==", "btable", ".", "id", ")", "]", "rows", "=", "db", "(", "query", ")", ".", "select", "(", "otable", ".", "pe_id", ",", "btable", ".", "pe_id", ",", "left", "=", "left", ")", "current_memberships", "=", "[", "(", "row", "[", "o", "]", ".", "pe_id", ",", "row", "[", "b", "]", ".", "pe_id", ")", "for", "row", "in", "rows", "]", "# Get current affiliations", "query", "=", "(", "rtable", ".", "deleted", "!=", "True", ")", "&", "(", "rtable", ".", "role", "==", "BRANCHES", ")", "&", "(", "rtable", ".", "pe_id", "==", "etable", ".", "pe_id", ")", "&", "(", "etable", ".", "instance_type", "==", "o", ")", "&", "(", "atable", ".", "deleted", "!=", "True", ")", "&", "(", "atable", ".", "role_id", "==", "rtable", ".", "id", ")", "&", "(", "atable", ".", "pe_id", "==", "btable", ".", "pe_id", ")", "&", "(", "btable", ".", "id", "==", "branch_id", ")", "rows", "=", "db", "(", "query", ")", ".", "select", "(", "rtable", ".", "pe_id", ",", "btable", ".", "pe_id", ")", "current_affiliations", "=", "[", "(", "row", "[", "r", "]", ".", "pe_id", ",", "row", "[", "b", "]", ".", "pe_id", ")", "for", "row", "in", "rows", "]", "# Remove all affiliations which are not current memberships", "remove_affiliation", "=", "s3db", ".", "pr_remove_affiliation", "for", "a", "in", "current_affiliations", ":", "org", ",", "branch", "=", "a", "if", "a", "not", "in", "current_memberships", ":", "remove_affiliation", "(", "org", ",", "branch", ",", "role", "=", "BRANCHES", ")", "else", ":", "current_memberships", ".", "remove", "(", "a", ")", "# Add affiliations for all new memberships", "add_affiliation", "=", "s3db", ".", "pr_add_affiliation", "for", "m", "in", "current_memberships", ":", "org", ",", "branch", "=", "m", "add_affiliation", "(", "org", ",", "branch", ",", "role", "=", "BRANCHES", ",", "role_type", "=", "OU", ")" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3db/org.py#L8272-L8338
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
bin/x86/Debug/scripting_engine/Lib/distutils/fancy_getopt.py
python
FancyGetopt.set_aliases
(self, alias)
Set the aliases for this option parser.
Set the aliases for this option parser.
[ "Set", "the", "aliases", "for", "this", "option", "parser", "." ]
def set_aliases (self, alias): """Set the aliases for this option parser.""" self._check_alias_dict(alias, "alias") self.alias = alias
[ "def", "set_aliases", "(", "self", ",", "alias", ")", ":", "self", ".", "_check_alias_dict", "(", "alias", ",", "\"alias\"", ")", "self", ".", "alias", "=", "alias" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/bin/x86/Debug/scripting_engine/Lib/distutils/fancy_getopt.py#L132-L135
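A sketch of the alias mechanism using distutils' real FancyGetopt with an invented option table:

from distutils.fancy_getopt import FancyGetopt

parser = FancyGetopt([('verbose', 'v', 'run verbosely')])
parser.set_aliases({'noisy': 'verbose'})  # --noisy now behaves like --verbose
args, opts = parser.getopt(['--noisy'])
print(parser.get_option_order())  # [('verbose', 1)]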
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/sympy/sympy/core/symbol.py
python
symbols
(names, **args)
Transform strings into instances of :class:`Symbol` class. :func:`symbols` function returns a sequence of symbols with names taken from ``names`` argument, which can be a comma or whitespace delimited string, or a sequence of strings:: >>> from sympy import symbols, Function >>> x, y, z = symbols('x,y,z') >>> a, b, c = symbols('a b c') The type of output is dependent on the properties of input arguments:: >>> symbols('x') x >>> symbols('x,') (x,) >>> symbols('x,y') (x, y) >>> symbols(('a', 'b', 'c')) (a, b, c) >>> symbols(['a', 'b', 'c']) [a, b, c] >>> symbols(set(['a', 'b', 'c'])) set([a, b, c]) If an iterable container is needed for a single symbol, set the ``seq`` argument to ``True`` or terminate the symbol name with a comma:: >>> symbols('x', seq=True) (x,) To reduce typing, range syntax is supported to create indexed symbols. Ranges are indicated by a colon and the type of range is determined by the character to the right of the colon. If the character is a digit then all contiguous digits to the left are taken as the nonnegative starting value (or 0 if there are no digits to the left of the colon) and all contiguous digits to the right are taken as 1 greater than the ending value:: >>> symbols('x:10') (x0, x1, x2, x3, x4, x5, x6, x7, x8, x9) >>> symbols('x5:10') (x5, x6, x7, x8, x9) >>> symbols('x5(:2)') (x50, x51) >>> symbols('x5:10,y:5') (x5, x6, x7, x8, x9, y0, y1, y2, y3, y4) >>> symbols(('x5:10', 'y:5')) ((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4)) If the character to the right of the colon is a letter, then the single letter to the left (or 'a' if there is none) is taken as the start and all characters in the lexicographic range *through* the letter to the right are used as the range:: >>> symbols('x:z') (x, y, z) >>> symbols('x:c') # null range () >>> symbols('x(:c)') (xa, xb, xc) >>> symbols(':c') (a, b, c) >>> symbols('a:d, x:z') (a, b, c, d, x, y, z) >>> symbols(('a:d', 'x:z')) ((a, b, c, d), (x, y, z)) Multiple ranges are supported; contiguous numerical ranges should be separated by parentheses to disambiguate the ending number of one range from the starting number of the next:: >>> symbols('x:2(1:3)') (x01, x02, x11, x12) >>> symbols(':3:2') # parsing is from left to right (00, 01, 10, 11, 20, 21) Only one pair of parentheses surrounding ranges are removed, so to include parentheses around ranges, double them. And to include spaces, commas, or colons, escape them with a backslash:: >>> symbols('x((a:b))') (x(a), x(b)) >>> symbols('x(:1\,:2)') # or 'x((:1)\,(:2))' (x(0,0), x(0,1)) All newly created symbols have assumptions set according to ``args``:: >>> a = symbols('a', integer=True) >>> a.is_integer True >>> x, y, z = symbols('x,y,z', real=True) >>> x.is_real and y.is_real and z.is_real True Despite its name, :func:`symbols` can create symbol-like objects like instances of Function or Wild classes. To achieve this, set ``cls`` keyword argument to the desired type:: >>> symbols('f,g,h', cls=Function) (f, g, h) >>> type(_[0]) <class 'sympy.core.function.UndefinedFunction'>
Transform strings into instances of :class:`Symbol` class.
[ "Transform", "strings", "into", "instances", "of", ":", "class", ":", "Symbol", "class", "." ]
def symbols(names, **args): """ Transform strings into instances of :class:`Symbol` class. :func:`symbols` function returns a sequence of symbols with names taken from ``names`` argument, which can be a comma or whitespace delimited string, or a sequence of strings:: >>> from sympy import symbols, Function >>> x, y, z = symbols('x,y,z') >>> a, b, c = symbols('a b c') The type of output is dependent on the properties of input arguments:: >>> symbols('x') x >>> symbols('x,') (x,) >>> symbols('x,y') (x, y) >>> symbols(('a', 'b', 'c')) (a, b, c) >>> symbols(['a', 'b', 'c']) [a, b, c] >>> symbols(set(['a', 'b', 'c'])) set([a, b, c]) If an iterable container is needed for a single symbol, set the ``seq`` argument to ``True`` or terminate the symbol name with a comma:: >>> symbols('x', seq=True) (x,) To reduce typing, range syntax is supported to create indexed symbols. Ranges are indicated by a colon and the type of range is determined by the character to the right of the colon. If the character is a digit then all contiguous digits to the left are taken as the nonnegative starting value (or 0 if there are no digits to the left of the colon) and all contiguous digits to the right are taken as 1 greater than the ending value:: >>> symbols('x:10') (x0, x1, x2, x3, x4, x5, x6, x7, x8, x9) >>> symbols('x5:10') (x5, x6, x7, x8, x9) >>> symbols('x5(:2)') (x50, x51) >>> symbols('x5:10,y:5') (x5, x6, x7, x8, x9, y0, y1, y2, y3, y4) >>> symbols(('x5:10', 'y:5')) ((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4)) If the character to the right of the colon is a letter, then the single letter to the left (or 'a' if there is none) is taken as the start and all characters in the lexicographic range *through* the letter to the right are used as the range:: >>> symbols('x:z') (x, y, z) >>> symbols('x:c') # null range () >>> symbols('x(:c)') (xa, xb, xc) >>> symbols(':c') (a, b, c) >>> symbols('a:d, x:z') (a, b, c, d, x, y, z) >>> symbols(('a:d', 'x:z')) ((a, b, c, d), (x, y, z)) Multiple ranges are supported; contiguous numerical ranges should be separated by parentheses to disambiguate the ending number of one range from the starting number of the next:: >>> symbols('x:2(1:3)') (x01, x02, x11, x12) >>> symbols(':3:2') # parsing is from left to right (00, 01, 10, 11, 20, 21) Only one pair of parentheses surrounding ranges are removed, so to include parentheses around ranges, double them. And to include spaces, commas, or colons, escape them with a backslash:: >>> symbols('x((a:b))') (x(a), x(b)) >>> symbols('x(:1\,:2)') # or 'x((:1)\,(:2))' (x(0,0), x(0,1)) All newly created symbols have assumptions set according to ``args``:: >>> a = symbols('a', integer=True) >>> a.is_integer True >>> x, y, z = symbols('x,y,z', real=True) >>> x.is_real and y.is_real and z.is_real True Despite its name, :func:`symbols` can create symbol-like objects like instances of Function or Wild classes. To achieve this, set ``cls`` keyword argument to the desired type:: >>> symbols('f,g,h', cls=Function) (f, g, h) >>> type(_[0]) <class 'sympy.core.function.UndefinedFunction'> """ result = [] if isinstance(names, string_types): marker = 0 literals = ['\,', '\:', '\ '] for i in range(len(literals)): lit = literals.pop(0) if lit in names: while chr(marker) in names: marker += 1 lit_char = chr(marker) marker += 1 names = names.replace(lit, lit_char) literals.append((lit_char, lit[1:])) def literal(s): if literals: for c, l in literals: s = s.replace(c, l) return s names = names.strip() as_seq = names.endswith(',') if as_seq: names = names[:-1].rstrip() if not names: raise ValueError('no symbols given') # split on commas names = [n.strip() for n in names.split(',')] if not all(n for n in names): raise ValueError('missing symbol between commas') # split on spaces for i in range(len(names) - 1, -1, -1): names[i: i + 1] = names[i].split() cls = args.pop('cls', Symbol) seq = args.pop('seq', as_seq) for name in names: if not name: raise ValueError('missing symbol') if ':' not in name: symbol = cls(literal(name), **args) result.append(symbol) continue split = _range.split(name) # remove 1 layer of bounding parentheses around ranges for i in range(len(split) - 1): if i and ':' in split[i] and split[i] != ':' and \ split[i - 1].endswith('(') and \ split[i + 1].startswith(')'): split[i - 1] = split[i - 1][:-1] split[i + 1] = split[i + 1][1:] for i, s in enumerate(split): if ':' in s: if s[-1].endswith(':'): raise ValueError('missing end range') a, b = s.split(':') if b[-1] in string.digits: a = 0 if not a else int(a) b = int(b) split[i] = [str(c) for c in range(a, b)] else: a = a or 'a' split[i] = [string.ascii_letters[c] for c in range( string.ascii_letters.index(a), string.ascii_letters.index(b) + 1)] # inclusive if not split[i]: break else: split[i] = [s] else: seq = True if len(split) == 1: names = split[0] else: names = [''.join(s) for s in cartes(*split)] if literals: result.extend([cls(literal(s), **args) for s in names]) else: result.extend([cls(s, **args) for s in names]) if not seq and len(result) <= 1: if not result: return () return result[0] return tuple(result) else: for name in names: result.append(symbols(name, **args)) return type(names)(result)
[ "def", "symbols", "(", "names", ",", "*", "*", "args", ")", ":", "result", "=", "[", "]", "if", "isinstance", "(", "names", ",", "string_types", ")", ":", "marker", "=", "0", "literals", "=", "[", "'\\,'", ",", "'\\:'", ",", "'\\ '", "]", "for", "i", "in", "range", "(", "len", "(", "literals", ")", ")", ":", "lit", "=", "literals", ".", "pop", "(", "0", ")", "if", "lit", "in", "names", ":", "while", "chr", "(", "marker", ")", "in", "names", ":", "marker", "+=", "1", "lit_char", "=", "chr", "(", "marker", ")", "marker", "+=", "1", "names", "=", "names", ".", "replace", "(", "lit", ",", "lit_char", ")", "literals", ".", "append", "(", "(", "lit_char", ",", "lit", "[", "1", ":", "]", ")", ")", "def", "literal", "(", "s", ")", ":", "if", "literals", ":", "for", "c", ",", "l", "in", "literals", ":", "s", "=", "s", ".", "replace", "(", "c", ",", "l", ")", "return", "s", "names", "=", "names", ".", "strip", "(", ")", "as_seq", "=", "names", ".", "endswith", "(", "','", ")", "if", "as_seq", ":", "names", "=", "names", "[", ":", "-", "1", "]", ".", "rstrip", "(", ")", "if", "not", "names", ":", "raise", "ValueError", "(", "'no symbols given'", ")", "# split on commas", "names", "=", "[", "n", ".", "strip", "(", ")", "for", "n", "in", "names", ".", "split", "(", "','", ")", "]", "if", "not", "all", "(", "n", "for", "n", "in", "names", ")", ":", "raise", "ValueError", "(", "'missing symbol between commas'", ")", "# split on spaces", "for", "i", "in", "range", "(", "len", "(", "names", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "names", "[", "i", ":", "i", "+", "1", "]", "=", "names", "[", "i", "]", ".", "split", "(", ")", "cls", "=", "args", ".", "pop", "(", "'cls'", ",", "Symbol", ")", "seq", "=", "args", ".", "pop", "(", "'seq'", ",", "as_seq", ")", "for", "name", "in", "names", ":", "if", "not", "name", ":", "raise", "ValueError", "(", "'missing symbol'", ")", "if", "':'", "not", "in", "name", ":", "symbol", "=", "cls", "(", "literal", "(", "name", ")", ",", "*", "*", "args", ")", "result", ".", "append", "(", "symbol", ")", "continue", "split", "=", "_range", ".", "split", "(", "name", ")", "# remove 1 layer of bounding parentheses around ranges", "for", "i", "in", "range", "(", "len", "(", "split", ")", "-", "1", ")", ":", "if", "i", "and", "':'", "in", "split", "[", "i", "]", "and", "split", "[", "i", "]", "!=", "':'", "and", "split", "[", "i", "-", "1", "]", ".", "endswith", "(", "'('", ")", "and", "split", "[", "i", "+", "1", "]", ".", "startswith", "(", "')'", ")", ":", "split", "[", "i", "-", "1", "]", "=", "split", "[", "i", "-", "1", "]", "[", ":", "-", "1", "]", "split", "[", "i", "+", "1", "]", "=", "split", "[", "i", "+", "1", "]", "[", "1", ":", "]", "for", "i", ",", "s", "in", "enumerate", "(", "split", ")", ":", "if", "':'", "in", "s", ":", "if", "s", "[", "-", "1", "]", ".", "endswith", "(", "':'", ")", ":", "raise", "ValueError", "(", "'missing end range'", ")", "a", ",", "b", "=", "s", ".", "split", "(", "':'", ")", "if", "b", "[", "-", "1", "]", "in", "string", ".", "digits", ":", "a", "=", "0", "if", "not", "a", "else", "int", "(", "a", ")", "b", "=", "int", "(", "b", ")", "split", "[", "i", "]", "=", "[", "str", "(", "c", ")", "for", "c", "in", "range", "(", "a", ",", "b", ")", "]", "else", ":", "a", "=", "a", "or", "'a'", "split", "[", "i", "]", "=", "[", "string", ".", "ascii_letters", "[", "c", "]", "for", "c", "in", "range", "(", "string", ".", "ascii_letters", ".", "index", "(", "a", ")", ",", "string", ".", "ascii_letters", ".", "index", "(", "b", ")", "+", 
"1", ")", "]", "# inclusive", "if", "not", "split", "[", "i", "]", ":", "break", "else", ":", "split", "[", "i", "]", "=", "[", "s", "]", "else", ":", "seq", "=", "True", "if", "len", "(", "split", ")", "==", "1", ":", "names", "=", "split", "[", "0", "]", "else", ":", "names", "=", "[", "''", ".", "join", "(", "s", ")", "for", "s", "in", "cartes", "(", "*", "split", ")", "]", "if", "literals", ":", "result", ".", "extend", "(", "[", "cls", "(", "literal", "(", "s", ")", ",", "*", "*", "args", ")", "for", "s", "in", "names", "]", ")", "else", ":", "result", ".", "extend", "(", "[", "cls", "(", "s", ",", "*", "*", "args", ")", "for", "s", "in", "names", "]", ")", "if", "not", "seq", "and", "len", "(", "result", ")", "<=", "1", ":", "if", "not", "result", ":", "return", "(", ")", "return", "result", "[", "0", "]", "return", "tuple", "(", "result", ")", "else", ":", "for", "name", "in", "names", ":", "result", ".", "append", "(", "symbols", "(", "name", ",", "*", "*", "args", ")", ")", "return", "type", "(", "names", ")", "(", "result", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/core/symbol.py#L301-L511
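A minimal usage sketch of the range syntax documented above, assuming sympy is importable; the variable names are illustrative only:

    from sympy import symbols

    # Numeric ranges: the end value is exclusive, so 'x:3' yields x0, x1, x2.
    x0, x1, x2 = symbols('x:3')

    # Letter ranges: the end letter is inclusive, so 'a:d' yields a, b, c, d.
    a, b, c, d = symbols('a:d')

    # Assumptions passed as keyword arguments apply to every created symbol.
    p, q = symbols('p q', positive=True)
    assert p.is_positive and q.is_positive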
projecthamster/hamster
19d160090de30e756bdc3122ff935bdaa86e2843
waflib/Tools/ccroot.py
python
accept_node_to_link
(self, node)
return not node.name.endswith('.pdb')
PRIVATE INTERNAL USE ONLY
PRIVATE INTERNAL USE ONLY
[ "PRIVATE", "INTERNAL", "USE", "ONLY" ]
def accept_node_to_link(self, node): """ PRIVATE INTERNAL USE ONLY """ return not node.name.endswith('.pdb')
[ "def", "accept_node_to_link", "(", "self", ",", "node", ")", ":", "return", "not", "node", ".", "name", ".", "endswith", "(", "'.pdb'", ")" ]
https://github.com/projecthamster/hamster/blob/19d160090de30e756bdc3122ff935bdaa86e2843/waflib/Tools/ccroot.py#L434-L438
PaddlePaddle/models
511e2e282960ed4c7440c3f1d1e62017acb90e11
tutorials/mobilenetv3_prod/Step1-5/mobilenetv3_paddle/paddlevision/datasets/vision.py
python
StandardTransform._format_transform_repr
(self, transform: Callable, head: str)
return (["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]])
[]
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]: lines = transform.__repr__().splitlines() return (["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]])
[ "def", "_format_transform_repr", "(", "self", ",", "transform", ":", "Callable", ",", "head", ":", "str", ")", "->", "List", "[", "str", "]", ":", "lines", "=", "transform", ".", "__repr__", "(", ")", ".", "splitlines", "(", ")", "return", "(", "[", "\"{}{}\"", ".", "format", "(", "head", ",", "lines", "[", "0", "]", ")", "]", "+", "[", "\"{}{}\"", ".", "format", "(", "\" \"", "*", "len", "(", "head", ")", ",", "line", ")", "for", "line", "in", "lines", "[", "1", ":", "]", "]", ")" ]
https://github.com/PaddlePaddle/models/blob/511e2e282960ed4c7440c3f1d1e62017acb90e11/tutorials/mobilenetv3_prod/Step1-5/mobilenetv3_paddle/paddlevision/datasets/vision.py#L99-L103
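The helper above indents the continuation lines of a transform's repr so they line up under the heading. A self-contained sketch of the same idea, with a hypothetical stand-in transform:

    from typing import List

    def format_transform_repr(transform: object, head: str) -> List[str]:
        # First line carries the heading; later lines are padded to align under it.
        lines = repr(transform).splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])

    class FakeCompose:
        def __repr__(self) -> str:
            return "Compose(\n    Resize(224),\n    ToTensor()\n)"

    print("\n".join(format_transform_repr(FakeCompose(), "Transform: ")))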
kubeflow/pipelines
bea751c9259ff0ae85290f873170aae89284ba8e
backend/api/python_http_client/kfp_server_api/models/report_run_metrics_response_report_run_metric_result.py
python
ReportRunMetricsResponseReportRunMetricResult.metric_node_id
(self)
return self._metric_node_id
Gets the metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501 Output. The ID of the node which reports the metric. # noqa: E501 :return: The metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501 :rtype: str
Gets the metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
[ "Gets", "the", "metric_node_id", "of", "this", "ReportRunMetricsResponseReportRunMetricResult", ".", "#", "noqa", ":", "E501" ]
def metric_node_id(self): """Gets the metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501 Output. The ID of the node which reports the metric. # noqa: E501 :return: The metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501 :rtype: str """ return self._metric_node_id
[ "def", "metric_node_id", "(", "self", ")", ":", "return", "self", ".", "_metric_node_id" ]
https://github.com/kubeflow/pipelines/blob/bea751c9259ff0ae85290f873170aae89284ba8e/backend/api/python_http_client/kfp_server_api/models/report_run_metrics_response_report_run_metric_result.py#L94-L102
google/apitools
31cad2d904f356872d2965687e84b2d87ee2cdd3
apitools/base/py/transfer.py
python
Download.__SetTotal
(self, info)
Sets the total size based on info if possible, otherwise 0.
Sets the total size based on info if possible, otherwise 0.
[ "Sets", "the", "total", "size", "based", "off", "info", "if", "possible", "otherwise", "0", "." ]
def __SetTotal(self, info): """Sets the total size based off info if possible otherwise 0.""" if 'content-range' in info: _, _, total = info['content-range'].rpartition('/') if total != '*': self.__total_size = int(total) # Note "total_size is None" means we don't know it; if no size # info was returned on our initial range request, that means we # have a 0-byte file. (That last statement has been verified # empirically, but is not clearly documented anywhere.) if self.total_size is None: self.__total_size = 0
[ "def", "__SetTotal", "(", "self", ",", "info", ")", ":", "if", "'content-range'", "in", "info", ":", "_", ",", "_", ",", "total", "=", "info", "[", "'content-range'", "]", ".", "rpartition", "(", "'/'", ")", "if", "total", "!=", "'*'", ":", "self", ".", "__total_size", "=", "int", "(", "total", ")", "# Note \"total_size is None\" means we don't know it; if no size", "# info was returned on our initial range request, that means we", "# have a 0-byte file. (That last statement has been verified", "# empirically, but is not clearly documented anywhere.)", "if", "self", ".", "total_size", "is", "None", ":", "self", ".", "__total_size", "=", "0" ]
https://github.com/google/apitools/blob/31cad2d904f356872d2965687e84b2d87ee2cdd3/apitools/base/py/transfer.py#L291-L302
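A standalone sketch of the Content-Range parsing above (the function name is hypothetical): the total comes after the final '/', and '*' or a missing header means the size is unknown, which the downloader treats as a 0-byte file.

    def total_from_info(info):
        # 'Content-Range: bytes 0-99/1234' -> 1234; 'bytes 0-99/*' -> unknown.
        total_size = None
        if 'content-range' in info:
            _, _, total = info['content-range'].rpartition('/')
            if total != '*':
                total_size = int(total)
        # No size info on the initial range request implies a 0-byte file.
        return total_size if total_size is not None else 0

    assert total_from_info({'content-range': 'bytes 0-99/1234'}) == 1234
    assert total_from_info({'content-range': 'bytes 0-99/*'}) == 0
    assert total_from_info({}) == 0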
VLSIDA/OpenRAM
f66aac3264598eeae31225c62b6a4af52412d407
compiler/bitcells/pbitcell.py
python
pbitcell.get_wl_name
(self, port=0)
return "wl{}".format(port)
Get wl name by port
Get wl name by port
[ "Get", "wl", "name", "by", "port" ]
def get_wl_name(self, port=0): """Get wl name by port""" debug.check(port < 2, "Two ports for bitcell_2port only.") return "wl{}".format(port)
[ "def", "get_wl_name", "(", "self", ",", "port", "=", "0", ")", ":", "debug", ".", "check", "(", "port", "<", "2", ",", "\"Two ports for bitcell_2port only.\"", ")", "return", "\"wl{}\"", ".", "format", "(", "port", ")" ]
https://github.com/VLSIDA/OpenRAM/blob/f66aac3264598eeae31225c62b6a4af52412d407/compiler/bitcells/pbitcell.py#L1121-L1124
pritunl/pritunl
d793ce820f53f31bfc01e86d8b92ec098ab6362d
pritunl/influxdb/client.py
python
InfluxDBClient.from_DSN
(dsn, **kwargs)
return InfluxDBClient(**init_args)
Return an instance of :class:`~.InfluxDBClient` from the provided data source name. Supported schemes are "influxdb", "https+influxdb" and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient` constructor may also be passed to this method. :param dsn: data source name :type dsn: string :param kwargs: additional parameters for `InfluxDBClient` :type kwargs: dict :raises ValueError: if the provided DSN has any unexpected values :Example: :: >> cli = InfluxDBClient.from_DSN('influxdb://username:password@\ localhost:8086/databasename', timeout=5) >> type(cli) <class 'influxdb.client.InfluxDBClient'> >> cli = InfluxDBClient.from_DSN('udp+influxdb://username:pass@\ localhost:8086/databasename', timeout=5, udp_port=159) >> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli)) http://localhost:8086 - True 159 .. note:: parameters provided in `**kwargs` may override dsn parameters .. note:: when using "udp+influxdb" the specified port (if any) will be used for the TCP connection; specify the UDP port with the additional `udp_port` parameter (cf. examples).
Return an instance of :class:`~.InfluxDBClient` from the provided data source name. Supported schemes are "influxdb", "https+influxdb" and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient` constructor may also be passed to this method.
[ "Return", "an", "instance", "of", ":", "class", ":", "~", ".", "InfluxDBClient", "from", "the", "provided", "data", "source", "name", ".", "Supported", "schemes", "are", "influxdb", "https", "+", "influxdb", "and", "udp", "+", "influxdb", ".", "Parameters", "for", "the", ":", "class", ":", "~", ".", "InfluxDBClient", "constructor", "may", "also", "be", "passed", "to", "this", "method", "." ]
def from_DSN(dsn, **kwargs): """Return an instance of :class:`~.InfluxDBClient` from the provided data source name. Supported schemes are "influxdb", "https+influxdb" and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient` constructor may also be passed to this method. :param dsn: data source name :type dsn: string :param kwargs: additional parameters for `InfluxDBClient` :type kwargs: dict :raises ValueError: if the provided DSN has any unexpected values :Example: :: >> cli = InfluxDBClient.from_DSN('influxdb://username:password@\ localhost:8086/databasename', timeout=5) >> type(cli) <class 'influxdb.client.InfluxDBClient'> >> cli = InfluxDBClient.from_DSN('udp+influxdb://username:pass@\ localhost:8086/databasename', timeout=5, udp_port=159) >> print('{0._baseurl} - {0.use_udp} {0.udp_port}'.format(cli)) http://localhost:8086 - True 159 .. note:: parameters provided in `**kwargs` may override dsn parameters .. note:: when using "udp+influxdb" the specified port (if any) will be used for the TCP connection; specify the UDP port with the additional `udp_port` parameter (cf. examples). """ init_args = parse_dsn(dsn) host, port = init_args.pop('hosts')[0] init_args['host'] = host init_args['port'] = port init_args.update(kwargs) return InfluxDBClient(**init_args)
[ "def", "from_DSN", "(", "dsn", ",", "*", "*", "kwargs", ")", ":", "init_args", "=", "parse_dsn", "(", "dsn", ")", "host", ",", "port", "=", "init_args", ".", "pop", "(", "'hosts'", ")", "[", "0", "]", "init_args", "[", "'host'", "]", "=", "host", "init_args", "[", "'port'", "]", "=", "port", "init_args", ".", "update", "(", "kwargs", ")", "return", "InfluxDBClient", "(", "*", "*", "init_args", ")" ]
https://github.com/pritunl/pritunl/blob/d793ce820f53f31bfc01e86d8b92ec098ab6362d/pritunl/influxdb/client.py#L158-L195
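parse_dsn itself is not shown in this record; as a rough illustration of what DSN parsing involves, here is a simplified stand-in using the standard library (this is not the client's actual implementation):

    from urllib.parse import urlparse

    def rough_parse_dsn(dsn):
        # Simplified: the real parse_dsn also handles 'udp+influxdb' schemes
        # and multiple host:port pairs.
        url = urlparse(dsn)
        return {
            'scheme': url.scheme.split('+')[-1],
            'username': url.username,
            'password': url.password,
            'host': url.hostname,
            'port': url.port,
            'database': url.path.lstrip('/'),
        }

    print(rough_parse_dsn('influxdb://user:secret@localhost:8086/mydb'))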
deepgully/me
f7ad65edc2fe435310c6676bc2e322cfe5d4c8f0
libs/alembic/operations.py
python
Operations.get_context
(self)
return self.migration_context
Return the :class:`.MigrationContext` object that's currently in use.
Return the :class:`.MigrationContext` object that's currently in use.
[ "Return", "the", ":", "class", ":", ".", "MigrationContext", "object", "that", "s", "currently", "in", "use", "." ]
def get_context(self): """Return the :class:`.MigrationContext` object that's currently in use. """ return self.migration_context
[ "def", "get_context", "(", "self", ")", ":", "return", "self", ".", "migration_context" ]
https://github.com/deepgully/me/blob/f7ad65edc2fe435310c6676bc2e322cfe5d4c8f0/libs/alembic/operations.py#L143-L149
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/imghdr.py
python
test_xbm
(h, f)
X bitmap (X10 or X11)
X bitmap (X10 or X11)
[ "X", "bitmap", "(", "X10", "or", "X11", ")" ]
def test_xbm(h, f): """X bitmap (X10 or X11)""" if h.startswith(b'#define '): return 'xbm'
[ "def", "test_xbm", "(", "h", ",", "f", ")", ":", "if", "h", ".", "startswith", "(", "b'#define '", ")", ":", "return", "'xbm'" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/imghdr.py#L100-L103
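These test_* functions are driven by imghdr.what, which accepts header bytes directly via the h= parameter. A quick check (note that the imghdr module was removed from the standard library in Python 3.13):

    import imghdr  # stdlib module; removed in Python 3.13

    # Header bytes can be passed directly, with no file involved.
    assert imghdr.what(None, h=b'#define test_width 16') == 'xbm'
    assert imghdr.what(None, h=b'\x89PNG\r\n\x1a\n') == 'png'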
rootpy/rootpy
3926935e1f2100d8ba68070c2ab44055d4800f73
rootpy/extern/byteplay2/__init__.py
python
recompile_all
(path)
recursively recompile all .py files in the directory
recursively recompile all .py files in the directory
[ "recursively", "recompile", "all", ".", "py", "files", "in", "the", "directory" ]
def recompile_all(path): """recursively recompile all .py files in the directory""" import os if os.path.isdir(path): for root, dirs, files in os.walk(path): for name in files: if name.endswith('.py'): filename = os.path.abspath(os.path.join(root, name)) print >> sys.stderr, filename recompile(filename) else: filename = os.path.abspath(path) recompile(filename)
[ "def", "recompile_all", "(", "path", ")", ":", "import", "os", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "name", "in", "files", ":", "if", "name", ".", "endswith", "(", "'.py'", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "name", ")", ")", "print", ">>", "sys", ".", "stderr", ",", "filename", "recompile", "(", "filename", ")", "else", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "recompile", "(", "filename", ")" ]
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/extern/byteplay2/__init__.py#L887-L899
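Note the Python 2 print-to-stderr syntax in the body above. If the goal is only byte-compilation (rather than byteplay's code-object round-trip), the standard library already walks a tree for you; a hedged alternative, with a hypothetical directory name:

    import compileall

    # Byte-compiles every .py file under the tree; unlike recompile_all above,
    # this only writes .pyc files and does not rewrite code objects.
    compileall.compile_dir('some_package/', quiet=1)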
rcorcs/NatI
fdf014f4292afdc95250add7b6658468043228e1
en/parser/nltk_lite/parse/rd.py
python
RecursiveDescent._expand
(self, remaining_text, tree, frontier, production=None)
return parses
@rtype: C{list} of C{Tree} @return: A list of all parses that can be generated by expanding the first element of C{frontier} with C{production}. In particular, if the first element of C{frontier} is a subtree whose node type is equal to C{production}'s left hand side, then add a child to that subtree for each element of C{production}'s right hand side; and return all parses that can be generated by matching and expanding the remaining elements of C{frontier}. If the first element of C{frontier} is not a subtree whose node type is equal to C{production}'s left hand side, then return an empty list. If C{production} is not specified, then return a list of all parses that can be generated by expanding the first element of C{frontier} with I{any} CFG production. @type tree: C{Tree} @param tree: A partial structure for the text that is currently being parsed. The elements of C{tree} that are specified by C{frontier} have not yet been expanded or matched. @type remaining_text: C{list} of C{String}s @param remaining_text: The portion of the text that is not yet covered by C{tree}. @type frontier: C{list} of C{tuple} of C{int} @param frontier: A list of the locations within C{tree} of all subtrees that have not yet been expanded, and all leaves that have not yet been matched.
[]
def _expand(self, remaining_text, tree, frontier, production=None): """ @rtype: C{list} of C{Tree} @return: A list of all parses that can be generated by expanding the first element of C{frontier} with C{production}. In particular, if the first element of C{frontier} is a subtree whose node type is equal to C{production}'s left hand side, then add a child to that subtree for each element of C{production}'s right hand side; and return all parses that can be generated by matching and expanding the remaining elements of C{frontier}. If the first element of C{frontier} is not a subtree whose node type is equal to C{production}'s left hand side, then return an empty list. If C{production} is not specified, then return a list of all parses that can be generated by expanding the first element of C{frontier} with I{any} CFG production. @type tree: C{Tree} @param tree: A partial structure for the text that is currently being parsed. The elements of C{tree} that are specified by C{frontier} have not yet been expanded or matched. @type remaining_text: C{list} of C{String}s @param remaining_text: The portion of the text that is not yet covered by C{tree}. @type frontier: C{list} of C{tuple} of C{int} @param frontier: A list of the locations within C{tree} of all subtrees that have not yet been expanded, and all leaves that have not yet been matched. """ if production is None: productions = self._grammar.productions() else: productions = [production] parses = [] for production in productions: lhs = production.lhs().symbol() if lhs == tree[frontier[0]].node: subtree = self._production_to_tree(production) if frontier[0] == (): newtree = subtree else: newtree = tree.copy(deep=True) newtree[frontier[0]] = subtree new_frontier = [frontier[0]+(i,) for i in range(len(production.rhs()))] if self._trace: self._trace_expand(newtree, new_frontier, production) parses += self._parse(remaining_text, newtree, new_frontier + frontier[1:]) return parses
[ "def", "_expand", "(", "self", ",", "remaining_text", ",", "tree", ",", "frontier", ",", "production", "=", "None", ")", ":", "if", "production", "is", "None", ":", "productions", "=", "self", ".", "_grammar", ".", "productions", "(", ")", "else", ":", "productions", "=", "[", "production", "]", "parses", "=", "[", "]", "for", "production", "in", "productions", ":", "lhs", "=", "production", ".", "lhs", "(", ")", ".", "symbol", "(", ")", "if", "lhs", "==", "tree", "[", "frontier", "[", "0", "]", "]", ".", "node", ":", "subtree", "=", "self", ".", "_production_to_tree", "(", "production", ")", "if", "frontier", "[", "0", "]", "==", "(", ")", ":", "newtree", "=", "subtree", "else", ":", "newtree", "=", "tree", ".", "copy", "(", "deep", "=", "True", ")", "newtree", "[", "frontier", "[", "0", "]", "]", "=", "subtree", "new_frontier", "=", "[", "frontier", "[", "0", "]", "+", "(", "i", ",", ")", "for", "i", "in", "range", "(", "len", "(", "production", ".", "rhs", "(", ")", ")", ")", "]", "if", "self", ".", "_trace", ":", "self", ".", "_trace_expand", "(", "newtree", ",", "new_frontier", ",", "production", ")", "parses", "+=", "self", ".", "_parse", "(", "remaining_text", ",", "newtree", ",", "new_frontier", "+", "frontier", "[", "1", ":", "]", ")", "return", "parses" ]
https://github.com/rcorcs/NatI/blob/fdf014f4292afdc95250add7b6658468043228e1/en/parser/nltk_lite/parse/rd.py#L174-L225
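A toy, NLTK-free sketch of the expansion step above: for each production whose left-hand side matches the frontier symbol, produce one candidate child sequence. The dict-based grammar encoding below is an assumption for illustration only.

    def expand(symbol, grammar):
        # One candidate per matching production, mirroring how _expand
        # creates one new subtree per production with a matching LHS.
        for rhs in grammar.get(symbol, []):
            yield list(rhs)

    toy_grammar = {
        'S': [('NP', 'VP')],
        'NP': [('Det', 'N'), ('Name',)],
    }
    print(list(expand('NP', toy_grammar)))  # [['Det', 'N'], ['Name']]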
zzzeek/sqlalchemy
fc5c54fcd4d868c2a4c7ac19668d72f506fe821e
lib/sqlalchemy/ext/mypy/infer.py
python
_infer_type_from_decl_composite_property
( api: SemanticAnalyzerPluginInterface, stmt: AssignmentStmt, node: Var, left_hand_explicit_type: Optional[ProperType], )
Infer the type of mapping from a CompositeProperty.
Infer the type of mapping from a CompositeProperty.
[ "Infer", "the", "type", "of", "mapping", "from", "a", "CompositeProperty", "." ]
def _infer_type_from_decl_composite_property( api: SemanticAnalyzerPluginInterface, stmt: AssignmentStmt, node: Var, left_hand_explicit_type: Optional[ProperType], ) -> Optional[ProperType]: """Infer the type of mapping from a CompositeProperty.""" assert isinstance(stmt.rvalue, CallExpr) target_cls_arg = stmt.rvalue.args[0] python_type_for_type = None if isinstance(target_cls_arg, NameExpr) and isinstance( target_cls_arg.node, TypeInfo ): related_object_type = target_cls_arg.node python_type_for_type = Instance(related_object_type, []) else: python_type_for_type = None if python_type_for_type is None: return infer_type_from_left_hand_type_only( api, node, left_hand_explicit_type ) elif left_hand_explicit_type is not None: return _infer_type_from_left_and_inferred_right( api, node, left_hand_explicit_type, python_type_for_type ) else: return python_type_for_type
[ "def", "_infer_type_from_decl_composite_property", "(", "api", ":", "SemanticAnalyzerPluginInterface", ",", "stmt", ":", "AssignmentStmt", ",", "node", ":", "Var", ",", "left_hand_explicit_type", ":", "Optional", "[", "ProperType", "]", ",", ")", "->", "Optional", "[", "ProperType", "]", ":", "assert", "isinstance", "(", "stmt", ".", "rvalue", ",", "CallExpr", ")", "target_cls_arg", "=", "stmt", ".", "rvalue", ".", "args", "[", "0", "]", "python_type_for_type", "=", "None", "if", "isinstance", "(", "target_cls_arg", ",", "NameExpr", ")", "and", "isinstance", "(", "target_cls_arg", ".", "node", ",", "TypeInfo", ")", ":", "related_object_type", "=", "target_cls_arg", ".", "node", "python_type_for_type", "=", "Instance", "(", "related_object_type", ",", "[", "]", ")", "else", ":", "python_type_for_type", "=", "None", "if", "python_type_for_type", "is", "None", ":", "return", "infer_type_from_left_hand_type_only", "(", "api", ",", "node", ",", "left_hand_explicit_type", ")", "elif", "left_hand_explicit_type", "is", "not", "None", ":", "return", "_infer_type_from_left_and_inferred_right", "(", "api", ",", "node", ",", "left_hand_explicit_type", ",", "python_type_for_type", ")", "else", ":", "return", "python_type_for_type" ]
https://github.com/zzzeek/sqlalchemy/blob/fc5c54fcd4d868c2a4c7ac19668d72f506fe821e/lib/sqlalchemy/ext/mypy/infer.py#L242-L271
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_image.py
python
Yedit._write
(filename, contents)
Actually write the file contents to disk. This helps with mocking.
Actually write the file contents to disk. This helps with mocking.
[ "Actually", "write", "the", "file", "contents", "to", "disk", ".", "This", "helps", "with", "mocking", "." ]
def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' tmp_filename = filename + '.yedit' with open(tmp_filename, 'w') as yfd: fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB) yfd.write(contents) fcntl.flock(yfd, fcntl.LOCK_UN) os.rename(tmp_filename, filename)
[ "def", "_write", "(", "filename", ",", "contents", ")", ":", "tmp_filename", "=", "filename", "+", "'.yedit'", "with", "open", "(", "tmp_filename", ",", "'w'", ")", "as", "yfd", ":", "fcntl", ".", "flock", "(", "yfd", ",", "fcntl", ".", "LOCK_EX", "|", "fcntl", ".", "LOCK_NB", ")", "yfd", ".", "write", "(", "contents", ")", "fcntl", ".", "flock", "(", "yfd", ",", "fcntl", ".", "LOCK_UN", ")", "os", ".", "rename", "(", "tmp_filename", ",", "filename", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_image.py#L342-L352
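The pattern above is write-to-temp under an exclusive lock, then rename into place so readers never observe a partial file. A standalone sketch (fcntl is POSIX-only; the path is illustrative):

    import fcntl
    import os

    def atomic_write(filename, contents):
        tmp_filename = filename + '.tmp'
        with open(tmp_filename, 'w') as fd:
            # Fail fast if another writer holds the lock (LOCK_NB).
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            fd.write(contents)
            fcntl.flock(fd, fcntl.LOCK_UN)
        # rename() is atomic on POSIX filesystems within one mount point.
        os.rename(tmp_filename, filename)

    atomic_write('/tmp/example.yml', 'key: value\n')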
pfnet/pytorch-pfn-extras
b7ced31c1e78a0527c36d745ca091ec270da49e3
pytorch_pfn_extras/nn/parallel/distributed.py
python
DistributedDataParallel.register_comm_hook
(self, hook: HookFun)
return handle
Registers a hook function. This module will invoke the hook before starting the synchronization. Args: hook: Callable object that will be invoked before synchronization
Registers a hook function. This module will invoke the hook before starting the synchronization.
[ "Registers", "a", "hook", "function", ".", "This", "module", "will", "invoke", "the", "hook", "before", "starting", "the", "synchronization", "." ]
def register_comm_hook(self, hook: HookFun) -> hooks.RemovableHandle: """Registers a hook function. This module will invoke the hook before starting the synchronization. Args: hook: Callable object that will be invoked before synchronization """ handle = hooks.RemovableHandle(self._comm_hooks) self._comm_hooks[handle.id] = hook return handle
[ "def", "register_comm_hook", "(", "self", ",", "hook", ":", "HookFun", ")", "->", "hooks", ".", "RemovableHandle", ":", "handle", "=", "hooks", ".", "RemovableHandle", "(", "self", ".", "_comm_hooks", ")", "self", ".", "_comm_hooks", "[", "handle", ".", "id", "]", "=", "hook", "return", "handle" ]
https://github.com/pfnet/pytorch-pfn-extras/blob/b7ced31c1e78a0527c36d745ca091ec270da49e3/pytorch_pfn_extras/nn/parallel/distributed.py#L246-L255
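The removable-handle pattern lets a caller unregister a hook without knowing how it is keyed. A minimal plain-Python sketch of the idea (torch.utils.hooks.RemovableHandle behaves similarly, though it holds the hooks dict by weak reference):

    import itertools

    class Handle:
        _counter = itertools.count()

        def __init__(self, hooks_dict):
            self.id = next(self._counter)
            self._hooks_dict = hooks_dict

        def remove(self):
            self._hooks_dict.pop(self.id, None)

    comm_hooks = {}

    def register_comm_hook(hook):
        handle = Handle(comm_hooks)
        comm_hooks[handle.id] = hook
        return handle

    h = register_comm_hook(lambda: print('before synchronization'))
    for hook in comm_hooks.values():
        hook()      # would run just before gradient synchronization
    h.remove()      # unregister without knowing the internal key
    assert not comm_hooks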
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/sms/v20190711/models.py
python
AddSmsSignRequest.__init__
(self)
r""" :param SignName: 签名名称。 注:不能重复申请已通过或待审核的签名。 :type SignName: str :param SignType: 签名类型。其中每种类型后面标注了其可选的 DocumentType(证明类型): 0:公司(0,1,2,3)。 1:APP(0,1,2,3,4) 。 2:网站(0,1,2,3,5)。 3:公众号或者小程序(0,1,2,3,6)。 4:商标(7)。 5:政府/机关事业单位/其他机构(2,3)。 注:必须按照对应关系选择证明类型,否则会审核失败。 :type SignType: int :param DocumentType: 证明类型: 0:三证合一。 1:企业营业执照。 2:组织机构代码证书。 3:社会信用代码证书。 4:应用后台管理截图(个人开发APP)。 5:网站备案后台截图(个人开发网站)。 6:小程序设置页面截图(个人认证小程序)。 7:商标注册书。 :type DocumentType: int :param International: 是否国际/港澳台短信: 0:表示国内短信。 1:表示国际/港澳台短信。 :type International: int :param UsedMethod: 签名用途: 0:自用。 1:他用。 :type UsedMethod: int :param ProofImage: 签名对应的资质证明图片需先进行 base64 编码格式转换,将转换后的字符串去掉前缀`data:image/jpeg;base64,`再赋值给该参数。 :type ProofImage: str :param CommissionImage: 委托授权证明。选择 UsedMethod 为他用之后需要提交委托的授权证明。 图片需先进行 base64 编码格式转换,将转换后的字符串去掉前缀`data:image/jpeg;base64,`再赋值给该参数。 注:只有 UsedMethod 在选择为 1(他用)时,这个字段才会生效。 :type CommissionImage: str :param Remark: 签名的申请备注。 :type Remark: str
r""" :param SignName: 签名名称。 注:不能重复申请已通过或待审核的签名。 :type SignName: str :param SignType: 签名类型。其中每种类型后面标注了其可选的 DocumentType(证明类型): 0:公司(0,1,2,3)。 1:APP(0,1,2,3,4) 。 2:网站(0,1,2,3,5)。 3:公众号或者小程序(0,1,2,3,6)。 4:商标(7)。 5:政府/机关事业单位/其他机构(2,3)。 注:必须按照对应关系选择证明类型,否则会审核失败。 :type SignType: int :param DocumentType: 证明类型: 0:三证合一。 1:企业营业执照。 2:组织机构代码证书。 3:社会信用代码证书。 4:应用后台管理截图(个人开发APP)。 5:网站备案后台截图(个人开发网站)。 6:小程序设置页面截图(个人认证小程序)。 7:商标注册书。 :type DocumentType: int :param International: 是否国际/港澳台短信: 0:表示国内短信。 1:表示国际/港澳台短信。 :type International: int :param UsedMethod: 签名用途: 0:自用。 1:他用。 :type UsedMethod: int :param ProofImage: 签名对应的资质证明图片需先进行 base64 编码格式转换,将转换后的字符串去掉前缀`data:image/jpeg;base64,`再赋值给该参数。 :type ProofImage: str :param CommissionImage: 委托授权证明。选择 UsedMethod 为他用之后需要提交委托的授权证明。 图片需先进行 base64 编码格式转换,将转换后的字符串去掉前缀`data:image/jpeg;base64,`再赋值给该参数。 注:只有 UsedMethod 在选择为 1(他用)时,这个字段才会生效。 :type CommissionImage: str :param Remark: 签名的申请备注。 :type Remark: str
[ "r", ":", "param", "SignName", ":", "签名名称。", "注:不能重复申请已通过或待审核的签名。", ":", "type", "SignName", ":", "str", ":", "param", "SignType", ":", "签名类型。其中每种类型后面标注了其可选的", "DocumentType(证明类型):", "0:公司(0,1,2,3)。", "1:APP(0,1,2,3,4)", "。", "2:网站(0,1,2,3,5)。", "3:公众号或者小程序(0,1,2,3,6)。", "4:商标(7)。", "5:政府", "/", "机关事业单位", "/", "其他机构(2,3)。", "注:必须按照对应关系选择证明类型,否则会审核失败。", ":", "type", "SignType", ":", "int", ":", "param", "DocumentType", ":", "证明类型:", "0:三证合一。", "1:企业营业执照。", "2:组织机构代码证书。", "3:社会信用代码证书。", "4:应用后台管理截图(个人开发APP)。", "5:网站备案后台截图(个人开发网站)。", "6:小程序设置页面截图(个人认证小程序)。", "7:商标注册书。", ":", "type", "DocumentType", ":", "int", ":", "param", "International", ":", "是否国际", "/", "港澳台短信:", "0:表示国内短信。", "1:表示国际", "/", "港澳台短信。", ":", "type", "International", ":", "int", ":", "param", "UsedMethod", ":", "签名用途:", "0:自用。", "1:他用。", ":", "type", "UsedMethod", ":", "int", ":", "param", "ProofImage", ":", "签名对应的资质证明图片需先进行", "base64", "编码格式转换,将转换后的字符串去掉前缀", "data", ":", "image", "/", "jpeg", ";", "base64", "再赋值给该参数。", ":", "type", "ProofImage", ":", "str", ":", "param", "CommissionImage", ":", "委托授权证明。选择", "UsedMethod", "为他用之后需要提交委托的授权证明。", "图片需先进行", "base64", "编码格式转换,将转换后的字符串去掉前缀", "data", ":", "image", "/", "jpeg", ";", "base64", "再赋值给该参数。", "注:只有", "UsedMethod", "在选择为", "1(他用)时,这个字段才会生效。", ":", "type", "CommissionImage", ":", "str", ":", "param", "Remark", ":", "签名的申请备注。", ":", "type", "Remark", ":", "str" ]
def __init__(self): r""" :param SignName: 签名名称。 注:不能重复申请已通过或待审核的签名。 :type SignName: str :param SignType: 签名类型。其中每种类型后面标注了其可选的 DocumentType(证明类型): 0:公司(0,1,2,3)。 1:APP(0,1,2,3,4) 。 2:网站(0,1,2,3,5)。 3:公众号或者小程序(0,1,2,3,6)。 4:商标(7)。 5:政府/机关事业单位/其他机构(2,3)。 注:必须按照对应关系选择证明类型,否则会审核失败。 :type SignType: int :param DocumentType: 证明类型: 0:三证合一。 1:企业营业执照。 2:组织机构代码证书。 3:社会信用代码证书。 4:应用后台管理截图(个人开发APP)。 5:网站备案后台截图(个人开发网站)。 6:小程序设置页面截图(个人认证小程序)。 7:商标注册书。 :type DocumentType: int :param International: 是否国际/港澳台短信: 0:表示国内短信。 1:表示国际/港澳台短信。 :type International: int :param UsedMethod: 签名用途: 0:自用。 1:他用。 :type UsedMethod: int :param ProofImage: 签名对应的资质证明图片需先进行 base64 编码格式转换,将转换后的字符串去掉前缀`data:image/jpeg;base64,`再赋值给该参数。 :type ProofImage: str :param CommissionImage: 委托授权证明。选择 UsedMethod 为他用之后需要提交委托的授权证明。 图片需先进行 base64 编码格式转换,将转换后的字符串去掉前缀`data:image/jpeg;base64,`再赋值给该参数。 注:只有 UsedMethod 在选择为 1(他用)时,这个字段才会生效。 :type CommissionImage: str :param Remark: 签名的申请备注。 :type Remark: str """ self.SignName = None self.SignType = None self.DocumentType = None self.International = None self.UsedMethod = None self.ProofImage = None self.CommissionImage = None self.Remark = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "SignName", "=", "None", "self", ".", "SignType", "=", "None", "self", ".", "DocumentType", "=", "None", "self", ".", "International", "=", "None", "self", ".", "UsedMethod", "=", "None", "self", ".", "ProofImage", "=", "None", "self", ".", "CommissionImage", "=", "None", "self", ".", "Remark", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/sms/v20190711/models.py#L54-L102
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/api/custom_objects_api.py
python
CustomObjectsApi.get_namespaced_custom_object
(self, group, version, namespace, plural, name, **kwargs)
return self.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs)
get_namespaced_custom_object # noqa: E501 Returns a namespace scoped custom object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_namespaced_custom_object(group, version, namespace, plural, name, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str namespace: The custom resource's namespace (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: object If the method is called asynchronously, returns the request thread.
get_namespaced_custom_object # noqa: E501
[ "get_namespaced_custom_object", "#", "noqa", ":", "E501" ]
def get_namespaced_custom_object(self, group, version, namespace, plural, name, **kwargs): # noqa: E501 """get_namespaced_custom_object # noqa: E501 Returns a namespace scoped custom object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_namespaced_custom_object(group, version, namespace, plural, name, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str namespace: The custom resource's namespace (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: object If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs)
[ "def", "get_namespaced_custom_object", "(", "self", ",", "group", ",", "version", ",", "namespace", ",", "plural", ",", "name", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "return", "self", ".", "get_namespaced_custom_object_with_http_info", "(", "group", ",", "version", ",", "namespace", ",", "plural", ",", "name", ",", "*", "*", "kwargs", ")" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/api/custom_objects_api.py#L1457-L1484
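A hedged usage sketch, assuming a reachable cluster and an installed CRD; the group, plural, and object names below are hypothetical:

    import asyncio
    from kubernetes_asyncio import client, config

    async def main():
        await config.load_kube_config()  # or load_incluster_config()
        async with client.ApiClient() as api_client:
            api = client.CustomObjectsApi(api_client)
            obj = await api.get_namespaced_custom_object(
                group='stable.example.com',   # hypothetical CRD group
                version='v1',
                namespace='default',
                plural='crontabs',            # hypothetical plural
                name='my-crontab',            # hypothetical object
            )
            print(obj['metadata']['name'])

    asyncio.run(main())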
capitalone/datacompy
8418a5c55fa7648764c0bca87931cf778dc19d48
datacompy/core.py
python
Compare.all_rows_overlap
(self)
return len(self.df1_unq_rows) == len(self.df2_unq_rows) == 0
Whether the rows are all present in both dataframes Returns ------- bool True if all rows in df1 are in df2 and vice versa (based on existence for join option)
Whether the rows are all present in both dataframes
[ "Whether", "the", "rows", "are", "all", "present", "in", "both", "dataframes" ]
def all_rows_overlap(self): """Whether the rows are all present in both dataframes Returns ------- bool True if all rows in df1 are in df2 and vice versa (based on existence for join option) """ return len(self.df1_unq_rows) == len(self.df2_unq_rows) == 0
[ "def", "all_rows_overlap", "(", "self", ")", ":", "return", "len", "(", "self", ".", "df1_unq_rows", ")", "==", "len", "(", "self", ".", "df2_unq_rows", ")", "==", "0" ]
https://github.com/capitalone/datacompy/blob/8418a5c55fa7648764c0bca87931cf778dc19d48/datacompy/core.py#L390-L399
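A short usage sketch, assuming pandas DataFrames joined on a key column; note that all_rows_overlap is about row existence, not value equality:

    import pandas as pd
    import datacompy

    df1 = pd.DataFrame({'id': [1, 2, 3], 'value': [10, 20, 30]})
    df2 = pd.DataFrame({'id': [1, 2, 3], 'value': [10, 20, 31]})

    compare = datacompy.Compare(df1, df2, join_columns='id')
    # True: every row joins to a partner, even though one value differs.
    print(compare.all_rows_overlap())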
jschrewe/django-mongoadmin
c53b8a0e7d3b96c9dd03126576b53ec9602f0a20
mongoadmin/validation.py
python
ModelAdminValidator.validate_search_fields
(self, cls, model)
Validate search_fields is a sequence.
Validate search_fields is a sequence.
[ "Validate", "search_fields", "is", "a", "sequence", "." ]
def validate_search_fields(self, cls, model): " Validate search_fields is a sequence. " if hasattr(cls, 'search_fields'): check_isseq(cls, 'search_fields', cls.search_fields)
[ "def", "validate_search_fields", "(", "self", ",", "cls", ",", "model", ")", ":", "if", "hasattr", "(", "cls", ",", "'search_fields'", ")", ":", "check_isseq", "(", "cls", ",", "'search_fields'", ",", "cls", ".", "search_fields", ")" ]
https://github.com/jschrewe/django-mongoadmin/blob/c53b8a0e7d3b96c9dd03126576b53ec9602f0a20/mongoadmin/validation.py#L286-L289
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/django/forms/fields.py
python
FileField.clean
(self, data, initial=None)
return super(FileField, self).clean(data)
[]
def clean(self, data, initial=None): # If the widget got contradictory inputs, we raise a validation error if data is FILE_INPUT_CONTRADICTION: raise ValidationError(self.error_messages['contradiction'], code='contradiction') # False means the field value should be cleared; further validation is # not needed. if data is False: if not self.required: return False # If the field is required, clearing is not possible (the widget # shouldn't return False data in that case anyway). False is not # in self.empty_value; if a False value makes it this far # it should be validated from here on out as None (so it will be # caught by the required check). data = None if not data and initial: return initial return super(FileField, self).clean(data)
[ "def", "clean", "(", "self", ",", "data", ",", "initial", "=", "None", ")", ":", "# If the widget got contradictory inputs, we raise a validation error", "if", "data", "is", "FILE_INPUT_CONTRADICTION", ":", "raise", "ValidationError", "(", "self", ".", "error_messages", "[", "'contradiction'", "]", ",", "code", "=", "'contradiction'", ")", "# False means the field value should be cleared; further validation is", "# not needed.", "if", "data", "is", "False", ":", "if", "not", "self", ".", "required", ":", "return", "False", "# If the field is required, clearing is not possible (the widget", "# shouldn't return False data in that case anyway). False is not", "# in self.empty_value; if a False value makes it this far", "# it should be validated from here on out as None (so it will be", "# caught by the required check).", "data", "=", "None", "if", "not", "data", "and", "initial", ":", "return", "initial", "return", "super", "(", "FileField", ",", "self", ")", ".", "clean", "(", "data", ")" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/forms/fields.py#L583-L600
eBay/accelerator
218d9a5e4451ac72b9e65df6c5b32e37d25136c8
accelerator/web.py
python
BaseWebHandler._bad_request
(self)
[]
def _bad_request(self): self.do_response(400, "text/plain", "Bad request\n")
[ "def", "_bad_request", "(", "self", ")", ":", "self", ".", "do_response", "(", "400", ",", "\"text/plain\"", ",", "\"Bad request\\n\"", ")" ]
https://github.com/eBay/accelerator/blob/218d9a5e4451ac72b9e65df6c5b32e37d25136c8/accelerator/web.py#L98-L99
stopstalk/stopstalk-deployment
10c3ab44c4ece33ae515f6888c15033db2004bb1
aws_lambda/spoj_aws_lambda_function/lambda_code/pkg_resources/_vendor/pyparsing.py
python
matchPreviousLiteral
(expr)
return rep
Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled.
Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled.
[ "Helper", "to", "define", "an", "expression", "that", "is", "indirectly", "defined", "from", "the", "tokens", "matched", "in", "a", "previous", "expression", "that", "is", "it", "looks", "for", "a", "repeat", "of", "a", "previous", "expression", ".", "For", "example", "::", "first", "=", "Word", "(", "nums", ")", "second", "=", "matchPreviousLiteral", "(", "first", ")", "matchExpr", "=", "first", "+", ":", "+", "second", "will", "match", "C", "{", "1", ":", "1", "}", "but", "not", "C", "{", "1", ":", "2", "}", ".", "Because", "this", "matches", "a", "previous", "literal", "will", "also", "match", "the", "leading", "C", "{", "1", ":", "1", "}", "in", "C", "{", "1", ":", "10", "}", ".", "If", "this", "is", "not", "desired", "use", "C", "{", "matchPreviousExpr", "}", ".", "Do", "I", "{", "not", "}", "use", "with", "packrat", "parsing", "enabled", "." ]
def matchPreviousLiteral(expr): """ Helper to define an expression that is indirectly defined from the tokens matched in a previous expression, that is, it looks for a 'repeat' of a previous expression. For example:: first = Word(nums) second = matchPreviousLiteral(first) matchExpr = first + ":" + second will match C{"1:1"}, but not C{"1:2"}. Because this matches a previous literal, will also match the leading C{"1:1"} in C{"1:10"}. If this is not desired, use C{matchPreviousExpr}. Do I{not} use with packrat parsing enabled. """ rep = Forward() def copyTokenToRepeater(s,l,t): if t: if len(t) == 1: rep << t[0] else: # flatten t tokens tflat = _flatten(t.asList()) rep << And(Literal(tt) for tt in tflat) else: rep << Empty() expr.addParseAction(copyTokenToRepeater, callDuringTry=True) rep.setName('(prev) ' + _ustr(expr)) return rep
[ "def", "matchPreviousLiteral", "(", "expr", ")", ":", "rep", "=", "Forward", "(", ")", "def", "copyTokenToRepeater", "(", "s", ",", "l", ",", "t", ")", ":", "if", "t", ":", "if", "len", "(", "t", ")", "==", "1", ":", "rep", "<<", "t", "[", "0", "]", "else", ":", "# flatten t tokens", "tflat", "=", "_flatten", "(", "t", ".", "asList", "(", ")", ")", "rep", "<<", "And", "(", "Literal", "(", "tt", ")", "for", "tt", "in", "tflat", ")", "else", ":", "rep", "<<", "Empty", "(", ")", "expr", ".", "addParseAction", "(", "copyTokenToRepeater", ",", "callDuringTry", "=", "True", ")", "rep", ".", "setName", "(", "'(prev) '", "+", "_ustr", "(", "expr", ")", ")", "return", "rep" ]
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/pkg_resources/_vendor/pyparsing.py#L4509-L4535
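A quick demonstration of the docstring's example, assuming pyparsing's classic camelCase names (still available as aliases in pyparsing 3):

    from pyparsing import Word, nums, matchPreviousLiteral

    first = Word(nums)
    second = matchPreviousLiteral(first)
    match_expr = first + ":" + second

    print(match_expr.parseString("1:1"))   # ['1', ':', '1']
    try:
        match_expr.parseString("1:2")      # second must literally repeat '1'
    except Exception as exc:
        print("no match:", exc)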