Dataset schema (one row per extracted Python function; string columns give min/max length, class columns give the number of distinct values):

    column              type     range
    nwo                 string   5 to 106 chars
    sha                 string   40 chars (fixed)
    path                string   4 to 174 chars
    language            class    1 value ("python")
    identifier          string   1 to 140 chars
    parameters          string   0 to 87.7k chars
    argument_list       class    1 value
    return_statement    string   0 to 426k chars
    docstring           string   0 to 64.3k chars
    docstring_summary   string   0 to 26.3k chars
    docstring_tokens    list
    function            string   18 to 4.83M chars
    function_tokens     list
    url                 string   83 to 304 chars

Sample records follow, one per function, with the source URL after each.

trezor/python-trezor · trezorlib/protobuf.py · dict_to_proto

def dict_to_proto(message_type, d):
    params = {}
    for fname, ftype, fflags in message_type.get_fields().values():
        repeated = fflags & FLAG_REPEATED
        value = d.get(fname)
        if value is None:
            continue
        if not repeated:
            value = [value]
        if issubclass(ftype, MessageType):
            function = dict_to_proto
        else:
            function = value_to_proto
        newvalue = [function(ftype, v) for v in value]
        if not repeated:
            newvalue = newvalue[0]
        params[fname] = newvalue
    return message_type(**params)
https://github.com/trezor/python-trezor/blob/2813522b05cef4e0e545a101f8b3559a3183b45b/trezorlib/protobuf.py#L389-L411
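The function above walks a message type's field table and rebuilds nested and repeated fields from a plain dict. A minimal self-contained sketch of the same pattern, with a stub field table standing in for trezorlib's MessageType machinery (Point, dict_to_obj, and the field layout below are hypothetical):

FLAG_REPEATED = 1

class Point:
    # stub mimicking the (name, type, flags) field table that
    # trezorlib message classes expose via get_fields()
    @classmethod
    def get_fields(cls):
        return {1: ("x", int, 0), 2: ("y", int, 0), 3: ("tags", str, FLAG_REPEATED)}

    def __init__(self, **params):
        self.__dict__.update(params)

def dict_to_obj(message_type, d):
    params = {}
    for fname, ftype, fflags in message_type.get_fields().values():
        repeated = fflags & FLAG_REPEATED
        value = d.get(fname)
        if value is None:
            continue
        if not repeated:
            value = [value]
        # scalar leaves only here; nested message types would recurse instead
        newvalue = [ftype(v) for v in value]
        if not repeated:
            newvalue = newvalue[0]
        params[fname] = newvalue
    return message_type(**params)

p = dict_to_obj(Point, {"x": 1, "y": 2, "tags": ["a", "b"]})
print(p.x, p.y, p.tags)  # 1 2 ['a', 'b']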

huggingface/transformers · src/transformers/models/t5/modeling_tf_t5.py · TFT5LayerSelfAttention.call

def call(
    self,
    hidden_states,
    attention_mask=None,
    position_bias=None,
    layer_head_mask=None,
    past_key_value=None,
    use_cache=False,
    output_attentions=False,
    training=False,
):
    normed_hidden_states = self.layer_norm(hidden_states)
    attention_output = self.SelfAttention(
        normed_hidden_states,
        mask=attention_mask,
        position_bias=position_bias,
        layer_head_mask=layer_head_mask,
        past_key_value=past_key_value,
        use_cache=use_cache,
        output_attentions=output_attentions,
        training=training,
    )
    hidden_states = hidden_states + self.dropout(attention_output[0], training=training)
    outputs = (hidden_states,) + attention_output[1:]  # add attentions if we output them
    return outputs
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/models/t5/modeling_tf_t5.py#L431-L455

rowliny/DiffHelper · Tool/SitePackages/PIL/ImageStat.py · Stat.__getattr__

def __getattr__(self, id):
    """Calculate missing attribute"""
    if id[:4] == "_get":
        raise AttributeError(id)
    # calculate missing attribute
    v = getattr(self, "_get" + id)()
    setattr(self, id, v)
    return v
https://github.com/rowliny/DiffHelper/blob/ab3a96f58f9579d0023aed9ebd785f4edf26f8af/Tool/SitePackages/PIL/ImageStat.py#L42-L49
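This implements lazy, cached attributes: the first access computes the value via a _get* helper, and setattr caches it so later accesses never reach __getattr__ again. A self-contained sketch of the pattern (LazyStats and _getmean are made-up names):

import statistics

class LazyStats:
    def __init__(self, data):
        self.data = list(data)

    def __getattr__(self, id):
        # only reached when the attribute is not already set on the instance
        if id[:4] == "_get":
            raise AttributeError(id)  # avoid infinite recursion for missing helpers
        v = getattr(self, "_get" + id)()
        setattr(self, id, v)  # cache: the next access bypasses __getattr__
        return v

    def _getmean(self):
        return statistics.mean(self.data)

s = LazyStats([1, 2, 3, 4])
print(s.mean)  # 2.5, computed once via _getmean()
print(s.mean)  # 2.5, now served from the instance dict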

facebookarchive/sparts · sparts/fb303/FacebookService.py · Iface.reinitialize

def reinitialize(self):
    """
    Tell the server to reload its configuration, reopen log files, etc
    """
    pass
https://github.com/facebookarchive/sparts/blob/c03df928677444ad638d10fa96f4144ca4d644e1/sparts/fb303/FacebookService.py#L105-L109

benfred/implicit · implicit/nearest_neighbours.py · BM25Recommender.fit

def fit(self, counts, show_progress=True):
    weighted = bm25_weight(counts.T, self.K1, self.B).T
    ItemItemRecommender.fit(self, weighted, show_progress)
https://github.com/benfred/implicit/blob/db83d3f2783441e7dfe3a4ea4743051a8a000fa8/implicit/nearest_neighbours.py#L203-L205
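A hedged usage sketch: in this revision, fit takes the raw count matrix and applies BM25 weighting internally. The matrix orientation and parameter names follow this vintage of the library; the random matrix stands in for real interaction counts:

import scipy.sparse as sparse
from implicit.nearest_neighbours import BM25Recommender

# toy items-by-users count matrix: 100 items, 50 users
counts = sparse.random(100, 50, density=0.05, format="csr", random_state=0)

model = BM25Recommender(K=20, K1=100, B=0.8)  # K nearest neighbours, BM25 constants
model.fit(counts)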

dickreuter/neuron_poker · tools/helper.py · Singleton.__call__

def __call__(cls, *args, **kwargs):  # called at instantiation of an object that uses this metaclass
    """Is called at instantiation of a class that refers to this metaclass."""
    if cls not in cls._instances:
        cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
    return cls._instances[cls]
https://github.com/dickreuter/neuron_poker/blob/9f841e5aeead681fa1fb2955524c53081fba2078/tools/helper.py#L34-L38
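The snippet relies on a _instances registry defined elsewhere on the metaclass. A self-contained version plus usage, showing that repeated instantiation returns the same object (Config is a hypothetical client class):

class Singleton(type):
    _instances = {}  # the registry the record's __call__ assumes

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    def __init__(self):
        self.values = {}

a = Config()
b = Config()
print(a is b)  # True: the second call returns the cached instance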

holzschu/Carnets · Library/lib/python3.7/site-packages/docutils/parsers/rst/states.py · Text.literal_block

def literal_block(self):
    """Return a list of nodes."""
    indented, indent, offset, blank_finish = \
        self.state_machine.get_indented()
    while indented and not indented[-1].strip():
        indented.trim_end()
    if not indented:
        return self.quoted_literal_block()
    data = '\n'.join(indented)
    literal_block = nodes.literal_block(data, data)
    literal_block.line = offset + 1
    nodelist = [literal_block]
    if not blank_finish:
        nodelist.append(self.unindent_warning('Literal block'))
    return nodelist
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/docutils/parsers/rst/states.py#L2778-L2792

smallcorgi/Faster-RCNN_TF · lib/roi_data_layer/minibatch2.py · _get_bbox_regression_labels

def _get_bbox_regression_labels(bbox_target_data, num_classes):
    """Bounding-box regression targets are stored in a compact form in the roidb.

    This function expands those targets into the 4-of-4*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.

    Returns:
        bbox_target_data (ndarray): N x 4K blob of regression targets
        bbox_loss_weights (ndarray): N x 4K blob of loss weights
    """
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
    bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    inds = np.where(clss > 0)[0]
    for ind in inds:
        cls = clss[ind]
        start = 4 * cls
        end = start + 4
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
    return bbox_targets, bbox_loss_weights
https://github.com/smallcorgi/Faster-RCNN_TF/blob/d9adb24c8ffdbae3b56eb55fc629d719fee3d741/lib/roi_data_layer/minibatch2.py#L258-L280
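A worked example of the expansion. This is a standalone adaptation, not the repo's code: on modern NumPy the float class index must be cast to int before slicing, which the older NumPy this code targeted still tolerated:

import numpy as np

num_classes = 3
# rows: (class, tx, ty, tw, th); row 0 is background (class 0)
bbox_target_data = np.array([[0, 0.0, 0.0, 0.0, 0.0],
                             [2, 0.1, 0.2, 0.3, 0.4]], dtype=np.float32)

bbox_targets = np.zeros((2, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros_like(bbox_targets)
for ind in np.where(bbox_target_data[:, 0] > 0)[0]:
    cls = int(bbox_target_data[ind, 0])  # int cast added for modern NumPy
    start, end = 4 * cls, 4 * cls + 4
    bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
    bbox_loss_weights[ind, start:end] = 1.0

print(bbox_targets[1])  # zeros except columns 8..11 = [0.1, 0.2, 0.3, 0.4]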

apple/ccs-calendarserver · twistedcaldav/vcard.py · Component.fromStream

def fromStream(clazz, stream, format=None):
    """
    Construct a L{Component} from a stream.
    @param stream: a C{read()}able stream containing vCard data.
    @return: a L{Component} representing the first component described by
        C{stream}.
    """
    return clazz._fromData(stream, True, format)
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/twistedcaldav/vcard.py#L254-L261

oilshell/oil · Python-2.7.13/Lib/inspect.py · isdatadescriptor

def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute.  Examples are
    properties (defined in Python) and getsets and members (defined in C).
    Typically, data descriptors will also have __name__ and __doc__ attributes
    (properties, getsets, and members have both of these attributes), but this
    is not guaranteed."""
    return (hasattr(object, "__set__") and hasattr(object, "__get__"))
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/inspect.py#L98-L106
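Since this is the stdlib's inspect.isdatadescriptor, a quick demonstration: a property defines both __get__ and __set__ (a data descriptor), while a plain function defines only __get__ (a non-data descriptor):

import inspect

class C:
    @property
    def p(self):   # property: has both __get__ and __set__
        return 1

    def m(self):   # plain function: __get__ only
        return 2

print(inspect.isdatadescriptor(C.__dict__["p"]))  # True
print(inspect.isdatadescriptor(C.__dict__["m"]))  # False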

angr/angr · angr/engines/vex/claripy/ccall.py · pc_actions_SBB

def pc_actions_SBB(state, nbits, cc_dep1, cc_dep2, cc_ndep, platform=None):
    old_c = cc_ndep[data[platform]['CondBitOffsets']['G_CC_SHIFT_C']].zero_extend(nbits - 1)
    arg_l = cc_dep1
    arg_r = cc_dep2 ^ old_c
    res = (arg_l - arg_r) - old_c

    cf_c = claripy.If(claripy.ULE(arg_l, arg_r), claripy.BVV(1, 1), claripy.BVV(0, 1))
    cf_noc = claripy.If(claripy.ULT(arg_l, arg_r), claripy.BVV(1, 1), claripy.BVV(0, 1))
    cf = claripy.If(old_c == 1, cf_c, cf_noc)
    pf = calc_paritybit(res)
    af = (res ^ arg_l ^ arg_r)[data[platform]['CondBitOffsets']['G_CC_SHIFT_A']]
    zf = calc_zerobit(res)
    sf = res[nbits - 1]
    of = ((arg_l ^ arg_r) & (arg_l ^ res))[nbits - 1]
    return pc_make_rdata(data[platform]['size'], cf, pf, af, zf, sf, of, platform=platform)
https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/engines/vex/claripy/ccall.py#L426-L440

shuup/shuup · shuup/core/models/_base.py · ChangeProtected._are_changes_protected

def _are_changes_protected(self):
    """
    Check if changes of this object should be protected.

    This can be overridden in the subclasses to make it possible to
    avoid change protection e.g. if object is not in use yet.

    The base class implementation just returns True.
    """
    return True
https://github.com/shuup/shuup/blob/25f78cfe370109b9885b903e503faac295c7b7f2/shuup/core/models/_base.py#L123-L132

riptideio/pymodbus · pymodbus/framer/tls_framer.py · ModbusTlsFramer.addToFrame

def addToFrame(self, message):
    """ Adds new packet data to the current frame buffer

    :param message: The most recent packet
    """
    self._buffer += message
https://github.com/riptideio/pymodbus/blob/c5772b35ae3f29d1947f3ab453d8d00df846459f/pymodbus/framer/tls_framer.py#L72-L77

urduhack/urduhack · urduhack/pipeline/parsers/normalize.py · NormalizeParser.parse

def parse(self, document: str) -> str:
    """Normalize|Preprocess text

    Args:
        document (str): Urdu text
    Returns:
        str: Return complete urdu document
    """
    return preprocess(normalize(normalize_whitespace(document)))
https://github.com/urduhack/urduhack/blob/44500cd6a78e1a7765bb4f7d6fb92bbb612b7b11/urduhack/pipeline/parsers/normalize.py#L15-L25

wistbean/learn_python3_spider · stackoverflow/venv/lib/python3.6/site-packages/twisted/application/internet.py · _ClientMachine._restarting

def _restarting(self):
    """
    The service is disconnecting and has been asked to restart.
    """
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/twisted/application/internet.py#L617-L620

kabkabm/defensegan · datasets/utils.py · get_generators

def get_generators(dataset_name, batch_size, randomize=True, attribute='gender'):
    """Creates batch generators for datasets.

    Args:
        dataset_name: A `string`. Name of the dataset.
        batch_size: An `integer`. The size of each batch.
        randomize: A `boolean`.
        attribute: A `string`. If the dataset name is `celeba`, this will
            indicate the attribute name that labels should be returned for.

    Returns:
        Training, validation, and test dataset generators which are the
        return values of `create_generator`.
    """
    splits = ['train', 'val', 'test']
    gens = []
    for i in range(3):
        if i > 0:
            randomize = False
        gens.append(
            create_generator(dataset_name, splits[i], batch_size, randomize,
                             attribute=attribute))
    return gens
https://github.com/kabkabm/defensegan/blob/7e3feaebf7b9bbf08b1364e400119ef596cd78fd/datasets/utils.py#L64-L87
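A hedged usage sketch (assumes the repo's create_generator and its dataset files are set up; 'celeba' follows the docstring's example):

train_gen, val_gen, test_gen = get_generators('celeba', batch_size=64, attribute='gender')

Note that randomize is forced to False after the first loop iteration, so only the training generator shuffles; validation and test batches come back in a fixed order.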

imageworks/OpenColorIO-Configs · aces_1.0.3/python/aces_ocio/utilities.py · ColorSpace.__init__

def __init__(self,
             name,
             aliases=None,
             description=None,
             bit_depth=ocio.Constants.BIT_DEPTH_F32,
             equality_group='',
             family=None,
             is_data=False,
             to_reference_transforms=None,
             from_reference_transforms=None,
             allocation_type=ocio.Constants.ALLOCATION_UNIFORM,
             allocation_vars=None,
             aces_transform_id=None):
    """
    Constructor for ColorSpace container class.

    Parameters
    ----------
    name : str or unicode
        Name of the colorspace.
    All other arguments are optional
    """
    if aliases is None:
        aliases = []
    if to_reference_transforms is None:
        to_reference_transforms = []
    if from_reference_transforms is None:
        from_reference_transforms = []
    if allocation_vars is None:
        allocation_vars = [0, 1]

    self.name = name
    self.aliases = aliases
    self.bit_depth = bit_depth
    self.description = description
    self.equality_group = equality_group
    self.family = family
    self.is_data = is_data
    self.to_reference_transforms = to_reference_transforms
    self.from_reference_transforms = from_reference_transforms
    self.allocation_type = allocation_type
    self.allocation_vars = allocation_vars
    self.aces_transform_id = aces_transform_id
https://github.com/imageworks/OpenColorIO-Configs/blob/0bb079c08be410030669cbf5f19ff869b88af953/aces_1.0.3/python/aces_ocio/utilities.py#L40-L86

arjunvekariyagithub/camelyon16-grand-challenge · camelyon16/postprocess/build_tf_records_heatmap_multi_thread.py · _process_image

def _process_image(patch_path, coder):
    """Process a single image file.

    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide tf image coding utils.
    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    with tf.gfile.FastGFile(patch_path, 'r') as f:
        image_data = f.read()

    # Decode the RGB PNG.
    image = coder.decode_png(image_data)

    # Check that image converted to RGB
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3

    return image_data, height, width
https://github.com/arjunvekariyagithub/camelyon16-grand-challenge/blob/660000a79775fbc5cfa8c5b44a591e62ce714089/camelyon16/postprocess/build_tf_records_heatmap_multi_thread.py#L172-L196

FederatedAI/FATE · python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/feature_histogram.py · FeatureHistogram._trim_node_map

def _trim_node_map(node_map, leaf_sample_counts):
    """
    Only keep the nodes with fewer sample and remove their siblings, for accelerating hist computation
    """
    inverse_node_map = {v: k for k, v in node_map.items()}
    sibling_node_map = {}
    # if is root node, return directly
    if 0 in node_map:
        return node_map, None

    kept_node_id = []
    idx = 0
    for left_count, right_count in zip(leaf_sample_counts[0::2], leaf_sample_counts[1::2]):
        if left_count < right_count:
            kept_node_id.append(inverse_node_map[idx])
            sibling_node_map[inverse_node_map[idx]] = inverse_node_map[idx + 1]
        else:
            kept_node_id.append(inverse_node_map[idx + 1])
            sibling_node_map[inverse_node_map[idx + 1]] = inverse_node_map[idx]
        idx += 2

    new_node_map = {node_id: idx for idx, node_id in enumerate(kept_node_id)}

    return new_node_map, sibling_node_map
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/feature_histogram.py#L931-L957
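A worked example with the static method above in scope (the node ids are made up; leaf_sample_counts is consumed as consecutive left/right sibling pairs):

node_map = {3: 0, 4: 1, 5: 2, 6: 3}   # node id -> histogram position
leaf_sample_counts = [10, 5, 2, 8]    # sibling pairs: (10, 5) and (2, 8)

new_map, siblings = FeatureHistogram._trim_node_map(node_map, leaf_sample_counts)
print(new_map)   # {4: 0, 5: 1}  -- the smaller node of each pair is kept
print(siblings)  # {4: 3, 5: 6}  -- kept node -> its dropped sibling

The smaller sibling's histogram is computed directly; the larger one can later be recovered by subtracting it from the parent, which is the acceleration the docstring refers to.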

pantsbuild/pants · src/python/pants/option/config.py · Config.get_value

def get_value(self, section: str, option: str) -> str | None:
    """Returns the value of the option in this config as a string, or None if no value specified."""
https://github.com/pantsbuild/pants/blob/2e126e78ffc40cb108408316b90e8beebee1df9e/src/python/pants/option/config.py#L175-L177

explosion/spaCy · spacy/lang/de/syntax_iterators.py · noun_chunks

def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Tuple[int, int, int]]:
    """Detect base noun phrases from a dependency parse. Works on Doc and Span."""
    # this iterator extracts spans headed by NOUNs starting from the left-most
    # syntactic dependent until the NOUN itself for close apposition and
    # measurement construction, the span is sometimes extended to the right of
    # the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee"
    # and not just "eine Tasse", same for "das Thema Familie".
    # fmt: off
    labels = ["sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app"]
    # fmt: on
    doc = doclike.doc  # Ensure works on both Doc and Span.
    if not doc.has_annotation("DEP"):
        raise ValueError(Errors.E029)
    np_label = doc.vocab.strings.add("NP")
    np_deps = set(doc.vocab.strings.add(label) for label in labels)
    close_app = doc.vocab.strings.add("nk")
    rbracket = 0
    prev_end = -1
    for i, word in enumerate(doclike):
        if i < rbracket:
            continue
        # Prevent nested chunks from being produced
        if word.left_edge.i <= prev_end:
            continue
        if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps:
            rbracket = word.i + 1
            # try to extend the span to the right
            # to capture close apposition/measurement constructions
            for rdep in doc[word.i].rights:
                if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app:
                    rbracket = rdep.i + 1
            prev_end = rbracket - 1
            yield word.left_edge.i, rbracket, np_label
https://github.com/explosion/spaCy/blob/a784b12eff48df9281b184cb7005e66bbd2e3aca/spacy/lang/de/syntax_iterators.py#L8-L40
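A short usage sketch. It assumes the German pipeline de_core_news_sm has been downloaded (e.g. via python -m spacy download de_core_news_sm); the iterator above is what backs doc.noun_chunks for German:

import spacy

nlp = spacy.load("de_core_news_sm")
doc = nlp("Ich trinke eine Tasse Tee im Garten.")
for chunk in doc.noun_chunks:
    print(chunk.text)
# expected to include "eine Tasse Tee" as one chunk, per the comment in the
# iterator about close apposition/measurement constructions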

home-assistant/core · homeassistant/components/homematicip_cloud/cover.py · HomematicipMultiCoverSlats.async_open_cover_tilt

async def async_open_cover_tilt(self, **kwargs) -> None:
    """Open the slats."""
    await self._device.set_slats_level(HMIP_SLATS_OPEN, self._channel)
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/homematicip_cloud/cover.py#L246-L248

dib-lab/khmer · versioneer.py · plus_or_dot

def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"
https://github.com/dib-lab/khmer/blob/fb65d21eaedf0d397d49ae3debc578897f9d6eb4/versioneer.py#L1229-L1233
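With plus_or_dot from the record above in scope, a quick demonstration of how versioneer picks the separator when appending local-version segments (PEP 440 allows only one "+"):

print(plus_or_dot({"closest-tag": "1.4.0"}))     # "+"  first local-version separator
print(plus_or_dot({"closest-tag": "1.4.0+12"}))  # "."  a "+" is already present
print(plus_or_dot({}))                           # "+"  missing tag is treated as ""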

spack/spack · lib/spack/spack/util/executable.py · Executable.path

def path(self):
    """The path to the executable.

    Returns:
        str: The path to the executable
    """
    return self.exe[0]
https://github.com/spack/spack/blob/675210bd8bd1c5d32ad1cc83d898fb43b569ed74/lib/spack/spack/util/executable.py#L70-L76

Nordeus/pushkin · pushkin/database/database.py · update_unregistered_devices

def update_unregistered_devices(unregistered):
    '''
    Update data for unregistered Android devices.

    Unregistered device will not receive notifications and will be deleted when number of devices exceeds maximum.
    '''
    global ENGINE
    binding = [{"p_{}".format(k): v for k, v in u.items()} for u in unregistered]
    device_table = model.metadata.tables['device']
    stmt = update(device_table).\
        values(unregistered_ts=func.now()).\
        where(and_(device_table.c.login_id == bindparam('p_login_id'),
                   func.coalesce(device_table.c.device_token_new,
                                 device_table.c.device_token) == bindparam('p_device_token')))
    ENGINE.execute(stmt, binding)
https://github.com/Nordeus/pushkin/blob/39f7057d3eb82c811c5c6b795d8bc7df9352a217/pushkin/database/database.py#L201-L214
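A self-contained sketch of the same bulk-update-with-bindparam pattern, on an in-memory SQLite database with a simplified table (SQLAlchemy 1.4+ style; the p_ prefix is needed because bindparam names must not collide with column names in an UPDATE):

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
meta = sa.MetaData()
device = sa.Table(
    "device", meta,
    sa.Column("login_id", sa.Integer),
    sa.Column("device_token", sa.String),
    sa.Column("unregistered_ts", sa.DateTime),
)
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(device.insert(), [
        {"login_id": 1, "device_token": "tok-a"},
        {"login_id": 2, "device_token": "tok-b"},
    ])
    # executemany-style bulk update: one statement, a list of parameter dicts
    stmt = (
        sa.update(device)
        .values(unregistered_ts=sa.func.now())
        .where(device.c.login_id == sa.bindparam("p_login_id"))
        .where(device.c.device_token == sa.bindparam("p_device_token"))
    )
    conn.execute(stmt, [
        {"p_login_id": 1, "p_device_token": "tok-a"},
        {"p_login_id": 2, "p_device_token": "tok-b"},
    ])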

facebookresearch/Large-Scale-VRD · lib/utils/vis.py · vis_bbox

def vis_bbox(img, bbox, thick=1):
    """Visualizes a bounding box."""
    (x0, y0, w, h) = bbox
    x1, y1 = int(x0 + w), int(y0 + h)
    x0, y0 = int(x0), int(y0)
    cv2.rectangle(img, (x0, y0), (x1, y1), _GREEN, thickness=thick)
    return img
https://github.com/facebookresearch/Large-Scale-VRD/blob/7ababfe1023941c3653d7aebe9f835a47f5e8277/lib/utils/vis.py#L122-L128

SforAiDl/Neural-Voice-Cloning-With-Few-Samples · train_encoder.py · get_cloned_voices

def get_cloned_voices(model, no_speakers=108, no_cloned_texts=23):
    try:
        with open("./Cloning_Audio/speakers_cloned_voices_mel.p", "rb") as fp:
            cloned_voices = pickle.load(fp)
    except:
        cloned_voices = generate_cloned_samples(model)
    if (np.array(cloned_voices).shape != (no_speakers, no_cloned_texts)):
        cloned_voices = generate_cloned_samples(model, "./Cloning_Audio/cloning_text.txt", no_speakers, True, 0)
    print("Cloned_voices Loaded!")
    return cloned_voices
https://github.com/SforAiDl/Neural-Voice-Cloning-With-Few-Samples/blob/33fb609427657c9492f46507184ecba4dcc272b0/train_encoder.py#L42-L51

WZMIAOMIAO/deep-learning-for-image-processing · pytorch_classification/vision_transformer/vit_model.py · vit_large_patch32_224_in21k

def vit_large_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True):
    """
    ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
    weights ported from official Google JAX impl:
    https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth
    """
    model = VisionTransformer(img_size=224,
                              patch_size=32,
                              embed_dim=1024,
                              depth=24,
                              num_heads=16,
                              representation_size=1024 if has_logits else None,
                              num_classes=num_classes)
    return model
https://github.com/WZMIAOMIAO/deep-learning-for-image-processing/blob/a4502c284958d4bf78fb77b089a90e7688ddc196/pytorch_classification/vision_transformer/vit_model.py#L392-L406
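A hedged usage sketch. The import path assumes the repo's vit_model.py is on the path, and the output shape assumes the surrounding VisionTransformer behaves like a standard classifier head:

import torch
from vit_model import vit_large_patch32_224_in21k  # hypothetical import path

model = vit_large_patch32_224_in21k(num_classes=5, has_logits=False)
x = torch.randn(1, 3, 224, 224)  # batch of one 224x224 RGB image
logits = model(x)                # expected shape: (1, 5)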

gammapy/gammapy · gammapy/modeling/models/core.py · DatasetModels.wcs_geom

def wcs_geom(self):
    """Minimum WCS geom in which all the models are contained"""
    regions = self.to_regions()
    try:
        return RegionGeom.from_regions(regions).to_wcs_geom()
    except IndexError:
        log.error("No spatial component in any model. Geom not defined")
https://github.com/gammapy/gammapy/blob/735b25cd5bbed35e2004d633621896dcd5295e8b/gammapy/modeling/models/core.py#L911-L917

ialbert/biostar-central · biostar/recipes/forms.py · ProjectForm.custom_save

def custom_save(self, owner):
    """Used to save on creation using custom function."""
    name = self.cleaned_data["name"]
    text = self.cleaned_data["text"]
    stream = self.cleaned_data["image"]
    project = auth.create_project(user=owner, name=name, text=text, stream=stream)
    project.save()
    return project
https://github.com/ialbert/biostar-central/blob/2dc7bd30691a50b2da9c2833ba354056bc686afa/biostar/recipes/forms.py#L182-L191

jython/frozen-mirror · lib-python/2.7/imaplib.py · IMAP4.getannotation

def getannotation(self, mailbox, entry, attribute):
    """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)

    Retrieve ANNOTATIONs."""
    typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
    return self._untagged_response(typ, dat, 'ANNOTATION')
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/imaplib.py#L468-L473

cogitas3d/OrtogOnBlender · __init__.py · ORTOG_PT_ArmatureDynamic.draw

def draw(self, context):
    layout = self.layout
    context = bpy.context
    obj = context.object
    scn = context.scene

    row = layout.row()
    row.prop(scn, "my_enum_dynamic")
    my_enum_dynamic = scn.my_enum_dynamic

    if my_enum_dynamic == ENUM_VALUES_DYNAMIC.DEFAULT:
        row = layout.row()
        row.label(text="Configure Armature (Classic):")
        row = layout.row()
        row.operator("object.conf_osteo_auto", text="Setup Osteotomy Auto", icon="FILE_TICK")
        row = layout.row()
        row.label(text="Soft Tissue:")
        row = layout.row()
        circle = row.operator("object.configura_dinamica_mole", text="Setup Soft Tissue Dynamics", icon="STYLUS_PRESSURE")
        row = layout.row()
        circle = row.operator("view3d.clip_border", text="Clipping Border", icon="UV_FACESEL")

    if my_enum_dynamic == ENUM_VALUES_DYNAMIC.NOSE:
        row = layout.row()
        row.operator("object.conf_osteo_auto", text="Setup Osteotomy Auto", icon="FILE_TICK")
        row = layout.row()
        row.label(text="Mode:")
        row = layout.row()
        linha = row.operator("wm.tool_set_by_id", text="Cursor", icon="PIVOT_CURSOR").name = "builtin.cursor"
        linha = row.operator("wm.tool_set_by_id", text="Select", icon="RESTRICT_SELECT_OFF").name = "builtin.select_box"
        row = layout.row()
        row = layout.row()
        row.label(text="Anatomical Points:")
        row = layout.row()
        linha = row.operator("object.trichion_pt", text="Trichion")
        row = layout.row()
        linha = row.operator("object.radix_pt", text="Radix")
        row = layout.row()
        linha = row.operator("object.tip_nose_pt", text="Tip of Nose")
        row = layout.row()
        linha = row.operator("object.alar_groove_right_pt", text="Alar Groove right")
        row = layout.row()
        linha = row.operator("object.alar_groove_left_pt", text="Alar Groove left")
        row = layout.row()
        linha = row.operator("object.submental_pt", text="Submental")
        row = layout.row()
        row = layout.row()
        row.label(text="Soft Tissue:")
        row = layout.row()
        circle = row.operator("object.configura_dinamica_mole", text="Setup Soft Tissue Dynamics", icon="STYLUS_PRESSURE")
        row = layout.row()
        circle = row.operator("view3d.clip_border", text="Clipping Border", icon="UV_FACESEL")

    if my_enum_dynamic == ENUM_VALUES_DYNAMIC.EXPERIMENTAL:
        row = layout.row()
        row.label(text=" Auto Osteo+Soft Setup (Experimental):")
        row = layout.row()
        row.operator("object.nome_face_malha", text="Set Face and Hide", icon="USER")
        row = layout.row()
        row.operator("object.conf_osteo_mole_auto", text="Setup Auto!", icon="BONE_DATA")
        row = layout.row()
        row = layout.row()
        row.label(text="Parent Points:")
        row = layout.row()
        circle = row.operator("object.parenteia_emp", text="Parent Points", icon="LINKED")
        row = layout.row()
        row = layout.row()

    box = layout.box()
    col = box.column(align=True)
    row = col.row()
    row.scale_y = 1.5
    row.alignment = 'CENTER'
    row.operator("object.gera_dir_nome_paciente_dynamic", text="SAVE!", icon="FILE_TICK")
https://github.com/cogitas3d/OrtogOnBlender/blob/881e93f5beb2263e44c270974dd0e81deca44762/__init__.py#L2404-L2502

polakowo/vectorbt · vectorbt/signals/nb.py · generate_enex_nb (at commit 6638735c131655760474d72b9f045d1dbdbd8fe9)

def generate_enex_nb(shape: tp.Shape,
                     entry_wait: int,
                     exit_wait: int,
                     entry_pick_first: bool,
                     exit_pick_first: bool,
                     entry_choice_func_nb: tp.ChoiceFunc, entry_args: tp.Args,
                     exit_choice_func_nb: tp.ChoiceFunc, exit_args: tp.Args) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Pick entry signals using `entry_choice_func_nb` and exit signals using
    `exit_choice_func_nb` one after another.

    Args:
        shape (array): Target shape.
        entry_wait (int): Number of ticks to wait before placing entries.

            !!! note
                Setting `entry_wait` to 0 or False assumes that both entry and exit
                can be processed within the same bar, and exit can be processed before entry.
        exit_wait (int): Number of ticks to wait before placing exits.

            !!! note
                Setting `exit_wait` to 0 or False assumes that both entry and exit
                can be processed within the same bar, and entry can be processed before exit.
        entry_pick_first (bool): Whether to pick the first entry out of all returned by `entry_choice_func_nb`.
        exit_pick_first (bool): Whether to pick the first exit out of all returned by `exit_choice_func_nb`.

            Setting it to False acts similarly to setting `skip_until_exit` to True in `generate_ex_nb`.
        entry_choice_func_nb (callable): Entry choice function. See `choice_func_nb` in `generate_nb`.
        entry_args (tuple): Arguments unpacked and passed to `entry_choice_func_nb`.
        exit_choice_func_nb (callable): Exit choice function. See `choice_func_nb` in `generate_nb`.
        exit_args (tuple): Arguments unpacked and passed to `exit_choice_func_nb`.
    """
    entries = np.full(shape, False)
    exits = np.full(shape, False)
    if entry_wait == 0 and exit_wait == 0:
        raise ValueError("entry_wait and exit_wait cannot be both 0")

    for col in range(shape[1]):
        prev_prev_i = -2
        prev_i = -1
        i = 0
        while True:
            to_i = shape[0]
            # Cannot assign two functions to a var in numba
            if i % 2 == 0:
                if i == 0:
                    from_i = 0
                else:
                    from_i = prev_i + entry_wait
                if from_i >= to_i:
                    break
                idxs = entry_choice_func_nb(from_i, to_i, col, *entry_args)
                a = entries
                pick_first = entry_pick_first
            else:
                from_i = prev_i + exit_wait
                if from_i >= to_i:
                    break
                idxs = exit_choice_func_nb(from_i, to_i, col, *exit_args)
                a = exits
                pick_first = exit_pick_first

            if len(idxs) == 0:
                break

            first_i = idxs[0]
            if first_i == prev_i == prev_prev_i:
                raise ValueError("Infinite loop detected")
            if first_i < from_i:
                raise ValueError("First index is out of bounds")
            if pick_first:
                # Consider only the first signal
                if first_i >= to_i:
                    raise ValueError("First index is out of bounds")
                a[first_i, col] = True
                prev_prev_i = prev_i
                prev_i = first_i
                i += 1
            else:
                # Consider all signals
                last_i = idxs[-1]
                if last_i >= to_i:
                    raise ValueError("Last index is out of bounds")
                a[idxs, col] = True
                prev_prev_i = prev_i
                prev_i = last_i
                i += 1
    return entries, exits
[ "def", "generate_enex_nb", "(", "shape", ":", "tp", ".", "Shape", ",", "entry_wait", ":", "int", ",", "exit_wait", ":", "int", ",", "entry_pick_first", ":", "bool", ",", "exit_pick_first", ":", "bool", ",", "entry_choice_func_nb", ":", "tp", ".", "ChoiceFunc", ",", "entry_args", ":", "tp", ".", "Args", ",", "exit_choice_func_nb", ":", "tp", ".", "ChoiceFunc", ",", "exit_args", ":", "tp", ".", "Args", ")", "->", "tp", ".", "Tuple", "[", "tp", ".", "Array2d", ",", "tp", ".", "Array2d", "]", ":", "entries", "=", "np", ".", "full", "(", "shape", ",", "False", ")", "exits", "=", "np", ".", "full", "(", "shape", ",", "False", ")", "if", "entry_wait", "==", "0", "and", "exit_wait", "==", "0", ":", "raise", "ValueError", "(", "\"entry_wait and exit_wait cannot be both 0\"", ")", "for", "col", "in", "range", "(", "shape", "[", "1", "]", ")", ":", "prev_prev_i", "=", "-", "2", "prev_i", "=", "-", "1", "i", "=", "0", "while", "True", ":", "to_i", "=", "shape", "[", "0", "]", "# Cannot assign two functions to a var in numba", "if", "i", "%", "2", "==", "0", ":", "if", "i", "==", "0", ":", "from_i", "=", "0", "else", ":", "from_i", "=", "prev_i", "+", "entry_wait", "if", "from_i", ">=", "to_i", ":", "break", "idxs", "=", "entry_choice_func_nb", "(", "from_i", ",", "to_i", ",", "col", ",", "*", "entry_args", ")", "a", "=", "entries", "pick_first", "=", "entry_pick_first", "else", ":", "from_i", "=", "prev_i", "+", "exit_wait", "if", "from_i", ">=", "to_i", ":", "break", "idxs", "=", "exit_choice_func_nb", "(", "from_i", ",", "to_i", ",", "col", ",", "*", "exit_args", ")", "a", "=", "exits", "pick_first", "=", "exit_pick_first", "if", "len", "(", "idxs", ")", "==", "0", ":", "break", "first_i", "=", "idxs", "[", "0", "]", "if", "first_i", "==", "prev_i", "==", "prev_prev_i", ":", "raise", "ValueError", "(", "\"Infinite loop detected\"", ")", "if", "first_i", "<", "from_i", ":", "raise", "ValueError", "(", "\"First index is out of bounds\"", ")", "if", "pick_first", ":", "# Consider only the first signal", "if", "first_i", ">=", "to_i", ":", "raise", "ValueError", "(", "\"First index is out of bounds\"", ")", "a", "[", "first_i", ",", "col", "]", "=", "True", "prev_prev_i", "=", "prev_i", "prev_i", "=", "first_i", "i", "+=", "1", "else", ":", "# Consider all signals", "last_i", "=", "idxs", "[", "-", "1", "]", "if", "last_i", ">=", "to_i", ":", "raise", "ValueError", "(", "\"Last index is out of bounds\"", ")", "a", "[", "idxs", ",", "col", "]", "=", "True", "prev_prev_i", "=", "prev_i", "prev_i", "=", "last_i", "i", "+=", "1", "return", "entries", ",", "exits" ]
https://github.com/polakowo/vectorbt/blob/6638735c131655760474d72b9f045d1dbdbd8fe9/vectorbt/signals/nb.py#L158-L249
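The alternating pick loop above is easiest to see with a toy driver. A minimal pure-Python sketch, assuming stand-in choice functions that always pick the first available bar (the names and the simplified pick_first-only loop are illustrative, not the library's API):

import numpy as np

def first_index_choice(from_i, to_i, col):
    # Hypothetical choice function: always pick the first bar in range.
    return np.array([from_i])

def demo_generate_enex(shape, entry_wait, exit_wait):
    # Simplified rendition of the alternating loop, pick_first=True on both sides.
    entries = np.full(shape, False)
    exits = np.full(shape, False)
    for col in range(shape[1]):
        prev_i, i = -1, 0
        while True:
            wait = entry_wait if i % 2 == 0 else exit_wait
            from_i = 0 if i == 0 else prev_i + wait
            if from_i >= shape[0]:
                break
            idxs = first_index_choice(from_i, shape[0], col)
            (entries if i % 2 == 0 else exits)[idxs[0], col] = True
            prev_i = idxs[0]
            i += 1
    return entries, exits

entries, exits = demo_generate_enex((5, 1), entry_wait=1, exit_wait=1)
# entries: bars 0, 2, 4; exits: bars 1, 3

With entry_wait=1 and exit_wait=1 the signals strictly alternate bar by bar, which matches the wait semantics documented in the docstring above.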
mesalock-linux/mesapy
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
rpython/tool/setuptools_msvc.py
python
SystemInfo.VCInstallDir
(self)
return path
Microsoft Visual C++ directory.
Microsoft Visual C++ directory.
[ "Microsoft", "Visual", "C", "++", "directory", "." ]
def VCInstallDir(self): """ Microsoft Visual C++ directory. """ self.VSInstallDir guess_vc = self._guess_vc() or self._guess_vc_legacy() # Try to get "VC++ for Python" path from registry as default path reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) python_vc = self.ri.lookup(reg_path, 'installdir') default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc # Try to get path from registry, if fail use default path path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc if not os.path.isdir(path): msg = 'Microsoft Visual C++ directory not found' raise distutils.errors.DistutilsPlatformError(msg) return path
[ "def", "VCInstallDir", "(", "self", ")", ":", "self", ".", "VSInstallDir", "guess_vc", "=", "self", ".", "_guess_vc", "(", ")", "or", "self", ".", "_guess_vc_legacy", "(", ")", "# Try to get \"VC++ for Python\" path from registry as default path", "reg_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "ri", ".", "vc_for_python", ",", "'%0.1f'", "%", "self", ".", "vc_ver", ")", "python_vc", "=", "self", ".", "ri", ".", "lookup", "(", "reg_path", ",", "'installdir'", ")", "default_vc", "=", "os", ".", "path", ".", "join", "(", "python_vc", ",", "'VC'", ")", "if", "python_vc", "else", "guess_vc", "# Try to get path from registry, if fail use default path", "path", "=", "self", ".", "ri", ".", "lookup", "(", "self", ".", "ri", ".", "vc", ",", "'%0.1f'", "%", "self", ".", "vc_ver", ")", "or", "default_vc", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "msg", "=", "'Microsoft Visual C++ directory not found'", "raise", "distutils", ".", "errors", ".", "DistutilsPlatformError", "(", "msg", ")", "return", "path" ]
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/rpython/tool/setuptools_msvc.py#L544-L564
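Stripped of the registry plumbing, the method is a lookup-with-fallback chain. A minimal sketch of that pattern under stated assumptions (registry_lookup is a stand-in for self.ri.lookup, guessed_path for _guess_vc/_guess_vc_legacy):

import os

def resolve_vc_dir(registry_lookup, guessed_path):
    # registry_lookup() returns a path string or None; the guess is the fallback.
    path = registry_lookup() or guessed_path
    if not os.path.isdir(path):
        raise FileNotFoundError('Microsoft Visual C++ directory not found')
    return path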
ipython/ipyparallel
d35d4fb9501da5b3280b11e83ed633a95f17be1d
setupbase.py
python
_get_package_data
(root, file_patterns=None)
return _get_files(file_patterns, _glob_pjoin(os.path.abspath(os.getcwd()), root))
Expand file patterns to a list of `package_data` paths. Parameters ----------- root: str The relative path to the package root from the current dir. file_patterns: list or str, optional A list of glob patterns for the data file locations. The globs can be recursive if they include a `**`. They should be relative paths from the root or absolute paths. If not given, all files will be used. Note: Files in `node_modules` are ignored.
Expand file patterns to a list of `package_data` paths.
[ "Expand", "file", "patterns", "to", "a", "list", "of", "package_data", "paths", "." ]
def _get_package_data(root, file_patterns=None): """Expand file patterns to a list of `package_data` paths. Parameters ----------- root: str The relative path to the package root from the current dir. file_patterns: list or str, optional A list of glob patterns for the data file locations. The globs can be recursive if they include a `**`. They should be relative paths from the root or absolute paths. If not given, all files will be used. Note: Files in `node_modules` are ignored. """ if file_patterns is None: file_patterns = ['*'] return _get_files(file_patterns, _glob_pjoin(os.path.abspath(os.getcwd()), root))
[ "def", "_get_package_data", "(", "root", ",", "file_patterns", "=", "None", ")", ":", "if", "file_patterns", "is", "None", ":", "file_patterns", "=", "[", "'*'", "]", "return", "_get_files", "(", "file_patterns", ",", "_glob_pjoin", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "getcwd", "(", ")", ")", ",", "root", ")", ")" ]
https://github.com/ipython/ipyparallel/blob/d35d4fb9501da5b3280b11e83ed633a95f17be1d/setupbase.py#L803-L821
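A self-contained approximation of the same expansion using only the standard library; this sketches, rather than reproduces, the helper (extras such as node_modules filtering are omitted):

import glob
import os

def package_data_paths(root, patterns=('*',)):
    # Expand each glob (recursively for '**') relative to the package root.
    base = os.path.abspath(root)
    files = []
    for pattern in patterns:
        for match in glob.glob(os.path.join(base, pattern), recursive=True):
            if os.path.isfile(match):
                files.append(os.path.relpath(match, base))
    return files

# e.g. package_data_paths('mypkg', ['**/*.json', '**/*.js'])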
paulproteus/python-scraping-code-samples
4e5396d4e311ca66c784a2b5f859308285e511da
new/seleniumrc/selenium-remote-control-1.0-beta-2/selenium-python-client-driver-1.0-beta-2/selenium.py
python
selenium.ignore_attributes_without_value
(self,ignore)
Specifies whether Selenium will ignore xpath attributes that have no value, i.e. are the empty string, when using the non-native xpath evaluation engine. You'd want to do this for performance reasons in IE. However, this could break certain xpaths, for example an xpath that looks for an attribute whose value is NOT the empty string. The hope is that such xpaths are relatively rare, but the user should have the option of using them. Note that this only influences xpath evaluation when using the ajaxslt engine (i.e. not "javascript-xpath"). 'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness.
Specifies whether Selenium will ignore xpath attributes that have no value, i.e. are the empty string, when using the non-native xpath evaluation engine. You'd want to do this for performance reasons in IE. However, this could break certain xpaths, for example an xpath that looks for an attribute whose value is NOT the empty string. The hope is that such xpaths are relatively rare, but the user should have the option of using them. Note that this only influences xpath evaluation when using the ajaxslt engine (i.e. not "javascript-xpath"). 'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness.
[ "Specifies", "whether", "Selenium", "will", "ignore", "xpath", "attributes", "that", "have", "no", "value", "i", ".", "e", ".", "are", "the", "empty", "string", "when", "using", "the", "non", "-", "native", "xpath", "evaluation", "engine", ".", "You", "d", "want", "to", "do", "this", "for", "performance", "reasons", "in", "IE", ".", "However", "this", "could", "break", "certain", "xpaths", "for", "example", "an", "xpath", "that", "looks", "for", "an", "attribute", "whose", "value", "is", "NOT", "the", "empty", "string", ".", "The", "hope", "is", "that", "such", "xpaths", "are", "relatively", "rare", "but", "the", "user", "should", "have", "the", "option", "of", "using", "them", ".", "Note", "that", "this", "only", "influences", "xpath", "evaluation", "when", "using", "the", "ajaxslt", "engine", "(", "i", ".", "e", ".", "not", "javascript", "-", "xpath", ")", ".", "ignore", "is", "boolean", "true", "means", "we", "ll", "ignore", "attributes", "without", "value", "at", "the", "expense", "of", "xpath", "correctness", ";", "false", "means", "we", "ll", "sacrifice", "speed", "for", "correctness", "." ]
def ignore_attributes_without_value(self,ignore): """ Specifies whether Selenium will ignore xpath attributes that have no value, i.e. are the empty string, when using the non-native xpath evaluation engine. You'd want to do this for performance reasons in IE. However, this could break certain xpaths, for example an xpath that looks for an attribute whose value is NOT the empty string. The hope is that such xpaths are relatively rare, but the user should have the option of using them. Note that this only influences xpath evaluation when using the ajaxslt engine (i.e. not "javascript-xpath"). 'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness. """ self.do_command("ignoreAttributesWithoutValue", [ignore,])
[ "def", "ignore_attributes_without_value", "(", "self", ",", "ignore", ")", ":", "self", ".", "do_command", "(", "\"ignoreAttributesWithoutValue\"", ",", "[", "ignore", ",", "]", ")" ]
https://github.com/paulproteus/python-scraping-code-samples/blob/4e5396d4e311ca66c784a2b5f859308285e511da/new/seleniumrc/selenium-remote-control-1.0-beta-2/selenium-python-client-driver-1.0-beta-2/selenium.py#L1630-L1644
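Typical use from the RC client this file defines, assuming a Selenium RC server is running on localhost:4444 (host, port, browser string, and URL below are illustrative):

from selenium import selenium  # the RC client class defined in this module

sel = selenium('localhost', 4444, '*iexplore', 'http://example.com/')
sel.start()
sel.ignore_attributes_without_value(True)  # trade xpath strictness for speed in IE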
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/statistic/statics.py
python
MissingStatistic.__init__
(self, missing_val=None)
[]
def __init__(self, missing_val=None): super(MissingStatistic, self).__init__() self.missing_val = None self.feature_summary = {} self.missing_feature = [] self.all_feature_list = [] self.tag_id_mapping, self.id_tag_mapping = {}, {} self.dense_missing_val = missing_val
[ "def", "__init__", "(", "self", ",", "missing_val", "=", "None", ")", ":", "super", "(", "MissingStatistic", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "missing_val", "=", "None", "self", ".", "feature_summary", "=", "{", "}", "self", ".", "missing_feature", "=", "[", "]", "self", ".", "all_feature_list", "=", "[", "]", "self", ".", "tag_id_mapping", ",", "self", ".", "id_tag_mapping", "=", "{", "}", ",", "{", "}", "self", ".", "dense_missing_val", "=", "missing_val" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/statistic/statics.py#L237-L245
ConsenSys/ethjsonrpc
fe525bdcd889924687ba1646fc46cef329410e22
ethjsonrpc/client.py
python
EthJsonRpc.net_version
(self)
return self._call('net_version')
https://github.com/ethereum/wiki/wiki/JSON-RPC#net_version TESTED
https://github.com/ethereum/wiki/wiki/JSON-RPC#net_version
[ "https", ":", "//", "github", ".", "com", "/", "ethereum", "/", "wiki", "/", "wiki", "/", "JSON", "-", "RPC#net_version" ]
def net_version(self): ''' https://github.com/ethereum/wiki/wiki/JSON-RPC#net_version TESTED ''' return self._call('net_version')
[ "def", "net_version", "(", "self", ")", ":", "return", "self", ".", "_call", "(", "'net_version'", ")" ]
https://github.com/ConsenSys/ethjsonrpc/blob/fe525bdcd889924687ba1646fc46cef329410e22/ethjsonrpc/client.py#L153-L159
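A minimal call, assuming a JSON-RPC node listening on the usual local endpoint (host and port are illustrative):

from ethjsonrpc import EthJsonRpc

client = EthJsonRpc('127.0.0.1', 8545)
print(client.net_version())  # e.g. '1' on mainnet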
paulproteus/python-scraping-code-samples
4e5396d4e311ca66c784a2b5f859308285e511da
new/seleniumrc/selenium-remote-control-1.0-beta-2/selenium-python-client-driver-1.0-beta-2/selenium.py
python
selenium.meta_key_down
(self)
Press the meta key and hold it down until doMetaUp() is called or a new page is loaded.
Press the meta key and hold it down until doMetaUp() is called or a new page is loaded.
[ "Press", "the", "meta", "key", "and", "hold", "it", "down", "until", "doMetaUp", "()", "is", "called", "or", "a", "new", "page", "is", "loaded", "." ]
def meta_key_down(self): """ Press the meta key and hold it down until doMetaUp() is called or a new page is loaded. """ self.do_command("metaKeyDown", [])
[ "def", "meta_key_down", "(", "self", ")", ":", "self", ".", "do_command", "(", "\"metaKeyDown\"", ",", "[", "]", ")" ]
https://github.com/paulproteus/python-scraping-code-samples/blob/4e5396d4e311ca66c784a2b5f859308285e511da/new/seleniumrc/selenium-remote-control-1.0-beta-2/selenium-python-client-driver-1.0-beta-2/selenium.py#L387-L392
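The usual pairing is hold, act, release. Continuing the RC session sketched earlier (the locator is illustrative; metaKeyUp is the documented counterpart command):

sel.meta_key_down()
sel.click('id=save')  # performed as meta+click
sel.meta_key_up()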
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/setuptools/pkg_resources/_vendor/packaging/version.py
python
_parse_version_parts
(s)
[]
def _parse_version_parts(s): for part in _legacy_version_component_re.split(s): part = _legacy_version_replacement_map.get(part, part) if not part or part == ".": continue if part[:1] in "0123456789": # pad for numeric comparison yield part.zfill(8) else: yield "*" + part # ensure that alpha/beta/candidate are before final yield "*final"
[ "def", "_parse_version_parts", "(", "s", ")", ":", "for", "part", "in", "_legacy_version_component_re", ".", "split", "(", "s", ")", ":", "part", "=", "_legacy_version_replacement_map", ".", "get", "(", "part", ",", "part", ")", "if", "not", "part", "or", "part", "==", "\".\"", ":", "continue", "if", "part", "[", ":", "1", "]", "in", "\"0123456789\"", ":", "# pad for numeric comparison", "yield", "part", ".", "zfill", "(", "8", ")", "else", ":", "yield", "\"*\"", "+", "part", "# ensure that alpha/beta/candidate are before final", "yield", "\"*final\"" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/setuptools/pkg_resources/_vendor/packaging/version.py#L114-L128
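A short demonstration of why the zero-padding and '*' prefixes make legacy parts compare correctly; the component regex and replacement map are reproduced from packaging's legacy-version handling as assumptions:

import re

_component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
_replacements = {'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@'}

def parse_parts(s):
    # Mirror of _parse_version_parts above, for demonstration.
    for part in _component_re.split(s):
        part = _replacements.get(part, part)
        if not part or part == '.':
            continue
        yield part.zfill(8) if part[:1] in '0123456789' else '*' + part
    yield '*final'

print(list(parse_parts('1.0rc1')))
# ['00000001', '00000000', '*c', '00000001', '*final']
print(tuple(parse_parts('1.0')) > tuple(parse_parts('1.0rc1')))  # True: final > rc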
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/django/utils/timezone.py
python
_get_timezone_name
(timezone)
Returns the name of ``timezone``.
Returns the name of ``timezone``.
[ "Returns", "the", "name", "of", "timezone", "." ]
def _get_timezone_name(timezone): """ Returns the name of ``timezone``. """ try: # for pytz timezones return timezone.zone except AttributeError: # for regular tzinfo objects local_now = datetime.now(timezone) return timezone.tzname(local_now)
[ "def", "_get_timezone_name", "(", "timezone", ")", ":", "try", ":", "# for pytz timezones", "return", "timezone", ".", "zone", "except", "AttributeError", ":", "# for regular tzinfo objects", "local_now", "=", "datetime", ".", "now", "(", "timezone", ")", "return", "timezone", ".", "tzname", "(", "local_now", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/django/utils/timezone.py#L139-L149
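Both branches exercised with standard objects, assuming pytz is installed; the logic is reproduced inline so the snippet runs on its own:

from datetime import datetime, timedelta, timezone
import pytz

def tz_name(tz):
    # Same branch structure as _get_timezone_name above.
    try:
        return tz.zone                      # pytz timezones carry .zone
    except AttributeError:
        return tz.tzname(datetime.now(tz))  # plain tzinfo objects

print(tz_name(pytz.timezone('Europe/Paris')))         # Europe/Paris
print(tz_name(timezone(timedelta(hours=2), 'CEST')))  # CEST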
openfisca/openfisca-france
207a58191be6830716693f94d37846f1e5037b51
openfisca_france/model/prelevements_obligatoires/impot_revenu/reductions_impot.py
python
intagr.formula_2005_01_01
(foyer_fiscal, period, parameters)
return P.taux * min_(f7um, max1)
Interest for deferred payment granted to farmers 2005-
Interest for deferred payment granted to farmers 2005-
[ "Intérêts", "pour", "paiement", "différé", "accordé", "aux", "agriculteurs", "2005", "-" ]
def formula_2005_01_01(foyer_fiscal, period, parameters): ''' Intérêts pour paiement différé accordé aux agriculteurs 2005- ''' f7um = foyer_fiscal('f7um', period) maries_ou_pacses = foyer_fiscal('maries_ou_pacses', period) P = parameters(period).impot_revenu.reductions_impots.intagr max1 = P.max * (1 + maries_ou_pacses) return P.taux * min_(f7um, max1)
[ "def", "formula_2005_01_01", "(", "foyer_fiscal", ",", "period", ",", "parameters", ")", ":", "f7um", "=", "foyer_fiscal", "(", "'f7um'", ",", "period", ")", "maries_ou_pacses", "=", "foyer_fiscal", "(", "'maries_ou_pacses'", ",", "period", ")", "P", "=", "parameters", "(", "period", ")", ".", "impot_revenu", ".", "reductions_impots", ".", "intagr", "max1", "=", "P", ".", "max", "*", "(", "1", "+", "maries_ou_pacses", ")", "return", "P", ".", "taux", "*", "min_", "(", "f7um", ",", "max1", ")" ]
https://github.com/openfisca/openfisca-france/blob/207a58191be6830716693f94d37846f1e5037b51/openfisca_france/model/prelevements_obligatoires/impot_revenu/reductions_impot.py#L2723-L2733
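A worked instance of the capped reduction, with illustrative numbers since the real rate and ceiling come from the parameters tree (taux and plafond below are assumptions):

taux, plafond = 0.50, 5000      # hypothetical rate and single-filer ceiling
maries_ou_pacses = True         # couple: the ceiling doubles (True counts as 1)
f7um = 12000                    # declared deferred-payment interest

max1 = plafond * (1 + maries_ou_pacses)  # 10000
reduction = taux * min(f7um, max1)       # 0.5 * 10000 = 5000.0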
apache/libcloud
90971e17bfd7b6bb97b2489986472c531cc8e140
libcloud/storage/drivers/dummy.py
python
DummyStorageDriver.delete_container
(self, container)
return True
>>> driver = DummyStorageDriver('key', 'secret') >>> container = Container(name = 'test container', ... extra={'object_count': 0}, driver=driver) >>> driver.delete_container(container=container) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> len(driver._containers) 1 >>> driver.delete_container(container=container) True >>> len(driver._containers) 0 >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream( ... object_name='test object', iterator=DummyFileObject(5, 10), ... extra={}) >>> driver.delete_container(container=container) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerIsNotEmptyError: @inherits: :class:`StorageDriver.delete_container`
>>> driver = DummyStorageDriver('key', 'secret') >>> container = Container(name = 'test container', ... extra={'object_count': 0}, driver=driver) >>> driver.delete_container(container=container) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> len(driver._containers) 1 >>> driver.delete_container(container=container) True >>> len(driver._containers) 0 >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream( ... object_name='test object', iterator=DummyFileObject(5, 10), ... extra={}) >>> driver.delete_container(container=container) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerIsNotEmptyError:
[ ">>>", "driver", "=", "DummyStorageDriver", "(", "key", "secret", ")", ">>>", "container", "=", "Container", "(", "name", "=", "test", "container", "...", "extra", "=", "{", "object_count", ":", "0", "}", "driver", "=", "driver", ")", ">>>", "driver", ".", "delete_container", "(", "container", "=", "container", ")", "...", "#doctest", ":", "+", "IGNORE_EXCEPTION_DETAIL", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "ContainerDoesNotExistError", ":", ">>>", "container", "=", "driver", ".", "create_container", "(", "...", "container_name", "=", "test", "container", "1", ")", "...", "#doctest", ":", "+", "IGNORE_EXCEPTION_DETAIL", ">>>", "len", "(", "driver", ".", "_containers", ")", "1", ">>>", "driver", ".", "delete_container", "(", "container", "=", "container", ")", "True", ">>>", "len", "(", "driver", ".", "_containers", ")", "0", ">>>", "container", "=", "driver", ".", "create_container", "(", "...", "container_name", "=", "test", "container", "1", ")", "...", "#doctest", ":", "+", "IGNORE_EXCEPTION_DETAIL", ">>>", "obj", "=", "container", ".", "upload_object_via_stream", "(", "...", "object_name", "=", "test", "object", "iterator", "=", "DummyFileObject", "(", "5", "10", ")", "...", "extra", "=", "{}", ")", ">>>", "driver", ".", "delete_container", "(", "container", "=", "container", ")", "...", "#doctest", ":", "+", "IGNORE_EXCEPTION_DETAIL", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "ContainerIsNotEmptyError", ":" ]
def delete_container(self, container): """ >>> driver = DummyStorageDriver('key', 'secret') >>> container = Container(name = 'test container', ... extra={'object_count': 0}, driver=driver) >>> driver.delete_container(container=container) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerDoesNotExistError: >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> len(driver._containers) 1 >>> driver.delete_container(container=container) True >>> len(driver._containers) 0 >>> container = driver.create_container( ... container_name='test container 1') ... #doctest: +IGNORE_EXCEPTION_DETAIL >>> obj = container.upload_object_via_stream( ... object_name='test object', iterator=DummyFileObject(5, 10), ... extra={}) >>> driver.delete_container(container=container) ... #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ContainerIsNotEmptyError: @inherits: :class:`StorageDriver.delete_container` """ container_name = container.name if container_name not in self._containers: raise ContainerDoesNotExistError( container_name=container_name, value=None, driver=self ) container = self._containers[container_name] if len(container["objects"]) > 0: raise ContainerIsNotEmptyError( container_name=container_name, value=None, driver=self ) del self._containers[container_name] return True
[ "def", "delete_container", "(", "self", ",", "container", ")", ":", "container_name", "=", "container", ".", "name", "if", "container_name", "not", "in", "self", ".", "_containers", ":", "raise", "ContainerDoesNotExistError", "(", "container_name", "=", "container_name", ",", "value", "=", "None", ",", "driver", "=", "self", ")", "container", "=", "self", ".", "_containers", "[", "container_name", "]", "if", "len", "(", "container", "[", "\"objects\"", "]", ")", ">", "0", ":", "raise", "ContainerIsNotEmptyError", "(", "container_name", "=", "container_name", ",", "value", "=", "None", ",", "driver", "=", "self", ")", "del", "self", ".", "_containers", "[", "container_name", "]", "return", "True" ]
https://github.com/apache/libcloud/blob/90971e17bfd7b6bb97b2489986472c531cc8e140/libcloud/storage/drivers/dummy.py#L335-L380
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/cinder/cinder/volume/drivers/netapp/iscsi.py
python
NetAppCmodeISCSIDriver.create_volume
(self, volume)
Driver entry point for creating a new volume.
Driver entry point for creating a new volume.
[ "Driver", "entry", "point", "for", "creating", "a", "new", "volume", "." ]
def create_volume(self, volume): """Driver entry point for creating a new volume.""" default_size = '104857600' # 100 MB gigabytes = 1073741824L # 2^30 name = volume['name'] if int(volume['size']) == 0: size = default_size else: size = str(int(volume['size']) * gigabytes) extra_args = {} extra_args['OsType'] = 'linux' extra_args['QosType'] = self._get_qos_type(volume) extra_args['Container'] = volume['project_id'] extra_args['Display'] = volume['display_name'] extra_args['Description'] = volume['display_description'] extra_args['SpaceReserved'] = True server = self.client.service metadata = self._create_metadata_list(extra_args) lun = server.ProvisionLun(Name=name, Size=size, Metadata=metadata) LOG.debug(_("Created LUN with name %s") % name) self._add_lun_to_table( NetAppLun(lun.Handle, lun.Name, lun.Size, self._create_dict_from_meta(lun.Metadata)))
[ "def", "create_volume", "(", "self", ",", "volume", ")", ":", "default_size", "=", "'104857600'", "# 100 MB", "gigabytes", "=", "1073741824L", "# 2^30", "name", "=", "volume", "[", "'name'", "]", "if", "int", "(", "volume", "[", "'size'", "]", ")", "==", "0", ":", "size", "=", "default_size", "else", ":", "size", "=", "str", "(", "int", "(", "volume", "[", "'size'", "]", ")", "*", "gigabytes", ")", "extra_args", "=", "{", "}", "extra_args", "[", "'OsType'", "]", "=", "'linux'", "extra_args", "[", "'QosType'", "]", "=", "self", ".", "_get_qos_type", "(", "volume", ")", "extra_args", "[", "'Container'", "]", "=", "volume", "[", "'project_id'", "]", "extra_args", "[", "'Display'", "]", "=", "volume", "[", "'display_name'", "]", "extra_args", "[", "'Description'", "]", "=", "volume", "[", "'display_description'", "]", "extra_args", "[", "'SpaceReserved'", "]", "=", "True", "server", "=", "self", ".", "client", ".", "service", "metadata", "=", "self", ".", "_create_metadata_list", "(", "extra_args", ")", "lun", "=", "server", ".", "ProvisionLun", "(", "Name", "=", "name", ",", "Size", "=", "size", ",", "Metadata", "=", "metadata", ")", "LOG", ".", "debug", "(", "_", "(", "\"Created LUN with name %s\"", ")", "%", "name", ")", "self", ".", "_add_lun_to_table", "(", "NetAppLun", "(", "lun", ".", "Handle", ",", "lun", ".", "Name", ",", "lun", ".", "Size", ",", "self", ".", "_create_dict_from_meta", "(", "lun", ".", "Metadata", ")", ")", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/cinder/cinder/volume/drivers/netapp/iscsi.py#L1218-L1243
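The size arithmetic in isolation (plain ints here for clarity; the driver passes the result to the filer as a string, and the 2**30 constant above uses a Python 2 long suffix):

GIB = 1073741824  # 2**30 bytes

def lun_size_bytes(requested_gb, default=104857600):
    # A 0 GB request falls back to the 100 MB default, as in the driver.
    return default if int(requested_gb) == 0 else int(requested_gb) * GIB

print(lun_size_bytes(0))  # 104857600
print(lun_size_bytes(2))  # 2147483648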
PaddlePaddle/models
511e2e282960ed4c7440c3f1d1e62017acb90e11
tutorials/mobilenetv3_prod/Step1-5/mobilenetv3_ref/torchvision/transforms/functional_tensor.py
python
_max_value
(dtype: torch.dtype)
return max_value.item()
[]
def _max_value(dtype: torch.dtype) -> float: # TODO: replace this method with torch.iinfo when it gets torchscript support. # https://github.com/pytorch/pytorch/issues/41492 a = torch.tensor(2, dtype=dtype) signed = 1 if torch.tensor(0, dtype=dtype).is_signed() else 0 bits = 1 max_value = torch.tensor(-signed, dtype=torch.long) while True: next_value = a.pow(bits - signed).sub(1) if next_value > max_value: max_value = next_value bits *= 2 else: break return max_value.item()
[ "def", "_max_value", "(", "dtype", ":", "torch", ".", "dtype", ")", "->", "float", ":", "# TODO: replace this method with torch.iinfo when it gets torchscript support.", "# https://github.com/pytorch/pytorch/issues/41492", "a", "=", "torch", ".", "tensor", "(", "2", ",", "dtype", "=", "dtype", ")", "signed", "=", "1", "if", "torch", ".", "tensor", "(", "0", ",", "dtype", "=", "dtype", ")", ".", "is_signed", "(", ")", "else", "0", "bits", "=", "1", "max_value", "=", "torch", ".", "tensor", "(", "-", "signed", ",", "dtype", "=", "torch", ".", "long", ")", "while", "True", ":", "next_value", "=", "a", ".", "pow", "(", "bits", "-", "signed", ")", ".", "sub", "(", "1", ")", "if", "next_value", ">", "max_value", ":", "max_value", "=", "next_value", "bits", "*=", "2", "else", ":", "break", "return", "max_value", ".", "item", "(", ")" ]
https://github.com/PaddlePaddle/models/blob/511e2e282960ed4c7440c3f1d1e62017acb90e11/tutorials/mobilenetv3_prod/Step1-5/mobilenetv3_ref/torchvision/transforms/functional_tensor.py#L34-L49
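A compact rendition of the doubling search, checked against torch.iinfo; it relies on the same wrap-around overflow of integer tensors that the original exploits, so treat it as a sketch for recent PyTorch builds:

import torch

def max_value(dtype):
    # Doubling search: grow the exponent until 2**(bits - signed) - 1 stops increasing.
    a = torch.tensor(2, dtype=dtype)
    signed = 1 if torch.tensor(0, dtype=dtype).is_signed() else 0
    bits, best = 1, torch.tensor(-signed, dtype=torch.long)
    while True:
        nxt = a.pow(bits - signed).sub(1)
        if nxt <= best:
            break
        best, bits = nxt, bits * 2
    return best.item()

for dt in (torch.uint8, torch.int16, torch.int32, torch.int64):
    assert max_value(dt) == torch.iinfo(dt).max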
LinkedInAttic/indextank-service
880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e
api/boto/ec2/autoscale/trigger.py
python
Trigger.delete
(self)
return req
Delete this trigger.
Delete this trigger.
[ "Delete", "this", "trigger", "." ]
def delete(self): """ Delete this trigger. """ params = { 'TriggerName' : self.name, 'AutoScalingGroupName' : self.autoscale_group_name, } req =self.connection.get_object('DeleteTrigger', params, Request) self.connection.last_request = req return req
[ "def", "delete", "(", "self", ")", ":", "params", "=", "{", "'TriggerName'", ":", "self", ".", "name", ",", "'AutoScalingGroupName'", ":", "self", ".", "autoscale_group_name", ",", "}", "req", "=", "self", ".", "connection", ".", "get_object", "(", "'DeleteTrigger'", ",", "params", ",", "Request", ")", "self", ".", "connection", ".", "last_request", "=", "req", "return", "req" ]
https://github.com/LinkedInAttic/indextank-service/blob/880c6295ce8e7a3a55bf9b3777cc35c7680e0d7e/api/boto/ec2/autoscale/trigger.py#L127-L136
joelgrus/data-science-from-scratch
d5d0f117f41b3ccab3b07f1ee1fa21cfcf69afa1
first-edition/code/network_analysis.py
python
vector_as_matrix
(v)
return [[v_i] for v_i in v]
returns the vector v (represented as a list) as a n x 1 matrix
returns the vector v (represented as a list) as a n x 1 matrix
[ "returns", "the", "vector", "v", "(", "represented", "as", "a", "list", ")", "as", "a", "n", "x", "1", "matrix" ]
def vector_as_matrix(v): """returns the vector v (represented as a list) as a n x 1 matrix""" return [[v_i] for v_i in v]
[ "def", "vector_as_matrix", "(", "v", ")", ":", "return", "[", "[", "v_i", "]", "for", "v_i", "in", "v", "]" ]
https://github.com/joelgrus/data-science-from-scratch/blob/d5d0f117f41b3ccab3b07f1ee1fa21cfcf69afa1/first-edition/code/network_analysis.py#L127-L129
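For concreteness, a one-liner with the function above in scope:

print(vector_as_matrix([1, 2, 3]))  # [[1], [2], [3]]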
Ecogenomics/GTDBTk
1e10c56530b4a15eadce519619a62584a490632d
gtdbtk/external/pypfam/HMM/HMMResultsIO.py
python
HMMResultsIO._readUnitData
(self, seqId, fh, hmmRes)
return
# == domain 1 score: 244.0 bits; conditional E-value: 9.5e-76 # SEED 1 medrlallkaisasakdlvalaasrGaksipspvkttavkfdplptPdldalrtrlkeaklPakaiksalsayekaCarWrsdleeafdktaksvsPanlhllealrirlyteqvekWlvqvlevaerWkaemekqrahiaatmgp 146 # m+++la+l++isa+akd++ala+srGa+++ +p++tt+++fd+l++P+ld++rtrl+ea+lP+kaik++lsaye+aCarW++dleeafd+ta+s+sP+n+++l++lr+rly+eqv+kWl++vl+v+erWkaemekqrahi+atmgp # P37935.1 1 MAELLACLQSISAHAKDMMALARSRGATGS-RPTPTTLPHFDELLPPNLDFVRTRLQEARLPPKAIKGTLSAYESACARWKHDLEEAFDRTAHSISPHNFQRLAQLRTRLYVEQVQKWLYEVLQVPERWKAEMEKQRAHINATMGP 145 # 899***************************.******************************************************************************************************************8 PP # # OR.... # # == domain 1 score: 27.6 bits; conditional E-value: 7.4e-10 # PF00018 17 LsfkkGdvitvleksee.eWwkaelkdg.keGlvPsnYvep 55 # L++++Gd+++++++++e++Ww++++++++++G++P+n+v+p # P15498.4 617 LRLNPGDIVELTKAEAEqNWWEGRNTSTnEIGWFPCNRVKP 657 # 7899**********9999*******************9987 PP
# == domain 1 score: 244.0 bits; conditional E-value: 9.5e-76 # SEED 1 medrlallkaisasakdlvalaasrGaksipspvkttavkfdplptPdldalrtrlkeaklPakaiksalsayekaCarWrsdleeafdktaksvsPanlhllealrirlyteqvekWlvqvlevaerWkaemekqrahiaatmgp 146 # m+++la+l++isa+akd++ala+srGa+++ +p++tt+++fd+l++P+ld++rtrl+ea+lP+kaik++lsaye+aCarW++dleeafd+ta+s+sP+n+++l++lr+rly+eqv+kWl++vl+v+erWkaemekqrahi+atmgp # P37935.1 1 MAELLACLQSISAHAKDMMALARSRGATGS-RPTPTTLPHFDELLPPNLDFVRTRLQEARLPPKAIKGTLSAYESACARWKHDLEEAFDRTAHSISPHNFQRLAQLRTRLYVEQVQKWLYEVLQVPERWKAEMEKQRAHINATMGP 145 # 899***************************.******************************************************************************************************************8 PP # # OR.... # # == domain 1 score: 27.6 bits; conditional E-value: 7.4e-10 # PF00018 17 LsfkkGdvitvleksee.eWwkaelkdg.keGlvPsnYvep 55 # L++++Gd+++++++++e++Ww++++++++++G++P+n+v+p # P15498.4 617 LRLNPGDIVELTKAEAEqNWWEGRNTSTnEIGWFPCNRVKP 657 # 7899**********9999*******************9987 PP
[ "#", "==", "domain", "1", "score", ":", "244", ".", "0", "bits", ";", "conditional", "E", "-", "value", ":", "9", ".", "5e", "-", "76", "#", "SEED", "1", "medrlallkaisasakdlvalaasrGaksipspvkttavkfdplptPdldalrtrlkeaklPakaiksalsayekaCarWrsdleeafdktaksvsPanlhllealrirlyteqvekWlvqvlevaerWkaemekqrahiaatmgp", "146", "#", "m", "+++", "la", "+", "l", "++", "isa", "+", "akd", "++", "ala", "+", "srGa", "+++", "+", "p", "++", "tt", "+++", "fd", "+", "l", "++", "P", "+", "ld", "++", "rtrl", "+", "ea", "+", "lP", "+", "kaik", "++", "lsaye", "+", "aCarW", "++", "dleeafd", "+", "ta", "+", "s", "+", "sP", "+", "n", "+++", "l", "++", "lr", "+", "rly", "+", "eqv", "+", "kWl", "++", "vl", "+", "v", "+", "erWkaemekqrahi", "+", "atmgp", "#", "P37935", ".", "1", "1", "MAELLACLQSISAHAKDMMALARSRGATGS", "-", "RPTPTTLPHFDELLPPNLDFVRTRLQEARLPPKAIKGTLSAYESACARWKHDLEEAFDRTAHSISPHNFQRLAQLRTRLYVEQVQKWLYEVLQVPERWKAEMEKQRAHINATMGP", "145", "#", "899", "***************************", ".", "******************************************************************************************************************", "8", "PP", "#", "#", "OR", "....", "#", "#", "==", "domain", "1", "score", ":", "27", ".", "6", "bits", ";", "conditional", "E", "-", "value", ":", "7", ".", "4e", "-", "10", "#", "PF00018", "17", "LsfkkGdvitvleksee", ".", "eWwkaelkdg", ".", "keGlvPsnYvep", "55", "#", "L", "++++", "Gd", "+++++++++", "e", "++", "Ww", "++++++++++", "G", "++", "P", "+", "n", "+", "v", "+", "p", "#", "P15498", ".", "4", "617", "LRLNPGDIVELTKAEAEqNWWEGRNTSTnEIGWFPCNRVKP", "657", "#", "7899", "**********", "9999", "*******************", "9987", "PP" ]
def _readUnitData(self, seqId, fh, hmmRes): if hmmRes.eof: return hmmName = hmmRes.seedName seqName = hmmRes.seqName units = list() align = True recurse = False eof = False nextSeqId = None # Parse the domain hits section while len(fh) > 0: hs = fh.popleft() # Run the regex searches which generate output unitd_2 = self.re_unitd_2.search(hs) if self.re_unitd_1.search(hs): align = False recurse = False eof = True break elif unitd_2: nextSeqId = unitd_2.group(1) align = False recurse = True break elif self.re_unitd_3.search(hs): align = True recurse = False break elif self.re_unitd_4.search(hs): # Two human readable lines continue elif self.re_unitd_5.search(hs): # blank line continue elif self.re_unitd_6.search(hs): dMatch = self.re_unitd_7.split(hs) if len(dMatch) != 17: sys.exit('Expected 16 elements of data.') hmmUnit = HMMUnit() hmmUnit.name = seqId hmmUnit.domain = dMatch[1] hmmUnit.bits = float(dMatch[3]) hmmUnit.bias = float(dMatch[4]) hmmUnit.domEvalue = float(dMatch[5]) hmmUnit.evalue = float(dMatch[6]) hmmUnit.hmmFrom = int(dMatch[7]) hmmUnit.hmmTo = int(dMatch[8]) hmmUnit.seqFrom = int(dMatch[10]) hmmUnit.seqTo = int(dMatch[11]) hmmUnit.envFrom = int(dMatch[13]) hmmUnit.envTo = int(dMatch[14]) hmmUnit.aliAcc = float(dMatch[16]) units.append(hmmUnit) continue elif self.re_unitd_8.search(hs): align = False continue else: sys.exit('Did not parse line %s' % hs) ''' # == domain 1 score: 244.0 bits; conditional E-value: 9.5e-76 # SEED 1 medrlallkaisasakdlvalaasrGaksipspvkttavkfdplptPdldalrtrlkeaklPakaiksalsayekaCarWrsdleeafdktaksvsPanlhllealrirlyteqvekWlvqvlevaerWkaemekqrahiaatmgp 146 # m+++la+l++isa+akd++ala+srGa+++ +p++tt+++fd+l++P+ld++rtrl+ea+lP+kaik++lsaye+aCarW++dleeafd+ta+s+sP+n+++l++lr+rly+eqv+kWl++vl+v+erWkaemekqrahi+atmgp # P37935.1 1 MAELLACLQSISAHAKDMMALARSRGATGS-RPTPTTLPHFDELLPPNLDFVRTRLQEARLPPKAIKGTLSAYESACARWKHDLEEAFDRTAHSISPHNFQRLAQLRTRLYVEQVQKWLYEVLQVPERWKAEMEKQRAHINATMGP 145 # 899***************************.******************************************************************************************************************8 PP # # OR.... # # == domain 1 score: 27.6 bits; conditional E-value: 7.4e-10 # PF00018 17 LsfkkGdvitvleksee.eWwkaelkdg.keGlvPsnYvep 55 # L++++Gd+++++++++e++Ww++++++++++G++P+n+v+p # P15498.4 617 LRLNPGDIVELTKAEAEqNWWEGRNTSTnEIGWFPCNRVKP 657 # 7899**********9999*******************9987 PP ''' if align: # Specifically for python pattern1 = None pattern2 = None if hmmName and hmmRes.program == 'hmmsearch': pattern1 = re.compile(r'^\s+%s\s+\d+\s+(\S+)\s+\d+' % hmmName) seqId = re.sub('(\W)', r'\\\1', seqId) # $id =~ s/\|/\\|/g; #Escape '|', '[' and ']' characters # $id =~ s/\[/\\[/g; # $id =~ s/\]/\\]/g; pattern2 = re.compile(r'^\s+%s\s+\d+\s+(\S+)\s+\d+' % seqId) elif seqName and hmmRes.program == 'hmmscan': tmpSeqName = seqName tmpSeqName = re.sub('(\W)', r'\\\1', tmpSeqName) pattern1 = re.compile(r'^\s+%s\s+\d+\s+(\S+)\s+\d+' % seqId) pattern2 = re.compile(r'^\s+%s\s+\d+\s+(\S+)\s+\d+' % tmpSeqName) elif seqName and (hmmRes.program == 'phmmer' or hmmRes.program == 'jackhmmer'): sys.exit("seqName and (hmmRes.program == 'phmmer' or hmmRes.program == 'jackhmmer' is not implemented.") recurse = False matchNo = None hmmlen = 0 while len(fh) > 0: hs = fh.popleft() # Run a search for each of the patterns. 
pattern1_res = pattern1.search(hs) pattern2_res = pattern2.search(hs) re_unitd_9_res = self.re_unitd_9.search(hs) re_unitd_10_res = self.re_unitd_10.search(hs) re_unitd_11_res = self.re_unitd_11.search(hs) re_unitd_12_res = self.re_unitd_12.search(hs) re_unitd_13_res = self.re_unitd_13.search(hs) re_unitd_14_res = self.re_unitd_14.search(hs) re_unitd_15_res = self.re_unitd_15.search(hs) re_unitd_16_res = self.re_unitd_16.search(hs) if pattern1_res: dict_hmmalign = units[matchNo - 1].hmmalign if 'hmm' in dict_hmmalign: dict_hmmalign['hmm'] += pattern1_res.group(1) else: dict_hmmalign['hmm'] = pattern1_res.group(1) hmmlen = len(pattern1_res.group(1)) elif pattern2_res: dict_hmmalign = units[matchNo - 1].hmmalign if 'seq' in dict_hmmalign: dict_hmmalign['seq'] += pattern2_res.group(1) else: dict_hmmalign['seq'] = pattern2_res.group(1) # ^\s+([x\.]+)\s+RF$ elif re_unitd_9_res: rf = re_unitd_9_res.group(1) dict_hmmalign = units[matchNo - 1].hmmalign if 'rf' in dict_hmmalign: dict_hmmalign['rf'] += rf else: dict_hmmalign['rf'] = rf # ^\s+([0-9\*\.]+)\s+PP$ elif re_unitd_10_res: pp = re_unitd_10_res.group(1) dict_hmmalign = units[matchNo - 1].hmmalign if 'pp' in dict_hmmalign: dict_hmmalign['pp'] += pp else: dict_hmmalign['pp'] = pp # ^\s+(\S+)\s+CS$ elif re_unitd_11_res: cs = re_unitd_11_res.group(1) dict_hmmalign = units[matchNo - 1].hmmalign if 'cs' in dict_hmmalign: dict_hmmalign['cs'] += cs else: dict_hmmalign['cs'] = cs # ^\s+==\s+domain\s+(\d+) elif re_unitd_12_res: matchNo = int(re_unitd_12_res.group(1)) # ^\s+(.*)\s+$ elif re_unitd_13_res: hs = hs.rstrip() m1 = hs[-hmmlen:] # ^$ elif re_unitd_14_res: continue # ^[(\/\/|Internal)] elif re_unitd_15_res: align = False recurse = False eof = True break # ^\>\>\s+(\S+) elif re_unitd_16_res: nextSeqId = re_unitd_16_res.group(1) recurse = True break else: sys.exit('Did not parse %s in units' % hs) # foreach my u (@units) for u in units: hmmRes.addHMMUnit(u) hmmRes.eof = eof if recurse and nextSeqId: self._readUnitData(nextSeqId, fh, hmmRes) return
[ "def", "_readUnitData", "(", "self", ",", "seqId", ",", "fh", ",", "hmmRes", ")", ":", "if", "hmmRes", ".", "eof", ":", "return", "hmmName", "=", "hmmRes", ".", "seedName", "seqName", "=", "hmmRes", ".", "seqName", "units", "=", "list", "(", ")", "align", "=", "True", "recurse", "=", "False", "eof", "=", "False", "nextSeqId", "=", "None", "# Parse the domain hits section", "while", "len", "(", "fh", ")", ">", "0", ":", "hs", "=", "fh", ".", "popleft", "(", ")", "# Run the regex searches which generate output", "unitd_2", "=", "self", ".", "re_unitd_2", ".", "search", "(", "hs", ")", "if", "self", ".", "re_unitd_1", ".", "search", "(", "hs", ")", ":", "align", "=", "False", "recurse", "=", "False", "eof", "=", "True", "break", "elif", "unitd_2", ":", "nextSeqId", "=", "unitd_2", ".", "group", "(", "1", ")", "align", "=", "False", "recurse", "=", "True", "break", "elif", "self", ".", "re_unitd_3", ".", "search", "(", "hs", ")", ":", "align", "=", "True", "recurse", "=", "False", "break", "elif", "self", ".", "re_unitd_4", ".", "search", "(", "hs", ")", ":", "# Two human readable lines", "continue", "elif", "self", ".", "re_unitd_5", ".", "search", "(", "hs", ")", ":", "# blank line", "continue", "elif", "self", ".", "re_unitd_6", ".", "search", "(", "hs", ")", ":", "dMatch", "=", "self", ".", "re_unitd_7", ".", "split", "(", "hs", ")", "if", "len", "(", "dMatch", ")", "!=", "17", ":", "sys", ".", "exit", "(", "'Expected 16 elements of data.'", ")", "hmmUnit", "=", "HMMUnit", "(", ")", "hmmUnit", ".", "name", "=", "seqId", "hmmUnit", ".", "domain", "=", "dMatch", "[", "1", "]", "hmmUnit", ".", "bits", "=", "float", "(", "dMatch", "[", "3", "]", ")", "hmmUnit", ".", "bias", "=", "float", "(", "dMatch", "[", "4", "]", ")", "hmmUnit", ".", "domEvalue", "=", "float", "(", "dMatch", "[", "5", "]", ")", "hmmUnit", ".", "evalue", "=", "float", "(", "dMatch", "[", "6", "]", ")", "hmmUnit", ".", "hmmFrom", "=", "int", "(", "dMatch", "[", "7", "]", ")", "hmmUnit", ".", "hmmTo", "=", "int", "(", "dMatch", "[", "8", "]", ")", "hmmUnit", ".", "seqFrom", "=", "int", "(", "dMatch", "[", "10", "]", ")", "hmmUnit", ".", "seqTo", "=", "int", "(", "dMatch", "[", "11", "]", ")", "hmmUnit", ".", "envFrom", "=", "int", "(", "dMatch", "[", "13", "]", ")", "hmmUnit", ".", "envTo", "=", "int", "(", "dMatch", "[", "14", "]", ")", "hmmUnit", ".", "aliAcc", "=", "float", "(", "dMatch", "[", "16", "]", ")", "units", ".", "append", "(", "hmmUnit", ")", "continue", "elif", "self", ".", "re_unitd_8", ".", "search", "(", "hs", ")", ":", "align", "=", "False", "continue", "else", ":", "sys", ".", "exit", "(", "'Did not parse line %s'", "%", "hs", ")", "if", "align", ":", "# Specifically for python", "pattern1", "=", "None", "pattern2", "=", "None", "if", "hmmName", "and", "hmmRes", ".", "program", "==", "'hmmsearch'", ":", "pattern1", "=", "re", ".", "compile", "(", "r'^\\s+%s\\s+\\d+\\s+(\\S+)\\s+\\d+'", "%", "hmmName", ")", "seqId", "=", "re", ".", "sub", "(", "'(\\W)'", ",", "r'\\\\\\1'", ",", "seqId", ")", "# $id =~ s/\\|/\\\\|/g; #Escape '|', '[' and ']' characters", "# $id =~ s/\\[/\\\\[/g;", "# $id =~ s/\\]/\\\\]/g;", "pattern2", "=", "re", ".", "compile", "(", "r'^\\s+%s\\s+\\d+\\s+(\\S+)\\s+\\d+'", "%", "seqId", ")", "elif", "seqName", "and", "hmmRes", ".", "program", "==", "'hmmscan'", ":", "tmpSeqName", "=", "seqName", "tmpSeqName", "=", "re", ".", "sub", "(", "'(\\W)'", ",", "r'\\\\\\1'", ",", "tmpSeqName", ")", "pattern1", "=", "re", ".", "compile", "(", "r'^\\s+%s\\s+\\d+\\s+(\\S+)\\s+\\d+'", "%", "seqId", 
")", "pattern2", "=", "re", ".", "compile", "(", "r'^\\s+%s\\s+\\d+\\s+(\\S+)\\s+\\d+'", "%", "tmpSeqName", ")", "elif", "seqName", "and", "(", "hmmRes", ".", "program", "==", "'phmmer'", "or", "hmmRes", ".", "program", "==", "'jackhmmer'", ")", ":", "sys", ".", "exit", "(", "\"seqName and (hmmRes.program == 'phmmer' or hmmRes.program == 'jackhmmer' is not implemented.\"", ")", "recurse", "=", "False", "matchNo", "=", "None", "hmmlen", "=", "0", "while", "len", "(", "fh", ")", ">", "0", ":", "hs", "=", "fh", ".", "popleft", "(", ")", "# Run a search for each of the patterns.", "pattern1_res", "=", "pattern1", ".", "search", "(", "hs", ")", "pattern2_res", "=", "pattern2", ".", "search", "(", "hs", ")", "re_unitd_9_res", "=", "self", ".", "re_unitd_9", ".", "search", "(", "hs", ")", "re_unitd_10_res", "=", "self", ".", "re_unitd_10", ".", "search", "(", "hs", ")", "re_unitd_11_res", "=", "self", ".", "re_unitd_11", ".", "search", "(", "hs", ")", "re_unitd_12_res", "=", "self", ".", "re_unitd_12", ".", "search", "(", "hs", ")", "re_unitd_13_res", "=", "self", ".", "re_unitd_13", ".", "search", "(", "hs", ")", "re_unitd_14_res", "=", "self", ".", "re_unitd_14", ".", "search", "(", "hs", ")", "re_unitd_15_res", "=", "self", ".", "re_unitd_15", ".", "search", "(", "hs", ")", "re_unitd_16_res", "=", "self", ".", "re_unitd_16", ".", "search", "(", "hs", ")", "if", "pattern1_res", ":", "dict_hmmalign", "=", "units", "[", "matchNo", "-", "1", "]", ".", "hmmalign", "if", "'hmm'", "in", "dict_hmmalign", ":", "dict_hmmalign", "[", "'hmm'", "]", "+=", "pattern1_res", ".", "group", "(", "1", ")", "else", ":", "dict_hmmalign", "[", "'hmm'", "]", "=", "pattern1_res", ".", "group", "(", "1", ")", "hmmlen", "=", "len", "(", "pattern1_res", ".", "group", "(", "1", ")", ")", "elif", "pattern2_res", ":", "dict_hmmalign", "=", "units", "[", "matchNo", "-", "1", "]", ".", "hmmalign", "if", "'seq'", "in", "dict_hmmalign", ":", "dict_hmmalign", "[", "'seq'", "]", "+=", "pattern2_res", ".", "group", "(", "1", ")", "else", ":", "dict_hmmalign", "[", "'seq'", "]", "=", "pattern2_res", ".", "group", "(", "1", ")", "# ^\\s+([x\\.]+)\\s+RF$", "elif", "re_unitd_9_res", ":", "rf", "=", "re_unitd_9_res", ".", "group", "(", "1", ")", "dict_hmmalign", "=", "units", "[", "matchNo", "-", "1", "]", ".", "hmmalign", "if", "'rf'", "in", "dict_hmmalign", ":", "dict_hmmalign", "[", "'rf'", "]", "+=", "rf", "else", ":", "dict_hmmalign", "[", "'rf'", "]", "=", "rf", "# ^\\s+([0-9\\*\\.]+)\\s+PP$", "elif", "re_unitd_10_res", ":", "pp", "=", "re_unitd_10_res", ".", "group", "(", "1", ")", "dict_hmmalign", "=", "units", "[", "matchNo", "-", "1", "]", ".", "hmmalign", "if", "'pp'", "in", "dict_hmmalign", ":", "dict_hmmalign", "[", "'pp'", "]", "+=", "pp", "else", ":", "dict_hmmalign", "[", "'pp'", "]", "=", "pp", "# ^\\s+(\\S+)\\s+CS$", "elif", "re_unitd_11_res", ":", "cs", "=", "re_unitd_11_res", ".", "group", "(", "1", ")", "dict_hmmalign", "=", "units", "[", "matchNo", "-", "1", "]", ".", "hmmalign", "if", "'cs'", "in", "dict_hmmalign", ":", "dict_hmmalign", "[", "'cs'", "]", "+=", "cs", "else", ":", "dict_hmmalign", "[", "'cs'", "]", "=", "cs", "# ^\\s+==\\s+domain\\s+(\\d+)", "elif", "re_unitd_12_res", ":", "matchNo", "=", "int", "(", "re_unitd_12_res", ".", "group", "(", "1", ")", ")", "# ^\\s+(.*)\\s+$", "elif", "re_unitd_13_res", ":", "hs", "=", "hs", ".", "rstrip", "(", ")", "m1", "=", "hs", "[", "-", "hmmlen", ":", "]", "# ^$", "elif", "re_unitd_14_res", ":", "continue", "# ^[(\\/\\/|Internal)]", "elif", "re_unitd_15_res", ":", 
"align", "=", "False", "recurse", "=", "False", "eof", "=", "True", "break", "# ^\\>\\>\\s+(\\S+)", "elif", "re_unitd_16_res", ":", "nextSeqId", "=", "re_unitd_16_res", ".", "group", "(", "1", ")", "recurse", "=", "True", "break", "else", ":", "sys", ".", "exit", "(", "'Did not parse %s in units'", "%", "hs", ")", "# foreach my u (@units)", "for", "u", "in", "units", ":", "hmmRes", ".", "addHMMUnit", "(", "u", ")", "hmmRes", ".", "eof", "=", "eof", "if", "recurse", "and", "nextSeqId", ":", "self", ".", "_readUnitData", "(", "nextSeqId", ",", "fh", ",", "hmmRes", ")", "return" ]
https://github.com/Ecogenomics/GTDBTk/blob/1e10c56530b4a15eadce519619a62584a490632d/gtdbtk/external/pypfam/HMM/HMMResultsIO.py#L233-L451
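A toy parse of one domain-statistics row of the kind this reader consumes; the real implementation uses precompiled regexes (re_unitd_6/re_unitd_7), so the whitespace split and the sample line below are only assumptions:

line = ' 1 ! 244.0 0.1 9.5e-76 1.2e-71 1 146 .. 1 145 .. 1 146 0.97'
fields = line.split()
domain, bits_score = int(fields[0]), float(fields[2])
dom_evalue, hmm_from, hmm_to = float(fields[4]), int(fields[6]), int(fields[7])
print(domain, bits_score, dom_evalue, hmm_from, hmm_to)  # 1 244.0 9.5e-76 1 146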
ospalh/anki-addons
4ece13423bd541e29d9b40ebe26ca0999a6962b1
nachschlagen.py
python
on_lookup_forvo_selection
()
u"""Wrapper to look up the selection at Forvo and catch value errors.
u"""Wrapper to look up the selection at Forvo and catch value errors.
[ "u", "Wrapper", "to", "look", "up", "the", "selection", "at", "Forvo", "and", "catch", "value", "errors", "." ]
def on_lookup_forvo_selection(): u"""Wrapper to look up the selection at Forvo and catch value errors.""" try: lookup_forvo([]) except ValueError as ve: tooltip(str(ve))
[ "def", "on_lookup_forvo_selection", "(", ")", ":", "try", ":", "lookup_forvo", "(", "[", "]", ")", "except", "ValueError", "as", "ve", ":", "tooltip", "(", "str", "(", "ve", ")", ")" ]
https://github.com/ospalh/anki-addons/blob/4ece13423bd541e29d9b40ebe26ca0999a6962b1/nachschlagen.py#L306-L311
bids-standard/pybids
9449fdc319c4bdff4ed9aa1b299964352f394d56
bids/variables/variables.py
python
DenseRunVariable.split
(self, grouper)
return [DenseRunVariable(name='%s.%s' % (self.name, name), values=df[name].values, run_info=self.run_info, source=self.source, sampling_rate=self.sampling_rate) for i, name in enumerate(df.columns)]
Split the current DenseRunVariable into multiple columns. Parameters ---------- grouper : :obj:`pandas.DataFrame` Binary DF specifying the design matrix to use for splitting. Number of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable`` will be generated for each column in the grouper. Returns ------- A list of DenseRunVariables, one per unique value in the grouper.
Split the current DenseRunVariable into multiple columns.
[ "Split", "the", "current", "DenseRunVariable", "into", "multiple", "columns", "." ]
def split(self, grouper): """Split the current DenseRunVariable into multiple columns. Parameters ---------- grouper : :obj:`pandas.DataFrame` Binary DF specifying the design matrix to use for splitting. Number of rows must match current ``DenseRunVariable``; a new ``DenseRunVariable`` will be generated for each column in the grouper. Returns ------- A list of DenseRunVariables, one per unique value in the grouper. """ values = grouper.values * self.values.values df = pd.DataFrame(values, columns=grouper.columns) return [DenseRunVariable(name='%s.%s' % (self.name, name), values=df[name].values, run_info=self.run_info, source=self.source, sampling_rate=self.sampling_rate) for i, name in enumerate(df.columns)]
[ "def", "split", "(", "self", ",", "grouper", ")", ":", "values", "=", "grouper", ".", "values", "*", "self", ".", "values", ".", "values", "df", "=", "pd", ".", "DataFrame", "(", "values", ",", "columns", "=", "grouper", ".", "columns", ")", "return", "[", "DenseRunVariable", "(", "name", "=", "'%s.%s'", "%", "(", "self", ".", "name", ",", "name", ")", ",", "values", "=", "df", "[", "name", "]", ".", "values", ",", "run_info", "=", "self", ".", "run_info", ",", "source", "=", "self", ".", "source", ",", "sampling_rate", "=", "self", ".", "sampling_rate", ")", "for", "i", ",", "name", "in", "enumerate", "(", "df", ".", "columns", ")", "]" ]
https://github.com/bids-standard/pybids/blob/9449fdc319c4bdff4ed9aa1b299964352f394d56/bids/variables/variables.py#L476-L497
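The core of the split is an elementwise product between a binary design matrix and the value column. A self-contained numpy/pandas sketch of just that step (column names and values are illustrative):

import numpy as np
import pandas as pd

values = np.array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 1)
grouper = pd.DataFrame({'condA': [1, 1, 0, 0], 'condB': [0, 0, 1, 1]})

split = pd.DataFrame(grouper.values * values, columns=grouper.columns)
print(split['condA'].values)  # [1. 2. 0. 0.]
print(split['condB'].values)  # [0. 0. 3. 4.]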
gentoo/portage
e5be73709b1a42b40380fd336f9381452b01a723
lib/portage/util/backoff.py
python
ExponentialBackoff.__call__
(self, tries)
Given a number of previous tries, calculate the amount of time to delay the next try. @param tries: number of previous tries @type tries: int @return: amount of time to delay the next try @rtype: int
Given a number of previous tries, calculate the amount of time to delay the next try.
[ "Given", "a", "number", "of", "previous", "tries", "calculate", "the", "amount", "of", "time", "to", "delay", "the", "next", "try", "." ]
def __call__(self, tries): """ Given a number of previous tries, calculate the amount of time to delay the next try. @param tries: number of previous tries @type tries: int @return: amount of time to delay the next try @rtype: int """ try: return min(self._limit, self._multiplier * (self._base ** tries)) except OverflowError: return self._limit
[ "def", "__call__", "(", "self", ",", "tries", ")", ":", "try", ":", "return", "min", "(", "self", ".", "_limit", ",", "self", ".", "_multiplier", "*", "(", "self", ".", "_base", "**", "tries", ")", ")", "except", "OverflowError", ":", "return", "self", ".", "_limit" ]
https://github.com/gentoo/portage/blob/e5be73709b1a42b40380fd336f9381452b01a723/lib/portage/util/backoff.py#L32-L45
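The resulting schedule for typical settings; base, multiplier, and limit below are assumptions, since the class presumably stores them in __init__:

def backoff_delay(tries, base=2, multiplier=1, limit=30):
    # Same formula as __call__ above, including the OverflowError guard.
    try:
        return min(limit, multiplier * base ** tries)
    except OverflowError:
        return limit

print([backoff_delay(t) for t in range(7)])  # [1, 2, 4, 8, 16, 30, 30]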
datascopeanalytics/traces
beed806b548c8c048c62e4224384cd77cf655be0
traces/timeseries.py
python
TimeSeries.items
(self)
return self._d.items()
ts.items() -> list of the (key, value) pairs in ts, as 2-tuples
ts.items() -> list of the (key, value) pairs in ts, as 2-tuples
[ "ts", ".", "items", "()", "-", ">", "list", "of", "the", "(", "key", "value", ")", "pairs", "in", "ts", "as", "2", "-", "tuples" ]
def items(self): """ts.items() -> list of the (key, value) pairs in ts, as 2-tuples""" return self._d.items()
[ "def", "items", "(", "self", ")", ":", "return", "self", ".", "_d", ".", "items", "(", ")" ]
https://github.com/datascopeanalytics/traces/blob/beed806b548c8c048c62e4224384cd77cf655be0/traces/timeseries.py#L210-L212
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/api/search/search.py
python
_CheckFieldName
(name)
return name
Checks field name is not too long and matches field name pattern. Field name pattern: "[A-Za-z][A-Za-z0-9_]*".
Checks field name is not too long and matches field name pattern.
[ "Checks", "field", "name", "is", "not", "too", "long", "and", "matches", "field", "name", "pattern", "." ]
def _CheckFieldName(name): """Checks field name is not too long and matches field name pattern. Field name pattern: "[A-Za-z][A-Za-z0-9_]*". """ _ValidateString(name, 'name', MAXIMUM_FIELD_NAME_LENGTH) if not re.match(_FIELD_NAME_PATTERN, name): raise ValueError('field name "%s" should match pattern: %s' % (name, _FIELD_NAME_PATTERN)) return name
[ "def", "_CheckFieldName", "(", "name", ")", ":", "_ValidateString", "(", "name", ",", "'name'", ",", "MAXIMUM_FIELD_NAME_LENGTH", ")", "if", "not", "re", ".", "match", "(", "_FIELD_NAME_PATTERN", ",", "name", ")", ":", "raise", "ValueError", "(", "'field name \"%s\" should match pattern: %s'", "%", "(", "name", ",", "_FIELD_NAME_PATTERN", ")", ")", "return", "name" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/api/search/search.py#L546-L555
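The pattern in play, reconstructed from the docstring with explicit anchors (the module constant _FIELD_NAME_PATTERN is assumed to be equivalent):

import re

FIELD_NAME_PATTERN = r'^[A-Za-z][A-Za-z0-9_]*$'
for name in ('title', 'f_1', '1field', '_hidden'):
    print(name, bool(re.match(FIELD_NAME_PATTERN, name)))
# title True, f_1 True, 1field False, _hidden False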
GNS3/gns3-server
aff06572d4173df945ad29ea8feb274f7885d9e4
gns3server/controller/compute.py
python
Compute.forward
(self, method, type, path, data=None)
return res.json
Forward a call to the emulator on compute
Forward a call to the emulator on compute
[ "Forward", "a", "call", "to", "the", "emulator", "on", "compute" ]
async def forward(self, method, type, path, data=None): """ Forward a call to the emulator on compute """ try: action = "/{}/{}".format(type, path) res = await self.http_query(method, action, data=data, timeout=None) except aiohttp.ServerDisconnectedError: log.error("Connection lost to %s during %s %s", self._id, method, action) raise aiohttp.web.HTTPGatewayTimeout() return res.json
[ "async", "def", "forward", "(", "self", ",", "method", ",", "type", ",", "path", ",", "data", "=", "None", ")", ":", "try", ":", "action", "=", "\"/{}/{}\"", ".", "format", "(", "type", ",", "path", ")", "res", "=", "await", "self", ".", "http_query", "(", "method", ",", "action", ",", "data", "=", "data", ",", "timeout", "=", "None", ")", "except", "aiohttp", ".", "ServerDisconnectedError", ":", "log", ".", "error", "(", "\"Connection lost to %s during %s %s\"", ",", "self", ".", "_id", ",", "method", ",", "action", ")", "raise", "aiohttp", ".", "web", ".", "HTTPGatewayTimeout", "(", ")", "return", "res", ".", "json" ]
https://github.com/GNS3/gns3-server/blob/aff06572d4173df945ad29ea8feb274f7885d9e4/gns3server/controller/compute.py#L583-L593
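A hedged usage sketch; the emulator type and node path below are illustrative values, not taken from the source:

async def start_qemu_node(compute, node_id):
    # Forwards POST /qemu/nodes/<id>/start to the compute's emulator;
    # the concrete path is an assumption for illustration.
    return await compute.forward("POST", "qemu", "nodes/{}/start".format(node_id), data={})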
ljean/modbus-tk
1159c71794071ae67f73f86fa14dd71c989b4859
hmi/master_webhmi.py
python
Master.__init__
(self, protocol, address, id, db)
[]
def __init__(self, protocol, address, id, db): if protocol == "tcp": try: (host, port) = address.split(":") self.modbus = modbus_tcp.TcpMaster(str(host), int(port)) except: self.modbus = modbus_tcp.TcpMaster(address) self.modbus.set_timeout(5.0) elif protocol == "rtu": if SERIAL: args = unicode(address).split(',') kwargs = {} for a in args: key, val = a.split(':') if key=='port': try: serial_port = int(val) except: serial_port = val else: kwargs[key] = val try: try: s = SERIAL_PORTS[serial_port] except IndexError: SERIAL_PORTS[serial_port] = s = serial.Serial(port=serial_port, **kwargs) self.modbus = modbus_rtu.RtuMaster(s) except Exception, msg: raise Exception("Protocol {0} error! {1}".format(protocol, msg)) else: raise Exception("Protocol {0} is disabled!".format(protocol)) else: raise Exception("Protocol {0} is not supported!".format(protocol)) self.id = id self._db = db self.address = address self.protocol = protocol self.requests = self._db.get_requests(self.id)
[ "def", "__init__", "(", "self", ",", "protocol", ",", "address", ",", "id", ",", "db", ")", ":", "if", "protocol", "==", "\"tcp\"", ":", "try", ":", "(", "host", ",", "port", ")", "=", "address", ".", "split", "(", "\":\"", ")", "self", ".", "modbus", "=", "modbus_tcp", ".", "TcpMaster", "(", "str", "(", "host", ")", ",", "int", "(", "port", ")", ")", "except", ":", "self", ".", "modbus", "=", "modbus_tcp", ".", "TcpMaster", "(", "address", ")", "self", ".", "modbus", ".", "set_timeout", "(", "5.0", ")", "elif", "protocol", "==", "\"rtu\"", ":", "if", "SERIAL", ":", "args", "=", "unicode", "(", "address", ")", ".", "split", "(", "','", ")", "kwargs", "=", "{", "}", "for", "a", "in", "args", ":", "key", ",", "val", "=", "a", ".", "split", "(", "':'", ")", "if", "key", "==", "'port'", ":", "try", ":", "serial_port", "=", "int", "(", "val", ")", "except", ":", "serial_port", "=", "val", "else", ":", "kwargs", "[", "key", "]", "=", "val", "try", ":", "try", ":", "s", "=", "SERIAL_PORTS", "[", "serial_port", "]", "except", "IndexError", ":", "SERIAL_PORTS", "[", "serial_port", "]", "=", "s", "=", "serial", ".", "Serial", "(", "port", "=", "serial_port", ",", "*", "*", "kwargs", ")", "self", ".", "modbus", "=", "modbus_rtu", ".", "RtuMaster", "(", "s", ")", "except", "Exception", ",", "msg", ":", "raise", "Exception", "(", "\"Protocol {0} error! {1}\"", ".", "format", "(", "protocol", ",", "msg", ")", ")", "else", ":", "raise", "Exception", "(", "\"Protocol {0} is disabled!\"", ".", "format", "(", "protocol", ")", ")", "else", ":", "raise", "Exception", "(", "\"Protocol {0} is not supported!\"", ".", "format", "(", "protocol", ")", ")", "self", ".", "id", "=", "id", "self", ".", "_db", "=", "db", "self", ".", "address", "=", "address", "self", ".", "protocol", "=", "protocol", "self", ".", "requests", "=", "self", ".", "_db", ".", "get_requests", "(", "self", ".", "id", ")" ]
https://github.com/ljean/modbus-tk/blob/1159c71794071ae67f73f86fa14dd71c989b4859/hmi/master_webhmi.py#L31-L70
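The address grammar implied by the parsing above, with illustrative values (the db handle and the concrete addresses are assumptions, not from the source):

# tcp addresses are "host:port" (or a bare host, falling back to the default port);
# rtu addresses are comma-separated "key:value" pairs, with "port" special-cased.
tcp_master = Master("tcp", "192.168.0.10:502", 1, db)
rtu_master = Master("rtu", "port:0", 2, db)  # serial port index 0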
google/jax
bebe9845a873b3203f8050395255f173ba3bbb71
jax/_src/util.py
python
distributed_debug_log
(*pairs)
Format and log `pairs` if config.jax_distributed_debug is enabled. Args: pairs: A sequence of label/value pairs to log. The first pair is treated as a heading for subsequent pairs.
Format and log `pairs` if config.jax_distributed_debug is enabled.
[ "Format", "and", "log", "pairs", "if", "config", ".", "jax_distributed_debug", "is", "enabled", "." ]
def distributed_debug_log(*pairs): """Format and log `pairs` if config.jax_distributed_debug is enabled. Args: pairs: A sequence of label/value pairs to log. The first pair is treated as a heading for subsequent pairs. """ if config.jax_distributed_debug: lines = ["\nDISTRIBUTED_DEBUG_BEGIN"] try: lines.append(f"{pairs[0][0]}: {pairs[0][1]}") for label, value in pairs[1:]: lines.append(f" {label}: {value}") except Exception as e: lines.append("DISTRIBUTED_DEBUG logging failed!") lines.append(f"{e}") lines.append("DISTRIBUTED_DEBUG_END") logging.warning("\n".join(lines))
[ "def", "distributed_debug_log", "(", "*", "pairs", ")", ":", "if", "config", ".", "jax_distributed_debug", ":", "lines", "=", "[", "\"\\nDISTRIBUTED_DEBUG_BEGIN\"", "]", "try", ":", "lines", ".", "append", "(", "f\"{pairs[0][0]}: {pairs[0][1]}\"", ")", "for", "label", ",", "value", "in", "pairs", "[", "1", ":", "]", ":", "lines", ".", "append", "(", "f\" {label}: {value}\"", ")", "except", "Exception", "as", "e", ":", "lines", ".", "append", "(", "\"DISTRIBUTED_DEBUG logging failed!\"", ")", "lines", ".", "append", "(", "f\"{e}\"", ")", "lines", ".", "append", "(", "\"DISTRIBUTED_DEBUG_END\"", ")", "logging", ".", "warning", "(", "\"\\n\"", ".", "join", "(", "lines", ")", ")" ]
https://github.com/google/jax/blob/bebe9845a873b3203f8050395255f173ba3bbb71/jax/_src/util.py#L393-L410
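A sketch of the call shape and the resulting log block, with made-up label/value pairs:

distributed_debug_log(
    ("execute_replicated", "sharded computation"),  # first pair becomes the heading
    ("process_index", 0),
    ("num_devices", 8),
)
# With jax_distributed_debug enabled, this logs roughly:
# DISTRIBUTED_DEBUG_BEGIN
# execute_replicated: sharded computation
#   process_index: 0
#   num_devices: 8
# DISTRIBUTED_DEBUG_END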
ANSSI-FR/polichombr
e2dc3874ae3d78c3b496e9656c9a6d1b88ae91e1
polichombr/controllers/family.py
python
FamilyController.add_user
(user, family)
return True
Add a user to the family.
Add a user to the family.
[ "Add", "a", "user", "to", "the", "family", "." ]
def add_user(user, family): """ Add a user to the family. """ if user in family.users: return True family.users.append(user) db.session.commit() return True
[ "def", "add_user", "(", "user", ",", "family", ")", ":", "if", "user", "in", "family", ".", "users", ":", "return", "True", "family", ".", "users", ".", "append", "(", "user", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "True" ]
https://github.com/ANSSI-FR/polichombr/blob/e2dc3874ae3d78c3b496e9656c9a6d1b88ae91e1/polichombr/controllers/family.py#L145-L153
cobbler/cobbler
eed8cdca3e970c8aa1d199e80b8c8f19b3f940cc
cobbler/remote.py
python
CobblerXMLRPCInterface.get_files
(self, page=None, results_per_page=None, token=None, **rest)
return self.get_items("file")
This returns all files. :param page: This parameter is not used currently. :param results_per_page: This parameter is not used currently. :param token: The API-token obtained via the login() method. :param rest: This parameter is not used currently. :return: The list of all files.
This returns all files.
[ "This", "returns", "all", "files", "." ]
def get_files(self, page=None, results_per_page=None, token=None, **rest): """ This returns all files. :param page: This parameter is not used currently. :param results_per_page: This parameter is not used currently. :param token: The API-token obtained via the login() method. :param rest: This parameter is not used currently. :return: The list of all files. """ return self.get_items("file")
[ "def", "get_files", "(", "self", ",", "page", "=", "None", ",", "results_per_page", "=", "None", ",", "token", "=", "None", ",", "*", "*", "rest", ")", ":", "return", "self", ".", "get_items", "(", "\"file\"", ")" ]
https://github.com/cobbler/cobbler/blob/eed8cdca3e970c8aa1d199e80b8c8f19b3f940cc/cobbler/remote.py#L938-L948
aewallin/openvoronoi
c4366904dc7ac40c189e95ebb014db7e4b137b86
python_examples/chain_3_rpg_loop.py
python
rpg_vd
(Npts, seed, debug)
return [is_valid, vd, times]
print "polygon is: " for idx in id_list: print idx," ", print "."
print "polygon is: " for idx in id_list: print idx," ", print "."
[ "print", "polygon", "is", ":", "for", "idx", "in", "id_list", ":", "print", "idx", "print", "." ]
def rpg_vd(Npts, seed, debug): far = 1 vd = ovd.VoronoiDiagram(far, 120) vd.reset_vertex_count() poly = rpg.rpg(Npts, seed) pts = [] for p in poly: ocl_pt = ovd.Point(p[0], p[1]) pts.append(ocl_pt) print ocl_pt times = [] id_list = [] m = 0 t_before = time.time() for p in pts: # print " adding vertex ",m id_list.append(vd.addVertexSite(p)) m = m + 1 """ print "polygon is: " for idx in id_list: print idx," ", print "." """ t_after = time.time() times.append(t_after - t_before) # print " pts inserted in ", times[0], " s" # print " vd-check: ",vd.check() if (debug): vd.debug_on() t_before = time.time() for n in range(len(id_list)): n_nxt = n + 1 if n == (len(id_list) - 1): n_nxt = 0 # point 0 is the endpoint of the last segment # print " adding line-site ", id_list[n]," - ", id_list[n_nxt] vd.addLineSite(id_list[n], id_list[n_nxt]) t_after = time.time() times.append(t_after - t_before) print " segs inserted in ", times[1], " s" is_valid = vd.check() print " vd-check: ", is_valid return [is_valid, vd, times]
[ "def", "rpg_vd", "(", "Npts", ",", "seed", ",", "debug", ")", ":", "far", "=", "1", "vd", "=", "ovd", ".", "VoronoiDiagram", "(", "far", ",", "120", ")", "vd", ".", "reset_vertex_count", "(", ")", "poly", "=", "rpg", ".", "rpg", "(", "Npts", ",", "seed", ")", "pts", "=", "[", "]", "for", "p", "in", "poly", ":", "ocl_pt", "=", "ovd", ".", "Point", "(", "p", "[", "0", "]", ",", "p", "[", "1", "]", ")", "pts", ".", "append", "(", "ocl_pt", ")", "print", "ocl_pt", "times", "=", "[", "]", "id_list", "=", "[", "]", "m", "=", "0", "t_before", "=", "time", ".", "time", "(", ")", "for", "p", "in", "pts", ":", "# print \" adding vertex \",m", "id_list", ".", "append", "(", "vd", ".", "addVertexSite", "(", "p", ")", ")", "m", "=", "m", "+", "1", "t_after", "=", "time", ".", "time", "(", ")", "times", ".", "append", "(", "t_after", "-", "t_before", ")", "# print \" pts inserted in \", times[0], \" s\"", "# print \" vd-check: \",vd.check()", "if", "(", "debug", ")", ":", "vd", ".", "debug_on", "(", ")", "t_before", "=", "time", ".", "time", "(", ")", "for", "n", "in", "range", "(", "len", "(", "id_list", ")", ")", ":", "n_nxt", "=", "n", "+", "1", "if", "n", "==", "(", "len", "(", "id_list", ")", "-", "1", ")", ":", "n_nxt", "=", "0", "# point 0 is the endpoint of the last segment", "# print \" adding line-site \", id_list[n],\" - \", id_list[n_nxt]", "vd", ".", "addLineSite", "(", "id_list", "[", "n", "]", ",", "id_list", "[", "n_nxt", "]", ")", "t_after", "=", "time", ".", "time", "(", ")", "times", ".", "append", "(", "t_after", "-", "t_before", ")", "print", "\" segs inserted in \"", ",", "times", "[", "1", "]", ",", "\" s\"", "is_valid", "=", "vd", ".", "check", "(", ")", "print", "\" vd-check: \"", ",", "is_valid", "return", "[", "is_valid", ",", "vd", ",", "times", "]" ]
https://github.com/aewallin/openvoronoi/blob/c4366904dc7ac40c189e95ebb014db7e4b137b86/python_examples/chain_3_rpg_loop.py#L70-L119
mlrun/mlrun
4c120719d64327a34b7ee1ab08fb5e01b258b00a
mlrun/frameworks/sklearn/__init__.py
python
apply_mlrun
( model, context: mlrun.MLClientCtx = None, X_test=None, y_test=None, model_name=None, tag: str = "", generate_test_set=True, **kwargs )
return mh
Wrap the given model with MLRun model, saving the model's attributes and methods while giving it mlrun's additional features. examples: model = LogisticRegression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = apply_mlrun(model, context, X_test=X_test, y_test=y_test) model.fit(X_train, y_train) :param model: The model which will have the fit() function wrapped :param context: MLRun context to work with. If no context is given it will be retrieved via 'mlrun.get_or_create_ctx(None)' :param X_test: X_test dataset :param y_test: y_test dataset :param model_name: The model artifact name (Optional) :param tag: Tag of a version to give to the logged model. :param generate_test_set: Generates a test_set dataset artifact :return: The model in a MLRun model handler.
Wrap the given model with MLRun model, saving the model's attributes and methods while giving it mlrun's additional features.
[ "Wrap", "the", "given", "model", "with", "MLRun", "model", "saving", "the", "model", "s", "attributes", "and", "methods", "while", "giving", "it", "mlrun", "s", "additional", "features", "." ]
def apply_mlrun( model, context: mlrun.MLClientCtx = None, X_test=None, y_test=None, model_name=None, tag: str = "", generate_test_set=True, **kwargs ) -> SKLearnModelHandler: """ Wrap the given model with MLRun model, saving the model's attributes and methods while giving it mlrun's additional features. examples: model = LogisticRegression() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) model = apply_mlrun(model, context, X_test=X_test, y_test=y_test) model.fit(X_train, y_train) :param model: The model which will have the fit() function wrapped :param context: MLRun context to work with. If no context is given it will be retrieved via 'mlrun.get_or_create_ctx(None)' :param X_test: X_test dataset :param y_test: y_test dataset :param model_name: The model artifact name (Optional) :param tag: Tag of a version to give to the logged model. :param generate_test_set: Generates a test_set dataset artifact :return: The model in a MLRun model handler. """ if context is None: context = mlrun.get_or_create_ctx("mlrun_sklearn") kwargs["X_test"] = X_test kwargs["y_test"] = y_test kwargs["generate_test_set"] = generate_test_set mh = SKLearnModelHandler( model_name=model_name or "model", model=model, context=context ) # Add MLRun's interface to the model: MLMLRunInterface.add_interface(mh, context, tag, kwargs) return mh
[ "def", "apply_mlrun", "(", "model", ",", "context", ":", "mlrun", ".", "MLClientCtx", "=", "None", ",", "X_test", "=", "None", ",", "y_test", "=", "None", ",", "model_name", "=", "None", ",", "tag", ":", "str", "=", "\"\"", ",", "generate_test_set", "=", "True", ",", "*", "*", "kwargs", ")", "->", "SKLearnModelHandler", ":", "if", "context", "is", "None", ":", "context", "=", "mlrun", ".", "get_or_create_ctx", "(", "\"mlrun_sklearn\"", ")", "kwargs", "[", "\"X_test\"", "]", "=", "X_test", "kwargs", "[", "\"y_test\"", "]", "=", "y_test", "kwargs", "[", "\"generate_test_set\"", "]", "=", "generate_test_set", "mh", "=", "SKLearnModelHandler", "(", "model_name", "=", "model_name", "or", "\"model\"", ",", "model", "=", "model", ",", "context", "=", "context", ")", "# Add MLRun's interface to the model:", "MLMLRunInterface", ".", "add_interface", "(", "mh", ",", "context", ",", "tag", ",", "kwargs", ")", "return", "mh" ]
https://github.com/mlrun/mlrun/blob/4c120719d64327a34b7ee1ab08fb5e01b258b00a/mlrun/frameworks/sklearn/__init__.py#L16-L60
nutonomy/nuscenes-devkit
05d05b3c994fb3c17b6643016d9f622a001c7275
python-sdk/nuscenes/nuscenes.py
python
NuScenes.__load_table__
(self, table_name)
return table
Loads a table.
Loads a table.
[ "Loads", "a", "table", "." ]
def __load_table__(self, table_name) -> dict: """ Loads a table. """ with open(osp.join(self.table_root, '{}.json'.format(table_name))) as f: table = json.load(f) return table
[ "def", "__load_table__", "(", "self", ",", "table_name", ")", "->", "dict", ":", "with", "open", "(", "osp", ".", "join", "(", "self", ".", "table_root", ",", "'{}.json'", ".", "format", "(", "table_name", ")", ")", ")", "as", "f", ":", "table", "=", "json", ".", "load", "(", "f", ")", "return", "table" ]
https://github.com/nutonomy/nuscenes-devkit/blob/05d05b3c994fb3c17b6643016d9f622a001c7275/python-sdk/nuscenes/nuscenes.py#L134-L138
PaddlePaddle/Research
2da0bd6c72d60e9df403aff23a7802779561c4a1
ST_DM/GenRegion/src/region/geometry.py
python
Segment.__grids
(self, x, y, grid_size, grid_set)
Desc: Compute the grid(s) that contain the given point; there may be more than one. Args: self : self x : x y : y grid_size : grid_size grid_set : global grid set Return: list of (grid_x, grid_y) If the point is inside a grid, one grid is returned; if the point is on a grid edge, two grids are returned; if the point is on a grid corner, four grids are returned; Raise: None
Desc: Compute the grid(s) that contain the given point; there may be more than one. Args: self : self x : x y : y grid_size : grid_size grid_set : global grid set Return: list of (grid_x, grid_y) If the point is inside a grid, one grid is returned; if the point is on a grid edge, two grids are returned; if the point is on a grid corner, four grids are returned; Raise: None
[ "Desc", ":", "compute", "the", "grid", "(", "s", ")", "that", "contain", "the", "given", "point", "possibly", "more", "than", "one", "Args", ":", "self", ":", "self", "x", ":", "x", "y", ":", "y", "grid_size", ":", "grid_size", "grid_set", ":", "global", "grid", "set", "Return", ":", "list", "of", "(", "grid_x", "grid_y", ")", "one", "grid", "if", "the", "point", "is", "inside", "a", "grid", ";", "two", "grids", "if", "on", "a", "grid", "edge", ";", "four", "grids", "if", "on", "a", "grid", "corner", ";", "Raise", ":", "None" ]
def __grids(self, x, y, grid_size, grid_set): """ Desc: Compute the grid(s) that contain the given point; there may be more than one. Args: self : self x : x y : y grid_size : grid_size grid_set : global grid set Return: list of (grid_x, grid_y) If the point is inside a grid, one grid is returned; if the point is on a grid edge, two grids are returned; if the point is on a grid corner, four grids are returned; Raise: None """ grid_x = int(x) / grid_size grid_y = int(y) / grid_size gs = [(grid_x, grid_y)] if grid_x * grid_size == x: gs.append((grid_x - 1, grid_y)) if grid_y * grid_size == y: for i in range(len(gs)): gs.append((gs[i][0], grid_y - 1)) for gd in gs: grid_set.add(gd)
[ "def", "__grids", "(", "self", ",", "x", ",", "y", ",", "grid_size", ",", "grid_set", ")", ":", "grid_x", "=", "int", "(", "x", ")", "/", "grid_size", "grid_y", "=", "int", "(", "y", ")", "/", "grid_size", "gs", "=", "[", "(", "grid_x", ",", "grid_y", ")", "]", "if", "grid_x", "*", "grid_size", "==", "x", ":", "gs", ".", "append", "(", "(", "grid_x", "-", "1", ",", "grid_y", ")", ")", "if", "grid_y", "*", "grid_size", "==", "y", ":", "for", "i", "in", "range", "(", "len", "(", "gs", ")", ")", ":", "gs", ".", "append", "(", "(", "gs", "[", "i", "]", "[", "0", "]", ",", "grid_y", "-", "1", ")", ")", "for", "gd", "in", "gs", ":", "grid_set", ".", "add", "(", "gd", ")" ]
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/ST_DM/GenRegion/src/region/geometry.py#L313-L339
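A worked example of the membership rule, re-implemented for illustration with grid_size=100 (note the original is Python 2, where int division uses /; Python 3 needs //):

def grids(x, y, grid_size):
    gx, gy = int(x) // grid_size, int(y) // grid_size
    gs = [(gx, gy)]
    if gx * grid_size == x:                      # point sits on a vertical grid line
        gs.append((gx - 1, gy))
    if gy * grid_size == y:                      # point sits on a horizontal grid line
        gs.extend((g[0], gy - 1) for g in list(gs))
    return gs

print(grids(150, 250, 100))  # [(1, 2)]                          interior: one grid
print(grids(200, 250, 100))  # [(2, 2), (1, 2)]                  on an edge: two grids
print(grids(200, 300, 100))  # [(2, 3), (1, 3), (2, 2), (1, 2)]  on a corner: four grids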
getsentry/sentry
83b1f25aac3e08075e0e2495bc29efaf35aca18a
src/sentry/utils/locking/lock.py
python
Lock.blocking_acquire
(self, initial_delay: float, timeout: float, exp_base=1.6)
Try to acquire the lock in a polling loop. :param initial_delay: A random retry delay will be picked between 0 and this value (in seconds). The range from which we pick doubles in every iteration. :param timeout: Time in seconds after which ``UnableToAcquireLock`` will be raised.
Try to acquire the lock in a polling loop.
[ "Try", "to", "acquire", "the", "lock", "in", "a", "polling", "loop", "." ]
def blocking_acquire(self, initial_delay: float, timeout: float, exp_base=1.6): """ Try to acquire the lock in a polling loop. :param initial_delay: A random retry delay will be picked between 0 and this value (in seconds). The range from which we pick doubles in every iteration. :param timeout: Time in seconds after which ``UnableToAcquireLock`` will be raised. """ stop = time.monotonic() + timeout attempt = 0 while time.monotonic() < stop: try: return self.acquire() except UnableToAcquireLock: delay = (exp_base ** attempt) * random.random() * initial_delay # Redundant check to prevent futile sleep in last iteration: if time.monotonic() + delay > stop: break time.sleep(delay) attempt += 1 raise UnableToAcquireLock(f"Unable to acquire {self!r} because of timeout")
[ "def", "blocking_acquire", "(", "self", ",", "initial_delay", ":", "float", ",", "timeout", ":", "float", ",", "exp_base", "=", "1.6", ")", ":", "stop", "=", "time", ".", "monotonic", "(", ")", "+", "timeout", "attempt", "=", "0", "while", "time", ".", "monotonic", "(", ")", "<", "stop", ":", "try", ":", "return", "self", ".", "acquire", "(", ")", "except", "UnableToAcquireLock", ":", "delay", "=", "(", "exp_base", "**", "attempt", ")", "*", "random", ".", "random", "(", ")", "*", "initial_delay", "# Redundant check to prevent futile sleep in last iteration:", "if", "time", ".", "monotonic", "(", ")", "+", "delay", ">", "stop", ":", "break", "time", ".", "sleep", "(", "delay", ")", "attempt", "+=", "1", "raise", "UnableToAcquireLock", "(", "f\"Unable to acquire {self!r} because of timeout\"", ")" ]
https://github.com/getsentry/sentry/blob/83b1f25aac3e08075e0e2495bc29efaf35aca18a/src/sentry/utils/locking/lock.py#L47-L72
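A quick sketch of the per-attempt delay ceiling, assuming initial_delay=0.1 and the default exp_base=1.6 (initial_delay chosen for illustration):

initial_delay, exp_base = 0.1, 1.6  # initial_delay is illustrative
for attempt in range(5):
    ceiling = exp_base ** attempt * initial_delay  # actual delay is uniform in [0, ceiling)
    print(attempt, round(ceiling, 3))  # 0.1, 0.16, 0.256, 0.41, 0.655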
bbfamily/abu
2de85ae57923a720dac99a545b4f856f6b87304b
abupy/UtilBu/ABuKLUtil.py
python
bcut_change_vc
(df, bins=None)
return _df_dispatch_concat(df, _bcut_change_vc)
eg: tsla = ABuSymbolPd.make_kl_df('usTSLA') ABuKLUtil.bcut_change_vc(tsla) out: p_change rate (0, 3] 209 0.4147 (-3, 0] 193 0.3829 (3, 7] 47 0.0933 (-7, -3] 44 0.0873 (-10, -7] 6 0.0119 (7, 10] 3 0.0060 (10, inf] 1 0.0020 (-inf, -10] 1 0.0020 :param df: kl data formatted by abupy, or a dict, or an iterable sequence :param bins: default eg: [-np.inf, -10, -7, -3, 0, 3, 7, 10, np.inf] :return: pd.DataFrame
eg: tsla = ABuSymbolPd.make_kl_df('usTSLA') ABuKLUtil.bcut_change_vc(tsla)
[ "eg", ":", "tsla", "=", "ABuSymbolPd", ".", "make_kl_df", "(", "usTSLA", ")", "ABuKLUtil", ".", "bcut_change_vc", "(", "tsla", ")" ]
def bcut_change_vc(df, bins=None): """ eg: tsla = ABuSymbolPd.make_kl_df('usTSLA') ABuKLUtil.bcut_change_vc(tsla) out: p_change rate (0, 3] 209 0.4147 (-3, 0] 193 0.3829 (3, 7] 47 0.0933 (-7, -3] 44 0.0873 (-10, -7] 6 0.0119 (7, 10] 3 0.0060 (10, inf] 1 0.0020 (-inf, -10] 1 0.0020 :param df: kl data formatted by abupy, or a dict, or an iterable sequence :param bins: default eg: [-np.inf, -10, -7, -3, 0, 3, 7, 10, np.inf] :return: pd.DataFrame """ def _bcut_change_vc(p_df, df_name=''): dww = pd.DataFrame(pd.cut(p_df.p_change, bins=bins).value_counts()) # compute the percentage that each bin accounts for dww['{}rate'.format(df_name)] = dww.p_change.values / dww.p_change.values.sum() if len(df_name) > 0: dww.rename(columns={'p_change': '{}'.format(df_name)}, inplace=True) return dww if bins is None: bins = [-np.inf, -10, -7, -3, 0, 3, 7, 10, np.inf] return _df_dispatch_concat(df, _bcut_change_vc)
[ "def", "bcut_change_vc", "(", "df", ",", "bins", "=", "None", ")", ":", "def", "_bcut_change_vc", "(", "p_df", ",", "df_name", "=", "''", ")", ":", "dww", "=", "pd", ".", "DataFrame", "(", "pd", ".", "cut", "(", "p_df", ".", "p_change", ",", "bins", "=", "bins", ")", ".", "value_counts", "(", ")", ")", "# compute the percentage that each bin accounts for", "dww", "[", "'{}rate'", ".", "format", "(", "df_name", ")", "]", "=", "dww", ".", "p_change", ".", "values", "/", "dww", ".", "p_change", ".", "values", ".", "sum", "(", ")", "if", "len", "(", "df_name", ")", ">", "0", ":", "dww", ".", "rename", "(", "columns", "=", "{", "'p_change'", ":", "'{}'", ".", "format", "(", "df_name", ")", "}", ",", "inplace", "=", "True", ")", "return", "dww", "if", "bins", "is", "None", ":", "bins", "=", "[", "-", "np", ".", "inf", ",", "-", "10", ",", "-", "7", ",", "-", "3", ",", "0", ",", "3", ",", "7", ",", "10", ",", "np", ".", "inf", "]", "return", "_df_dispatch_concat", "(", "df", ",", "_bcut_change_vc", ")" ]
https://github.com/bbfamily/abu/blob/2de85ae57923a720dac99a545b4f856f6b87304b/abupy/UtilBu/ABuKLUtil.py#L161-L193
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/util/data_transform.py
python
save_missing_imputer_model
(missing_fill=False, missing_replace_method=None, missing_impute=None, missing_fill_value=None, missing_replace_rate=None, header=None, model_name="Imputer")
return model_meta, model_param
[]
def save_missing_imputer_model(missing_fill=False, missing_replace_method=None, missing_impute=None, missing_fill_value=None, missing_replace_rate=None, header=None, model_name="Imputer"): model_meta = DataTransformImputerMeta() model_param = DataTransformImputerParam() model_meta.is_imputer = missing_fill if missing_fill: if missing_replace_method: model_meta.strategy = str(missing_replace_method) if missing_impute is not None: model_meta.missing_value.extend(map(str, missing_impute)) if missing_fill_value is not None: feature_value_dict = dict(zip(header, map(str, missing_fill_value))) model_param.missing_replace_value.update(feature_value_dict) if missing_replace_rate is not None: missing_replace_rate_dict = dict(zip(header, missing_replace_rate)) model_param.missing_value_ratio.update(missing_replace_rate_dict) return model_meta, model_param
[ "def", "save_missing_imputer_model", "(", "missing_fill", "=", "False", ",", "missing_replace_method", "=", "None", ",", "missing_impute", "=", "None", ",", "missing_fill_value", "=", "None", ",", "missing_replace_rate", "=", "None", ",", "header", "=", "None", ",", "model_name", "=", "\"Imputer\"", ")", ":", "model_meta", "=", "DataTransformImputerMeta", "(", ")", "model_param", "=", "DataTransformImputerParam", "(", ")", "model_meta", ".", "is_imputer", "=", "missing_fill", "if", "missing_fill", ":", "if", "missing_replace_method", ":", "model_meta", ".", "strategy", "=", "str", "(", "missing_replace_method", ")", "if", "missing_impute", "is", "not", "None", ":", "model_meta", ".", "missing_value", ".", "extend", "(", "map", "(", "str", ",", "missing_impute", ")", ")", "if", "missing_fill_value", "is", "not", "None", ":", "feature_value_dict", "=", "dict", "(", "zip", "(", "header", ",", "map", "(", "str", ",", "missing_fill_value", ")", ")", ")", "model_param", ".", "missing_replace_value", ".", "update", "(", "feature_value_dict", ")", "if", "missing_replace_rate", "is", "not", "None", ":", "missing_replace_rate_dict", "=", "dict", "(", "zip", "(", "header", ",", "missing_replace_rate", ")", ")", "model_param", ".", "missing_value_ratio", ".", "update", "(", "missing_replace_rate_dict", ")", "return", "model_meta", ",", "model_param" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/util/data_transform.py#L1083-L1109
jihunchoi/recurrent-batch-normalization-pytorch
61736ecd2547bdb43e193ac6aa28545e3918ff9b
bnlstm.py
python
SeparatedBatchNorm1d.__init__
(self, num_features, max_length, eps=1e-5, momentum=0.1, affine=True)
Most parts are copied from torch.nn.modules.batchnorm._BatchNorm.
Most parts are copied from torch.nn.modules.batchnorm._BatchNorm.
[ "Most", "parts", "are", "copied", "from", "torch", ".", "nn", ".", "modules", ".", "batchnorm", ".", "_BatchNorm", "." ]
def __init__(self, num_features, max_length, eps=1e-5, momentum=0.1, affine=True): """ Most parts are copied from torch.nn.modules.batchnorm._BatchNorm. """ super(SeparatedBatchNorm1d, self).__init__() self.num_features = num_features self.max_length = max_length self.affine = affine self.eps = eps self.momentum = momentum if self.affine: self.weight = nn.Parameter(torch.FloatTensor(num_features)) self.bias = nn.Parameter(torch.FloatTensor(num_features)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) for i in range(max_length): self.register_buffer( 'running_mean_{}'.format(i), torch.zeros(num_features)) self.register_buffer( 'running_var_{}'.format(i), torch.ones(num_features)) self.reset_parameters()
[ "def", "__init__", "(", "self", ",", "num_features", ",", "max_length", ",", "eps", "=", "1e-5", ",", "momentum", "=", "0.1", ",", "affine", "=", "True", ")", ":", "super", "(", "SeparatedBatchNorm1d", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "num_features", "=", "num_features", "self", ".", "max_length", "=", "max_length", "self", ".", "affine", "=", "affine", "self", ".", "eps", "=", "eps", "self", ".", "momentum", "=", "momentum", "if", "self", ".", "affine", ":", "self", ".", "weight", "=", "nn", ".", "Parameter", "(", "torch", ".", "FloatTensor", "(", "num_features", ")", ")", "self", ".", "bias", "=", "nn", ".", "Parameter", "(", "torch", ".", "FloatTensor", "(", "num_features", ")", ")", "else", ":", "self", ".", "register_parameter", "(", "'weight'", ",", "None", ")", "self", ".", "register_parameter", "(", "'bias'", ",", "None", ")", "for", "i", "in", "range", "(", "max_length", ")", ":", "self", ".", "register_buffer", "(", "'running_mean_{}'", ".", "format", "(", "i", ")", ",", "torch", ".", "zeros", "(", "num_features", ")", ")", "self", ".", "register_buffer", "(", "'running_var_{}'", ".", "format", "(", "i", ")", ",", "torch", ".", "ones", "(", "num_features", ")", ")", "self", ".", "reset_parameters", "(", ")" ]
https://github.com/jihunchoi/recurrent-batch-normalization-pytorch/blob/61736ecd2547bdb43e193ac6aa28545e3918ff9b/bnlstm.py#L15-L39
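A hedged sketch of how a forward pass could select the per-timestep buffers registered above (the class's actual forward method is not part of this record):

def stats_for_time(bn, time):
    # Clamp so sequences longer than max_length reuse the last timestep's
    # statistics -- an assumption about the intended lookup, for illustration.
    t = min(time, bn.max_length - 1)
    running_mean = getattr(bn, 'running_mean_{}'.format(t))
    running_var = getattr(bn, 'running_var_{}'.format(t))
    return running_mean, running_var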
nicoboss/nsz
ff0c9fd102e4ddb6c2e4d7bada8840943423f419
nsz/gui/KivyOnTop.py
python
set_not_always_on_top
(title: str)
Sets the HWND_NOTOPMOST flag for the current Kivy Window.
Sets the HWND_NOTOPMOST flag for the current Kivy Window.
[ "Sets", "the", "HWND_NOTOPMOST", "flag", "for", "the", "current", "Kivy", "Window", "." ]
def set_not_always_on_top(title: str): ''' Sets the HWND_NOTOPMOST flag for the current Kivy Window. ''' global hwnd if not 'hwnd' in globals(): find_hwnd(title) rect = win32gui.GetWindowRect(hwnd) x = rect[0] y = rect[1] w = rect[2] - x h = rect[3] - y win32gui.SetWindowPos(hwnd, win32con.HWND_NOTOPMOST, x, y, w, h, 0)
[ "def", "set_not_always_on_top", "(", "title", ":", "str", ")", ":", "global", "hwnd", "if", "not", "'hwnd'", "in", "globals", "(", ")", ":", "find_hwnd", "(", "title", ")", "rect", "=", "win32gui", ".", "GetWindowRect", "(", "hwnd", ")", "x", "=", "rect", "[", "0", "]", "y", "=", "rect", "[", "1", "]", "w", "=", "rect", "[", "2", "]", "-", "x", "h", "=", "rect", "[", "3", "]", "-", "y", "win32gui", ".", "SetWindowPos", "(", "hwnd", ",", "win32con", ".", "HWND_NOTOPMOST", ",", "x", ",", "y", ",", "w", ",", "h", ",", "0", ")" ]
https://github.com/nicoboss/nsz/blob/ff0c9fd102e4ddb6c2e4d7bada8840943423f419/nsz/gui/KivyOnTop.py#L41-L57
fonttools/fonttools
892322aaff6a89bea5927379ec06bc0da3dfb7df
Lib/fontTools/otlLib/builder.py
python
LookupBuilder.setBacktrackCoverage_
(self, prefix, subtable)
[]
def setBacktrackCoverage_(self, prefix, subtable): subtable.BacktrackGlyphCount = len(prefix) subtable.BacktrackCoverage = [] for p in reversed(prefix): coverage = buildCoverage(p, self.glyphMap) subtable.BacktrackCoverage.append(coverage)
[ "def", "setBacktrackCoverage_", "(", "self", ",", "prefix", ",", "subtable", ")", ":", "subtable", ".", "BacktrackGlyphCount", "=", "len", "(", "prefix", ")", "subtable", ".", "BacktrackCoverage", "=", "[", "]", "for", "p", "in", "reversed", "(", "prefix", ")", ":", "coverage", "=", "buildCoverage", "(", "p", ",", "self", ".", "glyphMap", ")", "subtable", ".", "BacktrackCoverage", ".", "append", "(", "coverage", ")" ]
https://github.com/fonttools/fonttools/blob/892322aaff6a89bea5927379ec06bc0da3dfb7df/Lib/fontTools/otlLib/builder.py#L169-L174
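The prefix is walked in reverse because OpenType stores backtrack coverages closest-to-input first; a tiny illustration with made-up glyph sets:

prefix = [{'A'}, {'B'}, {'C'}]  # 'C' is the glyph set adjacent to the input sequence
# BacktrackCoverage[0] ends up covering 'C', [1] 'B', [2] 'A':
print([sorted(p) for p in reversed(prefix)])  # [['C'], ['B'], ['A']]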
JoelBender/bacpypes
41104c2b565b2ae9a637c941dfb0fe04195c5e96
py25/bacpypes/local/file.py
python
LocalRecordAccessFileObject.__init__
(self, **kwargs)
Initialize a record accessed file object.
Initialize a record accessed file object.
[ "Initialize", "a", "record", "accessed", "file", "object", "." ]
def __init__(self, **kwargs): """ Initialize a record accessed file object. """ if _debug: LocalRecordAccessFileObject._debug("__init__ %r", kwargs, ) # verify the file access method or provide it if 'fileAccessMethod' in kwargs: if kwargs['fileAccessMethod'] != 'recordAccess': raise ValueError("inconsistent file access method") else: kwargs['fileAccessMethod'] = 'recordAccess' # continue with initialization FileObject.__init__(self, **kwargs)
[ "def", "__init__", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "_debug", ":", "LocalRecordAccessFileObject", ".", "_debug", "(", "\"__init__ %r\"", ",", "kwargs", ",", ")", "# verify the file access method or provide it", "if", "'fileAccessMethod'", "in", "kwargs", ":", "if", "kwargs", "[", "'fileAccessMethod'", "]", "!=", "'recordAccess'", ":", "raise", "ValueError", "(", "\"inconsistent file access method\"", ")", "else", ":", "kwargs", "[", "'fileAccessMethod'", "]", "=", "'recordAccess'", "# continue with initialization", "FileObject", ".", "__init__", "(", "self", ",", "*", "*", "kwargs", ")" ]
https://github.com/JoelBender/bacpypes/blob/41104c2b565b2ae9a637c941dfb0fe04195c5e96/py25/bacpypes/local/file.py#L17-L32
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/userreports/indicators/specs.py
python
LedgerBalancesIndicatorSpec.readable_output
(self, context)
return "Ledgers from {}".format(str(self.get_case_id_expression(context)))
[]
def readable_output(self, context): return "Ledgers from {}".format(str(self.get_case_id_expression(context)))
[ "def", "readable_output", "(", "self", ",", "context", ")", ":", "return", "\"Ledgers from {}\"", ".", "format", "(", "str", "(", "self", ".", "get_case_id_expression", "(", "context", ")", ")", ")" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/userreports/indicators/specs.py#L144-L145
openstack/nova
b49b7663e1c3073917d5844b81d38db8e86d05c4
nova/api/openstack/compute/views/flavors.py
python
ViewBuilder.index
(self, request, flavors)
return self._list_view(self.basic, request, flavors, coll_name, include_description=include_description)
Return the 'index' view of flavors.
Return the 'index' view of flavors.
[ "Return", "the", "index", "view", "of", "flavors", "." ]
def index(self, request, flavors): """Return the 'index' view of flavors.""" coll_name = self._collection_name include_description = api_version_request.is_supported( request, FLAVOR_DESCRIPTION_MICROVERSION) return self._list_view(self.basic, request, flavors, coll_name, include_description=include_description)
[ "def", "index", "(", "self", ",", "request", ",", "flavors", ")", ":", "coll_name", "=", "self", ".", "_collection_name", "include_description", "=", "api_version_request", ".", "is_supported", "(", "request", ",", "FLAVOR_DESCRIPTION_MICROVERSION", ")", "return", "self", ".", "_list_view", "(", "self", ".", "basic", ",", "request", ",", "flavors", ",", "coll_name", ",", "include_description", "=", "include_description", ")" ]
https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/api/openstack/compute/views/flavors.py#L78-L84
materialsproject/pymatgen
8128f3062a334a2edd240e4062b5b9bdd1ae6f58
pymatgen/analysis/surface_analysis.py
python
WorkFunctionAnalyzer.get_locpot_along_slab_plot
(self, label_energies=True, plt=None, label_fontsize=10)
return plt
Returns a plot of the local potential (eV) vs the position along the c axis of the slab model (Ang) Args: label_energies (bool): Whether to label relevant energy quantities such as the work function, Fermi energy, vacuum locpot, bulk-like locpot plt (plt): Matplotlib pylab object label_fontsize (float): Fontsize of labels Returns plt of the locpot vs c axis
Returns a plot of the local potential (eV) vs the position along the c axis of the slab model (Ang)
[ "Returns", "a", "plot", "of", "the", "local", "potential", "(", "eV", ")", "vs", "the", "position", "along", "the", "c", "axis", "of", "the", "slab", "model", "(", "Ang", ")" ]
def get_locpot_along_slab_plot(self, label_energies=True, plt=None, label_fontsize=10): """ Returns a plot of the local potential (eV) vs the position along the c axis of the slab model (Ang) Args: label_energies (bool): Whether to label relevant energy quantities such as the work function, Fermi energy, vacuum locpot, bulk-like locpot plt (plt): Matplotlib pylab object label_fontsize (float): Fontsize of labels Returns plt of the locpot vs c axis """ plt = pretty_plot(width=6, height=4) if not plt else plt # plot the raw locpot signal along c plt.plot(self.along_c, self.locpot_along_c, "b--") # Get the local averaged signal of the locpot along c xg, yg = [], [] for i, p in enumerate(self.locpot_along_c): # average signal is just the bulk-like potential when in the slab region in_slab = False for r in self.slab_regions: if r[0] <= self.along_c[i] <= r[1]: in_slab = True if len(self.slab_regions) > 1: if self.along_c[i] >= self.slab_regions[1][1]: in_slab = True if self.along_c[i] <= self.slab_regions[0][0]: in_slab = True if in_slab: yg.append(self.ave_bulk_p) xg.append(self.along_c[i]) elif p < self.ave_bulk_p: yg.append(self.ave_bulk_p) xg.append(self.along_c[i]) else: yg.append(p) xg.append(self.along_c[i]) xg, yg = zip(*sorted(zip(xg, yg))) plt.plot(xg, yg, "r", linewidth=2.5, zorder=-1) # make it look nice if label_energies: plt = self.get_labels(plt, label_fontsize=label_fontsize) plt.xlim([0, 1]) plt.ylim([min(self.locpot_along_c), self.vacuum_locpot + self.ave_locpot * 0.2]) plt.xlabel(r"Fractional coordinates ($\hat{c}$)", fontsize=25) plt.xticks(fontsize=15, rotation=45) plt.ylabel(r"Potential (eV)", fontsize=25) plt.yticks(fontsize=15) return plt
[ "def", "get_locpot_along_slab_plot", "(", "self", ",", "label_energies", "=", "True", ",", "plt", "=", "None", ",", "label_fontsize", "=", "10", ")", ":", "plt", "=", "pretty_plot", "(", "width", "=", "6", ",", "height", "=", "4", ")", "if", "not", "plt", "else", "plt", "# plot the raw locpot signal along c", "plt", ".", "plot", "(", "self", ".", "along_c", ",", "self", ".", "locpot_along_c", ",", "\"b--\"", ")", "# Get the local averaged signal of the locpot along c", "xg", ",", "yg", "=", "[", "]", ",", "[", "]", "for", "i", ",", "p", "in", "enumerate", "(", "self", ".", "locpot_along_c", ")", ":", "# average signal is just the bulk-like potential when in the slab region", "in_slab", "=", "False", "for", "r", "in", "self", ".", "slab_regions", ":", "if", "r", "[", "0", "]", "<=", "self", ".", "along_c", "[", "i", "]", "<=", "r", "[", "1", "]", ":", "in_slab", "=", "True", "if", "len", "(", "self", ".", "slab_regions", ")", ">", "1", ":", "if", "self", ".", "along_c", "[", "i", "]", ">=", "self", ".", "slab_regions", "[", "1", "]", "[", "1", "]", ":", "in_slab", "=", "True", "if", "self", ".", "along_c", "[", "i", "]", "<=", "self", ".", "slab_regions", "[", "0", "]", "[", "0", "]", ":", "in_slab", "=", "True", "if", "in_slab", ":", "yg", ".", "append", "(", "self", ".", "ave_bulk_p", ")", "xg", ".", "append", "(", "self", ".", "along_c", "[", "i", "]", ")", "elif", "p", "<", "self", ".", "ave_bulk_p", ":", "yg", ".", "append", "(", "self", ".", "ave_bulk_p", ")", "xg", ".", "append", "(", "self", ".", "along_c", "[", "i", "]", ")", "else", ":", "yg", ".", "append", "(", "p", ")", "xg", ".", "append", "(", "self", ".", "along_c", "[", "i", "]", ")", "xg", ",", "yg", "=", "zip", "(", "*", "sorted", "(", "zip", "(", "xg", ",", "yg", ")", ")", ")", "plt", ".", "plot", "(", "xg", ",", "yg", ",", "\"r\"", ",", "linewidth", "=", "2.5", ",", "zorder", "=", "-", "1", ")", "# make it look nice", "if", "label_energies", ":", "plt", "=", "self", ".", "get_labels", "(", "plt", ",", "label_fontsize", "=", "label_fontsize", ")", "plt", ".", "xlim", "(", "[", "0", ",", "1", "]", ")", "plt", ".", "ylim", "(", "[", "min", "(", "self", ".", "locpot_along_c", ")", ",", "self", ".", "vacuum_locpot", "+", "self", ".", "ave_locpot", "*", "0.2", "]", ")", "plt", ".", "xlabel", "(", "r\"Fractional coordinates ($\\hat{c}$)\"", ",", "fontsize", "=", "25", ")", "plt", ".", "xticks", "(", "fontsize", "=", "15", ",", "rotation", "=", "45", ")", "plt", ".", "ylabel", "(", "r\"Potential (eV)\"", ",", "fontsize", "=", "25", ")", "plt", ".", "yticks", "(", "fontsize", "=", "15", ")", "return", "plt" ]
https://github.com/materialsproject/pymatgen/blob/8128f3062a334a2edd240e4062b5b9bdd1ae6f58/pymatgen/analysis/surface_analysis.py#L1533-L1589
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/src/lib/service.py
python
Service.add_portal_ip
(self, pip)
add cluster ip
add cluster ip
[ "add", "cluster", "ip" ]
def add_portal_ip(self, pip): '''add cluster ip''' self.put(Service.portal_ip, pip)
[ "def", "add_portal_ip", "(", "self", ",", "pip", ")", ":", "self", ".", "put", "(", "Service", ".", "portal_ip", ",", "pip", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/src/lib/service.py#L142-L144
fkie/multimaster_fkie
3d23df29d25d71a75c66bbd3cc6e9cbb255724d8
fkie_node_manager/src/fkie_node_manager/launch_list_model.py
python
PathItem.is_launch_file
(self)
return self.path is not None and self.id in [self.LAUNCH_FILE, self.RECENT_FILE] and self.path.endswith('.launch')
:return: True if it is a launch file :rtype: bool
:return: True if it is a launch file :rtype: bool
[ ":", "return", ":", "True", "if", "it", "is", "a", "launch", "file", ":", "rtype", ":", "bool" ]
def is_launch_file(self): ''' :return: True if it is a launch file :rtype: bool ''' return self.path is not None and self.id in [self.LAUNCH_FILE, self.RECENT_FILE] and self.path.endswith('.launch')
[ "def", "is_launch_file", "(", "self", ")", ":", "return", "self", ".", "path", "is", "not", "None", "and", "self", ".", "id", "in", "[", "self", ".", "LAUNCH_FILE", ",", "self", ".", "RECENT_FILE", "]", "and", "self", ".", "path", ".", "endswith", "(", "'.launch'", ")" ]
https://github.com/fkie/multimaster_fkie/blob/3d23df29d25d71a75c66bbd3cc6e9cbb255724d8/fkie_node_manager/src/fkie_node_manager/launch_list_model.py#L295-L300
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v7/services/services/combined_audience_service/client.py
python
CombinedAudienceServiceClient.__init__
( self, *, credentials: Optional[credentials.Credentials] = None, transport: Union[str, CombinedAudienceServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, )
Instantiate the combined audience service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.CombinedAudienceServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason.
Instantiate the combined audience service client.
[ "Instantiate", "the", "combined", "audience", "service", "client", "." ]
def __init__( self, *, credentials: Optional[credentials.Credentials] = None, transport: Union[str, CombinedAudienceServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the combined audience service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.CombinedAudienceServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool( util.strtobool( os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") ) ) ssl_credentials = None is_mtls = False if use_client_cert: if client_options.client_cert_source: import grpc # type: ignore cert, key = client_options.client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) is_mtls = True else: creds = SslCredentials() is_mtls = creds.is_mtls ssl_credentials = creds.ssl_credentials if is_mtls else None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, CombinedAudienceServiceTransport): # transport is a CombinedAudienceServiceTransport instance. if credentials: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) self._transport = transport elif isinstance(transport, str): Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, host=self.DEFAULT_ENDPOINT ) else: self._transport = CombinedAudienceServiceGrpcTransport( credentials=credentials, host=api_endpoint, ssl_channel_credentials=ssl_credentials, client_info=client_info, )
[ "def", "__init__", "(", "self", ",", "*", ",", "credentials", ":", "Optional", "[", "credentials", ".", "Credentials", "]", "=", "None", ",", "transport", ":", "Union", "[", "str", ",", "CombinedAudienceServiceTransport", ",", "None", "]", "=", "None", ",", "client_options", ":", "Optional", "[", "client_options_lib", ".", "ClientOptions", "]", "=", "None", ",", "client_info", ":", "gapic_v1", ".", "client_info", ".", "ClientInfo", "=", "DEFAULT_CLIENT_INFO", ",", ")", "->", "None", ":", "if", "isinstance", "(", "client_options", ",", "dict", ")", ":", "client_options", "=", "client_options_lib", ".", "from_dict", "(", "client_options", ")", "if", "client_options", "is", "None", ":", "client_options", "=", "client_options_lib", ".", "ClientOptions", "(", ")", "# Create SSL credentials for mutual TLS if needed.", "use_client_cert", "=", "bool", "(", "util", ".", "strtobool", "(", "os", ".", "getenv", "(", "\"GOOGLE_API_USE_CLIENT_CERTIFICATE\"", ",", "\"false\"", ")", ")", ")", "ssl_credentials", "=", "None", "is_mtls", "=", "False", "if", "use_client_cert", ":", "if", "client_options", ".", "client_cert_source", ":", "import", "grpc", "# type: ignore", "cert", ",", "key", "=", "client_options", ".", "client_cert_source", "(", ")", "ssl_credentials", "=", "grpc", ".", "ssl_channel_credentials", "(", "certificate_chain", "=", "cert", ",", "private_key", "=", "key", ")", "is_mtls", "=", "True", "else", ":", "creds", "=", "SslCredentials", "(", ")", "is_mtls", "=", "creds", ".", "is_mtls", "ssl_credentials", "=", "creds", ".", "ssl_credentials", "if", "is_mtls", "else", "None", "# Figure out which api endpoint to use.", "if", "client_options", ".", "api_endpoint", "is", "not", "None", ":", "api_endpoint", "=", "client_options", ".", "api_endpoint", "else", ":", "use_mtls_env", "=", "os", ".", "getenv", "(", "\"GOOGLE_API_USE_MTLS_ENDPOINT\"", ",", "\"auto\"", ")", "if", "use_mtls_env", "==", "\"never\"", ":", "api_endpoint", "=", "self", ".", "DEFAULT_ENDPOINT", "elif", "use_mtls_env", "==", "\"always\"", ":", "api_endpoint", "=", "self", ".", "DEFAULT_MTLS_ENDPOINT", "elif", "use_mtls_env", "==", "\"auto\"", ":", "api_endpoint", "=", "(", "self", ".", "DEFAULT_MTLS_ENDPOINT", "if", "is_mtls", "else", "self", ".", "DEFAULT_ENDPOINT", ")", "else", ":", "raise", "MutualTLSChannelError", "(", "\"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always\"", ")", "# Save or instantiate the transport.", "# Ordinarily, we provide the transport, but allowing a custom transport", "# instance provides an extensibility point for unusual situations.", "if", "isinstance", "(", "transport", ",", "CombinedAudienceServiceTransport", ")", ":", "# transport is a CombinedAudienceServiceTransport instance.", "if", "credentials", ":", "raise", "ValueError", "(", "\"When providing a transport instance, \"", "\"provide its credentials directly.\"", ")", "self", ".", "_transport", "=", "transport", "elif", "isinstance", "(", "transport", ",", "str", ")", ":", "Transport", "=", "type", "(", "self", ")", ".", "get_transport_class", "(", "transport", ")", "self", ".", "_transport", "=", "Transport", "(", "credentials", "=", "credentials", ",", "host", "=", "self", ".", "DEFAULT_ENDPOINT", ")", "else", ":", "self", ".", "_transport", "=", "CombinedAudienceServiceGrpcTransport", "(", "credentials", "=", "credentials", ",", "host", "=", "api_endpoint", ",", "ssl_channel_credentials", "=", "ssl_credentials", ",", "client_info", "=", "client_info", ",", ")" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v7/services/services/combined_audience_service/client.py#L246-L361
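The endpoint branch above reduces to a small decision function; a sketch restating it, with the endpoint constants passed in symbolically:

def resolve_endpoint(use_mtls_env, is_mtls, default_ep, mtls_ep):
    # Mirrors the branch above: "never"/"always" force one endpoint,
    # "auto" (the default) follows client-certificate presence.
    if use_mtls_env == "never":
        return default_ep
    if use_mtls_env == "always":
        return mtls_ep
    if use_mtls_env == "auto":
        return mtls_ep if is_mtls else default_ep
    raise ValueError("Accepted values: never, auto, always")

assert resolve_endpoint("auto", True, "EP", "MTLS_EP") == "MTLS_EP"
assert resolve_endpoint("never", True, "EP", "MTLS_EP") == "EP"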
bastula/dicompyler
2643e0ee145cb7c699b3d36e3e4f07ac9dc7b1f2
dicompyler/baseplugins/2dview.py
python
plugin2DView.OnZoomOut
(self, evt)
Zoom the view out.
Zoom the view out.
[ "Zoom", "the", "view", "out", "." ]
def OnZoomOut(self, evt): """Zoom the view out.""" if (self.zoom > 1): self.zoom = self.zoom / 1.1 self.Refresh()
[ "def", "OnZoomOut", "(", "self", ",", "evt", ")", ":", "if", "(", "self", ".", "zoom", ">", "1", ")", ":", "self", ".", "zoom", "=", "self", ".", "zoom", "/", "1.1", "self", ".", "Refresh", "(", ")" ]
https://github.com/bastula/dicompyler/blob/2643e0ee145cb7c699b3d36e3e4f07ac9dc7b1f2/dicompyler/baseplugins/2dview.py#L582-L587
aneisch/home-assistant-config
86e381fde9609cb8871c439c433c12989e4e225d
custom_components/hacs/repositories/base.py
python
HacsRepository.common_update_data
(self, ignore_issues: bool = False, force: bool = False)
Common update data.
Common update data.
[ "Common", "update", "data", "." ]
async def common_update_data(self, ignore_issues: bool = False, force: bool = False) -> None: """Common update data.""" releases = [] try: repository_object, etag = await self.async_get_legacy_repository_object( etag=None if force or self.data.installed else self.data.etag_repository, ) self.repository_object = repository_object if self.data.full_name.lower() != repository_object.full_name.lower(): self.hacs.common.renamed_repositories[ self.data.full_name ] = repository_object.full_name raise HacsRepositoryExistException self.data.update_data(repository_object.attributes) self.data.etag_repository = etag except HacsNotModifiedException: return except HacsRepositoryExistException: raise HacsRepositoryExistException from None except (AIOGitHubAPIException, HacsException) as exception: if not self.hacs.status.startup: self.logger.error("%s %s", self, exception) if not ignore_issues: self.validate.errors.append("Repository does not exist.") raise HacsException(exception) from exception # Make sure the repository is not archived. if self.data.archived and not ignore_issues: self.validate.errors.append("Repository is archived.") if self.data.full_name not in self.hacs.common.archived_repositories: self.hacs.common.archived_repositories.append(self.data.full_name) raise HacsRepositoryArchivedException("Repository is archived.") # Make sure the repository is not in the blacklist. if self.hacs.repositories.is_removed(self.data.full_name) and not ignore_issues: self.validate.errors.append("Repository is in the blacklist.") raise HacsException("Repository is in the blacklist.") # Get releases. try: releases = await self.get_releases( prerelease=self.data.show_beta, returnlimit=self.hacs.configuration.release_limit, ) if releases: self.data.releases = True self.releases.objects = [x for x in releases if not x.draft] self.data.published_tags = [x.tag_name for x in self.releases.objects] self.data.last_version = next(iter(self.data.published_tags)) except (AIOGitHubAPIException, HacsException): self.data.releases = False if not self.force_branch: self.ref = version_to_download(self) if self.data.releases: for release in self.releases.objects or []: if release.tag_name == self.ref: assets = release.assets if assets: downloads = next(iter(assets)).attributes.get("download_count") self.data.downloads = downloads self.hacs.log.debug("%s Running checks against %s", self, self.ref.replace("tags/", "")) try: self.tree = await self.get_tree(self.ref) if not self.tree: raise HacsException("No files in tree") self.treefiles = [] for treefile in self.tree: self.treefiles.append(treefile.full_path) except (AIOGitHubAPIException, HacsException) as exception: if not self.hacs.status.startup: self.logger.error("%s %s", self, exception) if not ignore_issues: raise HacsException(exception) from None
[ "async", "def", "common_update_data", "(", "self", ",", "ignore_issues", ":", "bool", "=", "False", ",", "force", ":", "bool", "=", "False", ")", "->", "None", ":", "releases", "=", "[", "]", "try", ":", "repository_object", ",", "etag", "=", "await", "self", ".", "async_get_legacy_repository_object", "(", "etag", "=", "None", "if", "force", "or", "self", ".", "data", ".", "installed", "else", "self", ".", "data", ".", "etag_repository", ",", ")", "self", ".", "repository_object", "=", "repository_object", "if", "self", ".", "data", ".", "full_name", ".", "lower", "(", ")", "!=", "repository_object", ".", "full_name", ".", "lower", "(", ")", ":", "self", ".", "hacs", ".", "common", ".", "renamed_repositories", "[", "self", ".", "data", ".", "full_name", "]", "=", "repository_object", ".", "full_name", "raise", "HacsRepositoryExistException", "self", ".", "data", ".", "update_data", "(", "repository_object", ".", "attributes", ")", "self", ".", "data", ".", "etag_repository", "=", "etag", "except", "HacsNotModifiedException", ":", "return", "except", "HacsRepositoryExistException", ":", "raise", "HacsRepositoryExistException", "from", "None", "except", "(", "AIOGitHubAPIException", ",", "HacsException", ")", "as", "exception", ":", "if", "not", "self", ".", "hacs", ".", "status", ".", "startup", ":", "self", ".", "logger", ".", "error", "(", "\"%s %s\"", ",", "self", ",", "exception", ")", "if", "not", "ignore_issues", ":", "self", ".", "validate", ".", "errors", ".", "append", "(", "\"Repository does not exist.\"", ")", "raise", "HacsException", "(", "exception", ")", "from", "exception", "# Make sure the repository is not archived.", "if", "self", ".", "data", ".", "archived", "and", "not", "ignore_issues", ":", "self", ".", "validate", ".", "errors", ".", "append", "(", "\"Repository is archived.\"", ")", "if", "self", ".", "data", ".", "full_name", "not", "in", "self", ".", "hacs", ".", "common", ".", "archived_repositories", ":", "self", ".", "hacs", ".", "common", ".", "archived_repositories", ".", "append", "(", "self", ".", "data", ".", "full_name", ")", "raise", "HacsRepositoryArchivedException", "(", "\"Repository is archived.\"", ")", "# Make sure the repository is not in the blacklist.", "if", "self", ".", "hacs", ".", "repositories", ".", "is_removed", "(", "self", ".", "data", ".", "full_name", ")", "and", "not", "ignore_issues", ":", "self", ".", "validate", ".", "errors", ".", "append", "(", "\"Repository is in the blacklist.\"", ")", "raise", "HacsException", "(", "\"Repository is in the blacklist.\"", ")", "# Get releases.", "try", ":", "releases", "=", "await", "self", ".", "get_releases", "(", "prerelease", "=", "self", ".", "data", ".", "show_beta", ",", "returnlimit", "=", "self", ".", "hacs", ".", "configuration", ".", "release_limit", ",", ")", "if", "releases", ":", "self", ".", "data", ".", "releases", "=", "True", "self", ".", "releases", ".", "objects", "=", "[", "x", "for", "x", "in", "releases", "if", "not", "x", ".", "draft", "]", "self", ".", "data", ".", "published_tags", "=", "[", "x", ".", "tag_name", "for", "x", "in", "self", ".", "releases", ".", "objects", "]", "self", ".", "data", ".", "last_version", "=", "next", "(", "iter", "(", "self", ".", "data", ".", "published_tags", ")", ")", "except", "(", "AIOGitHubAPIException", ",", "HacsException", ")", ":", "self", ".", "data", ".", "releases", "=", "False", "if", "not", "self", ".", "force_branch", ":", "self", ".", "ref", "=", "version_to_download", "(", "self", ")", "if", "self", ".", "data", ".", 
"releases", ":", "for", "release", "in", "self", ".", "releases", ".", "objects", "or", "[", "]", ":", "if", "release", ".", "tag_name", "==", "self", ".", "ref", ":", "assets", "=", "release", ".", "assets", "if", "assets", ":", "downloads", "=", "next", "(", "iter", "(", "assets", ")", ")", ".", "attributes", ".", "get", "(", "\"download_count\"", ")", "self", ".", "data", ".", "downloads", "=", "downloads", "self", ".", "hacs", ".", "log", ".", "debug", "(", "\"%s Running checks against %s\"", ",", "self", ",", "self", ".", "ref", ".", "replace", "(", "\"tags/\"", ",", "\"\"", ")", ")", "try", ":", "self", ".", "tree", "=", "await", "self", ".", "get_tree", "(", "self", ".", "ref", ")", "if", "not", "self", ".", "tree", ":", "raise", "HacsException", "(", "\"No files in tree\"", ")", "self", ".", "treefiles", "=", "[", "]", "for", "treefile", "in", "self", ".", "tree", ":", "self", ".", "treefiles", ".", "append", "(", "treefile", ".", "full_path", ")", "except", "(", "AIOGitHubAPIException", ",", "HacsException", ")", "as", "exception", ":", "if", "not", "self", ".", "hacs", ".", "status", ".", "startup", ":", "self", ".", "logger", ".", "error", "(", "\"%s %s\"", ",", "self", ",", "exception", ")", "if", "not", "ignore_issues", ":", "raise", "HacsException", "(", "exception", ")", "from", "None" ]
https://github.com/aneisch/home-assistant-config/blob/86e381fde9609cb8871c439c433c12989e4e225d/custom_components/hacs/repositories/base.py#L911-L987
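A minimal sketch of the conditional-fetch (ETag) pattern this method builds on. The client function, exception name, and state dict below are hypothetical stand-ins, not the real HACS/aiogithubapi API: a real client sends If-None-Match and the update short-circuits when the server reports 304.

import asyncio

class NotModified(Exception):
    """Stand-in for HacsNotModifiedException: the server returned 304."""

async def fetch_repository(full_name, etag=None):
    # Hypothetical client call; a real one raises NotModified when the
    # cached ETag is still current.
    if etag == "etag-123":
        raise NotModified
    return {"full_name": full_name, "archived": False}, "etag-123"

async def update(state, force=False):
    try:
        data, etag = await fetch_repository(
            state["full_name"],
            etag=None if force else state.get("etag"),
        )
    except NotModified:
        return state  # cache still valid: skip the rest of the update
    state.update(data)
    state["etag"] = etag
    return state

state = asyncio.run(update({"full_name": "owner/repo"}))
print(state["etag"])                        # 'etag-123' after a fresh fetch
print(asyncio.run(update(state)) is state)  # True: 304 short-circuits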
ClusterHQ/flocker
eaa586248986d7cd681c99c948546c2b507e44de
flocker/control/_persistence.py
python
ConfigurationMigration.upgrade_from_v2
(cls, config)
return dumps(decoded_config)
Migrate a v2 JSON configuration to v3. :param bytes config: The v2 JSON data. :return bytes: The v3 JSON data.
Migrate a v2 JSON configuration to v3.
[ "Migrate", "a", "v2", "JSON", "configuration", "to", "v3", "." ]
def upgrade_from_v2(cls, config): """ Migrate a v2 JSON configuration to v3. :param bytes config: The v2 JSON data. :return bytes: The v3 JSON data. """ decoded_config = loads(config) decoded_config[u"version"] = 3 decoded_config[u"deployment"][u"leases"] = { u"values": [], _CLASS_MARKER: u"PMap", } return dumps(decoded_config)
[ "def", "upgrade_from_v2", "(", "cls", ",", "config", ")", ":", "decoded_config", "=", "loads", "(", "config", ")", "decoded_config", "[", "u\"version\"", "]", "=", "3", "decoded_config", "[", "u\"deployment\"", "]", "[", "u\"leases\"", "]", "=", "{", "u\"values\"", ":", "[", "]", ",", "_CLASS_MARKER", ":", "u\"PMap\"", ",", "}", "return", "dumps", "(", "decoded_config", ")" ]
https://github.com/ClusterHQ/flocker/blob/eaa586248986d7cd681c99c948546c2b507e44de/flocker/control/_persistence.py#L138-L150
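The migration is easy to exercise standalone. This sketch inlines the body with a placeholder _CLASS_MARKER value; flocker defines the real constant elsewhere in _persistence.py.

from json import dumps, loads

_CLASS_MARKER = u"$__class__$"  # placeholder: flocker defines the real marker

def upgrade_from_v2(config):
    decoded_config = loads(config)
    decoded_config[u"version"] = 3
    decoded_config[u"deployment"][u"leases"] = {
        u"values": [],
        _CLASS_MARKER: u"PMap",
    }
    return dumps(decoded_config)

v2 = dumps({u"version": 2, u"deployment": {u"nodes": []}})
print(upgrade_from_v2(v2))
# {"version": 3, "deployment": {"nodes": [], "leases": {"values": [], "$__class__$": "PMap"}}}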
osmr/imgclsmob
f2993d3ce73a2f7ddba05da3891defb08547d504
pytorch/pytorchcv/models/efficientnet.py
python
calc_tf_padding
(x, kernel_size, stride=1, dilation=1)
return pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2
Calculate TF-same like padding size. Parameters: ---------- x : tensor Input tensor. kernel_size : int Convolution window size. stride : int, default 1 Strides of the convolution. dilation : int, default 1 Dilation value for convolution layer. Returns: ------- tuple of 4 int The size of the padding.
Calculate TF-same like padding size.
[ "Calculate", "TF", "-", "same", "like", "padding", "size", "." ]
def calc_tf_padding(x, kernel_size, stride=1, dilation=1): """ Calculate TF-same like padding size. Parameters: ---------- x : tensor Input tensor. kernel_size : int Convolution window size. stride : int, default 1 Strides of the convolution. dilation : int, default 1 Dilation value for convolution layer. Returns: ------- tuple of 4 int The size of the padding. """ height, width = x.size()[2:] oh = math.ceil(float(height) / stride) ow = math.ceil(float(width) / stride) pad_h = max((oh - 1) * stride + (kernel_size - 1) * dilation + 1 - height, 0) pad_w = max((ow - 1) * stride + (kernel_size - 1) * dilation + 1 - width, 0) return pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2
[ "def", "calc_tf_padding", "(", "x", ",", "kernel_size", ",", "stride", "=", "1", ",", "dilation", "=", "1", ")", ":", "height", ",", "width", "=", "x", ".", "size", "(", ")", "[", "2", ":", "]", "oh", "=", "math", ".", "ceil", "(", "float", "(", "height", ")", "/", "stride", ")", "ow", "=", "math", ".", "ceil", "(", "float", "(", "width", ")", "/", "stride", ")", "pad_h", "=", "max", "(", "(", "oh", "-", "1", ")", "*", "stride", "+", "(", "kernel_size", "-", "1", ")", "*", "dilation", "+", "1", "-", "height", ",", "0", ")", "pad_w", "=", "max", "(", "(", "ow", "-", "1", ")", "*", "stride", "+", "(", "kernel_size", "-", "1", ")", "*", "dilation", "+", "1", "-", "width", ",", "0", ")", "return", "pad_h", "//", "2", ",", "pad_h", "-", "pad_h", "//", "2", ",", "pad_w", "//", "2", ",", "pad_w", "-", "pad_w", "//", "2" ]
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/pytorch/pytorchcv/models/efficientnet.py#L23-L51
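A usage sketch for the helper above (it relies on import math being in scope). Note that F.pad takes (left, right, top, bottom) for a 4-D tensor, so the returned (top, bottom, left, right) tuple is reordered explicitly here.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 224, 224)
top, bottom, left, right = calc_tf_padding(x, kernel_size=3, stride=2)
x_padded = F.pad(x, (left, right, top, bottom))
y = F.conv2d(x_padded, torch.randn(8, 3, 3, 3), stride=2)
print(y.shape)  # torch.Size([1, 8, 112, 112]): ceil(224 / 2), as TF 'SAME' gives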
pandas-dev/pandas
5ba7d714014ae8feaccc0dd4a98890828cf2832d
pandas/core/ops/__init__.py
python
frame_arith_method_with_reindex
(left: DataFrame, right: DataFrame, op)
return result
For DataFrame-with-DataFrame operations that require reindexing, operate only on shared columns, then reindex. Parameters ---------- left : DataFrame right : DataFrame op : binary operator Returns ------- DataFrame
For DataFrame-with-DataFrame operations that require reindexing, operate only on shared columns, then reindex.
[ "For", "DataFrame", "-", "with", "-", "DataFrame", "operations", "that", "require", "reindexing", "operate", "only", "on", "shared", "columns", "then", "reindex", "." ]
def frame_arith_method_with_reindex(left: DataFrame, right: DataFrame, op) -> DataFrame: """ For DataFrame-with-DataFrame operations that require reindexing, operate only on shared columns, then reindex. Parameters ---------- left : DataFrame right : DataFrame op : binary operator Returns ------- DataFrame """ # GH#31623, only operate on shared columns cols, lcols, rcols = left.columns.join( right.columns, how="inner", level=None, return_indexers=True ) new_left = left.iloc[:, lcols] new_right = right.iloc[:, rcols] result = op(new_left, new_right) # Do the join on the columns instead of using align_method_FRAME # to avoid constructing two potentially large/sparse DataFrames join_columns, _, _ = left.columns.join( right.columns, how="outer", level=None, return_indexers=True ) if result.columns.has_duplicates: # Avoid reindexing with a duplicate axis. # https://github.com/pandas-dev/pandas/issues/35194 indexer, _ = result.columns.get_indexer_non_unique(join_columns) indexer = algorithms.unique1d(indexer) result = result._reindex_with_indexers( {1: [join_columns, indexer]}, allow_dups=True ) else: result = result.reindex(join_columns, axis=1) return result
[ "def", "frame_arith_method_with_reindex", "(", "left", ":", "DataFrame", ",", "right", ":", "DataFrame", ",", "op", ")", "->", "DataFrame", ":", "# GH#31623, only operate on shared columns", "cols", ",", "lcols", ",", "rcols", "=", "left", ".", "columns", ".", "join", "(", "right", ".", "columns", ",", "how", "=", "\"inner\"", ",", "level", "=", "None", ",", "return_indexers", "=", "True", ")", "new_left", "=", "left", ".", "iloc", "[", ":", ",", "lcols", "]", "new_right", "=", "right", ".", "iloc", "[", ":", ",", "rcols", "]", "result", "=", "op", "(", "new_left", ",", "new_right", ")", "# Do the join on the columns instead of using align_method_FRAME", "# to avoid constructing two potentially large/sparse DataFrames", "join_columns", ",", "_", ",", "_", "=", "left", ".", "columns", ".", "join", "(", "right", ".", "columns", ",", "how", "=", "\"outer\"", ",", "level", "=", "None", ",", "return_indexers", "=", "True", ")", "if", "result", ".", "columns", ".", "has_duplicates", ":", "# Avoid reindexing with a duplicate axis.", "# https://github.com/pandas-dev/pandas/issues/35194", "indexer", ",", "_", "=", "result", ".", "columns", ".", "get_indexer_non_unique", "(", "join_columns", ")", "indexer", "=", "algorithms", ".", "unique1d", "(", "indexer", ")", "result", "=", "result", ".", "_reindex_with_indexers", "(", "{", "1", ":", "[", "join_columns", ",", "indexer", "]", "}", ",", "allow_dups", "=", "True", ")", "else", ":", "result", "=", "result", ".", "reindex", "(", "join_columns", ",", "axis", "=", "1", ")", "return", "result" ]
https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/core/ops/__init__.py#L344-L385
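The helper is internal, but its observable result matches ordinary frame arithmetic on frames with partially overlapping columns; the point of the reindex path is to compute the op only on the shared columns instead of first aligning both operands to the full column union.

import pandas as pd

left = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
right = pd.DataFrame({"b": [10, 20], "c": [30, 40]})

# Only "b" is actually computed; "a" and "c" are NaN-filled on reindex.
print(left + right)
#     a   b   c
# 0 NaN  13 NaN
# 1 NaN  24 NaN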
keras-team/keras
5caa668b6a415675064a730f5eb46ecc08e40f65
keras/keras_parameterized.py
python
_test_or_class_decorator
(test_or_class, single_method_decorator)
return _decorate_test_or_class
Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the obj as a single method and apply the decorator directly. Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator, or a test class that extends keras_parameterized.TestCase single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result.
Decorate a test or class with a decorator intended for one method.
[ "Decorate", "a", "test", "or", "class", "with", "a", "decorator", "intended", "for", "one", "method", "." ]
def _test_or_class_decorator(test_or_class, single_method_decorator): """Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the obj as a single method and apply the decorator directly. Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator, or a test class that extends keras_parameterized.TestCase single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result. """ def _decorate_test_or_class(obj): if isinstance(obj, collections.abc.Iterable): return itertools.chain.from_iterable( single_method_decorator(method) for method in obj) if isinstance(obj, type): cls = obj for name, value in cls.__dict__.copy().items(): if callable(value) and name.startswith( unittest.TestLoader.testMethodPrefix): setattr(cls, name, single_method_decorator(value)) cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__, cls.__dict__.copy()) return cls return single_method_decorator(obj) if test_or_class is not None: return _decorate_test_or_class(test_or_class) return _decorate_test_or_class
[ "def", "_test_or_class_decorator", "(", "test_or_class", ",", "single_method_decorator", ")", ":", "def", "_decorate_test_or_class", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "collections", ".", "abc", ".", "Iterable", ")", ":", "return", "itertools", ".", "chain", ".", "from_iterable", "(", "single_method_decorator", "(", "method", ")", "for", "method", "in", "obj", ")", "if", "isinstance", "(", "obj", ",", "type", ")", ":", "cls", "=", "obj", "for", "name", ",", "value", "in", "cls", ".", "__dict__", ".", "copy", "(", ")", ".", "items", "(", ")", ":", "if", "callable", "(", "value", ")", "and", "name", ".", "startswith", "(", "unittest", ".", "TestLoader", ".", "testMethodPrefix", ")", ":", "setattr", "(", "cls", ",", "name", ",", "single_method_decorator", "(", "value", ")", ")", "cls", "=", "type", "(", "cls", ")", ".", "__new__", "(", "type", "(", "cls", ")", ",", "cls", ".", "__name__", ",", "cls", ".", "__bases__", ",", "cls", ".", "__dict__", ".", "copy", "(", ")", ")", "return", "cls", "return", "single_method_decorator", "(", "obj", ")", "if", "test_or_class", "is", "not", "None", ":", "return", "_decorate_test_or_class", "(", "test_or_class", ")", "return", "_decorate_test_or_class" ]
https://github.com/keras-team/keras/blob/5caa668b6a415675064a730f5eb46ecc08e40f65/keras/keras_parameterized.py#L432-L475
insarlab/MintPy
4357b8c726dec8a3f936770e3f3dda92882685b7
mintpy/tropo_pyaps.py
python
cmd_line_parse
(iargs=None)
return inps
Command line parser.
Command line parser.
[ "Command", "line", "parser", "." ]
def cmd_line_parse(iargs=None): """Command line parser.""" parser = create_parser() inps = parser.parse_args(args=iargs) # check the input requirements key_list = ['date_list', 'hour'] # with timeseries file if inps.timeseries_file: for key in key_list+['ref_yx']: if vars(inps)[key]: print(('input "{:<10}" is ignored because it will be extracted from ' 'timeseries file {}').format(key, inps.timeseries_file)) # without timeseries file elif any(not vars(inps)[key] for key in key_list): msg = 'No input timeseries file, all the following options are required: \n{}'.format(key_list) msg += '\n\n'+EXAMPLE raise ValueError(msg) ## default values # Get Grib Source inps.trop_model = standardize_trop_model(inps.trop_model, standardWeatherModelNames) print('weather model: '+inps.trop_model) # weather_dir inps.weather_dir = os.path.expanduser(inps.weather_dir) inps.weather_dir = os.path.expandvars(inps.weather_dir) # Fallback value if WEATHER_DIR is not defined as environment variable if inps.weather_dir == '${WEATHER_DIR}': inps.weather_dir = './' print('weather data directory: '+inps.weather_dir) return inps
[ "def", "cmd_line_parse", "(", "iargs", "=", "None", ")", ":", "parser", "=", "create_parser", "(", ")", "inps", "=", "parser", ".", "parse_args", "(", "args", "=", "iargs", ")", "# check the input requirements", "key_list", "=", "[", "'date_list'", ",", "'hour'", "]", "# with timeseries file", "if", "inps", ".", "timeseries_file", ":", "for", "key", "in", "key_list", "+", "[", "'ref_yx'", "]", ":", "if", "vars", "(", "inps", ")", "[", "key", "]", ":", "print", "(", "(", "'input \"{:<10}\" is ignored because it will be extracted from '", "'timeseries file {}'", ")", ".", "format", "(", "key", ",", "inps", ".", "timeseries_file", ")", ")", "# without timeseries file", "elif", "any", "(", "not", "vars", "(", "inps", ")", "[", "key", "]", "for", "key", "in", "key_list", ")", ":", "msg", "=", "'No input timeseries file, all the following options are required: \\n{}'", ".", "format", "(", "key_list", ")", "msg", "+=", "'\\n\\n'", "+", "EXAMPLE", "raise", "ValueError", "(", "msg", ")", "## default values", "# Get Grib Source", "inps", ".", "trop_model", "=", "standardize_trop_model", "(", "inps", ".", "trop_model", ",", "standardWeatherModelNames", ")", "print", "(", "'weather model: '", "+", "inps", ".", "trop_model", ")", "# weather_dir", "inps", ".", "weather_dir", "=", "os", ".", "path", ".", "expanduser", "(", "inps", ".", "weather_dir", ")", "inps", ".", "weather_dir", "=", "os", ".", "path", ".", "expandvars", "(", "inps", ".", "weather_dir", ")", "# Fallback value if WEATHER_DIR is not defined as environment variable", "if", "inps", ".", "weather_dir", "==", "'${WEATHER_DIR}'", ":", "inps", ".", "weather_dir", "=", "'./'", "print", "(", "'weather data directory: '", "+", "inps", ".", "weather_dir", ")", "return", "inps" ]
https://github.com/insarlab/MintPy/blob/4357b8c726dec8a3f936770e3f3dda92882685b7/mintpy/tropo_pyaps.py#L105-L139
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/apsw/tools/shell.py
python
Shell.command_tables
(self, cmd)
tables ?PATTERN?: Lists names of tables matching LIKE pattern This also returns views.
tables ?PATTERN?: Lists names of tables matching LIKE pattern
[ "tables", "?PATTERN?", ":", "Lists", "names", "of", "tables", "matching", "LIKE", "pattern" ]
def command_tables(self, cmd): """tables ?PATTERN?: Lists names of tables matching LIKE pattern This also returns views. """ self.push_output() self.output=self.output_list self.header=False try: if len(cmd)==0: cmd=['%'] # The SQLite shell code filters out sqlite_ prefixes if # you specified an argument else leaves them in. It also # has a hand coded output mode that does space separation # plus wrapping at 80 columns. for n in cmd: self.process_sql("SELECT name FROM sqlite_master " "WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' " "AND name like ?1 " "UNION ALL " "SELECT name FROM sqlite_temp_master " "WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' " "ORDER BY 1", (n,), internal=True) finally: self.pop_output()
[ "def", "command_tables", "(", "self", ",", "cmd", ")", ":", "self", ".", "push_output", "(", ")", "self", ".", "output", "=", "self", ".", "output_list", "self", ".", "header", "=", "False", "try", ":", "if", "len", "(", "cmd", ")", "==", "0", ":", "cmd", "=", "[", "'%'", "]", "# The SQLite shell code filters out sqlite_ prefixes if", "# you specified an argument else leaves them in. It also", "# has a hand coded output mode that does space separation", "# plus wrapping at 80 columns.", "for", "n", "in", "cmd", ":", "self", ".", "process_sql", "(", "\"SELECT name FROM sqlite_master \"", "\"WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' \"", "\"AND name like ?1 \"", "\"UNION ALL \"", "\"SELECT name FROM sqlite_temp_master \"", "\"WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' \"", "\"ORDER BY 1\"", ",", "(", "n", ",", ")", ",", "internal", "=", "True", ")", "finally", ":", "self", ".", "pop_output", "(", ")" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/apsw/tools/shell.py#L2240-L2265
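The SQL it issues is plain SQLite and can be reproduced with the standard library (using ? in place of the numbered ?1 parameter, and omitting the sqlite_temp_master half of the UNION for brevity):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE users (id INTEGER)")
con.execute("CREATE VIEW v_users AS SELECT * FROM users")
rows = con.execute(
    "SELECT name FROM sqlite_master "
    "WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' "
    "AND name LIKE ? ORDER BY 1",
    ("%",),
).fetchall()
print([name for (name,) in rows])  # ['users', 'v_users']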
ewels/MultiQC
9b953261d3d684c24eef1827a5ce6718c847a5af
multiqc/modules/base_module.py
python
BaseMultiqcModule.write_data_file
(self, data, fn, sort_cols=False, data_format=None)
Saves raw data to a dictionary for downstream use, then redirects to report.write_data_file() to create the file in the report directory
Saves raw data to a dictionary for downstream use, then redirects to report.write_data_file() to create the file in the report directory
[ "Saves", "raw", "data", "to", "a", "dictionary", "for", "downstream", "use", "then", "redirects", "to", "report", ".", "write_data_file", "()", "to", "create", "the", "file", "in", "the", "report", "directory" ]
def write_data_file(self, data, fn, sort_cols=False, data_format=None): """Saves raw data to a dictionary for downstream use, then redirects to report.write_data_file() to create the file in the report directory""" # Append custom module anchor if set mod_cust_config = getattr(self, "mod_cust_config", {}) if "anchor" in mod_cust_config: fn = "{}_{}".format(fn, mod_cust_config["anchor"]) # Generate a unique filename if the file already exists (running module multiple times) i = 1 base_fn = fn while fn in report.saved_raw_data: fn = "{}_{}".format(base_fn, i) i += 1 # Save the file report.saved_raw_data[fn] = data util_functions.write_data_file(data, fn, sort_cols, data_format)
[ "def", "write_data_file", "(", "self", ",", "data", ",", "fn", ",", "sort_cols", "=", "False", ",", "data_format", "=", "None", ")", ":", "# Append custom module anchor if set", "mod_cust_config", "=", "getattr", "(", "self", ",", "\"mod_cust_config\"", ",", "{", "}", ")", "if", "\"anchor\"", "in", "mod_cust_config", ":", "fn", "=", "\"{}_{}\"", ".", "format", "(", "fn", ",", "mod_cust_config", "[", "\"anchor\"", "]", ")", "# Generate a unique filename if the file already exists (running module multiple times)", "i", "=", "1", "base_fn", "=", "fn", "while", "fn", "in", "report", ".", "saved_raw_data", ":", "fn", "=", "\"{}_{}\"", ".", "format", "(", "base_fn", ",", "i", ")", "i", "+=", "1", "# Save the file", "report", ".", "saved_raw_data", "[", "fn", "]", "=", "data", "util_functions", ".", "write_data_file", "(", "data", ",", "fn", ",", "sort_cols", ",", "data_format", ")" ]
https://github.com/ewels/MultiQC/blob/9b953261d3d684c24eef1827a5ce6718c847a5af/multiqc/modules/base_module.py#L469-L487
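The interesting detail is the collision loop that keeps data from successive runs of the same module. In isolation it behaves like this (a sketch, with a plain dict standing in for report.saved_raw_data):

saved_raw_data = {}  # stand-in for report.saved_raw_data

def unique_key(fn):
    # Same suffixing scheme as above: stats, stats_1, stats_2, ...
    base_fn, i = fn, 1
    while fn in saved_raw_data:
        fn = "{}_{}".format(base_fn, i)
        i += 1
    return fn

for _ in range(3):
    saved_raw_data[unique_key("stats")] = {}
print(sorted(saved_raw_data))  # ['stats', 'stats_1', 'stats_2']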
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/distutils/archive_util.py
python
_get_uid
(name)
return None
Returns an uid, given a user name.
Returns an uid, given a user name.
[ "Returns", "an", "uid", "given", "a", "user", "name", "." ]
def _get_uid(name): """Returns an uid, given a user name.""" if getpwnam is None or name is None: return None try: result = getpwnam(name) except KeyError: result = None if result is not None: return result[2] return None
[ "def", "_get_uid", "(", "name", ")", ":", "if", "getpwnam", "is", "None", "or", "name", "is", "None", ":", "return", "None", "try", ":", "result", "=", "getpwnam", "(", "name", ")", "except", "KeyError", ":", "result", "=", "None", "if", "result", "is", "not", "None", ":", "return", "result", "[", "2", "]", "return", "None" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/distutils/archive_util.py#L43-L53
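Behaviour of the helper above on a POSIX system; on platforms without the pwd module, distutils's guarded import leaves getpwnam as None and the function always returns None.

# Assumes _get_uid above is in scope together with its guarded import:
#   try: from pwd import getpwnam
#   except ImportError: getpwnam = None
print(_get_uid("root"))           # 0 on a typical POSIX system
print(_get_uid("no-such-user"))   # None: the KeyError is swallowed
print(_get_uid(None))             # None: no name supplied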
ProjectQ-Framework/ProjectQ
0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005
projectq/backends/_sim/_pysim.py
python
Simulator.allocate_qubit
(self, qubit_id)
Allocate a qubit. Args: qubit_id (int): ID of the qubit which is being allocated.
Allocate a qubit.
[ "Allocate", "a", "qubit", "." ]
def allocate_qubit(self, qubit_id): """ Allocate a qubit. Args: qubit_id (int): ID of the qubit which is being allocated. """ self._map[qubit_id] = self._num_qubits self._num_qubits += 1 self._state.resize(1 << self._num_qubits, refcheck=_USE_REFCHECK)
[ "def", "allocate_qubit", "(", "self", ",", "qubit_id", ")", ":", "self", ".", "_map", "[", "qubit_id", "]", "=", "self", ".", "_num_qubits", "self", ".", "_num_qubits", "+=", "1", "self", ".", "_state", ".", "resize", "(", "1", "<<", "self", ".", "_num_qubits", ",", "refcheck", "=", "_USE_REFCHECK", ")" ]
https://github.com/ProjectQ-Framework/ProjectQ/blob/0d32c1610ba4e9aefd7f19eb52dadb4fbe5f9005/projectq/backends/_sim/_pysim.py#L106-L115
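The resize call here is the in-place numpy.ndarray.resize, which zero-fills the new amplitudes, not numpy.resize, which repeats the data. A minimal sketch of the allocation bookkeeping:

import numpy as np

state = np.zeros(1, dtype=complex)
state[0] = 1.0  # amplitude of |0...0>
num_qubits = 0
for qubit_id in range(3):
    num_qubits += 1
    state.resize(1 << num_qubits, refcheck=False)  # doubles the vector, zero-fills
print(state.shape, state[0])  # (8,) (1+0j): still the all-zeros basis state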
dpressel/mead-baseline
9987e6b37fa6525a4ddc187c305e292a718f59a9
baseline/tf/deps/train.py
python
to_tensors
(ts, lengths_key)
return features, (heads, labels)
Convert a data feed into a tuple of `features` (`dict`) and `y` values This method is required to produce `tf.dataset`s from the input data feed :param ts: The data feed to convert :return: A `tuple` of `features` and `y` (labels)
Convert a data feed into a tuple of `features` (`dict`) and `y` values
[ "Convert", "a", "data", "feed", "into", "a", "tuple", "of", "features", "(", "dict", ")", "and", "y", "values" ]
def to_tensors(ts, lengths_key): """Convert a data feed into a tuple of `features` (`dict`) and `y` values This method is required to produce `tf.dataset`s from the input data feed :param ts: The data feed to convert :return: A `tuple` of `features` and `y` (labels) """ keys = ts[0].keys() features = dict((k, []) for k in keys) for sample in ts: for k in features.keys(): # add each sample for s in sample[k]: features[k].append(s) features = dict((k, np.stack(v)) for k, v in features.items()) features['lengths'] = features[lengths_key] del features[lengths_key] heads = features.pop('heads') labels = features.pop('labels') return features, (heads, labels)
[ "def", "to_tensors", "(", "ts", ",", "lengths_key", ")", ":", "keys", "=", "ts", "[", "0", "]", ".", "keys", "(", ")", "features", "=", "dict", "(", "(", "k", ",", "[", "]", ")", "for", "k", "in", "keys", ")", "for", "sample", "in", "ts", ":", "for", "k", "in", "features", ".", "keys", "(", ")", ":", "# add each sample", "for", "s", "in", "sample", "[", "k", "]", ":", "features", "[", "k", "]", ".", "append", "(", "s", ")", "features", "=", "dict", "(", "(", "k", ",", "np", ".", "stack", "(", "v", ")", ")", "for", "k", ",", "v", "in", "features", ".", "items", "(", ")", ")", "features", "[", "'lengths'", "]", "=", "features", "[", "lengths_key", "]", "del", "features", "[", "lengths_key", "]", "heads", "=", "features", ".", "pop", "(", "'heads'", ")", "labels", "=", "features", ".", "pop", "(", "'labels'", ")", "return", "features", ",", "(", "heads", ",", "labels", ")" ]
https://github.com/dpressel/mead-baseline/blob/9987e6b37fa6525a4ddc187c305e292a718f59a9/baseline/tf/deps/train.py#L30-L51
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/transforms.py
python
interval_contains
(interval, val)
return a <= val <= b or a >= val >= b
Check, inclusively, whether an interval includes a given value. Parameters ---------- interval : sequence of scalar A 2-length sequence, endpoints that define the interval. val : scalar Value to check is within interval. Returns ------- bool Returns true if given val is within the interval.
Check, inclusively, whether an interval includes a given value.
[ "Check", "inclusively", "whether", "an", "interval", "includes", "a", "given", "value", "." ]
def interval_contains(interval, val): """ Check, inclusively, whether an interval includes a given value. Parameters ---------- interval : sequence of scalar A 2-length sequence, endpoints that define the interval. val : scalar Value to check is within interval. Returns ------- bool Returns true if given val is within the interval. """ a, b = interval return a <= val <= b or a >= val >= b
[ "def", "interval_contains", "(", "interval", ",", "val", ")", ":", "a", ",", "b", "=", "interval", "return", "a", "<=", "val", "<=", "b", "or", "a", ">=", "val", ">=", "b" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/transforms.py#L2915-L2932
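The a <= val <= b or a >= val >= b form makes the check order-agnostic, so the endpoints may be given in either order. With interval_contains as defined above:

print(interval_contains((0, 10), 5))    # True
print(interval_contains((10, 0), 5))    # True: endpoint order does not matter
print(interval_contains((0, 10), 10))   # True: endpoints are inclusive
print(interval_contains((0, 10), 11))   # False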
pystruct/pystruct
957193a40f3933ae5709336d46289c8ad4a60b7a
pystruct/learners/latent_structured_svm.py
python
LatentSSVM.score
(self, X, Y)
return 1. - np.sum(losses) / float(np.sum(max_losses))
Compute score as 1 - loss over whole data set. Returns the average accuracy (in terms of model.loss) over X and Y. Parameters ---------- X : iterable Evaluation data. Y : iterable True labels. Returns ------- score : float Average of 1 - loss over training examples.
Compute score as 1 - loss over whole data set.
[ "Compute", "score", "as", "1", "-", "loss", "over", "whole", "data", "set", "." ]
def score(self, X, Y): """Compute score as 1 - loss over whole data set. Returns the average accuracy (in terms of model.loss) over X and Y. Parameters ---------- X : iterable Evaluation data. Y : iterable True labels. Returns ------- score : float Average of 1 - loss over training examples. """ losses = [self.model.base_loss(y, y_pred) for y, y_pred in zip(Y, self.predict(X))] max_losses = [self.model.max_loss(y) for y in Y] return 1. - np.sum(losses) / float(np.sum(max_losses))
[ "def", "score", "(", "self", ",", "X", ",", "Y", ")", ":", "losses", "=", "[", "self", ".", "model", ".", "base_loss", "(", "y", ",", "y_pred", ")", "for", "y", ",", "y_pred", "in", "zip", "(", "Y", ",", "self", ".", "predict", "(", "X", ")", ")", "]", "max_losses", "=", "[", "self", ".", "model", ".", "max_loss", "(", "y", ")", "for", "y", "in", "Y", "]", "return", "1.", "-", "np", ".", "sum", "(", "losses", ")", "/", "float", "(", "np", ".", "sum", "(", "max_losses", ")", ")" ]
https://github.com/pystruct/pystruct/blob/957193a40f3933ae5709336d46289c8ad4a60b7a/pystruct/learners/latent_structured_svm.py#L136-L158
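The score is a loss-normalized accuracy: one minus incurred loss over attainable loss. The arithmetic in isolation, with stand-in per-sample values:

import numpy as np

losses = [2, 0, 1]      # model.base_loss(y, y_pred) per sample (stand-ins)
max_losses = [4, 4, 4]  # model.max_loss(y) per sample (stand-ins)
print(1. - np.sum(losses) / float(np.sum(max_losses)))  # 0.75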
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
sklearn/feature_extraction/_dict_vectorizer.py
python
DictVectorizer.inverse_transform
(self, X, dict_type=dict)
return dicts
Transform array or sparse matrix X back to feature mappings. X must have been produced by this DictVectorizer's transform or fit_transform method; it may only have passed through transformers that preserve the number of features and their order. In the case of one-hot/one-of-K coding, the constructed feature names and values are returned rather than the original ones. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample matrix. dict_type : type, default=dict Constructor for feature mappings. Must conform to the collections.Mapping API. Returns ------- D : list of dict_type objects of shape (n_samples,) Feature mappings for the samples in X.
Transform array or sparse matrix X back to feature mappings.
[ "Transform", "array", "or", "sparse", "matrix", "X", "back", "to", "feature", "mappings", "." ]
def inverse_transform(self, X, dict_type=dict): """Transform array or sparse matrix X back to feature mappings. X must have been produced by this DictVectorizer's transform or fit_transform method; it may only have passed through transformers that preserve the number of features and their order. In the case of one-hot/one-of-K coding, the constructed feature names and values are returned rather than the original ones. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample matrix. dict_type : type, default=dict Constructor for feature mappings. Must conform to the collections.Mapping API. Returns ------- D : list of dict_type objects of shape (n_samples,) Feature mappings for the samples in X. """ # COO matrix is not subscriptable X = check_array(X, accept_sparse=["csr", "csc"]) n_samples = X.shape[0] names = self.feature_names_ dicts = [dict_type() for _ in range(n_samples)] if sp.issparse(X): for i, j in zip(*X.nonzero()): dicts[i][names[j]] = X[i, j] else: for i, d in enumerate(dicts): for j, v in enumerate(X[i, :]): if v != 0: d[names[j]] = X[i, j] return dicts
[ "def", "inverse_transform", "(", "self", ",", "X", ",", "dict_type", "=", "dict", ")", ":", "# COO matrix is not subscriptable", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "[", "\"csr\"", ",", "\"csc\"", "]", ")", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "names", "=", "self", ".", "feature_names_", "dicts", "=", "[", "dict_type", "(", ")", "for", "_", "in", "range", "(", "n_samples", ")", "]", "if", "sp", ".", "issparse", "(", "X", ")", ":", "for", "i", ",", "j", "in", "zip", "(", "*", "X", ".", "nonzero", "(", ")", ")", ":", "dicts", "[", "i", "]", "[", "names", "[", "j", "]", "]", "=", "X", "[", "i", ",", "j", "]", "else", ":", "for", "i", ",", "d", "in", "enumerate", "(", "dicts", ")", ":", "for", "j", ",", "v", "in", "enumerate", "(", "X", "[", "i", ",", ":", "]", ")", ":", "if", "v", "!=", "0", ":", "d", "[", "names", "[", "j", "]", "]", "=", "X", "[", "i", ",", "j", "]", "return", "dicts" ]
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/feature_extraction/_dict_vectorizer.py#L315-L354
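Round-tripping with the public API shows both the recovery of feature mappings and the one-hot caveat: string-valued features come back under their constructed name=value keys rather than the originals.

from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False)
X = v.fit_transform([{"foo": 1, "bar": 2}, {"foo": 3, "city": "London"}])
print(v.inverse_transform(X))
# [{'bar': 2.0, 'foo': 1.0}, {'city=London': 1.0, 'foo': 3.0}]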
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/modules/vector_symbolic_dense.py
python
apply_map
(phi)
return apply
Returns a function that applies phi to its argument. EXAMPLES:: sage: from sage.modules.vector_symbolic_dense import apply_map sage: v = vector([1,2,3]) sage: f = apply_map(lambda x: x+1) sage: f(v) (2, 3, 4)
Returns a function that applies phi to its argument.
[ "Returns", "a", "function", "that", "applies", "phi", "to", "its", "argument", "." ]
def apply_map(phi): """ Returns a function that applies phi to its argument. EXAMPLES:: sage: from sage.modules.vector_symbolic_dense import apply_map sage: v = vector([1,2,3]) sage: f = apply_map(lambda x: x+1) sage: f(v) (2, 3, 4) """ def apply(self, *args, **kwds): """ Generic function used to implement common symbolic operations elementwise as methods of a vector. EXAMPLES:: sage: var('x,y') (x, y) sage: v = vector([sin(x)^2 + cos(x)^2, log(x*y), sin(x/(x^2 + x)), factorial(x+1)/factorial(x)]) sage: v.simplify_trig() (1, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x)) sage: v.canonicalize_radical() (cos(x)^2 + sin(x)^2, log(x) + log(y), sin(1/(x + 1)), factorial(x + 1)/factorial(x)) sage: v.simplify_rational() (cos(x)^2 + sin(x)^2, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x)) sage: v.simplify_factorial() (cos(x)^2 + sin(x)^2, log(x*y), sin(x/(x^2 + x)), x + 1) sage: v.simplify_full() (1, log(x*y), sin(1/(x + 1)), x + 1) sage: v = vector([sin(2*x), sin(3*x)]) sage: v.simplify_trig() (2*cos(x)*sin(x), (4*cos(x)^2 - 1)*sin(x)) sage: v.simplify_trig(False) (sin(2*x), sin(3*x)) sage: v.simplify_trig(expand=False) (sin(2*x), sin(3*x)) """ return self.apply_map(lambda x: phi(x, *args, **kwds)) apply.__doc__ += "\nSee Expression." + phi.__name__ + "() for optional arguments." return apply
[ "def", "apply_map", "(", "phi", ")", ":", "def", "apply", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "\"\"\"\n Generic function used to implement common symbolic operations\n elementwise as methods of a vector.\n\n EXAMPLES::\n\n sage: var('x,y')\n (x, y)\n sage: v = vector([sin(x)^2 + cos(x)^2, log(x*y), sin(x/(x^2 + x)), factorial(x+1)/factorial(x)])\n sage: v.simplify_trig()\n (1, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))\n sage: v.canonicalize_radical()\n (cos(x)^2 + sin(x)^2, log(x) + log(y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))\n sage: v.simplify_rational()\n (cos(x)^2 + sin(x)^2, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))\n sage: v.simplify_factorial()\n (cos(x)^2 + sin(x)^2, log(x*y), sin(x/(x^2 + x)), x + 1)\n sage: v.simplify_full()\n (1, log(x*y), sin(1/(x + 1)), x + 1)\n\n sage: v = vector([sin(2*x), sin(3*x)])\n sage: v.simplify_trig()\n (2*cos(x)*sin(x), (4*cos(x)^2 - 1)*sin(x))\n sage: v.simplify_trig(False)\n (sin(2*x), sin(3*x))\n sage: v.simplify_trig(expand=False)\n (sin(2*x), sin(3*x))\n \"\"\"", "return", "self", ".", "apply_map", "(", "lambda", "x", ":", "phi", "(", "x", ",", "*", "args", ",", "*", "*", "kwds", ")", ")", "apply", ".", "__doc__", "+=", "\"\\nSee Expression.\"", "+", "phi", ".", "__name__", "+", "\"() for optional arguments.\"", "return", "apply" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/modules/vector_symbolic_dense.py#L61-L105
jtpereyda/boofuzz
64badab7257117bcadab35e903d723223dde9203
boofuzz/sessions.py
python
Session._main_fuzz_loop
(self, fuzz_case_iterator)
Execute main fuzz logic; takes an iterator of test cases. Preconditions: `self.total_mutant_index` and `self.total_num_mutations` are set properly. Args: fuzz_case_iterator (Iterable): An iterator that walks through fuzz cases and yields MutationContext objects. See _iterate_single_node() for details. Returns: None
Execute main fuzz logic; takes an iterator of test cases.
[ "Execute", "main", "fuzz", "logic", ";", "takes", "an", "iterator", "of", "test", "cases", "." ]
def _main_fuzz_loop(self, fuzz_case_iterator): """Execute main fuzz logic; takes an iterator of test cases. Preconditions: `self.total_mutant_index` and `self.total_num_mutations` are set properly. Args: fuzz_case_iterator (Iterable): An iterator that walks through fuzz cases and yields MutationContext objects. See _iterate_single_node() for details. Returns: None """ self.server_init() try: self._start_target(self.targets[0]) if self._reuse_target_connection: self.targets[0].open() self.num_cases_actually_fuzzed = 0 self.start_time = time.time() for mutation_context in fuzz_case_iterator: if self.total_mutant_index < self._index_start: continue # Check restart interval if ( self.num_cases_actually_fuzzed and self.restart_interval and self.num_cases_actually_fuzzed % self.restart_interval == 0 ): self._fuzz_data_logger.open_test_step("restart interval of %d reached" % self.restart_interval) self._restart_target(self.targets[0]) self._fuzz_current_case(mutation_context) self.num_cases_actually_fuzzed += 1 if self._index_end is not None and self.total_mutant_index >= self._index_end: break if self._reuse_target_connection: self.targets[0].close() if self._keep_web_open and self.web_port is not None: self.end_time = time.time() print( "\nFuzzing session completed. Keeping webinterface up on localhost:{}".format(self.web_port), "\nPress ENTER to close webinterface", ) input() except KeyboardInterrupt: # TODO: should wait for the end of the ongoing test case, and stop gracefully netmon and procmon self.export_file() self._fuzz_data_logger.log_error("SIGINT received ... exiting") raise except exception.BoofuzzRestartFailedError: self._fuzz_data_logger.log_error("Restarting the target failed, exiting.") self.export_file() raise except exception.BoofuzzTargetConnectionFailedError: # exception should have already been handled but rethrown in order to escape test run pass except Exception: self._fuzz_data_logger.log_error("Unexpected exception! {0}".format(traceback.format_exc())) self.export_file() raise finally: self._fuzz_data_logger.close_test()
[ "def", "_main_fuzz_loop", "(", "self", ",", "fuzz_case_iterator", ")", ":", "self", ".", "server_init", "(", ")", "try", ":", "self", ".", "_start_target", "(", "self", ".", "targets", "[", "0", "]", ")", "if", "self", ".", "_reuse_target_connection", ":", "self", ".", "targets", "[", "0", "]", ".", "open", "(", ")", "self", ".", "num_cases_actually_fuzzed", "=", "0", "self", ".", "start_time", "=", "time", ".", "time", "(", ")", "for", "mutation_context", "in", "fuzz_case_iterator", ":", "if", "self", ".", "total_mutant_index", "<", "self", ".", "_index_start", ":", "continue", "# Check restart interval", "if", "(", "self", ".", "num_cases_actually_fuzzed", "and", "self", ".", "restart_interval", "and", "self", ".", "num_cases_actually_fuzzed", "%", "self", ".", "restart_interval", "==", "0", ")", ":", "self", ".", "_fuzz_data_logger", ".", "open_test_step", "(", "\"restart interval of %d reached\"", "%", "self", ".", "restart_interval", ")", "self", ".", "_restart_target", "(", "self", ".", "targets", "[", "0", "]", ")", "self", ".", "_fuzz_current_case", "(", "mutation_context", ")", "self", ".", "num_cases_actually_fuzzed", "+=", "1", "if", "self", ".", "_index_end", "is", "not", "None", "and", "self", ".", "total_mutant_index", ">=", "self", ".", "_index_end", ":", "break", "if", "self", ".", "_reuse_target_connection", ":", "self", ".", "targets", "[", "0", "]", ".", "close", "(", ")", "if", "self", ".", "_keep_web_open", "and", "self", ".", "web_port", "is", "not", "None", ":", "self", ".", "end_time", "=", "time", ".", "time", "(", ")", "print", "(", "\"\\nFuzzing session completed. Keeping webinterface up on localhost:{}\"", ".", "format", "(", "self", ".", "web_port", ")", ",", "\"\\nPress ENTER to close webinterface\"", ",", ")", "input", "(", ")", "except", "KeyboardInterrupt", ":", "# TODO: should wait for the end of the ongoing test case, and stop gracefully netmon and procmon", "self", ".", "export_file", "(", ")", "self", ".", "_fuzz_data_logger", ".", "log_error", "(", "\"SIGINT received ... exiting\"", ")", "raise", "except", "exception", ".", "BoofuzzRestartFailedError", ":", "self", ".", "_fuzz_data_logger", ".", "log_error", "(", "\"Restarting the target failed, exiting.\"", ")", "self", ".", "export_file", "(", ")", "raise", "except", "exception", ".", "BoofuzzTargetConnectionFailedError", ":", "# exception should have already been handled but rethrown in order to escape test run", "pass", "except", "Exception", ":", "self", ".", "_fuzz_data_logger", ".", "log_error", "(", "\"Unexpected exception! {0}\"", ".", "format", "(", "traceback", ".", "format_exc", "(", ")", ")", ")", "self", ".", "export_file", "(", ")", "raise", "finally", ":", "self", ".", "_fuzz_data_logger", ".", "close_test", "(", ")" ]
https://github.com/jtpereyda/boofuzz/blob/64badab7257117bcadab35e903d723223dde9203/boofuzz/sessions.py#L1362-L1430
richardaecn/class-balanced-loss
1d7857208a2abc03d84e35a9d5383af8225d4b4d
tpu/models/official/densenet/densenet_model.py
python
densenet_imagenet_121
(inputs, is_training=True, num_classes=1001)
return densenet_imagenet_model(inputs, growth_rate, depths, num_classes, is_training)
DenseNet 121.
DenseNet 121.
[ "DenseNet", "121", "." ]
def densenet_imagenet_121(inputs, is_training=True, num_classes=1001): """DenseNet 121.""" depths = [6, 12, 24, 16] growth_rate = 32 return densenet_imagenet_model(inputs, growth_rate, depths, num_classes, is_training)
[ "def", "densenet_imagenet_121", "(", "inputs", ",", "is_training", "=", "True", ",", "num_classes", "=", "1001", ")", ":", "depths", "=", "[", "6", ",", "12", ",", "24", ",", "16", "]", "growth_rate", "=", "32", "return", "densenet_imagenet_model", "(", "inputs", ",", "growth_rate", ",", "depths", ",", "num_classes", ",", "is_training", ")" ]
https://github.com/richardaecn/class-balanced-loss/blob/1d7857208a2abc03d84e35a9d5383af8225d4b4d/tpu/models/official/densenet/densenet_model.py#L176-L181
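The "121" in the name follows the usual layer accounting for these depths, which is easy to sanity-check. This is a rough count assuming the standard DenseNet-BC layout of a 1x1 bottleneck plus a 3x3 conv per dense unit:

depths = [6, 12, 24, 16]
dense_convs = 2 * sum(depths)   # 1x1 bottleneck + 3x3 conv per dense unit
transitions = len(depths) - 1   # one 1x1 conv between consecutive blocks
stem, classifier = 1, 1         # initial 7x7 conv and the final FC layer
print(dense_convs + transitions + stem + classifier)  # 121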
tensorflow/lingvo
ce10019243d954c3c3ebe739f7589b5eebfdf907
lingvo/jax/base_layer.py
python
BaseLayer.forward_update_var
(self, name: str, new_val: JTensor)
Update var 'name' in the forward pass.
Update var 'name' in the forward pass.
[ "Update", "var", "name", "in", "the", "forward", "pass", "." ]
def forward_update_var(self, name: str, new_val: JTensor) -> None: """Update var 'name' in the forward pass.""" assert name in self._private_vars # TODO(yonghui): Maybe lift the constraint below. # A param can only be updated once. assert self._forward_updated_vars.dict[name] is None # Only non-trainable variables can be updated in the forward pass. assert var_not_trainable(self.vars[name]) self._forward_updated_vars.dict[name] = new_val
[ "def", "forward_update_var", "(", "self", ",", "name", ":", "str", ",", "new_val", ":", "JTensor", ")", "->", "None", ":", "assert", "name", "in", "self", ".", "_private_vars", "# TODO(yonghui): Maybe lift the constraint below.", "# A param can only be updated once.", "assert", "self", ".", "_forward_updated_vars", ".", "dict", "[", "name", "]", "is", "None", "# Only non-trainable variables can be updated in the forward pass.", "assert", "var_not_trainable", "(", "self", ".", "vars", "[", "name", "]", ")", "self", ".", "_forward_updated_vars", ".", "dict", "[", "name", "]", "=", "new_val" ]
https://github.com/tensorflow/lingvo/blob/ce10019243d954c3c3ebe739f7589b5eebfdf907/lingvo/jax/base_layer.py#L957-L965
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/python27/1.0/lib/noarch/pycparser/c_parser.py
python
CParser.p_struct_or_union
(self, p)
struct_or_union : STRUCT | UNION
struct_or_union : STRUCT | UNION
[ "struct_or_union", ":", "STRUCT", "|", "UNION" ]
def p_struct_or_union(self, p): """ struct_or_union : STRUCT | UNION """ p[0] = p[1]
[ "def", "p_struct_or_union", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/python27/1.0/lib/noarch/pycparser/c_parser.py#L811-L815
apache/tvm
6eb4ed813ebcdcd9558f0906a1870db8302ff1e0
python/tvm/_ffi/_ctypes/ndarray.py
python
NDArrayBase._copyto
(self, target_nd)
return target_nd
Internal function that implements copy to target ndarray.
Internal function that implements copy to target ndarray.
[ "Internal", "function", "that", "implements", "copy", "to", "target", "ndarray", "." ]
def _copyto(self, target_nd): """Internal function that implements copy to target ndarray.""" check_call(_LIB.TVMArrayCopyFromTo(self.handle, target_nd.handle, None)) return target_nd
[ "def", "_copyto", "(", "self", ",", "target_nd", ")", ":", "check_call", "(", "_LIB", ".", "TVMArrayCopyFromTo", "(", "self", ".", "handle", ",", "target_nd", ".", "handle", ",", "None", ")", ")", "return", "target_nd" ]
https://github.com/apache/tvm/blob/6eb4ed813ebcdcd9558f0906a1870db8302ff1e0/python/tvm/_ffi/_ctypes/ndarray.py#L88-L91
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/_vendor/idna/core.py
python
valid_contexto
(label, pos, exception=False)
[]
def valid_contexto(label, pos, exception=False): cp_value = ord(label[pos]) if cp_value == 0x00b7: if 0 < pos < len(label)-1: if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c: return True return False elif cp_value == 0x0375: if pos < len(label)-1 and len(label) > 1: return _is_script(label[pos + 1], 'Greek') return False elif cp_value == 0x05f3 or cp_value == 0x05f4: if pos > 0: return _is_script(label[pos - 1], 'Hebrew') return False elif cp_value == 0x30fb: for cp in label: if cp == u'\u30fb': continue if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'): return True return False elif 0x660 <= cp_value <= 0x669: for cp in label: if 0x6f0 <= ord(cp) <= 0x06f9: return False return True elif 0x6f0 <= cp_value <= 0x6f9: for cp in label: if 0x660 <= ord(cp) <= 0x0669: return False return True
[ "def", "valid_contexto", "(", "label", ",", "pos", ",", "exception", "=", "False", ")", ":", "cp_value", "=", "ord", "(", "label", "[", "pos", "]", ")", "if", "cp_value", "==", "0x00b7", ":", "if", "0", "<", "pos", "<", "len", "(", "label", ")", "-", "1", ":", "if", "ord", "(", "label", "[", "pos", "-", "1", "]", ")", "==", "0x006c", "and", "ord", "(", "label", "[", "pos", "+", "1", "]", ")", "==", "0x006c", ":", "return", "True", "return", "False", "elif", "cp_value", "==", "0x0375", ":", "if", "pos", "<", "len", "(", "label", ")", "-", "1", "and", "len", "(", "label", ")", ">", "1", ":", "return", "_is_script", "(", "label", "[", "pos", "+", "1", "]", ",", "'Greek'", ")", "return", "False", "elif", "cp_value", "==", "0x05f3", "or", "cp_value", "==", "0x05f4", ":", "if", "pos", ">", "0", ":", "return", "_is_script", "(", "label", "[", "pos", "-", "1", "]", ",", "'Hebrew'", ")", "return", "False", "elif", "cp_value", "==", "0x30fb", ":", "for", "cp", "in", "label", ":", "if", "cp", "==", "u'\\u30fb'", ":", "continue", "if", "_is_script", "(", "cp", ",", "'Hiragana'", ")", "or", "_is_script", "(", "cp", ",", "'Katakana'", ")", "or", "_is_script", "(", "cp", ",", "'Han'", ")", ":", "return", "True", "return", "False", "elif", "0x660", "<=", "cp_value", "<=", "0x669", ":", "for", "cp", "in", "label", ":", "if", "0x6f0", "<=", "ord", "(", "cp", ")", "<=", "0x06f9", ":", "return", "False", "return", "True", "elif", "0x6f0", "<=", "cp_value", "<=", "0x6f9", ":", "for", "cp", "in", "label", ":", "if", "0x660", "<=", "ord", "(", "cp", ")", "<=", "0x0669", ":", "return", "False", "return", "True" ]
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/_vendor/idna/core.py#L198-L236
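The first branch implements the RFC 5892 CONTEXTO rule for U+00B7 (MIDDLE DOT), which is only permitted between two 'l's, as in the Catalan "l·l". With the function above:

print(valid_contexto(u"l\u00b7l", 1))  # True: middle dot flanked by 'l's
print(valid_contexto(u"a\u00b7b", 1))  # False: invalid anywhere else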
i-pan/kaggle-rsna18
2db498fe99615d935aa676f04847d0c562fd8e46
models/DeformableConvNets/lib/dataset/imdb.py
python
IMDB.load_rpn_roidb
(self, gt_roidb)
return self.create_roidb_from_box_list(box_list, gt_roidb)
turn rpn detection boxes into roidb :param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped'] :return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
turn rpn detection boxes into roidb :param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped'] :return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
[ "turn", "rpn", "detection", "boxes", "into", "roidb", ":", "param", "gt_roidb", ":", "[", "image_index", "]", "[", "boxes", "gt_classes", "gt_overlaps", "flipped", "]", ":", "return", ":", "roidb", ":", "[", "image_index", "]", "[", "boxes", "gt_classes", "gt_overlaps", "flipped", "]" ]
def load_rpn_roidb(self, gt_roidb): """ turn rpn detection boxes into roidb :param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped'] :return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped'] """ box_list = self.load_rpn_data() return self.create_roidb_from_box_list(box_list, gt_roidb)
[ "def", "load_rpn_roidb", "(", "self", ",", "gt_roidb", ")", ":", "box_list", "=", "self", ".", "load_rpn_data", "(", ")", "return", "self", ".", "create_roidb_from_box_list", "(", "box_list", ",", "gt_roidb", ")" ]
https://github.com/i-pan/kaggle-rsna18/blob/2db498fe99615d935aa676f04847d0c562fd8e46/models/DeformableConvNets/lib/dataset/imdb.py#L93-L100
alexa/alexa-skills-kit-sdk-for-python
079de73bc8b827be51ea700a3e4e19c29983a173
ask-sdk-local-debug/ask_sdk_local_debug/client/autobahn_client_protocol.py
python
AutobahnClientProtocol.onConnect
(self, response)
Callback fired directly after web-socket opening handshake when new web-socket server connection was established. :param response: web-socket connection response information. :type response: instance of :py:class:`autobahn_client.websocket.protocol.ConnectionResponse`
Callback fired directly after web-socket opening handshake when new web-socket server connection was established.
[ "Callback", "fired", "directly", "after", "web", "-", "socket", "opening", "handshake", "when", "new", "web", "-", "socket", "server", "connection", "was", "established", "." ]
def onConnect(self, response): # type: (ConnectionResponse) -> None """Callback fired directly after web-socket opening handshake when new web-socket server connection was established. :param response: web-socket connection response information. :type response: instance of :py:class:`autobahn_client.websocket.protocol.ConnectionResponse` """ logger.info("*****Starting Skill Debug Session*****") logger.info('*****Session will last for 1 hour*****')
[ "def", "onConnect", "(", "self", ",", "response", ")", ":", "# type: (ConnectionResponse) -> None", "logger", ".", "info", "(", "\"*****Starting Skill Debug Session*****\"", ")", "logger", ".", "info", "(", "'*****Session will last for 1 hour*****'", ")" ]
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/079de73bc8b827be51ea700a3e4e19c29983a173/ask-sdk-local-debug/ask_sdk_local_debug/client/autobahn_client_protocol.py#L36-L46
PySimpleGUI/PySimpleGUI
6c0d1fb54f493d45e90180b322fbbe70f7a5af3c
DemoPrograms/Demo_Matplotlib_Animated.py
python
draw_figure
(canvas, figure, loc=(0, 0))
return figure_canvas_agg
[]
def draw_figure(canvas, figure, loc=(0, 0)): figure_canvas_agg = FigureCanvasTkAgg(figure, canvas) figure_canvas_agg.draw() figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1) return figure_canvas_agg
[ "def", "draw_figure", "(", "canvas", ",", "figure", ",", "loc", "=", "(", "0", ",", "0", ")", ")", ":", "figure_canvas_agg", "=", "FigureCanvasTkAgg", "(", "figure", ",", "canvas", ")", "figure_canvas_agg", ".", "draw", "(", ")", "figure_canvas_agg", ".", "get_tk_widget", "(", ")", ".", "pack", "(", "side", "=", "'top'", ",", "fill", "=", "'both'", ",", "expand", "=", "1", ")", "return", "figure_canvas_agg" ]
https://github.com/PySimpleGUI/PySimpleGUI/blob/6c0d1fb54f493d45e90180b322fbbe70f7a5af3c/DemoPrograms/Demo_Matplotlib_Animated.py#L9-L13
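A minimal way to drive the helper from a PySimpleGUI window; this is the common Canvas-embedding pattern, and the key name and figure contents are arbitrary choices for the sketch.

import PySimpleGUI as sg
from matplotlib.figure import Figure

fig = Figure(figsize=(4, 3))
fig.add_subplot(111).plot([0, 1, 2], [0, 1, 0])

layout = [[sg.Canvas(key='-CANVAS-')], [sg.Button('Exit')]]
window = sg.Window('Plot', layout, finalize=True)
draw_figure(window['-CANVAS-'].TKCanvas, fig)  # helper defined above

while True:
    event, _ = window.read()
    if event in (sg.WIN_CLOSED, 'Exit'):
        break
window.close()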