Dataset schema (field name, type, and observed value statistics):

    nwo                stringlengths   5 to 106 characters
    sha                stringlengths   40 characters
    path               stringlengths   4 to 174 characters
    language           stringclasses   1 distinct value
    identifier         stringlengths   1 to 140 characters
    parameters         stringlengths   0 to 87.7k characters
    argument_list      stringclasses   1 distinct value
    return_statement   stringlengths   0 to 426k characters
    docstring          stringlengths   0 to 64.3k characters
    docstring_summary  stringlengths   0 to 26.3k characters
    docstring_tokens   list
    function           stringlengths   18 to 4.83M characters
    function_tokens    list
    url                stringlengths   83 to 304 characters
Repository: openvinotoolkit/open_model_zoo @ 5a4232f6c35b4916eb6e8750141f4e45761237ef
Path: demos/speech_recognition_deepspeech_demo/python/asr_utils/audio_features.py (python)
Identifier: AudioFeaturesSeqPipelineStage._process_blocks
Parameters: (self, audio, finish=False)
Docstring: audio (numpy.ndarray), this buffer is guaranteed to contain data for 1 or more blocks (audio.shape[0] >= self._block_len + self._context_len)
[ "audio", "(", "numpy", ".", "ndarray", ")", "this", "buffer", "is", "guaranteed", "to", "contain", "data", "for", "1", "or", "more", "blocks", "(", "audio", ".", "shape", "[", "0", "]", ">", "=", "self", ".", "_block_len", "+", "self", ".", "_context_len", ")" ]
Function:

def _process_blocks(self, audio, finish=False):
    """
    audio (numpy.ndarray), this buffer is guaranteed to contain data for
    1 or more blocks (audio.shape[0] >= self._block_len + self._context_len)
    """
    # Cut the buffer to the end of the last frame
    audio_len = audio.shape[0]
    processable_len = audio_len - (audio_len - self._context_len) % self._block_len
    buffer_skip_len = processable_len - self._context_len
    audio = audio[:processable_len]

    # Convert audio data type to float32 if needed
    if np.issubdtype(audio.dtype, np.uint8):
        audio = audio/np.float32(128) - 1  # normalize to -1 to 1, uint8 to float32
    elif np.issubdtype(audio.dtype, np.integer):
        audio = audio/np.float32(32768)  # normalize to -1 to 1, int16 to float32

    melspectrum = samples_to_melspectrum(
        audio,                                # samples
        self.p['model_sampling_rate'],        # sampling_rate
        self._context_len + self._block_len,  # window_size
        self._block_len,                      # stride
        n_mels=self.p['mel_num'],
        fmin=self.p['mel_fmin'],
        fmax=self.p['mel_fmax'],
    )
    if self.p['num_mfcc_dct_coefs'] is not None:
        mfcc_features = melspectrum_to_mfcc(melspectrum, self.p['num_mfcc_dct_coefs'])
        return [mfcc_features], buffer_skip_len
    else:
        return [melspectrum], buffer_skip_len
[ "def", "_process_blocks", "(", "self", ",", "audio", ",", "finish", "=", "False", ")", ":", "# Cut the buffer to the end of the last frame", "audio_len", "=", "audio", ".", "shape", "[", "0", "]", "processable_len", "=", "audio_len", "-", "(", "audio_len", "-", "self", ".", "_context_len", ")", "%", "self", ".", "_block_len", "buffer_skip_len", "=", "processable_len", "-", "self", ".", "_context_len", "audio", "=", "audio", "[", ":", "processable_len", "]", "# Convert audio data type to float32 if needed", "if", "np", ".", "issubdtype", "(", "audio", ".", "dtype", ",", "np", ".", "uint8", ")", ":", "audio", "=", "audio", "/", "np", ".", "float32", "(", "128", ")", "-", "1", "# normalize to -1 to 1, uint8 to float32", "elif", "np", ".", "issubdtype", "(", "audio", ".", "dtype", ",", "np", ".", "integer", ")", ":", "audio", "=", "audio", "/", "np", ".", "float32", "(", "32768", ")", "# normalize to -1 to 1, int16 to float32", "melspectrum", "=", "samples_to_melspectrum", "(", "audio", ",", "# samples", "self", ".", "p", "[", "'model_sampling_rate'", "]", ",", "# sampling_rate", "self", ".", "_context_len", "+", "self", ".", "_block_len", ",", "# window_size", "self", ".", "_block_len", ",", "# stride", "n_mels", "=", "self", ".", "p", "[", "'mel_num'", "]", ",", "fmin", "=", "self", ".", "p", "[", "'mel_fmin'", "]", ",", "fmax", "=", "self", ".", "p", "[", "'mel_fmax'", "]", ",", ")", "if", "self", ".", "p", "[", "'num_mfcc_dct_coefs'", "]", "is", "not", "None", ":", "mfcc_features", "=", "melspectrum_to_mfcc", "(", "melspectrum", ",", "self", ".", "p", "[", "'num_mfcc_dct_coefs'", "]", ")", "return", "[", "mfcc_features", "]", ",", "buffer_skip_len", "else", ":", "return", "[", "melspectrum", "]", ",", "buffer_skip_len" ]
https://github.com/openvinotoolkit/open_model_zoo/blob/5a4232f6c35b4916eb6e8750141f4e45761237ef/demos/speech_recognition_deepspeech_demo/python/asr_utils/audio_features.py#L45-L75
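The dtype-normalization step above maps integer PCM samples into roughly [-1, 1) before the mel transform: uint8 audio is offset binary (128 is silence), and wider integer types are treated as int16-scaled. A minimal standalone sketch of just that conversion, with made-up sample data (`samples_to_melspectrum` is not reproduced here):

import numpy as np

def normalize_pcm(audio):
    # Mirrors the dtype handling in _process_blocks.
    if np.issubdtype(audio.dtype, np.uint8):
        return audio / np.float32(128) - 1   # uint8: midpoint 128 maps to 0.0
    if np.issubdtype(audio.dtype, np.integer):
        return audio / np.float32(32768)     # int16 full scale maps to [-1, 1)
    return audio.astype(np.float32)

pcm = np.array([0, 16384, -32768, 32767], dtype=np.int16)
print(normalize_pcm(pcm))  # [0.0, 0.5, -1.0, ~0.99997]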
Repository: TencentCloud/tencentcloud-sdk-python @ 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
Path: tencentcloud/vpc/v20170312/models.py (python)
Identifier: TemplateLimit.__init__
Parameters: (self)
Docstring summary (translated from Chinese): quotas for parameter-template members (IP addresses, IP address groups, protocol ports, protocol port groups).
[ "r", ":", "param", "AddressTemplateMemberLimit", ":", "参数模板IP地址成员配额。", ":", "type", "AddressTemplateMemberLimit", ":", "int", ":", "param", "AddressTemplateGroupMemberLimit", ":", "参数模板IP地址组成员配额。", ":", "type", "AddressTemplateGroupMemberLimit", ":", "int", ":", "param", "ServiceTemplateMemberLimit", ":", "参数模板I协议端口成员配额。", ":", "type", "ServiceTemplateMemberLimit", ":", "int", ":", "param", "ServiceTemplateGroupMemberLimit", ":", "参数模板协议端口组成员配额。", ":", "type", "ServiceTemplateGroupMemberLimit", ":", "int" ]
Function (docstring translated from Chinese):

def __init__(self):
    r"""
    :param AddressTemplateMemberLimit: Quota of IP address members per parameter template.
    :type AddressTemplateMemberLimit: int
    :param AddressTemplateGroupMemberLimit: Quota of IP address group members per parameter template.
    :type AddressTemplateGroupMemberLimit: int
    :param ServiceTemplateMemberLimit: Quota of protocol/port members per parameter template.
    :type ServiceTemplateMemberLimit: int
    :param ServiceTemplateGroupMemberLimit: Quota of protocol/port group members per parameter template.
    :type ServiceTemplateGroupMemberLimit: int
    """
    self.AddressTemplateMemberLimit = None
    self.AddressTemplateGroupMemberLimit = None
    self.ServiceTemplateMemberLimit = None
    self.ServiceTemplateGroupMemberLimit = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "AddressTemplateMemberLimit", "=", "None", "self", ".", "AddressTemplateGroupMemberLimit", "=", "None", "self", ".", "ServiceTemplateMemberLimit", "=", "None", "self", ".", "ServiceTemplateGroupMemberLimit", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/vpc/v20170312/models.py#L19116-L19130
Repository: tensorflow/mesh @ 57ed4018e6a173952501b074daabad32b6449f3d
Path: mesh_tensorflow/auto_mtf/graph_interface.py (python)
Identifier: GraphInterface.get_tensor_num_entries
Parameters: (self, tensor_name, partial_layout=None, mesh_dimension_to_size=None)
Return statement: return num_entries
Docstring summary: The number of entries in a tensor.
[ "The", "number", "of", "entries", "in", "a", "tensor", "." ]
Function:

def get_tensor_num_entries(self, tensor_name, partial_layout=None,
                           mesh_dimension_to_size=None):
    """The number of entries in a tensor.

    If partial_layout is specified, then mesh_dimension_to_size must also be.
    In this case, the number of entries on a single device is returned.

    Args:
      tensor_name: a string, name of a tensor in the graph.
      partial_layout: an optional {string: string}, from MTF dimension name
        to mesh dimension name.
      mesh_dimension_to_size: an optional {string: int}, from mesh dimension
        name to size.

    Returns:
      an integer
    """
    shape = self.get_tensor_shape(tensor_name)
    # We don't have to worry about divisibility issues because Mesh TensorFlow
    # only allows evenly divisible assignments.
    num_entries = 1
    for dim in shape.dims:
        num_entries = num_entries * dim.value
    if not partial_layout:
        return num_entries
    for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):
        if mtf_dimension_name not in partial_layout:
            continue
        mesh_dimension_name = partial_layout[mtf_dimension_name]
        mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]
        num_entries = int(math.ceil(num_entries / mesh_dimension_size))
    return num_entries
[ "def", "get_tensor_num_entries", "(", "self", ",", "tensor_name", ",", "partial_layout", "=", "None", ",", "mesh_dimension_to_size", "=", "None", ")", ":", "shape", "=", "self", ".", "get_tensor_shape", "(", "tensor_name", ")", "# We don't have to worry about divisiblity issues because Mesh TensorFlow", "# only allows evenly divisible assignments.", "num_entries", "=", "1", "for", "dim", "in", "shape", ".", "dims", ":", "num_entries", "=", "num_entries", "*", "dim", ".", "value", "if", "not", "partial_layout", ":", "return", "num_entries", "for", "mtf_dimension_name", "in", "self", ".", "get_tensor_mtf_dimension_names", "(", "tensor_name", ")", ":", "if", "mtf_dimension_name", "not", "in", "partial_layout", ":", "continue", "mesh_dimension_name", "=", "partial_layout", "[", "mtf_dimension_name", "]", "mesh_dimension_size", "=", "mesh_dimension_to_size", "[", "mesh_dimension_name", "]", "num_entries", "=", "int", "(", "math", ".", "ceil", "(", "num_entries", "/", "mesh_dimension_size", ")", ")", "return", "num_entries" ]
https://github.com/tensorflow/mesh/blob/57ed4018e6a173952501b074daabad32b6449f3d/mesh_tensorflow/auto_mtf/graph_interface.py#L153-L187
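The per-device count above is just the product of the dimension sizes, divided (with ceiling) by the mesh-dimension size of every MTF dimension the partial layout maps. A standalone sketch of the same arithmetic, with made-up dimension names and no Mesh TensorFlow dependency:

import math

def num_entries_per_device(dims, partial_layout=None, mesh_dimension_to_size=None):
    # dims: {mtf_dimension_name: size}; mirrors get_tensor_num_entries above.
    num_entries = 1
    for size in dims.values():
        num_entries *= size
    if not partial_layout:
        return num_entries
    for name in dims:
        if name not in partial_layout:
            continue
        mesh_size = mesh_dimension_to_size[partial_layout[name]]
        num_entries = int(math.ceil(num_entries / mesh_size))
    return num_entries

# A [batch=8, hidden=100] tensor, unsplit and then split over a 4-way mesh dim:
print(num_entries_per_device({"batch": 8, "hidden": 100}))                    # 800
print(num_entries_per_device({"batch": 8, "hidden": 100},
                             {"batch": "rows"}, {"rows": 4}))                 # 200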
Repository: pinterest/mysql_utils @ 7ab237699b85de8b503b09f36e0309ac807689fe
Path: retirement_queue.py (python)
Identifier: get_protected_hosts
Parameters: (return_type='tuple')
Docstring summary: Get data on all protected hosts
[ "Get", "data", "on", "all", "protected", "hosts" ]
Function:

def get_protected_hosts(return_type='tuple'):
    """ Get data on all protected hosts

    Args:
    return_type - Options are:
                  'set' - return a set of protected hosts
                  'tuple' - returns all data regarding protected hosts

    Returns:
    A tuple which may be empty, with entries similar to:
    ({'protecting_user': 'rwultsch', 'reason': 'because', 'hostname': 'sharddb-14-4'},
     {'protecting_user': 'rwultsch', 'reason': 'because reasons', 'hostname': 'sharddb-14-5'})
    """
    if return_type != 'tuple' and return_type != 'set':
        raise Exception('Unsupported return_type '
                        '{return_type}'.format(return_type=return_type))

    reporting_conn = mysql_lib.get_mysqlops_connections()
    cursor = reporting_conn.cursor()
    sql = "SELECT * FROM mysqlops.retirement_protection"
    cursor.execute(sql)
    results = cursor.fetchall()

    if return_type == 'tuple':
        return results
    elif return_type == 'set':
        results_set = set()
        for entry in results:
            results_set.add(entry['hostname'])
        return results_set
[ "def", "get_protected_hosts", "(", "return_type", "=", "'tuple'", ")", ":", "if", "return_type", "!=", "'tuple'", "and", "return_type", "!=", "'set'", ":", "raise", "Exception", "(", "'Unsupported return_type '", "'{return_type}'", ".", "format", "(", "return_type", "=", "return_type", ")", ")", "reporting_conn", "=", "mysql_lib", ".", "get_mysqlops_connections", "(", ")", "cursor", "=", "reporting_conn", ".", "cursor", "(", ")", "sql", "=", "\"SELECT * FROM mysqlops.retirement_protection\"", "cursor", ".", "execute", "(", "sql", ")", "results", "=", "cursor", ".", "fetchall", "(", ")", "if", "return_type", "==", "'tuple'", ":", "return", "results", "elif", "return_type", "==", "'set'", ":", "results_set", "=", "set", "(", ")", "for", "entry", "in", "results", ":", "results_set", ".", "add", "(", "entry", "[", "'hostname'", "]", ")", "return", "results_set" ]
https://github.com/pinterest/mysql_utils/blob/7ab237699b85de8b503b09f36e0309ac807689fe/retirement_queue.py#L422-L452
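A hedged usage sketch of both return modes. It assumes a reachable mysqlops reporting database, which `mysql_lib.get_mysqlops_connections()` provides inside this codebase:

# Full rows (tuple of dicts) for reporting:
for entry in get_protected_hosts('tuple'):
    print(entry['hostname'], entry['protecting_user'], entry['reason'])

# Membership checks are cheaper against the set form:
if 'sharddb-14-4' in get_protected_hosts('set'):
    print('host is protected; skipping retirement')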
Repository: googleapis/python-ndb @ e780c81cde1016651afbfcad8180d9912722cf1b
Path: google/cloud/ndb/_cache.py (python)
Identifier: _GlobalCacheDeleteBatch.add
Parameters: (self, key)
Return statement: return future
Docstring summary: Add a key to delete from the cache.
[ "Add", "a", "key", "to", "delete", "from", "the", "cache", "." ]
Function:

def add(self, key):
    """Add a key to delete from the cache.

    Arguments:
        key (bytes): The key to delete.

    Returns:
        tasklets.Future: Eventual result will be ``None``.
    """
    future = tasklets.Future(info=self.future_info(key))
    self.keys.append(key)
    self.futures.append(future)
    return future
[ "def", "add", "(", "self", ",", "key", ")", ":", "future", "=", "tasklets", ".", "Future", "(", "info", "=", "self", ".", "future_info", "(", "key", ")", ")", "self", ".", "keys", ".", "append", "(", "key", ")", "self", ".", "futures", ".", "append", "(", "future", ")", "return", "future" ]
https://github.com/googleapis/python-ndb/blob/e780c81cde1016651afbfcad8180d9912722cf1b/google/cloud/ndb/_cache.py#L467-L479
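The method follows a common batch-of-futures pattern: enqueue the key, hand the caller a future, resolve every future when the batch is flushed. A minimal sketch of that pattern using `concurrent.futures.Future` in place of ndb's tasklets (the class and flush logic here are illustrative, not the ndb API):

from concurrent.futures import Future

class DeleteBatch:
    """Accumulates keys; one flush resolves every caller's future."""
    def __init__(self):
        self.keys, self.futures = [], []

    def add(self, key):
        future = Future()
        self.keys.append(key)
        self.futures.append(future)
        return future

    def flush(self, cache):
        for key in self.keys:
            cache.pop(key, None)      # the batched delete
        for future in self.futures:
            future.set_result(None)   # eventual result is None, as in ndb

cache = {b"a": 1, b"b": 2}
batch = DeleteBatch()
f = batch.add(b"a")
batch.flush(cache)
print(f.result(), cache)  # None {b'b': 2}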
Repository: duterscmy/ccks2019-ckbqa-4th-codes @ 19784e5ffc801b0647d27286b4984e0d24087a47
Path: src/mention_extractor.py (python)
Identifier: MentionExtractor.extract_mentions
Parameters: (self, question)
Return statement: return entity_mention
Docstring summary (translated from Chinese): Return a dict of entity mentions.
[ "返回字典,实体mentions" ]
Function (docstring and comments translated from Chinese):

def extract_mentions(self, question):
    '''Return a dict of entity mentions.'''
    entity_mention = {}
    # Collect candidate mentions with coarse jieba segmentation
    mentions = []
    tokens = jieba.lcut(question)
    for t in tokens:
        if t in self.segment_dic:
            mentions.append(t)
    # Extract candidate mentions with the sequence-labeling model
    x1, x2 = self.tokenizer.encode(first=question, max_len=self.max_seq_len)
    x1, x2 = np.array([x1]), np.array([x2])
    predict_y = self.ner_model.predict([x1, x2], batch_size=32).tolist()[0]  # (len, 1)
    predict_y = [1 if each[0] > 0.5 else 0 for each in predict_y]
    mentions_bert = self.restore_entity_from_labels(predict_y, question)
    # Merge; membership in mention_dic was checked above
    mentions = mentions + mentions_bert
    for token in mentions:
        entity_mention[token] = token
    return entity_mention
[ "def", "extract_mentions", "(", "self", ",", "question", ")", ":", "entity_mention", "=", "{", "}", "#使用jieba粗糙分词的方式得到候选mention", "mentions", "=", "[", "]", "tokens", "=", "jieba", ".", "lcut", "(", "question", ")", "for", "t", "in", "tokens", ":", "if", "t", "in", "self", ".", "segment_dic", ":", "mentions", ".", "append", "(", "t", ")", "#使用序列标注模型来抽取候选 mention", "x1", ",", "x2", "=", "self", ".", "tokenizer", ".", "encode", "(", "first", "=", "question", ",", "max_len", "=", "self", ".", "max_seq_len", ")", "x1", ",", "x2", "=", "np", ".", "array", "(", "[", "x1", "]", ")", ",", "np", ".", "array", "(", "[", "x2", "]", ")", "predict_y", "=", "self", ".", "ner_model", ".", "predict", "(", "[", "x1", ",", "x2", "]", ",", "batch_size", "=", "32", ")", ".", "tolist", "(", ")", "[", "0", "]", "#(len,1)", "predict_y", "=", "[", "1", "if", "each", "[", "0", "]", ">", "0.5", "else", "0", "for", "each", "in", "predict_y", "]", "mentions_bert", "=", "self", ".", "restore_entity_from_labels", "(", "predict_y", ",", "question", ")", "#判断是否属于mention_dic", "mentions", "=", "mentions", "+", "mentions_bert", "for", "token", "in", "mentions", ":", "entity_mention", "[", "token", "]", "=", "token", "return", "entity_mention" ]
https://github.com/duterscmy/ccks2019-ckbqa-4th-codes/blob/19784e5ffc801b0647d27286b4984e0d24087a47/src/mention_extractor.py#L42-L67
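`restore_entity_from_labels` is not shown in this record. A plausible minimal sketch of what it must do — group consecutive positions labeled 1 back into substrings of the question — could look like this (an assumption for illustration, not the repository's actual implementation):

def restore_entity_from_labels(labels, question):
    # labels: one 0/1 per character of question; 1 marks a mention character.
    mentions, start = [], None
    for i, label in enumerate(labels[:len(question)]):
        if label == 1 and start is None:
            start = i
        elif label == 0 and start is not None:
            mentions.append(question[start:i])
            start = None
    if start is not None:
        mentions.append(question[start:])
    return mentions

print(restore_entity_from_labels([1, 1, 0, 0, 0, 0, 0], "三体的作者是谁"))  # ['三体']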
Repository: facebookresearch/ParlAI @ e4d59c30eef44f1f67105961b82a83fd28d7d78b
Path: parlai/utils/bpe.py (python)
Identifier: Gpt2BpeHelper.save
Parameters: (self, dir_name: str, file_name: str)
Docstring summary: Save appropriate files.
[ "Save", "appropriate", "files", "." ]
Function:

def save(self, dir_name: str, file_name: str):
    """
    Save appropriate files.

    :param dir_name:
        directory to save.
    :param file_name:
        file to save.
    """
    out_json_path = os.path.join(dir_name, file_name + "-vocab.json")
    out_merge_path = os.path.join(dir_name, file_name + "-merges.txt")
    # Possibly bad assumption: if the destination file already exists,
    # we don't need to copy it over again.
    if not PathManager.exists(out_json_path):
        logging.info(f"Copying {self.json_path} to {out_json_path}")
        PathManager.copy(self.json_path, out_json_path)
    if not PathManager.exists(out_merge_path):
        logging.info(f"Copying {self.merge_path} to {out_merge_path}")
        PathManager.copy(self.merge_path, out_merge_path)
[ "def", "save", "(", "self", ",", "dir_name", ":", "str", ",", "file_name", ":", "str", ")", ":", "out_json_path", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "file_name", "+", "\"-vocab.json\"", ")", "out_merge_path", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "file_name", "+", "\"-merges.txt\"", ")", "# Possibly bad assumption: if the destination file already exists,", "# we don't need to copy it over again.", "if", "not", "PathManager", ".", "exists", "(", "out_json_path", ")", ":", "logging", ".", "info", "(", "f\"Copying {self.json_path} to {out_json_path}\"", ")", "PathManager", ".", "copy", "(", "self", ".", "json_path", ",", "out_json_path", ")", "if", "not", "PathManager", ".", "exists", "(", "out_merge_path", ")", ":", "logging", ".", "info", "(", "f\"Copying {self.merge_path} to {out_merge_path}\"", ")", "PathManager", ".", "copy", "(", "self", ".", "merge_path", ",", "out_merge_path", ")" ]
https://github.com/facebookresearch/ParlAI/blob/e4d59c30eef44f1f67105961b82a83fd28d7d78b/parlai/utils/bpe.py#L759-L777
Repository: kuri65536/python-for-android @ 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
Path: python-modules/twisted/twisted/trial/reporter.py (python)
Identifier: Reporter._printErrors
Parameters: (self)
Docstring summary: Print all of the non-success results to the stream in full.
[ "Print", "all", "of", "the", "non", "-", "success", "results", "to", "the", "stream", "in", "full", "." ]
Function:

def _printErrors(self):
    """
    Print all of the non-success results to the stream in full.
    """
    self._write('\n')
    self._printResults('[SKIPPED]', self.skips, lambda x: '%s\n' % x)
    self._printResults('[TODO]', self.expectedFailures,
                       self._printExpectedFailure)
    self._printResults('[FAIL]', self.failures,
                       self._formatFailureTraceback)
    self._printResults('[ERROR]', self.errors,
                       self._formatFailureTraceback)
    self._printResults('[SUCCESS!?!]', self.unexpectedSuccesses,
                       self._printUnexpectedSuccess)
[ "def", "_printErrors", "(", "self", ")", ":", "self", ".", "_write", "(", "'\\n'", ")", "self", ".", "_printResults", "(", "'[SKIPPED]'", ",", "self", ".", "skips", ",", "lambda", "x", ":", "'%s\\n'", "%", "x", ")", "self", ".", "_printResults", "(", "'[TODO]'", ",", "self", ".", "expectedFailures", ",", "self", ".", "_printExpectedFailure", ")", "self", ".", "_printResults", "(", "'[FAIL]'", ",", "self", ".", "failures", ",", "self", ".", "_formatFailureTraceback", ")", "self", ".", "_printResults", "(", "'[ERROR]'", ",", "self", ".", "errors", ",", "self", ".", "_formatFailureTraceback", ")", "self", ".", "_printResults", "(", "'[SUCCESS!?!]'", ",", "self", ".", "unexpectedSuccesses", ",", "self", ".", "_printUnexpectedSuccess", ")" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/trial/reporter.py#L598-L611
Repository: mesalock-linux/mesapy @ ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
Path: lib-python/2.7/mutex.py (python)
Identifier: mutex.lock
Parameters: (self, function, argument)
Docstring summary: Lock a mutex, call the function with supplied argument when it is acquired. If the mutex is already locked, place function and argument in the queue.
[ "Lock", "a", "mutex", "call", "the", "function", "with", "supplied", "argument", "when", "it", "is", "acquired", ".", "If", "the", "mutex", "is", "already", "locked", "place", "function", "and", "argument", "in", "the", "queue", "." ]
Function:

def lock(self, function, argument):
    """Lock a mutex, call the function with supplied argument
    when it is acquired.  If the mutex is already locked, place
    function and argument in the queue."""
    if self.testandset():
        function(argument)
    else:
        self.queue.append((function, argument))
[ "def", "lock", "(", "self", ",", "function", ",", "argument", ")", ":", "if", "self", ".", "testandset", "(", ")", ":", "function", "(", "argument", ")", "else", ":", "self", ".", "queue", ".", "append", "(", "(", "function", ",", "argument", ")", ")" ]
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/mutex.py#L39-L46
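A short usage sketch for this Python 2 stdlib module (removed in Python 3): the first `lock` call runs its callback immediately, later calls queue until `unlock` drains them:

from mutex import mutex  # Python 2 standard library

def report(arg):
    print("acquired with argument %r" % arg)

m = mutex()
m.lock(report, 1)   # mutex is free: report(1) runs immediately
m.lock(report, 2)   # mutex is held: (report, 2) waits in the queue
m.unlock()          # dequeues and runs report(2) with the mutex still held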
Repository: carla-simulator/scenario_runner @ f4d00d88eda4212a1e119515c96281a4be5c234e
Path: srunner/scenarios/object_crash_vehicle.py (python)
Identifier: StationaryObjectCrossing._create_test_criteria
Parameters: (self)
Return statement: return criteria
Docstring summary: A list of all test criteria will be created that is later used in parallel behavior tree.
[ "A", "list", "of", "all", "test", "criteria", "will", "be", "created", "that", "is", "later", "used", "in", "parallel", "behavior", "tree", "." ]
Function:

def _create_test_criteria(self):
    """
    A list of all test criteria will be created that is later used
    in parallel behavior tree.
    """
    criteria = []

    collision_criterion = CollisionTest(self.ego_vehicles[0])
    criteria.append(collision_criterion)

    return criteria
[ "def", "_create_test_criteria", "(", "self", ")", ":", "criteria", "=", "[", "]", "collision_criterion", "=", "CollisionTest", "(", "self", ".", "ego_vehicles", "[", "0", "]", ")", "criteria", ".", "append", "(", "collision_criterion", ")", "return", "criteria" ]
https://github.com/carla-simulator/scenario_runner/blob/f4d00d88eda4212a1e119515c96281a4be5c234e/srunner/scenarios/object_crash_vehicle.py#L109-L119
Repository: edgewall/trac @ beb3e4eaf1e0a456d801a50a8614ecab06de29fc
Path: trac/util/html.py (python)
Identifier: TracHTMLSanitizer.sanitize
Parameters: (self, html)
Return statement: return Markup(transform.out.getvalue())
Docstring summary: Transforms the incoming HTML by removing anything that's deemed unsafe.
[ "Transforms", "the", "incoming", "HTML", "by", "removing", "anything", "s", "that", "deemed", "unsafe", "." ]
Function:

def sanitize(self, html):
    """Transforms the incoming HTML by removing anything that's deemed
    unsafe.

    :param html: the input HTML
    :type: str
    :return: the sanitized content
    :rtype: Markup
    """
    transform = HTMLSanitization(self, io.StringIO())
    transform.feed(html)
    transform.close()
    return Markup(transform.out.getvalue())
[ "def", "sanitize", "(", "self", ",", "html", ")", ":", "transform", "=", "HTMLSanitization", "(", "self", ",", "io", ".", "StringIO", "(", ")", ")", "transform", ".", "feed", "(", "html", ")", "transform", ".", "close", "(", ")", "return", "Markup", "(", "transform", ".", "out", ".", "getvalue", "(", ")", ")" ]
https://github.com/edgewall/trac/blob/beb3e4eaf1e0a456d801a50a8614ecab06de29fc/trac/util/html.py#L608-L621
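A hedged usage sketch, assuming Trac's sanitizer can be default-constructed (its keyword arguments all have defaults at this revision):

from trac.util.html import TracHTMLSanitizer

sanitizer = TracHTMLSanitizer()
markup = sanitizer.sanitize(u'<div onclick="alert(1)">hi</div>')
print(markup)  # unsafe attributes such as event handlers are stripped; 'hi' survives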
Repository: a312863063/seeprettyface-generator-yellow @ c75b95b56c40036d00b35f3e140cc4a45598e077
Path: dnnlib/tflib/network.py (python)
Identifier: Network._init_graph
Parameters: (self)
Docstring: none
Function:

def _init_graph(self) -> None:
    # Collect inputs.
    self.input_names = []
    for param in inspect.signature(self._build_func).parameters.values():
        if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
            self.input_names.append(param.name)
    self.num_inputs = len(self.input_names)
    assert self.num_inputs >= 1

    # Choose name and scope.
    if self.name is None:
        self.name = self._build_func_name
    assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
    with tf.name_scope(None):
        self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True)

    # Finalize build func kwargs.
    build_kwargs = dict(self.static_kwargs)
    build_kwargs["is_template_graph"] = True
    build_kwargs["components"] = self.components

    # Build template graph.
    with tfutil.absolute_variable_scope(self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(self.scope):  # ignore surrounding scopes
        assert tf.get_variable_scope().name == self.scope
        assert tf.get_default_graph().get_name_scope() == self.scope
        with tf.control_dependencies(None):  # ignore surrounding control dependencies
            self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
            out_expr = self._build_func(*self.input_templates, **build_kwargs)

    # Collect outputs.
    assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
    self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
    self.num_outputs = len(self.output_templates)
    assert self.num_outputs >= 1
    assert all(tfutil.is_tf_expression(t) for t in self.output_templates)

    # Perform sanity checks.
    if any(t.shape.ndims is None for t in self.input_templates):
        raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
    if any(t.shape.ndims is None for t in self.output_templates):
        raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
    if any(not isinstance(comp, Network) for comp in self.components.values()):
        raise ValueError("Components of a Network must be Networks themselves.")
    if len(self.components) != len(set(comp.name for comp in self.components.values())):
        raise ValueError("Components of a Network must have unique names.")

    # List inputs and outputs.
    self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates]
    self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates]
    self.input_shape = self.input_shapes[0]
    self.output_shape = self.output_shapes[0]
    self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]

    # List variables.
    self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
    self.vars = OrderedDict(self.own_vars)
    self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items())
    self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
    self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
[ "def", "_init_graph", "(", "self", ")", "->", "None", ":", "# Collect inputs.", "self", ".", "input_names", "=", "[", "]", "for", "param", "in", "inspect", ".", "signature", "(", "self", ".", "_build_func", ")", ".", "parameters", ".", "values", "(", ")", ":", "if", "param", ".", "kind", "==", "param", ".", "POSITIONAL_OR_KEYWORD", "and", "param", ".", "default", "is", "param", ".", "empty", ":", "self", ".", "input_names", ".", "append", "(", "param", ".", "name", ")", "self", ".", "num_inputs", "=", "len", "(", "self", ".", "input_names", ")", "assert", "self", ".", "num_inputs", ">=", "1", "# Choose name and scope.", "if", "self", ".", "name", "is", "None", ":", "self", ".", "name", "=", "self", ".", "_build_func_name", "assert", "re", ".", "match", "(", "\"^[A-Za-z0-9_.\\\\-]*$\"", ",", "self", ".", "name", ")", "with", "tf", ".", "name_scope", "(", "None", ")", ":", "self", ".", "scope", "=", "tf", ".", "get_default_graph", "(", ")", ".", "unique_name", "(", "self", ".", "name", ",", "mark_as_used", "=", "True", ")", "# Finalize build func kwargs.", "build_kwargs", "=", "dict", "(", "self", ".", "static_kwargs", ")", "build_kwargs", "[", "\"is_template_graph\"", "]", "=", "True", "build_kwargs", "[", "\"components\"", "]", "=", "self", ".", "components", "# Build template graph.", "with", "tfutil", ".", "absolute_variable_scope", "(", "self", ".", "scope", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ",", "tfutil", ".", "absolute_name_scope", "(", "self", ".", "scope", ")", ":", "# ignore surrounding scopes", "assert", "tf", ".", "get_variable_scope", "(", ")", ".", "name", "==", "self", ".", "scope", "assert", "tf", ".", "get_default_graph", "(", ")", ".", "get_name_scope", "(", ")", "==", "self", ".", "scope", "with", "tf", ".", "control_dependencies", "(", "None", ")", ":", "# ignore surrounding control dependencies", "self", ".", "input_templates", "=", "[", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "name", "=", "name", ")", "for", "name", "in", "self", ".", "input_names", "]", "out_expr", "=", "self", ".", "_build_func", "(", "*", "self", ".", "input_templates", ",", "*", "*", "build_kwargs", ")", "# Collect outputs.", "assert", "tfutil", ".", "is_tf_expression", "(", "out_expr", ")", "or", "isinstance", "(", "out_expr", ",", "tuple", ")", "self", ".", "output_templates", "=", "[", "out_expr", "]", "if", "tfutil", ".", "is_tf_expression", "(", "out_expr", ")", "else", "list", "(", "out_expr", ")", "self", ".", "num_outputs", "=", "len", "(", "self", ".", "output_templates", ")", "assert", "self", ".", "num_outputs", ">=", "1", "assert", "all", "(", "tfutil", ".", "is_tf_expression", "(", "t", ")", "for", "t", "in", "self", ".", "output_templates", ")", "# Perform sanity checks.", "if", "any", "(", "t", ".", "shape", ".", "ndims", "is", "None", "for", "t", "in", "self", ".", "input_templates", ")", ":", "raise", "ValueError", "(", "\"Network input shapes not defined. Please call x.set_shape() for each input.\"", ")", "if", "any", "(", "t", ".", "shape", ".", "ndims", "is", "None", "for", "t", "in", "self", ".", "output_templates", ")", ":", "raise", "ValueError", "(", "\"Network output shapes not defined. 
Please call x.set_shape() where applicable.\"", ")", "if", "any", "(", "not", "isinstance", "(", "comp", ",", "Network", ")", "for", "comp", "in", "self", ".", "components", ".", "values", "(", ")", ")", ":", "raise", "ValueError", "(", "\"Components of a Network must be Networks themselves.\"", ")", "if", "len", "(", "self", ".", "components", ")", "!=", "len", "(", "set", "(", "comp", ".", "name", "for", "comp", "in", "self", ".", "components", ".", "values", "(", ")", ")", ")", ":", "raise", "ValueError", "(", "\"Components of a Network must have unique names.\"", ")", "# List inputs and outputs.", "self", ".", "input_shapes", "=", "[", "tfutil", ".", "shape_to_list", "(", "t", ".", "shape", ")", "for", "t", "in", "self", ".", "input_templates", "]", "self", ".", "output_shapes", "=", "[", "tfutil", ".", "shape_to_list", "(", "t", ".", "shape", ")", "for", "t", "in", "self", ".", "output_templates", "]", "self", ".", "input_shape", "=", "self", ".", "input_shapes", "[", "0", "]", "self", ".", "output_shape", "=", "self", ".", "output_shapes", "[", "0", "]", "self", ".", "output_names", "=", "[", "t", ".", "name", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ".", "split", "(", "\":\"", ")", "[", "0", "]", "for", "t", "in", "self", ".", "output_templates", "]", "# List variables.", "self", ".", "own_vars", "=", "OrderedDict", "(", "(", "var", ".", "name", "[", "len", "(", "self", ".", "scope", ")", "+", "1", ":", "]", ".", "split", "(", "\":\"", ")", "[", "0", "]", ",", "var", ")", "for", "var", "in", "tf", ".", "global_variables", "(", "self", ".", "scope", "+", "\"/\"", ")", ")", "self", ".", "vars", "=", "OrderedDict", "(", "self", ".", "own_vars", ")", "self", ".", "vars", ".", "update", "(", "(", "comp", ".", "name", "+", "\"/\"", "+", "name", ",", "var", ")", "for", "comp", "in", "self", ".", "components", ".", "values", "(", ")", "for", "name", ",", "var", "in", "comp", ".", "vars", ".", "items", "(", ")", ")", "self", ".", "trainables", "=", "OrderedDict", "(", "(", "name", ",", "var", ")", "for", "name", ",", "var", "in", "self", ".", "vars", ".", "items", "(", ")", "if", "var", ".", "trainable", ")", "self", ".", "var_global_to_local", "=", "OrderedDict", "(", "(", "var", ".", "name", ".", "split", "(", "\":\"", ")", "[", "0", "]", ",", "name", ")", "for", "name", ",", "var", "in", "self", ".", "vars", ".", "items", "(", ")", ")" ]
https://github.com/a312863063/seeprettyface-generator-yellow/blob/c75b95b56c40036d00b35f3e140cc4a45598e077/dnnlib/tflib/network.py#L126-L186
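The input-collection step at the top relies only on the standard library: every positional parameter of the build function without a default becomes a network input. That trick is easy to isolate (the build function below is illustrative; no TensorFlow needed):

import inspect

def input_names_of(func):
    # Positional-or-keyword parameters without defaults, in declaration order,
    # mirroring the "Collect inputs" loop in Network._init_graph.
    return [
        p.name
        for p in inspect.signature(func).parameters.values()
        if p.kind == p.POSITIONAL_OR_KEYWORD and p.default is p.empty
    ]

def build_func(latents, labels, is_template_graph=False, **kwargs):
    pass

print(input_names_of(build_func))  # ['latents', 'labels']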
Repository: WerWolv/EdiZon_CheatsConfigsAndScripts @ d16d36c7509c01dca770f402babd83ff2e9ae6e7
Path: Scripts/lib/python3.5/lzma.py (python)
Identifier: LZMAFile.writable
Parameters: (self)
Return statement: return self._mode == _MODE_WRITE
Docstring summary: Return whether the file was opened for writing.
[ "Return", "whether", "the", "file", "was", "opened", "for", "writing", "." ]
Function:

def writable(self):
    """Return whether the file was opened for writing."""
    self._check_not_closed()
    return self._mode == _MODE_WRITE
[ "def", "writable", "(", "self", ")", ":", "self", ".", "_check_not_closed", "(", ")", "return", "self", ".", "_mode", "==", "_MODE_WRITE" ]
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/lzma.py#L175-L178
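A quick standard-library usage check showing how `writable()` tracks the open mode (the file path is hypothetical):

import lzma

with lzma.open("example.xz", "wb") as f:
    print(f.writable(), f.readable())  # True False
    f.write(b"payload")

with lzma.open("example.xz", "rb") as f:
    print(f.writable(), f.readable())  # False True
    print(f.read())                    # b'payload'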
Repository: xuanzebi/BERT-CH-NER @ 24bf95411e831281ffc6c13f3ea9382870a17d64
Path: bert-master/extract_features.py (python)
Identifier: input_fn_builder
Parameters: (features, seq_length)
Return statement: return input_fn
Docstring summary: Creates an `input_fn` closure to be passed to TPUEstimator.
[ "Creates", "an", "input_fn", "closure", "to", "be", "passed", "to", "TPUEstimator", "." ]
Function:

def input_fn_builder(features, seq_length):
    """Creates an `input_fn` closure to be passed to TPUEstimator."""

    all_unique_ids = []
    all_input_ids = []
    all_input_mask = []
    all_input_type_ids = []

    for feature in features:
        all_unique_ids.append(feature.unique_id)
        all_input_ids.append(feature.input_ids)
        all_input_mask.append(feature.input_mask)
        all_input_type_ids.append(feature.input_type_ids)

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]

        num_examples = len(features)

        # This is for demo purposes and does NOT scale to large data sets. We do
        # not use Dataset.from_generator() because that uses tf.py_func which is
        # not TPU compatible. The right way to load data is with TFRecordReader.
        d = tf.data.Dataset.from_tensor_slices({
            "unique_ids":
                tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
            "input_ids":
                tf.constant(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32),
            "input_mask":
                tf.constant(all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32),
            "input_type_ids":
                tf.constant(all_input_type_ids, shape=[num_examples, seq_length], dtype=tf.int32),
        })

        d = d.batch(batch_size=batch_size, drop_remainder=False)
        return d

    return input_fn
[ "def", "input_fn_builder", "(", "features", ",", "seq_length", ")", ":", "all_unique_ids", "=", "[", "]", "all_input_ids", "=", "[", "]", "all_input_mask", "=", "[", "]", "all_input_type_ids", "=", "[", "]", "for", "feature", "in", "features", ":", "all_unique_ids", ".", "append", "(", "feature", ".", "unique_id", ")", "all_input_ids", ".", "append", "(", "feature", ".", "input_ids", ")", "all_input_mask", ".", "append", "(", "feature", ".", "input_mask", ")", "all_input_type_ids", ".", "append", "(", "feature", ".", "input_type_ids", ")", "def", "input_fn", "(", "params", ")", ":", "\"\"\"The actual input function.\"\"\"", "batch_size", "=", "params", "[", "\"batch_size\"", "]", "num_examples", "=", "len", "(", "features", ")", "# This is for demo purposes and does NOT scale to large data sets. We do", "# not use Dataset.from_generator() because that uses tf.py_func which is", "# not TPU compatible. The right way to load data is with TFRecordReader.", "d", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "{", "\"unique_ids\"", ":", "tf", ".", "constant", "(", "all_unique_ids", ",", "shape", "=", "[", "num_examples", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"input_ids\"", ":", "tf", ".", "constant", "(", "all_input_ids", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"input_mask\"", ":", "tf", ".", "constant", "(", "all_input_mask", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"input_type_ids\"", ":", "tf", ".", "constant", "(", "all_input_type_ids", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "}", ")", "d", "=", "d", ".", "batch", "(", "batch_size", "=", "batch_size", ",", "drop_remainder", "=", "False", ")", "return", "d", "return", "input_fn" ]
https://github.com/xuanzebi/BERT-CH-NER/blob/24bf95411e831281ffc6c13f3ea9382870a17d64/bert-master/extract_features.py#L100-L145
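A hedged usage sketch: the builder only needs objects exposing the four feature attributes, so a namedtuple stands in for BERT's `InputFeatures` here (the token ids are made up; running this requires TensorFlow, and iterating the Dataset directly assumes eager execution):

import collections

Feature = collections.namedtuple(
    "Feature", ["unique_id", "input_ids", "input_mask", "input_type_ids"])

seq_length = 4
features = [
    Feature(0, [101, 7592, 102, 0], [1, 1, 1, 0], [0, 0, 0, 0]),
    Feature(1, [101, 2088, 102, 0], [1, 1, 1, 0], [0, 0, 0, 0]),
]

input_fn = input_fn_builder(features, seq_length)
dataset = input_fn({"batch_size": 2})   # TPUEstimator normally supplies params
for batch in dataset:
    print(batch["input_ids"].shape)     # (2, 4)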
Repository: Nuitka/Nuitka @ 39262276993757fa4e299f497654065600453fc9
Path: nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Node/FS.py (python)
Identifier: Dir.glob
Parameters: (self, pathname, ondisk=True, source=False, strings=False, exclude=None)
Return statement: return sorted(result, key=lambda a: str(a))
Docstring summary: Returns a list of Nodes (or strings) matching a specified pathname pattern.
[ "Returns", "a", "list", "of", "Nodes", "(", "or", "strings", ")", "matching", "a", "specified", "pathname", "pattern", "." ]
Function:

def glob(self, pathname, ondisk=True, source=False, strings=False, exclude=None):
    """
    Returns a list of Nodes (or strings) matching a specified
    pathname pattern.

    Pathname patterns follow UNIX shell semantics:  * matches
    any-length strings of any characters, ? matches any character,
    and [] can enclose lists or ranges of characters.  Matches do
    not span directory separators.

    The matches take into account Repositories, returning local
    Nodes if a corresponding entry exists in a Repository (either
    an in-memory Node or something on disk).

    By default, the glob() function matches entries that exist
    on-disk, in addition to in-memory Nodes.  Setting the "ondisk"
    argument to False (or some other non-true value) causes the glob()
    function to only match in-memory Nodes.  The default behavior is
    to return both the on-disk and in-memory Nodes.

    The "source" argument, when true, specifies that corresponding
    source Nodes must be returned if you're globbing in a build
    directory (initialized with VariantDir()).  The default behavior
    is to return Nodes local to the VariantDir().

    The "strings" argument, when true, returns the matches as strings,
    not Nodes.  The strings are path names relative to this directory.

    The "exclude" argument, if not None, must be a pattern or a list
    of patterns following the same UNIX shell semantics.  Elements
    matching at least one pattern of this list will be excluded from
    the result.

    The underlying algorithm is adapted from the glob.glob() function
    in the Python library (but heavily modified), and uses fnmatch()
    under the covers.
    """
    dirname, basename = os.path.split(pathname)
    if not dirname:
        result = self._glob1(basename, ondisk, source, strings)
    else:
        if has_glob_magic(dirname):
            list = self.glob(dirname, ondisk, source, False, exclude)
        else:
            list = [self.Dir(dirname, create=True)]
        result = []
        for dir in list:
            r = dir._glob1(basename, ondisk, source, strings)
            if strings:
                r = [os.path.join(str(dir), x) for x in r]
            result.extend(r)
    if exclude:
        excludes = []
        excludeList = SCons.Util.flatten(exclude)
        for x in excludeList:
            r = self.glob(x, ondisk, source, strings)
            excludes.extend(r)
        result = [x for x in result
                  if not any(fnmatch.fnmatch(str(x), str(e))
                             for e in SCons.Util.flatten(excludes))]
    return sorted(result, key=lambda a: str(a))
[ "def", "glob", "(", "self", ",", "pathname", ",", "ondisk", "=", "True", ",", "source", "=", "False", ",", "strings", "=", "False", ",", "exclude", "=", "None", ")", ":", "dirname", ",", "basename", "=", "os", ".", "path", ".", "split", "(", "pathname", ")", "if", "not", "dirname", ":", "result", "=", "self", ".", "_glob1", "(", "basename", ",", "ondisk", ",", "source", ",", "strings", ")", "else", ":", "if", "has_glob_magic", "(", "dirname", ")", ":", "list", "=", "self", ".", "glob", "(", "dirname", ",", "ondisk", ",", "source", ",", "False", ",", "exclude", ")", "else", ":", "list", "=", "[", "self", ".", "Dir", "(", "dirname", ",", "create", "=", "True", ")", "]", "result", "=", "[", "]", "for", "dir", "in", "list", ":", "r", "=", "dir", ".", "_glob1", "(", "basename", ",", "ondisk", ",", "source", ",", "strings", ")", "if", "strings", ":", "r", "=", "[", "os", ".", "path", ".", "join", "(", "str", "(", "dir", ")", ",", "x", ")", "for", "x", "in", "r", "]", "result", ".", "extend", "(", "r", ")", "if", "exclude", ":", "excludes", "=", "[", "]", "excludeList", "=", "SCons", ".", "Util", ".", "flatten", "(", "exclude", ")", "for", "x", "in", "excludeList", ":", "r", "=", "self", ".", "glob", "(", "x", ",", "ondisk", ",", "source", ",", "strings", ")", "excludes", ".", "extend", "(", "r", ")", "result", "=", "[", "x", "for", "x", "in", "result", "if", "not", "any", "(", "fnmatch", ".", "fnmatch", "(", "str", "(", "x", ")", ",", "str", "(", "e", ")", ")", "for", "e", "in", "SCons", ".", "Util", ".", "flatten", "(", "excludes", ")", ")", "]", "return", "sorted", "(", "result", ",", "key", "=", "lambda", "a", ":", "str", "(", "a", ")", ")" ]
https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Node/FS.py#L2131-L2189
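In an SConstruct this method is usually reached through the global `Glob()` wrapper, which resolves against the current directory's Dir node. A hedged sketch (file patterns are illustrative):

# SConstruct (sketch): Glob() dispatches to Dir.glob() shown above.
env = Environment()
sources = Glob('src/*.c', exclude=['src/experimental_*.c'])  # list of File Nodes
names = Glob('src/*.c', strings=True)                        # relative path strings
env.Program('app', sources)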
Repository: biopython/biopython @ 2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
Path: Bio/Align/__init__.py (python)
Identifier: Alignment.__getitem__
Parameters: (self, key)
Docstring summary: Return self[key].
[ "Return", "self", "[", "key", "]", "." ]
Function:

def __getitem__(self, key):
    """Return self[key].

    Indices of the form

    self[:, :]

    return a copy of the Alignment object;

    self[:, i:]
    self[:, :j]
    self[:, i:j]
    self[:, iterable] (where iterable returns integers)

    return a new Alignment object spanning the selected columns;

    self[k, i]
    self[k, i:]
    self[k, :j]
    self[k, i:j]
    self[k, iterable] (where iterable returns integers)
    self[k] (equivalent to self[k, :])

    return a string with the aligned sequence (including gaps) for the
    selected columns, where k = 0 represents the target and k = 1
    represents the query sequence; and

    self[:, i]

    returns a string with the selected column in the alignment.

    >>> from Bio.Align import PairwiseAligner
    >>> aligner = PairwiseAligner()
    >>> alignments = aligner.align("ACCGGTTT", "ACGGGTT")
    >>> alignment = alignments[0]
    >>> print(alignment)
    ACCGG-TTT
    ||-||-||-
    AC-GGGTT-
    <BLANKLINE>
    >>> alignment[0, :]
    'ACCGG-TTT'
    >>> alignment[1, :]
    'AC-GGGTT-'
    >>> alignment[0]
    'ACCGG-TTT'
    >>> alignment[1]
    'AC-GGGTT-'
    >>> alignment[0, 1:-2]
    'CCGG-T'
    >>> alignment[1, 1:-2]
    'C-GGGT'
    >>> alignment[0, (1, 5, 2)]
    'C-C'
    >>> alignment[1, ::2]
    'A-GT-'
    >>> alignment[1, range(0, 9, 2)]
    'A-GT-'
    >>> alignment[:, 0]
    'AA'
    >>> alignment[:, 5]
    '-G'
    >>> alignment[:, 1:]  # doctest:+ELLIPSIS
    <Bio.Align.Alignment object (2 rows x 8 columns) at 0x...>
    >>> print(alignment[:, 1:])
    ACCGG-TTT
    |-||-||-
    AC-GGGTT-
    <BLANKLINE>
    >>> print(alignment[:, 2:])
    ACCGG-TTT
    -||-||-
    AC-GGGTT-
    <BLANKLINE>
    >>> print(alignment[:, 3:])
    ACCGG-TTT
    ||-||-
    ACGGGTT-
    <BLANKLINE>
    >>> print(alignment[:, 3:-1])
    ACCGG-TTT
    ||-||
    ACGGGTT
    <BLANKLINE>
    >>> print(alignment[:, ::2])
    ACGTT
    |-||-
    A-GT-
    <BLANKLINE>
    >>> print(alignment[:, range(1, 9, 2)])
    CG-T
    ||-|
    CGGT
    <BLANKLINE>
    >>> print(alignment[:, (2, 7, 3)])
    CTG
    -||
    -TG
    <BLANKLINE>
    """
    import numpy

    if isinstance(key, slice):
        if key.indices(len(self)) == (0, 2, 1):
            sequences = self.sequences
            coordinates = self.coordinates.copy()
            alignment = Alignment(sequences, coordinates)
            alignment.score = self.score
            return alignment
        raise NotImplementedError
    sequences = list(self.sequences)
    coordinates = self.coordinates.copy()
    for i, sequence in enumerate(sequences):
        if coordinates[i, 0] > coordinates[i, -1]:  # mapped to reverse strand
            n = len(sequences[i])
            coordinates[i, :] = n - coordinates[i, :]
            sequences[i] = reverse_complement(sequences[i], inplace=False)
    if isinstance(key, int):
        n, m = self.shape
        row = key
        if row < 0:
            row += n
        if row < 0 or row >= n:
            raise IndexError("row index %d is out of bounds (%d rows)" % (row, n))
        sequence = sequences[row]
        try:
            sequence = sequence.seq  # SeqRecord confusion
        except AttributeError:
            pass
        line = ""
        steps = numpy.diff(coordinates, 1)
        gaps = steps[row] == 0  # seriously, flake8??
        steps = steps.max(0)
        i = coordinates[row, 0]
        for step, gap in zip(steps, gaps):
            if gap:
                line += "-" * step
            else:
                j = i + step
                line += str(sequence[i:j])
                i = j
        # line may be a str or a Seq at this point.
        return str(line)
    if isinstance(key, tuple):
        try:
            row, col = key
        except ValueError:
            raise ValueError("only tuples of length 2 can be alignment indices")
        if isinstance(row, int):
            n, m = self.shape
            if row < 0:
                row += n
            if row < 0 or row >= n:
                raise IndexError(
                    "row index %d is out of bounds (%d rows)" % (row, n)
                )
            if isinstance(col, int):
                start_index = col
                if start_index < 0:
                    start_index += m
                if start_index < 0 or start_index >= m:
                    raise IndexError(
                        "column index %d is out of bounds (%d columns)" % (col, m)
                    )
                steps = numpy.diff(coordinates, 1)
                indices = steps.max(0).cumsum()
                steps = steps[row]
                index = indices.searchsorted(start_index, side="right")
                if steps[index]:
                    offset = start_index - indices[index]
                    indices = coordinates[row, 0] + steps.cumsum()
                    i = indices[index] + offset
                    line = sequences[row][i : i + 1]
                else:
                    line = "-"
                return line
            if isinstance(col, slice):
                sequence = sequences[row]
                try:
                    sequence = sequence.seq  # SeqRecord confusion
                except AttributeError:
                    pass
                sequence = str(sequence)
                start_index, stop_index, step = col.indices(m)
                if start_index < stop_index and step == 1:
                    steps = numpy.diff(coordinates, 1)
                    gaps = steps[row] == 0  # come on flake8, this is ugly
                    sequence_indices = coordinates[row, 0] + steps[row, :].cumsum()
                    steps = steps.max(0)
                    indices = steps.cumsum()
                    i = indices.searchsorted(start_index, side="right")
                    j = i + indices[i:].searchsorted(stop_index, side="right")
                    if i == j:
                        length = stop_index - start_index
                        if gaps[i]:
                            line = "-" * length
                        else:
                            offset = start_index - indices[i]
                            start = sequence_indices[i] + offset
                            stop = start + length
                            line = sequence[start:stop]
                    else:
                        length = indices[i] - start_index
                        stop = sequence_indices[i]
                        if gaps[i]:
                            line = "-" * length
                        else:
                            start = stop - length
                            line = sequence[start:stop]
                        i += 1
                        while i < j:
                            step = steps[i]
                            if gaps[i]:
                                line += "-" * step
                            else:
                                start = stop
                                stop = start + step
                                line += sequence[start:stop]
                            i += 1
                        length = stop_index - indices[j - 1]
                        if length > 0:
                            if gaps[j]:
                                line += "-" * length
                            else:
                                start = stop
                                stop = start + length
                                line += sequence[start:stop]
                    return line
                # make an iterable if step != 1
                col = range(start_index, stop_index, step)
            # try if we can use col as an iterable
            line = self[row]
            try:
                line = "".join(line[index] for index in col)
            except IndexError:
                raise
            except Exception:
                raise TypeError(
                    "second index must be an integer, slice, or iterable of integers"
                ) from None
            else:
                return line
        if isinstance(row, slice):
            n, m = self.shape
            if isinstance(col, int):
                if col < 0:
                    col += m
                if col < 0 or col >= m:
                    raise IndexError(
                        "column index %d is out of bounds (%d columns)" % (col, m)
                    )
                starts = numpy.full(n, sys.maxsize)
                for ends in coordinates.transpose():
                    step = max(ends - starts)
                    if step < 0:
                        index = 0
                    else:
                        index += step
                    if col < index:
                        break
                    starts = ends
                else:
                    raise IndexError("column index %d is out of bounds" % col)
                offset = index - col
                line = ""
                start, stop, step = row.indices(n)
                for i in range(start, stop, step):
                    s = starts[i]
                    e = ends[i]
                    if s == e:
                        line += "-"
                    else:
                        sequence = sequences[i]
                        line += sequence[e - offset]
                return line
            if row.indices(n) != (0, n, 1):
                raise NotImplementedError
            if isinstance(col, slice):
                start_index, stop_index, step = col.indices(m)
                if start_index < stop_index and step == 1:
                    steps = numpy.diff(coordinates, 1)
                    indices = steps.max(0).cumsum()
                    i = indices.searchsorted(start_index, side="right")
                    j = i + indices[i:].searchsorted(stop_index, side="left") + 1
                    offset = steps[:, i] - indices[i] + start_index
                    coordinates[:, i] += offset * (steps[:, i] > 0)
                    offset = indices[j - 1] - stop_index
                    coordinates[:, j] -= offset * (steps[:, j - 1] > 0)
                    coordinates = coordinates[:, i : j + 1]
                    for i, sequence in enumerate(sequences):
                        if self.coordinates[i, 0] > self.coordinates[i, -1]:
                            # mapped to reverse strand
                            n = len(sequence)
                            coordinates[i, :] = n - coordinates[i, :]
                    sequences = self.sequences
                    alignment = Alignment(sequences, coordinates)
                    if numpy.array_equal(coordinates, self.coordinates):
                        try:
                            alignment.score = self.score
                        except AttributeError:
                            pass
                    try:
                        column_annotations = self.column_annotations
                    except AttributeError:
                        pass
                    else:
                        alignment.column_annotations = {}
                        for key, value in column_annotations.items():
                            value = value[start_index:stop_index]
                            try:
                                value = value.copy()
                            except AttributeError:
                                # immutable tuples like str, tuple
                                pass
                            alignment.column_annotations[key] = value
                    return alignment
                # make an iterable if step != 1
                col = range(start_index, stop_index, step)
            # try if we can use col as an iterable
            indices = tuple(col)
            try:
                lines = [self[i, indices] for i in range(n)]
            except IndexError:
                raise
            except Exception:
                raise TypeError(
                    "second index must be an integer, slice, or iterable of integers"
                ) from None
            else:
                sequences = [line.replace("-", "") for line in lines]
                coordinates = self.infer_coordinates(lines)
                alignment = Alignment(sequences, coordinates)
                try:
                    column_annotations = self.column_annotations
                except AttributeError:
                    pass
                else:
                    alignment.column_annotations = {}
                    for key, value in column_annotations.items():
                        value_generator = (value[index] for index in indices)
                        if isinstance(value, str):
                            value = "".join(value_generator)
                        else:
                            value = value.__class__(value_generator)
                        alignment.column_annotations[key] = value
                return alignment
        raise TypeError("first index must be an integer or slice")
    raise TypeError("alignment indices must be integers, slices, or tuples")
[ "def", "__getitem__", "(", "self", ",", "key", ")", ":", "import", "numpy", "if", "isinstance", "(", "key", ",", "slice", ")", ":", "if", "key", ".", "indices", "(", "len", "(", "self", ")", ")", "==", "(", "0", ",", "2", ",", "1", ")", ":", "sequences", "=", "self", ".", "sequences", "coordinates", "=", "self", ".", "coordinates", ".", "copy", "(", ")", "alignment", "=", "Alignment", "(", "sequences", ",", "coordinates", ")", "alignment", ".", "score", "=", "self", ".", "score", "return", "alignment", "raise", "NotImplementedError", "sequences", "=", "list", "(", "self", ".", "sequences", ")", "coordinates", "=", "self", ".", "coordinates", ".", "copy", "(", ")", "for", "i", ",", "sequence", "in", "enumerate", "(", "sequences", ")", ":", "if", "coordinates", "[", "i", ",", "0", "]", ">", "coordinates", "[", "i", ",", "-", "1", "]", ":", "# mapped to reverse strand", "n", "=", "len", "(", "sequences", "[", "i", "]", ")", "coordinates", "[", "i", ",", ":", "]", "=", "n", "-", "coordinates", "[", "i", ",", ":", "]", "sequences", "[", "i", "]", "=", "reverse_complement", "(", "sequences", "[", "i", "]", ",", "inplace", "=", "False", ")", "if", "isinstance", "(", "key", ",", "int", ")", ":", "n", ",", "m", "=", "self", ".", "shape", "row", "=", "key", "if", "row", "<", "0", ":", "row", "+=", "n", "if", "row", "<", "0", "or", "row", ">=", "n", ":", "raise", "IndexError", "(", "\"row index %d is out of bounds (%d rows)\"", "%", "(", "row", ",", "n", ")", ")", "sequence", "=", "sequences", "[", "row", "]", "try", ":", "sequence", "=", "sequence", ".", "seq", "# SeqRecord confusion", "except", "AttributeError", ":", "pass", "line", "=", "\"\"", "steps", "=", "numpy", ".", "diff", "(", "coordinates", ",", "1", ")", "gaps", "=", "steps", "[", "row", "]", "==", "0", "# seriously, flake8??", "steps", "=", "steps", ".", "max", "(", "0", ")", "i", "=", "coordinates", "[", "row", ",", "0", "]", "for", "step", ",", "gap", "in", "zip", "(", "steps", ",", "gaps", ")", ":", "if", "gap", ":", "line", "+=", "\"-\"", "*", "step", "else", ":", "j", "=", "i", "+", "step", "line", "+=", "str", "(", "sequence", "[", "i", ":", "j", "]", ")", "i", "=", "j", "# line may be a str or a Seq at this point.", "return", "str", "(", "line", ")", "if", "isinstance", "(", "key", ",", "tuple", ")", ":", "try", ":", "row", ",", "col", "=", "key", "except", "ValueError", ":", "raise", "ValueError", "(", "\"only tuples of length 2 can be alignment indices\"", ")", "if", "isinstance", "(", "row", ",", "int", ")", ":", "n", ",", "m", "=", "self", ".", "shape", "if", "row", "<", "0", ":", "row", "+=", "n", "if", "row", "<", "0", "or", "row", ">=", "n", ":", "raise", "IndexError", "(", "\"row index %d is out of bounds (%d rows)\"", "%", "(", "row", ",", "n", ")", ")", "if", "isinstance", "(", "col", ",", "int", ")", ":", "start_index", "=", "col", "if", "start_index", "<", "0", ":", "start_index", "+=", "m", "if", "start_index", "<", "0", "or", "start_index", ">=", "m", ":", "raise", "IndexError", "(", "\"column index %d is out of bounds (%d columns)\"", "%", "(", "col", ",", "m", ")", ")", "steps", "=", "numpy", ".", "diff", "(", "coordinates", ",", "1", ")", "indices", "=", "steps", ".", "max", "(", "0", ")", ".", "cumsum", "(", ")", "steps", "=", "steps", "[", "row", "]", "index", "=", "indices", ".", "searchsorted", "(", "start_index", ",", "side", "=", "\"right\"", ")", "if", "steps", "[", "index", "]", ":", "offset", "=", "start_index", "-", "indices", "[", "index", "]", "indices", "=", "coordinates", "[", "row", ",", "0", 
"]", "+", "steps", ".", "cumsum", "(", ")", "i", "=", "indices", "[", "index", "]", "+", "offset", "line", "=", "sequences", "[", "row", "]", "[", "i", ":", "i", "+", "1", "]", "else", ":", "line", "=", "\"-\"", "return", "line", "if", "isinstance", "(", "col", ",", "slice", ")", ":", "sequence", "=", "sequences", "[", "row", "]", "try", ":", "sequence", "=", "sequence", ".", "seq", "# SeqRecord confusion", "except", "AttributeError", ":", "pass", "sequence", "=", "str", "(", "sequence", ")", "start_index", ",", "stop_index", ",", "step", "=", "col", ".", "indices", "(", "m", ")", "if", "start_index", "<", "stop_index", "and", "step", "==", "1", ":", "steps", "=", "numpy", ".", "diff", "(", "coordinates", ",", "1", ")", "gaps", "=", "steps", "[", "row", "]", "==", "0", "# come on flake8, this is ugly", "sequence_indices", "=", "coordinates", "[", "row", ",", "0", "]", "+", "steps", "[", "row", ",", ":", "]", ".", "cumsum", "(", ")", "steps", "=", "steps", ".", "max", "(", "0", ")", "indices", "=", "steps", ".", "cumsum", "(", ")", "i", "=", "indices", ".", "searchsorted", "(", "start_index", ",", "side", "=", "\"right\"", ")", "j", "=", "i", "+", "indices", "[", "i", ":", "]", ".", "searchsorted", "(", "stop_index", ",", "side", "=", "\"right\"", ")", "if", "i", "==", "j", ":", "length", "=", "stop_index", "-", "start_index", "if", "gaps", "[", "i", "]", ":", "line", "=", "\"-\"", "*", "length", "else", ":", "offset", "=", "start_index", "-", "indices", "[", "i", "]", "start", "=", "sequence_indices", "[", "i", "]", "+", "offset", "stop", "=", "start", "+", "length", "line", "=", "sequence", "[", "start", ":", "stop", "]", "else", ":", "length", "=", "indices", "[", "i", "]", "-", "start_index", "stop", "=", "sequence_indices", "[", "i", "]", "if", "gaps", "[", "i", "]", ":", "line", "=", "\"-\"", "*", "length", "else", ":", "start", "=", "stop", "-", "length", "line", "=", "sequence", "[", "start", ":", "stop", "]", "i", "+=", "1", "while", "i", "<", "j", ":", "step", "=", "steps", "[", "i", "]", "if", "gaps", "[", "i", "]", ":", "line", "+=", "\"-\"", "*", "step", "else", ":", "start", "=", "stop", "stop", "=", "start", "+", "step", "line", "+=", "sequence", "[", "start", ":", "stop", "]", "i", "+=", "1", "length", "=", "stop_index", "-", "indices", "[", "j", "-", "1", "]", "if", "length", ">", "0", ":", "if", "gaps", "[", "j", "]", ":", "line", "+=", "\"-\"", "*", "length", "else", ":", "start", "=", "stop", "stop", "=", "start", "+", "length", "line", "+=", "sequence", "[", "start", ":", "stop", "]", "return", "line", "# make an iterable if step != 1", "col", "=", "range", "(", "start_index", ",", "stop_index", ",", "step", ")", "# try if we can use col as an iterable", "line", "=", "self", "[", "row", "]", "try", ":", "line", "=", "\"\"", ".", "join", "(", "line", "[", "index", "]", "for", "index", "in", "col", ")", "except", "IndexError", ":", "raise", "except", "Exception", ":", "raise", "TypeError", "(", "\"second index must be an integer, slice, or iterable of integers\"", ")", "from", "None", "else", ":", "return", "line", "if", "isinstance", "(", "row", ",", "slice", ")", ":", "n", ",", "m", "=", "self", ".", "shape", "if", "isinstance", "(", "col", ",", "int", ")", ":", "if", "col", "<", "0", ":", "col", "+=", "m", "if", "col", "<", "0", "or", "col", ">=", "m", ":", "raise", "IndexError", "(", "\"column index %d is out of bounds (%d columns)\"", "%", "(", "col", ",", "m", ")", ")", "starts", "=", "numpy", ".", "full", "(", "n", ",", "sys", ".", "maxsize", ")", "for", "ends", 
"in", "coordinates", ".", "transpose", "(", ")", ":", "step", "=", "max", "(", "ends", "-", "starts", ")", "if", "step", "<", "0", ":", "index", "=", "0", "else", ":", "index", "+=", "step", "if", "col", "<", "index", ":", "break", "starts", "=", "ends", "else", ":", "raise", "IndexError", "(", "\"column index %d is out of bounds\"", "%", "col", ")", "offset", "=", "index", "-", "col", "line", "=", "\"\"", "start", ",", "stop", ",", "step", "=", "row", ".", "indices", "(", "n", ")", "for", "i", "in", "range", "(", "start", ",", "stop", ",", "step", ")", ":", "s", "=", "starts", "[", "i", "]", "e", "=", "ends", "[", "i", "]", "if", "s", "==", "e", ":", "line", "+=", "\"-\"", "else", ":", "sequence", "=", "sequences", "[", "i", "]", "line", "+=", "sequence", "[", "e", "-", "offset", "]", "return", "line", "if", "row", ".", "indices", "(", "n", ")", "!=", "(", "0", ",", "n", ",", "1", ")", ":", "raise", "NotImplementedError", "if", "isinstance", "(", "col", ",", "slice", ")", ":", "start_index", ",", "stop_index", ",", "step", "=", "col", ".", "indices", "(", "m", ")", "if", "start_index", "<", "stop_index", "and", "step", "==", "1", ":", "steps", "=", "numpy", ".", "diff", "(", "coordinates", ",", "1", ")", "indices", "=", "steps", ".", "max", "(", "0", ")", ".", "cumsum", "(", ")", "i", "=", "indices", ".", "searchsorted", "(", "start_index", ",", "side", "=", "\"right\"", ")", "j", "=", "i", "+", "indices", "[", "i", ":", "]", ".", "searchsorted", "(", "stop_index", ",", "side", "=", "\"left\"", ")", "+", "1", "offset", "=", "steps", "[", ":", ",", "i", "]", "-", "indices", "[", "i", "]", "+", "start_index", "coordinates", "[", ":", ",", "i", "]", "+=", "offset", "*", "(", "steps", "[", ":", ",", "i", "]", ">", "0", ")", "offset", "=", "indices", "[", "j", "-", "1", "]", "-", "stop_index", "coordinates", "[", ":", ",", "j", "]", "-=", "offset", "*", "(", "steps", "[", ":", ",", "j", "-", "1", "]", ">", "0", ")", "coordinates", "=", "coordinates", "[", ":", ",", "i", ":", "j", "+", "1", "]", "for", "i", ",", "sequence", "in", "enumerate", "(", "sequences", ")", ":", "if", "self", ".", "coordinates", "[", "i", ",", "0", "]", ">", "self", ".", "coordinates", "[", "i", ",", "-", "1", "]", ":", "# mapped to reverse strand", "n", "=", "len", "(", "sequence", ")", "coordinates", "[", "i", ",", ":", "]", "=", "n", "-", "coordinates", "[", "i", ",", ":", "]", "sequences", "=", "self", ".", "sequences", "alignment", "=", "Alignment", "(", "sequences", ",", "coordinates", ")", "if", "numpy", ".", "array_equal", "(", "coordinates", ",", "self", ".", "coordinates", ")", ":", "try", ":", "alignment", ".", "score", "=", "self", ".", "score", "except", "AttributeError", ":", "pass", "try", ":", "column_annotations", "=", "self", ".", "column_annotations", "except", "AttributeError", ":", "pass", "else", ":", "alignment", ".", "column_annotations", "=", "{", "}", "for", "key", ",", "value", "in", "column_annotations", ".", "items", "(", ")", ":", "value", "=", "value", "[", "start_index", ":", "stop_index", "]", "try", ":", "value", "=", "value", ".", "copy", "(", ")", "except", "AttributeError", ":", "# immutable tuples like str, tuple", "pass", "alignment", ".", "column_annotations", "[", "key", "]", "=", "value", "return", "alignment", "# make an iterable if step != 1", "col", "=", "range", "(", "start_index", ",", "stop_index", ",", "step", ")", "# try if we can use col as an iterable", "indices", "=", "tuple", "(", "col", ")", "try", ":", "lines", "=", "[", "self", "[", "i", ",", "indices", "]", 
"for", "i", "in", "range", "(", "n", ")", "]", "except", "IndexError", ":", "raise", "except", "Exception", ":", "raise", "TypeError", "(", "\"second index must be an integer, slice, or iterable of integers\"", ")", "from", "None", "else", ":", "sequences", "=", "[", "line", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "for", "line", "in", "lines", "]", "coordinates", "=", "self", ".", "infer_coordinates", "(", "lines", ")", "alignment", "=", "Alignment", "(", "sequences", ",", "coordinates", ")", "try", ":", "column_annotations", "=", "self", ".", "column_annotations", "except", "AttributeError", ":", "pass", "else", ":", "alignment", ".", "column_annotations", "=", "{", "}", "for", "key", ",", "value", "in", "column_annotations", ".", "items", "(", ")", ":", "value_generator", "=", "(", "value", "[", "index", "]", "for", "index", "in", "indices", ")", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "\"\"", ".", "join", "(", "value_generator", ")", "else", ":", "value", "=", "value", ".", "__class__", "(", "value_generator", ")", "alignment", ".", "column_annotations", "[", "key", "]", "=", "value", "return", "alignment", "raise", "TypeError", "(", "\"first index must be an integer or slice\"", ")", "raise", "TypeError", "(", "\"alignment indices must be integers, slices, or tuples\"", ")" ]
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/Align/__init__.py#L1245-L1592
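The `__getitem__` tokens above implement integer, tuple, and slice indexing for `Bio.Align.Alignment`: an integer row yields the gapped sequence as a plain string, and a `[rows, cols]` slice yields a new `Alignment`. A minimal usage sketch, assuming a Biopython recent enough to ship `Bio.Align.PairwiseAligner` (the exact gapped strings depend on the default scoring):

from Bio import Align

aligner = Align.PairwiseAligner()
alignment = aligner.align("ACCGGT", "ACGT")[0]  # best-scoring alignment

print(alignment[0])      # gapped target row as a str
print(alignment[1])      # gapped query row, gaps rendered as '-'
sub = alignment[:, 1:4]  # new Alignment restricted to columns 1..3
print(sub)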
networkx/networkx
1620568e36702b1cfeaf1c0277b167b6cb93e48d
networkx/algorithms/isomorphism/ismags.py
python
ISMAGS._find_node_edge_color
(graph, node_colors, edge_colors)
return node_edge_colors
For every node in graph, compute a color that combines 1) the color of the node itself, and 2) the number of edges of each color that connect it to each color of node.
For every node in graph, compute a color that combines 1) the color of the node itself, and 2) the number of edges of each color that connect it to each color of node.
[ "For", "every", "node", "in", "graph", "compute", "a", "color", "that", "combines", "1", ")", "the", "color", "of", "the", "node", "itself", "and", "2", ")", "the", "number", "of", "edges", "of", "each", "color", "that", "connect", "it", "to", "each", "color", "of", "node", "." ]
def _find_node_edge_color(graph, node_colors, edge_colors): """ For every node in graph, come up with a color that combines 1) the color of the node, and 2) the number of edges of a color to each type of node. """ counts = defaultdict(lambda: defaultdict(int)) for node1, node2 in graph.edges: if (node1, node2) in edge_colors: # FIXME directed graphs ecolor = edge_colors[node1, node2] else: ecolor = edge_colors[node2, node1] # Count per node how many edges it has of what color to nodes of # what color counts[node1][ecolor, node_colors[node2]] += 1 counts[node2][ecolor, node_colors[node1]] += 1 node_edge_colors = dict() for node in graph.nodes: node_edge_colors[node] = node_colors[node], set(counts[node].items()) return node_edge_colors
[ "def", "_find_node_edge_color", "(", "graph", ",", "node_colors", ",", "edge_colors", ")", ":", "counts", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "int", ")", ")", "for", "node1", ",", "node2", "in", "graph", ".", "edges", ":", "if", "(", "node1", ",", "node2", ")", "in", "edge_colors", ":", "# FIXME directed graphs", "ecolor", "=", "edge_colors", "[", "node1", ",", "node2", "]", "else", ":", "ecolor", "=", "edge_colors", "[", "node2", ",", "node1", "]", "# Count per node how many edges it has of what color to nodes of", "# what color", "counts", "[", "node1", "]", "[", "ecolor", ",", "node_colors", "[", "node2", "]", "]", "+=", "1", "counts", "[", "node2", "]", "[", "ecolor", ",", "node_colors", "[", "node1", "]", "]", "+=", "1", "node_edge_colors", "=", "dict", "(", ")", "for", "node", "in", "graph", ".", "nodes", ":", "node_edge_colors", "[", "node", "]", "=", "node_colors", "[", "node", "]", ",", "set", "(", "counts", "[", "node", "]", ".", "items", "(", ")", ")", "return", "node_edge_colors" ]
https://github.com/networkx/networkx/blob/1620568e36702b1cfeaf1c0277b167b6cb93e48d/networkx/algorithms/isomorphism/ismags.py#L688-L710
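The combined color built here is just the pair (node color, set of ((edge color, neighbour color), count) items). A self-contained rerun of the same counting logic on a toy two-edge graph, using plain dicts instead of a networkx graph (all colors hypothetical):

from collections import defaultdict

edges = [(0, 1), (1, 2)]
node_colors = {0: "red", 1: "red", 2: "blue"}
edge_colors = {(0, 1): "solid", (1, 2): "dashed"}

counts = defaultdict(lambda: defaultdict(int))
for n1, n2 in edges:
    ecolor = edge_colors[n1, n2]
    counts[n1][ecolor, node_colors[n2]] += 1
    counts[n2][ecolor, node_colors[n1]] += 1

node_edge_colors = {n: (node_colors[n], set(counts[n].items()))
                    for n in node_colors}
print(node_edge_colors[1])
# ('red', {(('solid', 'red'), 1), (('dashed', 'blue'), 1)})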
quantumlib/Cirq
89f88b01d69222d3f1ec14d649b7b3a85ed9211f
cirq-google/cirq_google/engine/engine_client.py
python
EngineClient.remove_job_labels
( self, project_id: str, program_id: str, job_id: str, label_keys: List[str] )
return job
Removes labels with given keys from the labels of a previously created quantum job. Args: project_id: A project_id of the parent Google Cloud Project. program_id: Unique ID of the program within the parent project. job_id: Unique ID of the job within the parent program. label_keys: Label keys to remove from the existing job labels. Returns: The updated quantum job.
Removes labels with given keys from the labels of a previously created quantum job.
[ "Removes", "labels", "with", "given", "keys", "from", "the", "labels", "of", "a", "previously", "created", "quantum", "job", "." ]
def remove_job_labels( self, project_id: str, program_id: str, job_id: str, label_keys: List[str] ) -> qtypes.QuantumJob: """Removes labels with given keys from the labels of a previously created quantum job. Args: project_id: A project_id of the parent Google Cloud Project. program_id: Unique ID of the program within the parent project. job_id: Unique ID of the job within the parent program. label_keys: Label keys to remove from the existing job labels. Returns: The updated quantum job. """ job = self.get_job(project_id, program_id, job_id, False) old_labels = job.labels new_labels = dict(old_labels) for key in label_keys: new_labels.pop(key, None) if new_labels != old_labels: fingerprint = job.label_fingerprint return self._set_job_labels(project_id, program_id, job_id, new_labels, fingerprint) return job
[ "def", "remove_job_labels", "(", "self", ",", "project_id", ":", "str", ",", "program_id", ":", "str", ",", "job_id", ":", "str", ",", "label_keys", ":", "List", "[", "str", "]", ")", "->", "qtypes", ".", "QuantumJob", ":", "job", "=", "self", ".", "get_job", "(", "project_id", ",", "program_id", ",", "job_id", ",", "False", ")", "old_labels", "=", "job", ".", "labels", "new_labels", "=", "dict", "(", "old_labels", ")", "for", "key", "in", "label_keys", ":", "new_labels", ".", "pop", "(", "key", ",", "None", ")", "if", "new_labels", "!=", "old_labels", ":", "fingerprint", "=", "job", ".", "label_fingerprint", "return", "self", ".", "_set_job_labels", "(", "project_id", ",", "program_id", ",", "job_id", ",", "new_labels", ",", "fingerprint", ")", "return", "job" ]
https://github.com/quantumlib/Cirq/blob/89f88b01d69222d3f1ec14d649b7b3a85ed9211f/cirq-google/cirq_google/engine/engine_client.py#L536-L559
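The update itself is plain dict bookkeeping: copy the labels, drop the requested keys (missing keys are ignored), and only call the API when something actually changed. A standalone sketch of that core step, with hypothetical labels:

old_labels = {"team": "qc", "color": "blue"}
label_keys = ["color", "not-present"]

new_labels = dict(old_labels)
for key in label_keys:
    new_labels.pop(key, None)  # absent keys are silently skipped

if new_labels != old_labels:
    print("would call _set_job_labels with", new_labels)  # {'team': 'qc'}
else:
    print("no update needed")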
numenta/nupic
b9ebedaf54f49a33de22d8d44dff7c765cdb5548
external/linux32/lib/python2.6/site-packages/pkg_resources.py
python
Environment.obtain
(self, requirement, installer=None)
Obtain a distribution matching `requirement` (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.
Obtain a distribution matching `requirement` (e.g. via download)
[ "Obtain", "a", "distribution", "matching", "requirement", "(", "e", ".", "g", ".", "via", "download", ")" ]
def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement)
[ "def", "obtain", "(", "self", ",", "requirement", ",", "installer", "=", "None", ")", ":", "if", "installer", "is", "not", "None", ":", "return", "installer", "(", "requirement", ")" ]
https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/external/linux32/lib/python2.6/site-packages/pkg_resources.py#L760-L770
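Because `obtain` is an explicit hook, a subclass can try its own acquisition path before deferring to the `installer` callable. A hedged sketch; `_fetch_from_mirror` is a hypothetical helper, not part of pkg_resources:

from pkg_resources import Environment

class MirrorEnvironment(Environment):
    def obtain(self, requirement, installer=None):
        dist = self._fetch_from_mirror(requirement)  # hypothetical lookup
        if dist is not None:
            return dist
        # fall back to base behaviour: installer(requirement) or None
        return super(MirrorEnvironment, self).obtain(requirement, installer)

    def _fetch_from_mirror(self, requirement):
        return None  # stub; a real subclass would query an internal mirror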
mozilla/treeherder
228750c5774cfbe7d395e10e2c84665d9122496d
treeherder/services/pulse/exchange.py
python
get_exchange
(connection, name, create=False)
return bound_exchange
Get a Kombu Exchange object using the passed in name. Can create an Exchange but this is typically not wanted in production-like environments and only useful for testing.
Get a Kombu Exchange object using the passed in name.
[ "Get", "a", "Kombu", "Exchange", "object", "using", "the", "passed", "in", "name", "." ]
def get_exchange(connection, name, create=False): """ Get a Kombu Exchange object using the passed in name. Can create an Exchange but this is typically not wanted in production-like environments and only useful for testing. """ exchange = Exchange(name, type="topic", passive=not create) # bind the exchange to our connection so operations can be performed on it bound_exchange = exchange(connection) # ensure the exchange exists. Throw an error if it was created with # passive=True and it doesn't exist. bound_exchange.declare() return bound_exchange
[ "def", "get_exchange", "(", "connection", ",", "name", ",", "create", "=", "False", ")", ":", "exchange", "=", "Exchange", "(", "name", ",", "type", "=", "\"topic\"", ",", "passive", "=", "not", "create", ")", "# bind the exchange to our connection so operations can be performed on it", "bound_exchange", "=", "exchange", "(", "connection", ")", "# ensure the exchange exists. Throw an error if it was created with", "# passive=True and it doesn't exist.", "bound_exchange", ".", "declare", "(", ")", "return", "bound_exchange" ]
https://github.com/mozilla/treeherder/blob/228750c5774cfbe7d395e10e2c84665d9122496d/treeherder/services/pulse/exchange.py#L4-L20
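A hedged usage sketch: the broker URL and exchange name are hypothetical, and `declare()` will raise if `create=False` and the exchange does not already exist on the broker:

from kombu import Connection

with Connection("amqp://guest:guest@localhost:5672//") as connection:
    exchange = get_exchange(connection, "exchange/my-app/v1",
                            create=True)  # passive=False, so it is created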
KhronosGroup/glTF-Blender-Exporter
dd7a3dbd8f43a79d572e7c45f4215f770bb92a37
scripts/addons/io_scene_gltf2/gltf2_get.py
python
get_light_index
(glTF, name)
return -1
Return the light index in the glTF array.
Return the light index in the glTF array.
[ "Return", "the", "light", "index", "in", "the", "glTF", "array", "." ]
def get_light_index(glTF, name): """ Return the light index in the glTF array. """ if glTF.get('extensions') is None: return -1 extensions = glTF['extensions'] if extensions.get('KHR_lights') is None: return -1 khr_lights = extensions['KHR_lights'] if khr_lights.get('lights') is None: return -1 lights = khr_lights['lights'] index = 0 for light in lights: if light['name'] == name: return index index += 1 return -1
[ "def", "get_light_index", "(", "glTF", ",", "name", ")", ":", "if", "glTF", ".", "get", "(", "'extensions'", ")", "is", "None", ":", "return", "-", "1", "extensions", "=", "glTF", "[", "'extensions'", "]", "if", "extensions", ".", "get", "(", "'KHR_lights'", ")", "is", "None", ":", "return", "-", "1", "khr_lights", "=", "extensions", "[", "'KHR_lights'", "]", "if", "khr_lights", ".", "get", "(", "'lights'", ")", "is", "None", ":", "return", "-", "1", "lights", "=", "khr_lights", "[", "'lights'", "]", "index", "=", "0", "for", "light", "in", "lights", ":", "if", "light", "[", "'name'", "]", "==", "name", ":", "return", "index", "index", "+=", "1", "return", "-", "1" ]
https://github.com/KhronosGroup/glTF-Blender-Exporter/blob/dd7a3dbd8f43a79d572e7c45f4215f770bb92a37/scripts/addons/io_scene_gltf2/gltf2_get.py#L287-L314
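Since the function only walks nested dicts, it can be exercised without Blender at all. A minimal, self-contained check (the sample glTF dict is hypothetical):

sample_gltf = {
    'extensions': {
        'KHR_lights': {
            'lights': [{'name': 'Sun'}, {'name': 'Lamp'}],
        }
    }
}

assert get_light_index(sample_gltf, 'Lamp') == 1
assert get_light_index(sample_gltf, 'Moon') == -1  # unknown light name
assert get_light_index({}, 'Sun') == -1            # no extensions present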
holoviz/holoviews
cc6b27f01710402fdfee2aeef1507425ca78c91f
holoviews/core/spaces.py
python
HoloMap.overlay
(self, dimensions=None, **kwargs)
Group by supplied dimension(s) and overlay each group Groups data by supplied dimension(s) overlaying the groups along the dimension(s). Args: dimensions: Dimension(s) of dimensions to group by Returns: NdOverlay object(s) with supplied dimensions
Group by supplied dimension(s) and overlay each group
[ "Group", "by", "supplied", "dimension", "(", "s", ")", "and", "overlay", "each", "group" ]
def overlay(self, dimensions=None, **kwargs): """Group by supplied dimension(s) and overlay each group Groups data by supplied dimension(s) overlaying the groups along the dimension(s). Args: dimensions: Dimension(s) of dimensions to group by Returns: NdOverlay object(s) with supplied dimensions """ dimensions = self._valid_dimensions(dimensions) if len(dimensions) == self.ndims: with item_check(False): return NdOverlay(self, **kwargs).reindex(dimensions) else: dims = [d for d in self.kdims if d not in dimensions] return self.groupby(dims, group_type=NdOverlay, **kwargs)
[ "def", "overlay", "(", "self", ",", "dimensions", "=", "None", ",", "*", "*", "kwargs", ")", ":", "dimensions", "=", "self", ".", "_valid_dimensions", "(", "dimensions", ")", "if", "len", "(", "dimensions", ")", "==", "self", ".", "ndims", ":", "with", "item_check", "(", "False", ")", ":", "return", "NdOverlay", "(", "self", ",", "*", "*", "kwargs", ")", ".", "reindex", "(", "dimensions", ")", "else", ":", "dims", "=", "[", "d", "for", "d", "in", "self", ".", "kdims", "if", "d", "not", "in", "dimensions", "]", "return", "self", ".", "groupby", "(", "dims", ",", "group_type", "=", "NdOverlay", ",", "*", "*", "kwargs", ")" ]
https://github.com/holoviz/holoviews/blob/cc6b27f01710402fdfee2aeef1507425ca78c91f/holoviews/core/spaces.py#L47-L65
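A hedged usage sketch: overlaying all key dimensions collapses the HoloMap into a single NdOverlay, while naming a subset keeps a HoloMap over the remaining dimensions:

import numpy as np
import holoviews as hv

curves = {(i, j): hv.Curve(np.arange(10) * (i + j + 1))
          for i in range(2) for j in range(2)}
hmap = hv.HoloMap(curves, kdims=['a', 'b'])

full = hmap.overlay()        # NdOverlay over both 'a' and 'b'
partial = hmap.overlay('a')  # HoloMap over 'b'; each value an NdOverlay over 'a'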
ruotianluo/Image_Captioning_AI_Challenger
ad230614efb8b964b4ced49a3622f4585d4de1cc
scripts/prepro_ngrams.py
python
cook_refs
(refs, n=4)
return [precook(ref, n) for ref in refs]
Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU needs to know about them. :param refs: list of string : reference sentences for some image :param n: int : number of ngrams for which (ngram) representation is calculated :return: result (list of dict)
Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU needs to know about them. :param refs: list of string : reference sentences for some image :param n: int : number of ngrams for which (ngram) representation is calculated :return: result (list of dict)
[ "Takes", "a", "list", "of", "reference", "sentences", "for", "a", "single", "segment", "and", "returns", "an", "object", "that", "encapsulates", "everything", "that", "BLEU", "needs", "to", "know", "about", "them", ".", ":", "param", "refs", ":", "list", "of", "string", ":", "reference", "sentences", "for", "some", "image", ":", "param", "n", ":", "int", ":", "number", "of", "ngrams", "for", "which", "(", "ngram", ")", "representation", "is", "calculated", ":", "return", ":", "result", "(", "list", "of", "dict", ")" ]
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average" '''Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU needs to know about them. :param refs: list of string : reference sentences for some image :param n: int : number of ngrams for which (ngram) representation is calculated :return: result (list of dict) ''' return [precook(ref, n) for ref in refs]
[ "def", "cook_refs", "(", "refs", ",", "n", "=", "4", ")", ":", "## lhuang: oracle will call with \"average\"", "return", "[", "precook", "(", "ref", ",", "n", ")", "for", "ref", "in", "refs", "]" ]
https://github.com/ruotianluo/Image_Captioning_AI_Challenger/blob/ad230614efb8b964b4ced49a3622f4585d4de1cc/scripts/prepro_ngrams.py#L49-L57
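`precook` (defined earlier in the same script) maps one sentence to a dict from each ngram tuple, up to length n, to its count; `cook_refs` simply applies it per reference. A hedged sketch of the expected shape:

refs = ["a cat sat on the mat", "a cat lay on the mat"]
counted = cook_refs(refs, n=2)

# counted is a list with one ngram-count dict per reference, roughly:
#   counted[0][('a',)] == 1
#   counted[0][('the', 'mat')] == 1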
mila-iqia/myia
56774a39579b4ec4123f44843ad4ca688acc859b
myia/monomorphize.py
python
_MonoRemapper.__init__
( self, graphs, inlines, manager, relation, graph_relation, clone_constants, engine, graph_repl, fv_function, )
Initialize the _MonoRemapper.
Initialize the _MonoRemapper.
[ "Initialize", "the", "_MonoRemapper", "." ]
def __init__( self, graphs, inlines, manager, relation, graph_relation, clone_constants, engine, graph_repl, fv_function, ): """Initialize the _MonoRemapper.""" super().__init__( graphs=graphs, inlines=inlines, manager=manager, relation=relation, graph_repl=graph_repl, graph_relation=graph_relation, clone_constants=clone_constants, set_abstract=False, ) self.engine = engine self.fv_function = fv_function
[ "def", "__init__", "(", "self", ",", "graphs", ",", "inlines", ",", "manager", ",", "relation", ",", "graph_relation", ",", "clone_constants", ",", "engine", ",", "graph_repl", ",", "fv_function", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "graphs", "=", "graphs", ",", "inlines", "=", "inlines", ",", "manager", "=", "manager", ",", "relation", "=", "relation", ",", "graph_repl", "=", "graph_repl", ",", "graph_relation", "=", "graph_relation", ",", "clone_constants", "=", "clone_constants", ",", "set_abstract", "=", "False", ",", ")", "self", ".", "engine", "=", "engine", "self", ".", "fv_function", "=", "fv_function" ]
https://github.com/mila-iqia/myia/blob/56774a39579b4ec4123f44843ad4ca688acc859b/myia/monomorphize.py#L739-L763
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/pyparsing/core.py
python
ParseExpression.ignore_whitespace
(self, recursive=True)
return self
Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on all contained expressions.
Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on all contained expressions.
[ "Extends", "ignore_whitespace", "defined", "in", "base", "class", "and", "also", "invokes", "leave_whitespace", "on", "all", "contained", "expressions", "." ]
def ignore_whitespace(self, recursive=True): """ Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on all contained expressions. """ super().ignore_whitespace(recursive) if recursive: self.exprs = [e.copy() for e in self.exprs] for e in self.exprs: e.ignore_whitespace(recursive) return self
[ "def", "ignore_whitespace", "(", "self", ",", "recursive", "=", "True", ")", ":", "super", "(", ")", ".", "ignore_whitespace", "(", "recursive", ")", "if", "recursive", ":", "self", ".", "exprs", "=", "[", "e", ".", "copy", "(", ")", "for", "e", "in", "self", ".", "exprs", "]", "for", "e", "in", "self", ".", "exprs", ":", "e", ".", "ignore_whitespace", "(", "recursive", ")", "return", "self" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/pyparsing/core.py#L3619-L3629
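A hedged sketch against pyparsing 3.x (the snake_case API this vendored copy provides): `leave_whitespace` makes inter-token whitespace significant, and `ignore_whitespace` recursively restores the default skipping; both mutate and return the expression itself:

import pyparsing as pp

greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + "!"
greet.leave_whitespace()   # whitespace between tokens becomes significant
greet.ignore_whitespace()  # recursively restore the default skipping

print(greet.parse_string("Hello , world !"))  # ['Hello', ',', 'world', '!']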
garnaat/kappa
a06a0047cc51f6a210beec4028a5fbe8ebcd5c53
kappa/restapi.py
python
RestApi.api_exists
(self)
return self._get_api()
[]
def api_exists(self): return self._get_api()
[ "def", "api_exists", "(", "self", ")", ":", "return", "self", ".", "_get_api", "(", ")" ]
https://github.com/garnaat/kappa/blob/a06a0047cc51f6a210beec4028a5fbe8ebcd5c53/kappa/restapi.py#L197-L198
tobegit3hub/deep_image_model
8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e
java_predict_client/src/main/proto/tensorflow/python/ops/gradients_impl.py
python
_MultiDeviceAddN
(tensor_list)
return math_ops.add_n(summands)
Adds tensors from potentially multiple devices.
Adds tensors from potentially multiple devices.
[ "Adds", "tensors", "from", "potentially", "multiple", "devices", "." ]
def _MultiDeviceAddN(tensor_list): """Adds tensors from potentially multiple devices.""" # Basic function structure comes from control_flow_ops.group(). # Sort tensors according to their devices. tensors_on_device = collections.defaultdict(lambda: []) for tensor in tensor_list: tensors_on_device[tensor.device].append(tensor) # For each device, add the tensors on that device first. # Then gather the partial sums from multiple devices. # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion. # E.g., aggregate per GPU, then per task, and so on. summands = [] def DeviceKey(dev): return "" if dev is None else dev for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey): tensors = tensors_on_device[dev] with ops.colocate_with(tensors[0].op, ignore_existing=True): summands.append(math_ops.add_n(tensors)) return math_ops.add_n(summands)
[ "def", "_MultiDeviceAddN", "(", "tensor_list", ")", ":", "# Basic function structure comes from control_flow_ops.group().", "# Sort tensors according to their devices.", "tensors_on_device", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "]", ")", "for", "tensor", "in", "tensor_list", ":", "tensors_on_device", "[", "tensor", ".", "device", "]", ".", "append", "(", "tensor", ")", "# For each device, add the tensors on that device first.", "# Then gather the partial sums from multiple devices.", "# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.", "# E.g., aggregate per GPU, then per task, and so on.", "summands", "=", "[", "]", "def", "DeviceKey", "(", "dev", ")", ":", "return", "\"\"", "if", "dev", "is", "None", "else", "dev", "for", "dev", "in", "sorted", "(", "six", ".", "iterkeys", "(", "tensors_on_device", ")", ",", "key", "=", "DeviceKey", ")", ":", "tensors", "=", "tensors_on_device", "[", "dev", "]", "with", "ops", ".", "colocate_with", "(", "tensors", "[", "0", "]", ".", "op", ",", "ignore_existing", "=", "True", ")", ":", "summands", ".", "append", "(", "math_ops", ".", "add_n", "(", "tensors", ")", ")", "return", "math_ops", ".", "add_n", "(", "summands", ")" ]
https://github.com/tobegit3hub/deep_image_model/blob/8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e/java_predict_client/src/main/proto/tensorflow/python/ops/gradients_impl.py#L638-L660
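The device bucketing is ordinary grouping. A plain-Python analogue (no TensorFlow) that sums per device first and then combines the partial sums, mirroring DeviceKey's treatment of None:

import collections

def multi_device_add(tagged_values):
    # tagged_values: iterable of (device, value) pairs
    per_device = collections.defaultdict(list)
    for device, value in tagged_values:
        per_device[device].append(value)
    order = sorted(per_device, key=lambda d: "" if d is None else d)
    partials = [sum(per_device[dev]) for dev in order]  # one partial sum per device
    return sum(partials)

print(multi_device_add([("gpu:0", 1.0), ("gpu:1", 2.0), ("gpu:0", 3.0)]))  # 6.0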
wikimedia/pywikibot
81a01ffaec7271bf5b4b170f85a80388420a4e78
scripts/welcome.py
python
WelcomeBot.define_sign
(self, force=False)
return self._random_signature
Setup signature.
Setup signature.
[ "Setup", "signature", "." ]
def define_sign(self, force=False) -> List[str]: """Setup signature.""" if hasattr(self, '_random_signature') and not force: return self._random_signature sign_text = '' creg = re.compile(r'^\* ?(.*?)$', re.M) if not globalvar.sign_file_name: sign_page_name = i18n.translate(self.site, random_sign) if not sign_page_name: self.show_status(Msg.WARN) pywikibot.output( "{} doesn't allow random signature, force disable." .format(self.site)) globalvar.random_sign = False return [] sign_page = pywikibot.Page(self.site, sign_page_name) if sign_page.exists(): pywikibot.output('Loading signature list...') sign_text = sign_page.get() else: pywikibot.output('The signature list page does not exist, ' 'random signature will be disabled.') globalvar.random_sign = False else: try: f = codecs.open( pywikibot.config.datafilepath(globalvar.sign_file_name), 'r', encoding=config.console_encoding) except LookupError: f = codecs.open(pywikibot.config.datafilepath( globalvar.sign_file_name), 'r', encoding='utf-8') except IOError: pywikibot.error('No fileName!') raise FilenameNotSet('No signature filename specified.') sign_text = f.read() f.close() self._random_signature = creg.findall(sign_text) return self._random_signature
[ "def", "define_sign", "(", "self", ",", "force", "=", "False", ")", "->", "List", "[", "str", "]", ":", "if", "hasattr", "(", "self", ",", "'_random_signature'", ")", "and", "not", "force", ":", "return", "self", ".", "_random_signature", "sign_text", "=", "''", "creg", "=", "re", ".", "compile", "(", "r'^\\* ?(.*?)$'", ",", "re", ".", "M", ")", "if", "not", "globalvar", ".", "sign_file_name", ":", "sign_page_name", "=", "i18n", ".", "translate", "(", "self", ".", "site", ",", "random_sign", ")", "if", "not", "sign_page_name", ":", "self", ".", "show_status", "(", "Msg", ".", "WARN", ")", "pywikibot", ".", "output", "(", "\"{} doesn't allow random signature, force disable.\"", ".", "format", "(", "self", ".", "site", ")", ")", "globalvar", ".", "random_sign", "=", "False", "return", "[", "]", "sign_page", "=", "pywikibot", ".", "Page", "(", "self", ".", "site", ",", "sign_page_name", ")", "if", "sign_page", ".", "exists", "(", ")", ":", "pywikibot", ".", "output", "(", "'Loading signature list...'", ")", "sign_text", "=", "sign_page", ".", "get", "(", ")", "else", ":", "pywikibot", ".", "output", "(", "'The signature list page does not exist, '", "'random signature will be disabled.'", ")", "globalvar", ".", "random_sign", "=", "False", "else", ":", "try", ":", "f", "=", "codecs", ".", "open", "(", "pywikibot", ".", "config", ".", "datafilepath", "(", "globalvar", ".", "sign_file_name", ")", ",", "'r'", ",", "encoding", "=", "config", ".", "console_encoding", ")", "except", "LookupError", ":", "f", "=", "codecs", ".", "open", "(", "pywikibot", ".", "config", ".", "datafilepath", "(", "globalvar", ".", "sign_file_name", ")", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "except", "IOError", ":", "pywikibot", ".", "error", "(", "'No fileName!'", ")", "raise", "FilenameNotSet", "(", "'No signature filename specified.'", ")", "sign_text", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "self", ".", "_random_signature", "=", "creg", ".", "findall", "(", "sign_text", ")", "return", "self", ".", "_random_signature" ]
https://github.com/wikimedia/pywikibot/blob/81a01ffaec7271bf5b4b170f85a80388420a4e78/scripts/welcome.py#L727-L768
twke18/Adaptive_Affinity_Fields
8488aa6ad16022ab4b89fb8626386997559cb951
pyscripts/train/train_affinity_mgpu.py
python
save
(saver, sess, logdir, step)
Save the trained weights. Args: saver: TensorFlow Saver object. sess: TensorFlow session. logdir: path to the snapshots directory. step: current training step.
Save the trained weights. Args: saver: TensorFlow Saver object. sess: TensorFlow session. logdir: path to the snapshots directory. step: current training step.
[ "Save", "the", "trained", "weights", ".", "Args", ":", "saver", ":", "TensorFlow", "Saver", "object", ".", "sess", ":", "TensorFlow", "session", ".", "logdir", ":", "path", "to", "the", "snapshots", "directory", ".", "step", ":", "current", "training", "step", "." ]
def save(saver, sess, logdir, step): """Save the trained weights. Args: saver: TensorFlow Saver object. sess: TensorFlow session. logdir: path to the snapshots directory. step: current training step. """ model_name = 'model.ckpt' checkpoint_path = os.path.join(logdir, model_name) if not os.path.exists(logdir): os.makedirs(logdir) saver.save(sess, checkpoint_path, global_step=step) print('The checkpoint has been created.')
[ "def", "save", "(", "saver", ",", "sess", ",", "logdir", ",", "step", ")", ":", "model_name", "=", "'model.ckpt'", "checkpoint_path", "=", "os", ".", "path", ".", "join", "(", "logdir", ",", "model_name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "logdir", ")", ":", "os", ".", "makedirs", "(", "logdir", ")", "saver", ".", "save", "(", "sess", ",", "checkpoint_path", ",", "global_step", "=", "step", ")", "print", "(", "'The checkpoint has been created.'", ")" ]
https://github.com/twke18/Adaptive_Affinity_Fields/blob/8488aa6ad16022ab4b89fb8626386997559cb951/pyscripts/train/train_affinity_mgpu.py#L89-L104
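A hedged TF1-style usage sketch (graph-mode API assumed; the snapshot directory is hypothetical):

import tensorflow as tf  # TF1 graph-mode API assumed

w = tf.Variable(tf.zeros([3]), name='w')
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    save(saver, sess, './snapshots', step=1000)
    # writes ./snapshots/model.ckpt-1000.* and prints the confirmation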
fzlee/alipay
0f8eab30fea7adb43284182cc6bcac08f51b2e08
alipay/__init__.py
python
DCAliPay.load_alipay_public_key_string
(self)
return OpenSSL.crypto.dump_publickey( OpenSSL.crypto.FILETYPE_PEM, cert.get_pubkey() ).decode("utf-8")
[]
def load_alipay_public_key_string(self): cert = OpenSSL.crypto.load_certificate( OpenSSL.crypto.FILETYPE_PEM, self._alipay_public_key_cert_string ) return OpenSSL.crypto.dump_publickey( OpenSSL.crypto.FILETYPE_PEM, cert.get_pubkey() ).decode("utf-8")
[ "def", "load_alipay_public_key_string", "(", "self", ")", ":", "cert", "=", "OpenSSL", ".", "crypto", ".", "load_certificate", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "self", ".", "_alipay_public_key_cert_string", ")", "return", "OpenSSL", ".", "crypto", ".", "dump_publickey", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "cert", ".", "get_pubkey", "(", ")", ")", ".", "decode", "(", "\"utf-8\"", ")" ]
https://github.com/fzlee/alipay/blob/0f8eab30fea7adb43284182cc6bcac08f51b2e08/alipay/__init__.py#L691-L697
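The same two pyOpenSSL calls work standalone on any PEM certificate; a hedged sketch with a hypothetical certificate path:

import OpenSSL

with open("app_public_cert.pem", "rb") as f:  # hypothetical path
    cert_pem = f.read()

cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
public_key_pem = OpenSSL.crypto.dump_publickey(
    OpenSSL.crypto.FILETYPE_PEM, cert.get_pubkey()
).decode("utf-8")
print(public_key_pem.splitlines()[0])  # -----BEGIN PUBLIC KEY-----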
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/acmeda/sensor.py
python
AcmedaBattery.name
(self)
return f"{super().name} Battery"
Return the name of roller.
Return the name of roller.
[ "Return", "the", "name", "of", "roller", "." ]
def name(self): """Return the name of roller.""" return f"{super().name} Battery"
[ "def", "name", "(", "self", ")", ":", "return", "f\"{super().name} Battery\"" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/acmeda/sensor.py#L48-L50
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/plugins/attack/db/sqlmap/lib/utils/hash.py
python
vbulletin_passwd
(password, salt, **kwargs)
return "%s:%s" % (md5("%s%s" % (md5(password).hexdigest(), salt)).hexdigest(), salt)
Reference: https://stackoverflow.com/a/2202810 >>> vbulletin_passwd(password='testpass', salt='salt') '85c4d8ea77ebef2236fb7e9d24ba9482:salt'
Reference: https://stackoverflow.com/a/2202810
[ "Reference", ":", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "2202810" ]
def vbulletin_passwd(password, salt, **kwargs): """ Reference: https://stackoverflow.com/a/2202810 >>> vbulletin_passwd(password='testpass', salt='salt') '85c4d8ea77ebef2236fb7e9d24ba9482:salt' """ return "%s:%s" % (md5("%s%s" % (md5(password).hexdigest(), salt)).hexdigest(), salt)
[ "def", "vbulletin_passwd", "(", "password", ",", "salt", ",", "*", "*", "kwargs", ")", ":", "return", "\"%s:%s\"", "%", "(", "md5", "(", "\"%s%s\"", "%", "(", "md5", "(", "password", ")", ".", "hexdigest", "(", ")", ",", "salt", ")", ")", ".", "hexdigest", "(", ")", ",", "salt", ")" ]
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/db/sqlmap/lib/utils/hash.py#L449-L457
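The scheme is md5(md5(password) + salt). A standalone hashlib equivalent (sqlmap's own md5 wrapper handles string encoding internally) that should reproduce the doctest value:

import hashlib

def vbulletin_passwd_stdlib(password, salt):
    inner = hashlib.md5(password.encode()).hexdigest()
    outer = hashlib.md5((inner + salt).encode()).hexdigest()
    return "%s:%s" % (outer, salt)

print(vbulletin_passwd_stdlib("testpass", "salt"))
# expected: 85c4d8ea77ebef2236fb7e9d24ba9482:salt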
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/util/sample_weight.py
python
SampleWeight.transform_weighted_instance
(self, data_instances, weight_loc)
return SampleWeight.assign_sample_weight(data_instances, self.class_weight_dict, weight_loc, self.normalize)
[]
def transform_weighted_instance(self, data_instances, weight_loc): if self.class_weight and self.class_weight == 'balanced': self.class_weight_dict = SampleWeight.get_class_weight(data_instances) else: if self.class_weight_dict is None: self.class_weight_dict = self.class_weight return SampleWeight.assign_sample_weight(data_instances, self.class_weight_dict, weight_loc, self.normalize)
[ "def", "transform_weighted_instance", "(", "self", ",", "data_instances", ",", "weight_loc", ")", ":", "if", "self", ".", "class_weight", "and", "self", ".", "class_weight", "==", "'balanced'", ":", "self", ".", "class_weight_dict", "=", "SampleWeight", ".", "get_class_weight", "(", "data_instances", ")", "else", ":", "if", "self", ".", "class_weight_dict", "is", "None", ":", "self", ".", "class_weight_dict", "=", "self", ".", "class_weight", "return", "SampleWeight", ".", "assign_sample_weight", "(", "data_instances", ",", "self", ".", "class_weight_dict", ",", "weight_loc", ",", "self", ".", "normalize", ")" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/util/sample_weight.py#L102-L108
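When class_weight == 'balanced', get_class_weight presumably derives per-class weights from label frequencies; the common heuristic (used by scikit-learn, and a reasonable stand-in here, though FATE's exact formula may differ) weights each class by n_samples / (n_classes * class_count):

from collections import Counter

labels = [0, 0, 0, 1]
counts = Counter(labels)
n, k = len(labels), len(counts)
class_weight = {c: n / (k * cnt) for c, cnt in counts.items()}
print(class_weight)  # {0: 0.666..., 1: 2.0}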
MichiganCOG/ViP
74776f2575bd5339ba39c784bbda4f04cc859add
train.py
python
train
(**args)
Train the selected model Args: rerun (Int): Integer indicating number of repetitions for the selected experiment seed (Int): Integer indicating set seed for random state save_dir (String): Top level directory to generate results folder model (String): Name of selected model dataset (String): Name of selected dataset exp (String): Name of experiment debug (Int): Debug state to avoid saving variables load_type (String): Keyword indicating the training split to use ('train' or 'train_val') pretrained (Int/String): Int/String indicating loading of random, pretrained or saved weights opt (String): Name of the optimizer to use ('sgd' or 'adam') lr (Float): Learning rate momentum (Float): Momentum in optimizer weight_decay (Float): Weight_decay value final_shape ([Int, Int]): Shape of data when passed into network Return: None
Train the selected model Args: rerun (Int): Integer indicating number of repetitions for the selected experiment seed (Int): Integer indicating set seed for random state save_dir (String): Top level directory to generate results folder model (String): Name of selected model dataset (String): Name of selected dataset exp (String): Name of experiment debug (Int): Debug state to avoid saving variables load_type (String): Keyword indicating the training split to use ('train' or 'train_val') pretrained (Int/String): Int/String indicating loading of random, pretrained or saved weights opt (String): Name of the optimizer to use ('sgd' or 'adam') lr (Float): Learning rate momentum (Float): Momentum in optimizer weight_decay (Float): Weight_decay value final_shape ([Int, Int]): Shape of data when passed into network Return: None
[ "Evaluate", "selected", "model", "Args", ":", "rerun", "(", "Int", ")", ":", "Integer", "indicating", "number", "of", "repetitions", "for", "the", "select", "experiment", "seed", "(", "Int", ")", ":", "Integer", "indicating", "set", "seed", "for", "random", "state", "save_dir", "(", "String", ")", ":", "Top", "level", "directory", "to", "generate", "results", "folder", "model", "(", "String", ")", ":", "Name", "of", "selected", "model", "dataset", "(", "String", ")", ":", "Name", "of", "selected", "dataset", "exp", "(", "String", ")", ":", "Name", "of", "experiment", "debug", "(", "Int", ")", ":", "Debug", "state", "to", "avoid", "saving", "variables", "load_type", "(", "String", ")", ":", "Keyword", "indicator", "to", "evaluate", "the", "testing", "or", "validation", "set", "pretrained", "(", "Int", "/", "String", ")", ":", "Int", "/", "String", "indicating", "loading", "of", "random", "pretrained", "or", "saved", "weights", "opt", "(", "String", ")", ":", "Int", "/", "String", "indicating", "loading", "of", "random", "pretrained", "or", "saved", "weights", "lr", "(", "Float", ")", ":", "Learning", "rate", "momentum", "(", "Float", ")", ":", "Momentum", "in", "optimizer", "weight_decay", "(", "Float", ")", ":", "Weight_decay", "value", "final_shape", "(", "[", "Int", "Int", "]", ")", ":", "Shape", "of", "data", "when", "passed", "into", "network", "Return", ":", "None" ]
def train(**args): """ Evaluate selected model Args: rerun (Int): Integer indicating number of repetitions for the select experiment seed (Int): Integer indicating set seed for random state save_dir (String): Top level directory to generate results folder model (String): Name of selected model dataset (String): Name of selected dataset exp (String): Name of experiment debug (Int): Debug state to avoid saving variables load_type (String): Keyword indicator to evaluate the testing or validation set pretrained (Int/String): Int/String indicating loading of random, pretrained or saved weights opt (String): Int/String indicating loading of random, pretrained or saved weights lr (Float): Learning rate momentum (Float): Momentum in optimizer weight_decay (Float): Weight_decay value final_shape ([Int, Int]): Shape of data when passed into network Return: None """ print("\n############################################################################\n") print("Experimental Setup: ", args) print("\n############################################################################\n") for total_iteration in range(args['rerun']): # Generate Results Directory d = datetime.datetime.today() date = d.strftime('%Y%m%d-%H%M%S') result_dir = os.path.join(args['save_dir'], args['model'], '_'.join((args['dataset'],args['exp'],date))) log_dir = os.path.join(result_dir, 'logs') save_dir = os.path.join(result_dir, 'checkpoints') if not args['debug']: os.makedirs(result_dir, exist_ok=True) os.makedirs(log_dir, exist_ok=True) os.makedirs(save_dir, exist_ok=True) # Save copy of config file with open(os.path.join(result_dir, 'config.yaml'),'w') as outfile: yaml.dump(args, outfile, default_flow_style=False) # Tensorboard Element writer = SummaryWriter(log_dir) # Check if GPU is available (CUDA) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Load Network model = create_model_object(**args).to(device) # Load Data loader = data_loader(model_obj=model, **args) if args['load_type'] == 'train': train_loader = loader['train'] valid_loader = loader['train'] # Run accuracy on train data if only `train` selected elif args['load_type'] == 'train_val': train_loader = loader['train'] valid_loader = loader['valid'] else: sys.exit('Invalid environment selection for training, exiting') # END IF # Training Setup params = [p for p in model.parameters() if p.requires_grad] if args['opt'] == 'sgd': optimizer = optim.SGD(params, lr=args['lr'], momentum=args['momentum'], weight_decay=args['weight_decay']) elif args['opt'] == 'adam': optimizer = optim.Adam(params, lr=args['lr'], weight_decay=args['weight_decay']) else: sys.exit('Unsupported optimizer selected. 
Exiting') # END IF scheduler = MultiStepLR(optimizer, milestones=args['milestones'], gamma=args['gamma']) if isinstance(args['pretrained'], str): ckpt = load_checkpoint(args['pretrained']) model.load_state_dict(ckpt) if args['resume']: start_epoch = load_checkpoint(args['pretrained'], key_name='epoch') + 1 optimizer.load_state_dict(load_checkpoint(args['pretrained'], key_name='optimizer')) scheduler.step(epoch=start_epoch) else: start_epoch = 0 # END IF else: start_epoch = 0 # END IF model_loss = Losses(device=device, **args) best_val_acc = 0.0 ############################################################################################################################################################################ # Start: Training Loop for epoch in range(start_epoch, args['epoch']): running_loss = 0.0 print('Epoch: ', epoch) # Setup Model To Train model.train() # Start: Epoch for step, data in enumerate(train_loader): if step% args['pseudo_batch_loop'] == 0: loss = 0.0 running_batch = 0 optimizer.zero_grad() # END IF x_input = data['data'] annotations = data['annots'] if isinstance(x_input, torch.Tensor): mini_batch_size = x_input.shape[0] outputs = model(x_input.to(device)) assert args['final_shape']==list(x_input.size()[-2:]), "Input to model does not match final_shape argument" else: #Model takes several inputs in forward function mini_batch_size = x_input[0].shape[0] #Assuming the first element contains the true data input for i, item in enumerate(x_input): if isinstance(item, torch.Tensor): x_input[i] = item.to(device) outputs = model(*x_input) loss = model_loss.loss(outputs, annotations) loss = loss * mini_batch_size loss.backward() running_loss += loss.item() running_batch += mini_batch_size if np.isnan(running_loss): import pdb; pdb.set_trace() # END IF if not args['debug']: # Add Learning Rate Element for param_group in optimizer.param_groups: writer.add_scalar(args['dataset']+'/'+args['model']+'/learning_rate', param_group['lr'], epoch*len(train_loader) + step) # END FOR # Add Loss Element writer.add_scalar(args['dataset']+'/'+args['model']+'/minibatch_loss', loss.item()/mini_batch_size, epoch*len(train_loader) + step) # END IF if ((epoch*len(train_loader) + step+1) % 100 == 0): print('Epoch: {}/{}, step: {}/{} | train loss: {:.4f}'.format(epoch, args['epoch'], step+1, len(train_loader), running_loss/float(step+1)/mini_batch_size)) # END IF if (epoch * len(train_loader) + (step+1)) % args['pseudo_batch_loop'] == 0 and step > 0: # Apply large mini-batch normalization for param in model.parameters(): if param.requires_grad: param.grad *= 1./float(running_batch) # END FOR # Apply gradient clipping if ("grad_max_norm" in args) and float(args['grad_max_norm'] > 0): nn.utils.clip_grad_norm_(model.parameters(),float(args['grad_max_norm'])) optimizer.step() running_batch = 0 # END IF # END FOR: Epoch scheduler.step(epoch=epoch) print('Schedulers lr: %f', scheduler.get_lr()[0]) if not args['debug']: # Save Current Model save_path = os.path.join(save_dir, args['dataset']+'_epoch'+str(epoch)+'.pkl') save_checkpoint(epoch, step, model, optimizer, save_path) # END IF: Debug ## START FOR: Validation Accuracy running_acc = [] running_acc = valid(valid_loader, running_acc, model, device) if not args['debug']: writer.add_scalar(args['dataset']+'/'+args['model']+'/validation_accuracy', 100.*running_acc[-1], epoch*len(train_loader) + step) print('Accuracy of the network on the validation set: %f %%\n' % (100.*running_acc[-1])) # Save Best Validation Accuracy Model Separately if best_val_acc < 
running_acc[-1]: best_val_acc = running_acc[-1] if not args['debug']: # Save Current Model save_path = os.path.join(save_dir, args['dataset']+'_best_model.pkl') save_checkpoint(epoch, step, model, optimizer, save_path) # END IF # END IF # END FOR: Training Loop ############################################################################################################################################################################ if not args['debug']: # Close Tensorboard Element writer.close()
[ "def", "train", "(", "*", "*", "args", ")", ":", "print", "(", "\"\\n############################################################################\\n\"", ")", "print", "(", "\"Experimental Setup: \"", ",", "args", ")", "print", "(", "\"\\n############################################################################\\n\"", ")", "for", "total_iteration", "in", "range", "(", "args", "[", "'rerun'", "]", ")", ":", "# Generate Results Directory", "d", "=", "datetime", ".", "datetime", ".", "today", "(", ")", "date", "=", "d", ".", "strftime", "(", "'%Y%m%d-%H%M%S'", ")", "result_dir", "=", "os", ".", "path", ".", "join", "(", "args", "[", "'save_dir'", "]", ",", "args", "[", "'model'", "]", ",", "'_'", ".", "join", "(", "(", "args", "[", "'dataset'", "]", ",", "args", "[", "'exp'", "]", ",", "date", ")", ")", ")", "log_dir", "=", "os", ".", "path", ".", "join", "(", "result_dir", ",", "'logs'", ")", "save_dir", "=", "os", ".", "path", ".", "join", "(", "result_dir", ",", "'checkpoints'", ")", "if", "not", "args", "[", "'debug'", "]", ":", "os", ".", "makedirs", "(", "result_dir", ",", "exist_ok", "=", "True", ")", "os", ".", "makedirs", "(", "log_dir", ",", "exist_ok", "=", "True", ")", "os", ".", "makedirs", "(", "save_dir", ",", "exist_ok", "=", "True", ")", "# Save copy of config file", "with", "open", "(", "os", ".", "path", ".", "join", "(", "result_dir", ",", "'config.yaml'", ")", ",", "'w'", ")", "as", "outfile", ":", "yaml", ".", "dump", "(", "args", ",", "outfile", ",", "default_flow_style", "=", "False", ")", "# Tensorboard Element", "writer", "=", "SummaryWriter", "(", "log_dir", ")", "# Check if GPU is available (CUDA)", "device", "=", "torch", ".", "device", "(", "\"cuda:0\"", "if", "torch", ".", "cuda", ".", "is_available", "(", ")", "else", "\"cpu\"", ")", "# Load Network", "model", "=", "create_model_object", "(", "*", "*", "args", ")", ".", "to", "(", "device", ")", "# Load Data", "loader", "=", "data_loader", "(", "model_obj", "=", "model", ",", "*", "*", "args", ")", "if", "args", "[", "'load_type'", "]", "==", "'train'", ":", "train_loader", "=", "loader", "[", "'train'", "]", "valid_loader", "=", "loader", "[", "'train'", "]", "# Run accuracy on train data if only `train` selected", "elif", "args", "[", "'load_type'", "]", "==", "'train_val'", ":", "train_loader", "=", "loader", "[", "'train'", "]", "valid_loader", "=", "loader", "[", "'valid'", "]", "else", ":", "sys", ".", "exit", "(", "'Invalid environment selection for training, exiting'", ")", "# END IF", "# Training Setup", "params", "=", "[", "p", "for", "p", "in", "model", ".", "parameters", "(", ")", "if", "p", ".", "requires_grad", "]", "if", "args", "[", "'opt'", "]", "==", "'sgd'", ":", "optimizer", "=", "optim", ".", "SGD", "(", "params", ",", "lr", "=", "args", "[", "'lr'", "]", ",", "momentum", "=", "args", "[", "'momentum'", "]", ",", "weight_decay", "=", "args", "[", "'weight_decay'", "]", ")", "elif", "args", "[", "'opt'", "]", "==", "'adam'", ":", "optimizer", "=", "optim", ".", "Adam", "(", "params", ",", "lr", "=", "args", "[", "'lr'", "]", ",", "weight_decay", "=", "args", "[", "'weight_decay'", "]", ")", "else", ":", "sys", ".", "exit", "(", "'Unsupported optimizer selected. 
Exiting'", ")", "# END IF", "scheduler", "=", "MultiStepLR", "(", "optimizer", ",", "milestones", "=", "args", "[", "'milestones'", "]", ",", "gamma", "=", "args", "[", "'gamma'", "]", ")", "if", "isinstance", "(", "args", "[", "'pretrained'", "]", ",", "str", ")", ":", "ckpt", "=", "load_checkpoint", "(", "args", "[", "'pretrained'", "]", ")", "model", ".", "load_state_dict", "(", "ckpt", ")", "if", "args", "[", "'resume'", "]", ":", "start_epoch", "=", "load_checkpoint", "(", "args", "[", "'pretrained'", "]", ",", "key_name", "=", "'epoch'", ")", "+", "1", "optimizer", ".", "load_state_dict", "(", "load_checkpoint", "(", "args", "[", "'pretrained'", "]", ",", "key_name", "=", "'optimizer'", ")", ")", "scheduler", ".", "step", "(", "epoch", "=", "start_epoch", ")", "else", ":", "start_epoch", "=", "0", "# END IF ", "else", ":", "start_epoch", "=", "0", "# END IF", "model_loss", "=", "Losses", "(", "device", "=", "device", ",", "*", "*", "args", ")", "best_val_acc", "=", "0.0", "############################################################################################################################################################################", "# Start: Training Loop", "for", "epoch", "in", "range", "(", "start_epoch", ",", "args", "[", "'epoch'", "]", ")", ":", "running_loss", "=", "0.0", "print", "(", "'Epoch: '", ",", "epoch", ")", "# Setup Model To Train ", "model", ".", "train", "(", ")", "# Start: Epoch", "for", "step", ",", "data", "in", "enumerate", "(", "train_loader", ")", ":", "if", "step", "%", "args", "[", "'pseudo_batch_loop'", "]", "==", "0", ":", "loss", "=", "0.0", "running_batch", "=", "0", "optimizer", ".", "zero_grad", "(", ")", "# END IF", "x_input", "=", "data", "[", "'data'", "]", "annotations", "=", "data", "[", "'annots'", "]", "if", "isinstance", "(", "x_input", ",", "torch", ".", "Tensor", ")", ":", "mini_batch_size", "=", "x_input", ".", "shape", "[", "0", "]", "outputs", "=", "model", "(", "x_input", ".", "to", "(", "device", ")", ")", "assert", "args", "[", "'final_shape'", "]", "==", "list", "(", "x_input", ".", "size", "(", ")", "[", "-", "2", ":", "]", ")", ",", "\"Input to model does not match final_shape argument\"", "else", ":", "#Model takes several inputs in forward function ", "mini_batch_size", "=", "x_input", "[", "0", "]", ".", "shape", "[", "0", "]", "#Assuming the first element contains the true data input ", "for", "i", ",", "item", "in", "enumerate", "(", "x_input", ")", ":", "if", "isinstance", "(", "item", ",", "torch", ".", "Tensor", ")", ":", "x_input", "[", "i", "]", "=", "item", ".", "to", "(", "device", ")", "outputs", "=", "model", "(", "*", "x_input", ")", "loss", "=", "model_loss", ".", "loss", "(", "outputs", ",", "annotations", ")", "loss", "=", "loss", "*", "mini_batch_size", "loss", ".", "backward", "(", ")", "running_loss", "+=", "loss", ".", "item", "(", ")", "running_batch", "+=", "mini_batch_size", "if", "np", ".", "isnan", "(", "running_loss", ")", ":", "import", "pdb", "pdb", ".", "set_trace", "(", ")", "# END IF", "if", "not", "args", "[", "'debug'", "]", ":", "# Add Learning Rate Element", "for", "param_group", "in", "optimizer", ".", "param_groups", ":", "writer", ".", "add_scalar", "(", "args", "[", "'dataset'", "]", "+", "'/'", "+", "args", "[", "'model'", "]", "+", "'/learning_rate'", ",", "param_group", "[", "'lr'", "]", ",", "epoch", "*", "len", "(", "train_loader", ")", "+", "step", ")", "# END FOR", "# Add Loss Element", "writer", ".", "add_scalar", "(", "args", "[", "'dataset'", "]", "+", "'/'", "+", 
"args", "[", "'model'", "]", "+", "'/minibatch_loss'", ",", "loss", ".", "item", "(", ")", "/", "mini_batch_size", ",", "epoch", "*", "len", "(", "train_loader", ")", "+", "step", ")", "# END IF", "if", "(", "(", "epoch", "*", "len", "(", "train_loader", ")", "+", "step", "+", "1", ")", "%", "100", "==", "0", ")", ":", "print", "(", "'Epoch: {}/{}, step: {}/{} | train loss: {:.4f}'", ".", "format", "(", "epoch", ",", "args", "[", "'epoch'", "]", ",", "step", "+", "1", ",", "len", "(", "train_loader", ")", ",", "running_loss", "/", "float", "(", "step", "+", "1", ")", "/", "mini_batch_size", ")", ")", "# END IF", "if", "(", "epoch", "*", "len", "(", "train_loader", ")", "+", "(", "step", "+", "1", ")", ")", "%", "args", "[", "'pseudo_batch_loop'", "]", "==", "0", "and", "step", ">", "0", ":", "# Apply large mini-batch normalization", "for", "param", "in", "model", ".", "parameters", "(", ")", ":", "if", "param", ".", "requires_grad", ":", "param", ".", "grad", "*=", "1.", "/", "float", "(", "running_batch", ")", "# END FOR", "# Apply gradient clipping", "if", "(", "\"grad_max_norm\"", "in", "args", ")", "and", "float", "(", "args", "[", "'grad_max_norm'", "]", ">", "0", ")", ":", "nn", ".", "utils", ".", "clip_grad_norm_", "(", "model", ".", "parameters", "(", ")", ",", "float", "(", "args", "[", "'grad_max_norm'", "]", ")", ")", "optimizer", ".", "step", "(", ")", "running_batch", "=", "0", "# END IF", "# END FOR: Epoch", "scheduler", ".", "step", "(", "epoch", "=", "epoch", ")", "print", "(", "'Schedulers lr: %f'", ",", "scheduler", ".", "get_lr", "(", ")", "[", "0", "]", ")", "if", "not", "args", "[", "'debug'", "]", ":", "# Save Current Model", "save_path", "=", "os", ".", "path", ".", "join", "(", "save_dir", ",", "args", "[", "'dataset'", "]", "+", "'_epoch'", "+", "str", "(", "epoch", ")", "+", "'.pkl'", ")", "save_checkpoint", "(", "epoch", ",", "step", ",", "model", ",", "optimizer", ",", "save_path", ")", "# END IF: Debug", "## START FOR: Validation Accuracy", "running_acc", "=", "[", "]", "running_acc", "=", "valid", "(", "valid_loader", ",", "running_acc", ",", "model", ",", "device", ")", "if", "not", "args", "[", "'debug'", "]", ":", "writer", ".", "add_scalar", "(", "args", "[", "'dataset'", "]", "+", "'/'", "+", "args", "[", "'model'", "]", "+", "'/validation_accuracy'", ",", "100.", "*", "running_acc", "[", "-", "1", "]", ",", "epoch", "*", "len", "(", "train_loader", ")", "+", "step", ")", "print", "(", "'Accuracy of the network on the validation set: %f %%\\n'", "%", "(", "100.", "*", "running_acc", "[", "-", "1", "]", ")", ")", "# Save Best Validation Accuracy Model Separately", "if", "best_val_acc", "<", "running_acc", "[", "-", "1", "]", ":", "best_val_acc", "=", "running_acc", "[", "-", "1", "]", "if", "not", "args", "[", "'debug'", "]", ":", "# Save Current Model", "save_path", "=", "os", ".", "path", ".", "join", "(", "save_dir", ",", "args", "[", "'dataset'", "]", "+", "'_best_model.pkl'", ")", "save_checkpoint", "(", "epoch", ",", "step", ",", "model", ",", "optimizer", ",", "save_path", ")", "# END IF", "# END IF", "# END FOR: Training Loop", "############################################################################################################################################################################", "if", "not", "args", "[", "'debug'", "]", ":", "# Close Tensorboard Element", "writer", ".", "close", "(", ")" ]
https://github.com/MichiganCOG/ViP/blob/74776f2575bd5339ba39c784bbda4f04cc859add/train.py#L21-L252
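The pseudo_batch_loop logic above is gradient accumulation: zero the gradients every N steps, backpropagate each mini-batch, rescale, and only then step the optimizer. A self-contained PyTorch sketch of the same pattern (toy model and synthetic loader; this version scales the loss up front instead of rescaling gradients afterwards, which is equivalent when every mini-batch has the same size):

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()
loader = [(torch.randn(8, 10), torch.randint(0, 2, (8,))) for _ in range(12)]
accum_steps = 4  # plays the role of args['pseudo_batch_loop']

for step, (x, y) in enumerate(loader):
    if step % accum_steps == 0:
        optimizer.zero_grad()
    loss = criterion(model(x), y) / accum_steps  # scale so grads average out
    loss.backward()                              # gradients accumulate
    if (step + 1) % accum_steps == 0:
        optimizer.step()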
telegraphic/hickle
a5aac2db4e236d90990e3387c5822b466e8e84fa
hickle/loaders/load_numpy.py
python
create_np_scalar_dataset
(py_obj, h_group, name, **kwargs)
return d,()
dumps a numpy scalar to an h5py file Parameters ---------- py_obj (numpy.scalar): python object to dump; should be a numpy scalar, e.g. numpy.float16(1) h_group (h5.File.group): group to dump data into. name (str): the name of the resulting dataset kwargs (dict): keyword arguments to be passed to create_dataset function Returns ------- tuple containing the h5py.Dataset and an empty tuple of subitems
dumps a numpy scalar to an h5py file
[ "dumps", "a", "numpy", "scalar", "to", "an", "h5py", "file" ]
def create_np_scalar_dataset(py_obj, h_group, name, **kwargs): """ dumps an numpy.dtype object to h5py file Parameters ---------- py_obj (numpy.scalar): python object to dump; should be a numpy scalar, e.g. numpy.float16(1) h_group (h5.File.group): group to dump data into. name (str): the name of the resulting dataset kwargs (dict): keyword arguments to be passed to create_dataset function Returns ------- tuple containing h5py.Dataset and empty list of subitems """ d = h_group.create_dataset(name, data=py_obj, **no_compression(kwargs)) d.attrs["np_dtype"] = py_obj.dtype.str.encode("ascii") return d,()
[ "def", "create_np_scalar_dataset", "(", "py_obj", ",", "h_group", ",", "name", ",", "*", "*", "kwargs", ")", ":", "d", "=", "h_group", ".", "create_dataset", "(", "name", ",", "data", "=", "py_obj", ",", "*", "*", "no_compression", "(", "kwargs", ")", ")", "d", ".", "attrs", "[", "\"np_dtype\"", "]", "=", "py_obj", ".", "dtype", ".", "str", ".", "encode", "(", "\"ascii\"", ")", "return", "d", ",", "(", ")" ]
https://github.com/telegraphic/hickle/blob/a5aac2db4e236d90990e3387c5822b466e8e84fa/hickle/loaders/load_numpy.py#L21-L46
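A hedged round-trip sketch, assuming the loader module imports cleanly outside hickle's dispatch machinery; the file name is hypothetical:

import h5py
import numpy as np
from hickle.loaders.load_numpy import create_np_scalar_dataset

with h5py.File("scalar_demo.h5", "w") as f:
    d, subitems = create_np_scalar_dataset(np.float16(1), f, "x")
    print(d[()], d.attrs["np_dtype"], subitems)  # 1.0 b'<f2' ()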
avalonstrel/GatedConvolution_pytorch
0a49013a70e77cc484ab45a5da535c2ac003b252
data/base_dataset.py
python
NoriBaseDataset.__len__
(self)
return len(self.nori_list)
[]
def __len__(self): return len(self.nori_list)
[ "def", "__len__", "(", "self", ")", ":", "return", "len", "(", "self", ".", "nori_list", ")" ]
https://github.com/avalonstrel/GatedConvolution_pytorch/blob/0a49013a70e77cc484ab45a5da535c2ac003b252/data/base_dataset.py#L70-L71
tobegit3hub/deep_image_model
8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e
java_predict_client/src/main/proto/tensorflow/contrib/distributions/python/ops/normal.py
python
_kl_normal_normal
(n_a, n_b, name=None)
Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal. Args: n_a: instance of a Normal distribution object. n_b: instance of a Normal distribution object. name: (optional) Name to use for created operations. default is "kl_normal_normal". Returns: Batchwise KL(n_a || n_b)
Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
[ "Calculate", "the", "batched", "KL", "divergence", "KL", "(", "n_a", "||", "n_b", ")", "with", "n_a", "and", "n_b", "Normal", "." ]
def _kl_normal_normal(n_a, n_b, name=None): """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal. Args: n_a: instance of a Normal distribution object. n_b: instance of a Normal distribution object. name: (optional) Name to use for created operations. default is "kl_normal_normal". Returns: Batchwise KL(n_a || n_b) """ with ops.name_scope(name, "kl_normal_normal", [n_a.mu, n_b.mu]): one = constant_op.constant(1, dtype=n_a.dtype) two = constant_op.constant(2, dtype=n_a.dtype) half = constant_op.constant(0.5, dtype=n_a.dtype) s_a_squared = math_ops.square(n_a.sigma) s_b_squared = math_ops.square(n_b.sigma) ratio = s_a_squared / s_b_squared return (math_ops.square(n_a.mu - n_b.mu) / (two * s_b_squared) + half * (ratio - one - math_ops.log(ratio)))
[ "def", "_kl_normal_normal", "(", "n_a", ",", "n_b", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "name_scope", "(", "name", ",", "\"kl_normal_normal\"", ",", "[", "n_a", ".", "mu", ",", "n_b", ".", "mu", "]", ")", ":", "one", "=", "constant_op", ".", "constant", "(", "1", ",", "dtype", "=", "n_a", ".", "dtype", ")", "two", "=", "constant_op", ".", "constant", "(", "2", ",", "dtype", "=", "n_a", ".", "dtype", ")", "half", "=", "constant_op", ".", "constant", "(", "0.5", ",", "dtype", "=", "n_a", ".", "dtype", ")", "s_a_squared", "=", "math_ops", ".", "square", "(", "n_a", ".", "sigma", ")", "s_b_squared", "=", "math_ops", ".", "square", "(", "n_b", ".", "sigma", ")", "ratio", "=", "s_a_squared", "/", "s_b_squared", "return", "(", "math_ops", ".", "square", "(", "n_a", ".", "mu", "-", "n_b", ".", "mu", ")", "/", "(", "two", "*", "s_b_squared", ")", "+", "half", "*", "(", "ratio", "-", "one", "-", "math_ops", ".", "log", "(", "ratio", ")", ")", ")" ]
https://github.com/tobegit3hub/deep_image_model/blob/8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e/java_predict_client/src/main/proto/tensorflow/contrib/distributions/python/ops/normal.py#L229-L249
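For intuition, the closed form computed above can be re-derived in plain NumPy and sanity-checked against a Monte Carlo estimate of E_a[log p_a(x) - log p_b(x)]. This is an independent sketch, not the TensorFlow code:

# KL(N(mu_a, s_a^2) || N(mu_b, s_b^2)) =
#   (mu_a - mu_b)^2 / (2 s_b^2) + 0.5 * (r - 1 - log r),  r = s_a^2 / s_b^2
import numpy as np

def kl_normal_normal(mu_a, s_a, mu_b, s_b):
    ratio = s_a**2 / s_b**2
    return (mu_a - mu_b) ** 2 / (2 * s_b**2) + 0.5 * (ratio - 1 - np.log(ratio))

mu_a, s_a, mu_b, s_b = 0.0, 1.0, 1.0, 2.0
analytic = kl_normal_normal(mu_a, s_a, mu_b, s_b)

rng = np.random.default_rng(0)
x = rng.normal(mu_a, s_a, size=1_000_000)
# The 0.5*log(2*pi) terms cancel in the log-density difference.
log_pa = -0.5 * ((x - mu_a) / s_a) ** 2 - np.log(s_a)
log_pb = -0.5 * ((x - mu_b) / s_b) ** 2 - np.log(s_b)
mc = np.mean(log_pa - log_pb)

assert abs(analytic - mc) < 1e-2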
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/vm/v20201229/models.py
python
AudioResultDetailMoanResult.__init__
(self)
r""" :param Label: 该字段用于返回检测结果需要检测的内容类型,此处固定为**Moan**(呻吟)以调用呻吟检测功能。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Score: 该字段用于返回呻吟检测的置信度,取值范围:0(**置信度最低**)-100(**置信度最高**),越高代表音频越有可能属于呻吟内容。 :type Score: int :param StartTime: 该字段用于返回对应呻吟标签的片段在音频文件内的开始时间,单位为毫秒。 :type StartTime: float :param EndTime: 该字段用于返回对应呻吟标签的片段在音频文件内的结束时间,单位为毫秒。 :type EndTime: float :param SubLabelCode: *内测中,敬请期待* :type SubLabelCode: str :param SubLabel: 该字段用于返回当前标签(Lable)下的二级标签。 注意:此字段可能返回 null,表示取不到有效值。 :type SubLabel: str
r""" :param Label: 该字段用于返回检测结果需要检测的内容类型,此处固定为**Moan**(呻吟)以调用呻吟检测功能。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Score: 该字段用于返回呻吟检测的置信度,取值范围:0(**置信度最低**)-100(**置信度最高**),越高代表音频越有可能属于呻吟内容。 :type Score: int :param StartTime: 该字段用于返回对应呻吟标签的片段在音频文件内的开始时间,单位为毫秒。 :type StartTime: float :param EndTime: 该字段用于返回对应呻吟标签的片段在音频文件内的结束时间,单位为毫秒。 :type EndTime: float :param SubLabelCode: *内测中,敬请期待* :type SubLabelCode: str :param SubLabel: 该字段用于返回当前标签(Lable)下的二级标签。 注意:此字段可能返回 null,表示取不到有效值。 :type SubLabel: str
[ "r", ":", "param", "Label", ":", "该字段用于返回检测结果需要检测的内容类型,此处固定为", "**", "Moan", "**", "(呻吟)以调用呻吟检测功能。", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "Label", ":", "str", ":", "param", "Score", ":", "该字段用于返回呻吟检测的置信度,取值范围:0(", "**", "置信度最低", "**", ")", "-", "100(", "**", "置信度最高", "**", "),越高代表音频越有可能属于呻吟内容。", ":", "type", "Score", ":", "int", ":", "param", "StartTime", ":", "该字段用于返回对应呻吟标签的片段在音频文件内的开始时间,单位为毫秒。", ":", "type", "StartTime", ":", "float", ":", "param", "EndTime", ":", "该字段用于返回对应呻吟标签的片段在音频文件内的结束时间,单位为毫秒。", ":", "type", "EndTime", ":", "float", ":", "param", "SubLabelCode", ":", "*", "内测中,敬请期待", "*", ":", "type", "SubLabelCode", ":", "str", ":", "param", "SubLabel", ":", "该字段用于返回当前标签(Lable)下的二级标签。", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "SubLabel", ":", "str" ]
def __init__(self): r""" :param Label: 该字段用于返回检测结果需要检测的内容类型,此处固定为**Moan**(呻吟)以调用呻吟检测功能。 注意:此字段可能返回 null,表示取不到有效值。 :type Label: str :param Score: 该字段用于返回呻吟检测的置信度,取值范围:0(**置信度最低**)-100(**置信度最高**),越高代表音频越有可能属于呻吟内容。 :type Score: int :param StartTime: 该字段用于返回对应呻吟标签的片段在音频文件内的开始时间,单位为毫秒。 :type StartTime: float :param EndTime: 该字段用于返回对应呻吟标签的片段在音频文件内的结束时间,单位为毫秒。 :type EndTime: float :param SubLabelCode: *内测中,敬请期待* :type SubLabelCode: str :param SubLabel: 该字段用于返回当前标签(Lable)下的二级标签。 注意:此字段可能返回 null,表示取不到有效值。 :type SubLabel: str """ self.Label = None self.Score = None self.StartTime = None self.EndTime = None self.SubLabelCode = None self.SubLabel = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "Label", "=", "None", "self", ".", "Score", "=", "None", "self", ".", "StartTime", "=", "None", "self", ".", "EndTime", "=", "None", "self", ".", "SubLabelCode", "=", "None", "self", ".", "SubLabel", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/vm/v20201229/models.py#L167-L189
City-Bureau/city-scrapers
b295d0aa612e3979a9fccab7c5f55ecea9ed074c
city_scrapers/spiders/cook_emergency_telephone.py
python
CookEmergencyTelephoneSpider.parse
(self, response)
`parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs.
`parse` should always `yield` Meeting items.
[ "parse", "should", "always", "yield", "Meeting", "items", "." ]
def parse(self, response): """ `parse` should always `yield` Meeting items. Change the `_parse_title`, `_parse_start`, etc methods to fit your scraping needs. """ for link in response.css(".sliding_box a"): link_text = link.css("*::text").get() if "Minutes" in link_text: self.docs_link = link.attrib["href"] elif "Agenda" in link_text: self.agenda_link = link.attrib["href"] if not (self.docs_link == "" and self.agenda_link == ""): yield scrapy.Request( response.urljoin(self.schedule_pdf_link), callback=self._parse_schedule, dont_filter=True, ) else: raise ValueError("Required links not found")
[ "def", "parse", "(", "self", ",", "response", ")", ":", "for", "link", "in", "response", ".", "css", "(", "\".sliding_box a\"", ")", ":", "link_text", "=", "link", ".", "css", "(", "\"*::text\"", ")", ".", "get", "(", ")", "if", "\"Minutes\"", "in", "link_text", ":", "self", ".", "docs_link", "=", "link", ".", "attrib", "[", "\"href\"", "]", "elif", "\"Agenda\"", "in", "link_text", ":", "self", ".", "agenda_link", "=", "link", ".", "attrib", "[", "\"href\"", "]", "if", "not", "(", "self", ".", "docs_link", "==", "\"\"", "and", "self", ".", "agenda_link", "==", "\"\"", ")", ":", "yield", "scrapy", ".", "Request", "(", "response", ".", "urljoin", "(", "self", ".", "schedule_pdf_link", ")", ",", "callback", "=", "self", ".", "_parse_schedule", ",", "dont_filter", "=", "True", ",", ")", "else", ":", "raise", "ValueError", "(", "\"Required links not found\"", ")" ]
https://github.com/City-Bureau/city-scrapers/blob/b295d0aa612e3979a9fccab7c5f55ecea9ed074c/city_scrapers/spiders/cook_emergency_telephone.py#L30-L53
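Framework specifics aside, the parse method above is a small classification loop over links plus a guard: the `if not (a == "" and b == "")` test proceeds when at least one of the two links was found and raises only when both are missing. A dependency-free sketch of that logic (the sample links are invented):

def classify_links(links):
    docs_link, agenda_link = "", ""
    for text, href in links:
        if "Minutes" in text:
            docs_link = href
        elif "Agenda" in text:
            agenda_link = href
    # Fail loudly only when neither required link was found.
    if docs_link == "" and agenda_link == "":
        raise ValueError("Required links not found")
    return docs_link, agenda_link

links = [
    ("Board Minutes", "/docs/minutes.pdf"),
    ("Meeting Agenda", "/docs/agenda.pdf"),
]
assert classify_links(links) == ("/docs/minutes.pdf", "/docs/agenda.pdf")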
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_utils/library/yedit.py
python
Yedit.write
(self)
return (True, self.yaml_dict)
write to file
write to file
[ "write", "to", "file" ]
def write(self): ''' write to file ''' if not self.filename: raise YeditException('Please specify a filename.') if self.backup and self.file_exists(): shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext)) # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass # Try to use RoundTripDumper if supported. if self.content_type == 'yaml': try: Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) except AttributeError: Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) elif self.content_type == 'json': Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True)) else: raise YeditException('Unsupported content_type: {}.'.format(self.content_type) + 'Please specify a content_type of yaml or json.') return (True, self.yaml_dict)
[ "def", "write", "(", "self", ")", ":", "if", "not", "self", ".", "filename", ":", "raise", "YeditException", "(", "'Please specify a filename.'", ")", "if", "self", ".", "backup", "and", "self", ".", "file_exists", "(", ")", ":", "shutil", ".", "copy", "(", "self", ".", "filename", ",", "'{}{}'", ".", "format", "(", "self", ".", "filename", ",", "self", ".", "backup_ext", ")", ")", "# Try to set format attributes if supported", "try", ":", "self", ".", "yaml_dict", ".", "fa", ".", "set_block_style", "(", ")", "except", "AttributeError", ":", "pass", "# Try to use RoundTripDumper if supported.", "if", "self", ".", "content_type", "==", "'yaml'", ":", "try", ":", "Yedit", ".", "_write", "(", "self", ".", "filename", ",", "yaml", ".", "dump", "(", "self", ".", "yaml_dict", ",", "Dumper", "=", "yaml", ".", "RoundTripDumper", ")", ")", "except", "AttributeError", ":", "Yedit", ".", "_write", "(", "self", ".", "filename", ",", "yaml", ".", "safe_dump", "(", "self", ".", "yaml_dict", ",", "default_flow_style", "=", "False", ")", ")", "elif", "self", ".", "content_type", "==", "'json'", ":", "Yedit", ".", "_write", "(", "self", ".", "filename", ",", "json", ".", "dumps", "(", "self", ".", "yaml_dict", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "else", ":", "raise", "YeditException", "(", "'Unsupported content_type: {}.'", ".", "format", "(", "self", ".", "content_type", ")", "+", "'Please specify a content_type of yaml or json.'", ")", "return", "(", "True", ",", "self", ".", "yaml_dict", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_utils/library/yedit.py#L434-L460
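The core of Yedit.write is a serializer dispatch on content_type. A minimal sketch of the same dispatch using plain PyYAML and the standard json module (the RoundTripDumper fallback is dropped for brevity):

import json
import yaml

def dump_content(data, content_type):
    # Serialize as YAML or JSON, with an explicit error for anything else.
    if content_type == "yaml":
        return yaml.safe_dump(data, default_flow_style=False)
    if content_type == "json":
        return json.dumps(data, indent=4, sort_keys=True)
    raise ValueError(
        "Unsupported content_type: {}. "
        "Please specify a content_type of yaml or json.".format(content_type)
    )

doc = {"kind": "ConfigMap", "data": {"key": "value"}}
print(dump_content(doc, "yaml"))
print(dump_content(doc, "json"))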
DigitalSlideArchive/HistomicsTK
db2ceb4831bec0efa557cf5b18078ae790253de5
histomicstk/annotations_and_masks/annotation_database_parser.py
python
dump_annotations_locally
( gc, folderid, local, save_json=True, save_sqlite=False, dbcon=None, callback=None, callback_kwargs=None)
Dump annotations of folder and subfolders locally recursively. This reproduces the tiered structure locally and (possibly) dumps annotations there. Adapted from Lee A.D. Cooper Parameters ----------- gc : girder_client.GirderClient authenticated girder client instance folderid : str girder id of source (base) folder local : str local path to dump annotations save_json : bool whether to dump annotations as json file save_sqlite : bool whether to save the backup into an sqlite database dbcon : sqlalchemy.create_engine.connect() object IGNORE THIS PARAMETER!! This is used internally. callback : function function to call that CAN accept AT LEAST the following params - item: girder response with item information - annotations: loaded annotations - local: local directory - monitorPrefix: string - dbcon: sqlalchemy.create_engine.connect() object You can just add kwargs at the end of your callback definition for simplicity. callback_kwargs : dict kwargs to pass along to callback. DO NOT pass any of the parameters item, annotations, local, monitorPrefix, or dbcon as these will be internally passed. Just include any specific parameters for the callback. See parse_annotations_to_local_tables() above for an example of a callback and the unit test of this function.
Dump annotations of folder and subfolders locally recursively.
[ "Dump", "annotations", "of", "folder", "and", "subfolders", "locally", "recursively", "." ]
def dump_annotations_locally( gc, folderid, local, save_json=True, save_sqlite=False, dbcon=None, callback=None, callback_kwargs=None): """Dump annotations of folder and subfolders locally recursively. This reproduces the tiered structure locally and (possibly) dumps annotations there. Adapted from Lee A.D. Cooper Parameters ----------- gc : girder_client.GirderClient authenticated girder client instance folderid : str girder id of source (base) folder local : str local path to dump annotations save_json : bool whether to dump annotations as json file save_sqlite : bool whether to save the backup into an sqlite database dbcon : sqlalchemy.create_engine.connect() object IGNORE THIS PARAMETER!! This is used internally. callback : function function to call that CAN accept AT LEAST the following params - item: girder response with item information - annotations: loaded annotations - local: local directory - monitorPrefix: string - dbcon: sqlalchemy.create_engine.connect() object You can just add kwargs at the end of your callback definition for simplicity. callback_kwargs : dict kwargs to pass along to callback. DO NOT pass any of the parameters item, annotations, local, monitorPrefix, or dbcon as these will be internally passed. Just include any specific parameters for the callback. See parse_annotations_to_local_tables() above for an example of a callback and the unit test of this function. """ callback_kwargs = callback_kwargs or {} assert(save_json or save_sqlite), "must save results somehow!" monitor = os.path.basename(local) # get folder info folder_info = gc.get("folder/%s" % folderid) folder_info['folder_path'] = get_absolute_girder_folderpath( gc=gc, folder_info=folder_info) # connect to sqlite database -- only first stack does this if save_sqlite and (dbcon is None): db_path = os.path.join(local, folder_info['name'] + ".sqlite") sql_engine = create_engine('sqlite:///' + db_path, echo=False) dbcon = sql_engine.connect() # save folder information json if save_json: print("%s: save folder info" % monitor) savepath = os.path.join(local, folder_info['name'] + '.json') with open(savepath, 'w') as fout: json.dump(folder_info, fout) # save folder info to sqlite if save_sqlite: _add_folder_to_sqlite(dbcon, folder_info) # pull annotations for each slide in folder workflow_runner = Workflow_runner( slide_iterator=Slide_iterator( gc, source_folder_id=folderid, keep_slides=None, ), workflow=dump_annotations_workflow, workflow_kwargs={ 'gc': gc, 'local': local, 'save_json': save_json, 'save_sqlite': save_sqlite, 'dbcon': dbcon, 'callback': callback, 'callback_kwargs': callback_kwargs, }, monitorPrefix=monitor) workflow_runner.run() # for each subfolder, create a new folder locally and call self for folder in gc.listFolder(parentId=folderid): # create folder in local new_folder = os.path.join(local, folder['name']) os.mkdir(new_folder) # call self with same prameters dump_annotations_locally( gc=gc, folderid=folder['_id'], local=new_folder, save_json=save_json, save_sqlite=save_sqlite, dbcon=dbcon, callback=callback, callback_kwargs=callback_kwargs)
[ "def", "dump_annotations_locally", "(", "gc", ",", "folderid", ",", "local", ",", "save_json", "=", "True", ",", "save_sqlite", "=", "False", ",", "dbcon", "=", "None", ",", "callback", "=", "None", ",", "callback_kwargs", "=", "None", ")", ":", "callback_kwargs", "=", "callback_kwargs", "or", "{", "}", "assert", "(", "save_json", "or", "save_sqlite", ")", ",", "\"must save results somehow!\"", "monitor", "=", "os", ".", "path", ".", "basename", "(", "local", ")", "# get folder info", "folder_info", "=", "gc", ".", "get", "(", "\"folder/%s\"", "%", "folderid", ")", "folder_info", "[", "'folder_path'", "]", "=", "get_absolute_girder_folderpath", "(", "gc", "=", "gc", ",", "folder_info", "=", "folder_info", ")", "# connect to sqlite database -- only first stack does this", "if", "save_sqlite", "and", "(", "dbcon", "is", "None", ")", ":", "db_path", "=", "os", ".", "path", ".", "join", "(", "local", ",", "folder_info", "[", "'name'", "]", "+", "\".sqlite\"", ")", "sql_engine", "=", "create_engine", "(", "'sqlite:///'", "+", "db_path", ",", "echo", "=", "False", ")", "dbcon", "=", "sql_engine", ".", "connect", "(", ")", "# save folder information json", "if", "save_json", ":", "print", "(", "\"%s: save folder info\"", "%", "monitor", ")", "savepath", "=", "os", ".", "path", ".", "join", "(", "local", ",", "folder_info", "[", "'name'", "]", "+", "'.json'", ")", "with", "open", "(", "savepath", ",", "'w'", ")", "as", "fout", ":", "json", ".", "dump", "(", "folder_info", ",", "fout", ")", "# save folder info to sqlite", "if", "save_sqlite", ":", "_add_folder_to_sqlite", "(", "dbcon", ",", "folder_info", ")", "# pull annotations for each slide in folder", "workflow_runner", "=", "Workflow_runner", "(", "slide_iterator", "=", "Slide_iterator", "(", "gc", ",", "source_folder_id", "=", "folderid", ",", "keep_slides", "=", "None", ",", ")", ",", "workflow", "=", "dump_annotations_workflow", ",", "workflow_kwargs", "=", "{", "'gc'", ":", "gc", ",", "'local'", ":", "local", ",", "'save_json'", ":", "save_json", ",", "'save_sqlite'", ":", "save_sqlite", ",", "'dbcon'", ":", "dbcon", ",", "'callback'", ":", "callback", ",", "'callback_kwargs'", ":", "callback_kwargs", ",", "}", ",", "monitorPrefix", "=", "monitor", ")", "workflow_runner", ".", "run", "(", ")", "# for each subfolder, create a new folder locally and call self", "for", "folder", "in", "gc", ".", "listFolder", "(", "parentId", "=", "folderid", ")", ":", "# create folder in local", "new_folder", "=", "os", ".", "path", ".", "join", "(", "local", ",", "folder", "[", "'name'", "]", ")", "os", ".", "mkdir", "(", "new_folder", ")", "# call self with same prameters", "dump_annotations_locally", "(", "gc", "=", "gc", ",", "folderid", "=", "folder", "[", "'_id'", "]", ",", "local", "=", "new_folder", ",", "save_json", "=", "save_json", ",", "save_sqlite", "=", "save_sqlite", ",", "dbcon", "=", "dbcon", ",", "callback", "=", "callback", ",", "callback_kwargs", "=", "callback_kwargs", ")" ]
https://github.com/DigitalSlideArchive/HistomicsTK/blob/db2ceb4831bec0efa557cf5b18078ae790253de5/histomicstk/annotations_and_masks/annotation_database_parser.py#L275-L379
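Stripped of the Girder and SQLite specifics, the function above is a classic recursive mirror: save this folder's metadata, process its contents, then recurse into each child with the same arguments. A shape-only sketch against an in-memory tree (a plain dict stands in for the Girder client calls, which need a live server):

import json
import os

def dump_tree_locally(folder, local):
    os.makedirs(local, exist_ok=True)
    # Save this folder's metadata, as the code above does with folder_info.
    with open(os.path.join(local, folder["name"] + ".json"), "w") as fout:
        json.dump({"name": folder["name"]}, fout)
    # Recurse into each subfolder with the same parameters.
    for child in folder.get("folders", []):
        dump_tree_locally(child, os.path.join(local, child["name"]))

tree = {"name": "base", "folders": [{"name": "sub", "folders": []}]}
dump_tree_locally(tree, "annotation_dump")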
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/dwindle.py
python
DwindleSkein.addThread
(self)
Add the thread sections to the gcode.
Add the thread sections to the gcode.
[ "Add", "the", "thread", "sections", "to", "the", "gcode", "." ]
def addThread(self): 'Add the thread sections to the gcode.' if len(self.threadSections) == 0: return area = self.area dwindlePortion = 0.0 endRateMultiplier = self.repository.endRateMultiplier.value halfOverSteps = self.halfOverSteps oneOverSteps = self.oneOverSteps currentPentUpVolume = self.repository.pentUpVolume.value * self.oldFlowRate / self.operatingFlowRate slowdownFlowRateMultiplier = 1.0 - (currentPentUpVolume / self.repository.slowdownVolume.value) operatingFeedRateMinute = self.operatingFeedRateMinute slowdownVolume = self.repository.slowdownVolume.value for threadSectionIndex in xrange(len(self.threadSections) - 1, -1, -1): threadSection = self.threadSections[threadSectionIndex] dwindlePortion = threadSection.getDwindlePortion(area, dwindlePortion, operatingFeedRateMinute, self.operatingFlowRate, slowdownVolume) for threadSection in self.threadSections: threadSection.addGcodeThreadSection(self.distanceFeedRate, endRateMultiplier, halfOverSteps, oneOverSteps, slowdownFlowRateMultiplier) self.distanceFeedRate.addFlowRateLine(self.oldFlowRate) self.threadSections = []
[ "def", "addThread", "(", "self", ")", ":", "if", "len", "(", "self", ".", "threadSections", ")", "==", "0", ":", "return", "area", "=", "self", ".", "area", "dwindlePortion", "=", "0.0", "endRateMultiplier", "=", "self", ".", "repository", ".", "endRateMultiplier", ".", "value", "halfOverSteps", "=", "self", ".", "halfOverSteps", "oneOverSteps", "=", "self", ".", "oneOverSteps", "currentPentUpVolume", "=", "self", ".", "repository", ".", "pentUpVolume", ".", "value", "*", "self", ".", "oldFlowRate", "/", "self", ".", "operatingFlowRate", "slowdownFlowRateMultiplier", "=", "1.0", "-", "(", "currentPentUpVolume", "/", "self", ".", "repository", ".", "slowdownVolume", ".", "value", ")", "operatingFeedRateMinute", "=", "self", ".", "operatingFeedRateMinute", "slowdownVolume", "=", "self", ".", "repository", ".", "slowdownVolume", ".", "value", "for", "threadSectionIndex", "in", "xrange", "(", "len", "(", "self", ".", "threadSections", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "threadSection", "=", "self", ".", "threadSections", "[", "threadSectionIndex", "]", "dwindlePortion", "=", "threadSection", ".", "getDwindlePortion", "(", "area", ",", "dwindlePortion", ",", "operatingFeedRateMinute", ",", "self", ".", "operatingFlowRate", ",", "slowdownVolume", ")", "for", "threadSection", "in", "self", ".", "threadSections", ":", "threadSection", ".", "addGcodeThreadSection", "(", "self", ".", "distanceFeedRate", ",", "endRateMultiplier", ",", "halfOverSteps", ",", "oneOverSteps", ",", "slowdownFlowRateMultiplier", ")", "self", ".", "distanceFeedRate", ".", "addFlowRateLine", "(", "self", ".", "oldFlowRate", ")", "self", ".", "threadSections", "=", "[", "]" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/craft_plugins/dwindle.py#L129-L148
linkchecker/linkchecker
d1078ed8480e5cfc4264d0dbf026b45b45aede4d
linkcheck/dummy.py
python
Dummy.__getitem__
(self, key)
return self
Return self
Return self
[ "Return", "self" ]
def __getitem__(self, key): """Return self""" return self
[ "def", "__getitem__", "(", "self", ",", "key", ")", ":", "return", "self" ]
https://github.com/linkchecker/linkchecker/blob/d1078ed8480e5cfc4264d0dbf026b45b45aede4d/linkcheck/dummy.py#L60-L62
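The class above is one piece of the "absorbing dummy" pattern: an object that answers every attribute access, indexing, or call with itself, so arbitrary chained expressions evaluate without side effects. A self-contained sketch:

class Dummy:
    def __getattr__(self, name):
        return self

    def __getitem__(self, key):
        return self

    def __call__(self, *args, **kwargs):
        return self

d = Dummy()
assert d.log["anything"].flush() is d   # every chain collapses to the dummy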
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/funnel/_textfont.py
python
Textfont.color
(self)
return self["color"]
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above
[ "The", "color", "property", "is", "a", "color", "and", "may", "be", "specified", "as", ":", "-", "A", "hex", "string", "(", "e", ".", "g", ".", "#ff0000", ")", "-", "An", "rgb", "/", "rgba", "string", "(", "e", ".", "g", ".", "rgb", "(", "255", "0", "0", ")", ")", "-", "An", "hsl", "/", "hsla", "string", "(", "e", ".", "g", ".", "hsl", "(", "0", "100%", "50%", ")", ")", "-", "An", "hsv", "/", "hsva", "string", "(", "e", ".", "g", ".", "hsv", "(", "0", "100%", "100%", ")", ")", "-", "A", "named", "CSS", "color", ":", "aliceblue", "antiquewhite", "aqua", "aquamarine", "azure", "beige", "bisque", "black", "blanchedalmond", "blue", "blueviolet", "brown", "burlywood", "cadetblue", "chartreuse", "chocolate", "coral", "cornflowerblue", "cornsilk", "crimson", "cyan", "darkblue", "darkcyan", "darkgoldenrod", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta", "darkolivegreen", "darkorange", "darkorchid", "darkred", "darksalmon", "darkseagreen", "darkslateblue", "darkslategray", "darkslategrey", "darkturquoise", "darkviolet", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen", "fuchsia", "gainsboro", "ghostwhite", "gold", "goldenrod", "gray", "grey", "green", "greenyellow", "honeydew", "hotpink", "indianred", "indigo", "ivory", "khaki", "lavender", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcoral", "lightcyan", "lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightpink", "lightsalmon", "lightseagreen", "lightskyblue", "lightslategray", "lightslategrey", "lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "magenta", "maroon", "mediumaquamarine", "mediumblue", "mediumorchid", "mediumpurple", "mediumseagreen", "mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin", "navajowhite", "navy", "oldlace", "olive", "olivedrab", "orange", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru", "pink", "plum", "powderblue", "purple", "red", "rosybrown", "royalblue", "rebeccapurple", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver", "skyblue", "slateblue", "slategray", "slategrey", "snow", "springgreen", "steelblue", "tan", "teal", "thistle", "tomato", "turquoise", "violet", "wheat", "white", "whitesmoke", "yellow", "yellowgreen", "-", "A", "list", "or", "array", "of", "any", "of", "the", "above" ]
def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"]
[ "def", "color", "(", "self", ")", ":", "return", "self", "[", "\"color\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/funnel/_textfont.py#L16-L65
quantumlib/Cirq
89f88b01d69222d3f1ec14d649b7b3a85ed9211f
cirq-ionq/cirq_ionq/ionq_client.py
python
_IonQClient.list_jobs
( self, status: Optional[str] = None, limit: int = 100, batch_size: int = 1000 )
return self._list('jobs', params, 'jobs', limit, batch_size)
Lists jobs from the IonQ API. Args: status: If not None, filter to jobs with this status. limit: The maximum number of jobs to return. batch_size: The size of the batches requested per http GET call. Returns: A list of the json bodies of the job dicts. Raises: IonQException: If the API call fails.
Lists jobs from the IonQ API.
[ "Lists", "jobs", "from", "the", "IonQ", "API", "." ]
def list_jobs( self, status: Optional[str] = None, limit: int = 100, batch_size: int = 1000 ) -> List[Dict[str, Any]]: """Lists jobs from the IonQ API. Args: status: If not None, filter to jobs with this status. limit: The maximum number of jobs to return. batch_size: The size of the batches requested per http GET call. Returns: A list of the json bodies of the job dicts. Raises: IonQException: If the API call fails. """ params = {} if status: params['status'] = status return self._list('jobs', params, 'jobs', limit, batch_size)
[ "def", "list_jobs", "(", "self", ",", "status", ":", "Optional", "[", "str", "]", "=", "None", ",", "limit", ":", "int", "=", "100", ",", "batch_size", ":", "int", "=", "1000", ")", "->", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "params", "=", "{", "}", "if", "status", ":", "params", "[", "'status'", "]", "=", "status", "return", "self", ".", "_list", "(", "'jobs'", ",", "params", ",", "'jobs'", ",", "limit", ",", "batch_size", ")" ]
https://github.com/quantumlib/Cirq/blob/89f88b01d69222d3f1ec14d649b7b3a85ed9211f/cirq-ionq/cirq_ionq/ionq_client.py#L157-L176
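The method above delegates batched pagination to an internal _list helper. A generic sketch of that pattern with requests, where the endpoint, the response shape, and the "next" cursor field are all hypothetical stand-ins rather than the real IonQ API:

import requests

def list_items(base_url, token, status=None, limit=100, batch_size=1000):
    headers = {"Authorization": "apiKey {}".format(token)}
    params = {"limit": min(limit, batch_size)}
    if status:
        params["status"] = status
    items = []
    while len(items) < limit:
        resp = requests.get(base_url + "/jobs", headers=headers, params=params)
        resp.raise_for_status()
        body = resp.json()
        batch = body.get("jobs", [])
        if not batch:
            break
        items.extend(batch)
        cursor = body.get("next")   # hypothetical pagination cursor
        if cursor is None:
            break
        params["next"] = cursor
    return items[:limit]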
phantomcyber/playbooks
9e850ecc44cb98c5dde53784744213a1ed5799bd
zscaler_hunt_and_block_url.py
python
exec_short_description
(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs)
return
[]
def exec_short_description(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug('exec_short_description() called') template = """Please Investigate Executive Workstation - {0}""" # parameter list for template variable replacement parameters = [ "filtered-data:filter_2:condition_1:run_query_1:action_result.data.*.client_ip", ] phantom.format(container=container, template=template, parameters=parameters, name="exec_short_description") exec_long_description(container=container) return
[ "def", "exec_short_description", "(", "action", "=", "None", ",", "success", "=", "None", ",", "container", "=", "None", ",", "results", "=", "None", ",", "handle", "=", "None", ",", "filtered_artifacts", "=", "None", ",", "filtered_results", "=", "None", ",", "custom_function", "=", "None", ",", "*", "*", "kwargs", ")", ":", "phantom", ".", "debug", "(", "'exec_short_description() called'", ")", "template", "=", "\"\"\"Please Investigate Executive Workstation - {0}\"\"\"", "# parameter list for template variable replacement", "parameters", "=", "[", "\"filtered-data:filter_2:condition_1:run_query_1:action_result.data.*.client_ip\"", ",", "]", "phantom", ".", "format", "(", "container", "=", "container", ",", "template", "=", "template", ",", "parameters", "=", "parameters", ",", "name", "=", "\"exec_short_description\"", ")", "exec_long_description", "(", "container", "=", "container", ")", "return" ]
https://github.com/phantomcyber/playbooks/blob/9e850ecc44cb98c5dde53784744213a1ed5799bd/zscaler_hunt_and_block_url.py#L412-L426
hasegaw/IkaLog
bd476da541fcc296f792d4db76a6b9174c4777ad
ikalog/engine.py
python
IkaEngine.disble_profile
(self)
[]
def disble_profile(self): self._enable_profile = False
[ "def", "disble_profile", "(", "self", ")", ":", "self", ".", "_enable_profile", "=", "False" ]
https://github.com/hasegaw/IkaLog/blob/bd476da541fcc296f792d4db76a6b9174c4777ad/ikalog/engine.py#L51-L52
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/utils/dummy_tf_objects.py
python
TFDistilBertForSequenceClassification.call
(self, *args, **kwargs)
[]
def call(self, *args, **kwargs): requires_backends(self, ["tf"])
[ "def", "call", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "requires_backends", "(", "self", ",", "[", "\"tf\"", "]", ")" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/utils/dummy_tf_objects.py#L1098-L1099
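These generated dummies exist so that importing transformers without TensorFlow fails with a clear message only when a TF class is actually used. A simplified sketch of the guard pattern (the real requires_backends checks availability differently; the find_spec test here is an assumption):

import importlib.util

def requires_backends(obj, backends):
    # Raise a uniform ImportError naming every missing backend.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(
            "{} requires the {} backend(s), which are not installed.".format(
                type(obj).__name__, ", ".join(missing)
            )
        )

class TFModelDummy:
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["tensorflow"])

    def call(self, *args, **kwargs):
        requires_backends(self, ["tensorflow"])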
josiah-wolf-oberholtzer/supriya
5ca725a6b97edfbe016a75666d420ecfdf49592f
supriya/intervals/Interval.py
python
Interval.intersects
(self, expr: Union["Interval", float])
return self.start_offset <= expr < self.stop_offset
True when interval intersects ``expr``, another interval or offset: :: >>> from supriya.intervals import Interval >>> interval_1 = Interval(0, 10) >>> interval_2 = Interval(5, 15) >>> interval_3 = Interval(10, 15) >>> interval_4 = Interval(15, 25) :: >>> interval_1.intersects(interval_1) True >>> interval_1.intersects(interval_2) True >>> interval_1.intersects(interval_3) False >>> interval_1.intersects(interval_4) False :: >>> for offset in [-5, 0, 5, 10, 15]: ... print(offset, interval_1.intersects(offset)) ... -5 False 0 True 5 True 10 False 15 False
True when interval intersects ``expr``, another interval or offset:
[ "True", "when", "interval", "intersects", "expr", "another", "interval", "or", "offset", ":" ]
def intersects(self, expr: Union["Interval", float]) -> bool: """ True when interval intersects ``expr``, another interval or offset: :: >>> from supriya.intervals import Interval >>> interval_1 = Interval(0, 10) >>> interval_2 = Interval(5, 15) >>> interval_3 = Interval(10, 15) >>> interval_4 = Interval(15, 25) :: >>> interval_1.intersects(interval_1) True >>> interval_1.intersects(interval_2) True >>> interval_1.intersects(interval_3) False >>> interval_1.intersects(interval_4) False :: >>> for offset in [-5, 0, 5, 10, 15]: ... print(offset, interval_1.intersects(offset)) ... -5 False 0 True 5 True 10 False 15 False """ if isinstance(expr, Interval): return ( expr.start_offset <= self.start_offset and self.start_offset < expr.stop_offset ) or ( self.start_offset <= expr.start_offset and expr.start_offset < self.stop_offset ) return self.start_offset <= expr < self.stop_offset
[ "def", "intersects", "(", "self", ",", "expr", ":", "Union", "[", "\"Interval\"", ",", "float", "]", ")", "->", "bool", ":", "if", "isinstance", "(", "expr", ",", "Interval", ")", ":", "return", "(", "expr", ".", "start_offset", "<=", "self", ".", "start_offset", "and", "self", ".", "start_offset", "<", "expr", ".", "stop_offset", ")", "or", "(", "self", ".", "start_offset", "<=", "expr", ".", "start_offset", "and", "expr", ".", "start_offset", "<", "self", ".", "stop_offset", ")", "return", "self", ".", "start_offset", "<=", "expr", "<", "self", ".", "stop_offset" ]
https://github.com/josiah-wolf-oberholtzer/supriya/blob/5ca725a6b97edfbe016a75666d420ecfdf49592f/supriya/intervals/Interval.py#L488-L531
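The half-open convention above (start inclusive, stop exclusive) is easy to restate standalone: two intervals intersect exactly when one starts inside the other. A minimal sketch with the same truth table as the doctests:

from dataclasses import dataclass

@dataclass
class Interval:
    start: float
    stop: float

    def intersects(self, other):
        if isinstance(other, Interval):
            # One interval must start inside the other.
            return (other.start <= self.start < other.stop) or (
                self.start <= other.start < self.stop
            )
        # An offset is inside when start <= offset < stop.
        return self.start <= other < self.stop

a, b, c = Interval(0, 10), Interval(5, 15), Interval(10, 15)
assert a.intersects(b) and not a.intersects(c)
assert a.intersects(0) and not a.intersects(10)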
fkie/multimaster_fkie
3d23df29d25d71a75c66bbd3cc6e9cbb255724d8
fkie_node_manager_daemon/src/fkie_node_manager_daemon/url.py
python
equal_uri
(url1, url2)
return url1.rstrip(os.path.sep) == url2.rstrip(os.path.sep)
Return True if the two URIs are equal after stripping any trailing path separator.
Return True if the two URIs are equal after stripping any trailing path separator.
[ "Return", "True", "if", "the", "two", "URIs", "are", "equal", "after", "stripping", "any", "trailing", "path", "separator", "." ]
def equal_uri(url1, url2): ''' Return True if the two URIs are equal after stripping any trailing path separator. ''' return url1.rstrip(os.path.sep) == url2.rstrip(os.path.sep)
[ "def", "equal_uri", "(", "url1", ",", "url2", ")", ":", "return", "url1", ".", "rstrip", "(", "os", ".", "path", ".", "sep", ")", "==", "url2", ".", "rstrip", "(", "os", ".", "path", ".", "sep", ")" ]
https://github.com/fkie/multimaster_fkie/blob/3d23df29d25d71a75c66bbd3cc6e9cbb255724d8/fkie_node_manager_daemon/src/fkie_node_manager_daemon/url.py#L47-L51
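A quick usage check of the normalization above (os.path.sep is '/' only on POSIX, so these asserts assume a POSIX system):

import os

def equal_uri(url1, url2):
    return url1.rstrip(os.path.sep) == url2.rstrip(os.path.sep)

assert equal_uri("/opt/ros/pkg/", "/opt/ros/pkg")
assert not equal_uri("/opt/ros/pkg", "/opt/ros/other")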
jansel/opentuner
070c5cef6d933eb760a2f9cd5cd08c95f27aee75
examples/gccflags/gccflags.py
python
GccFlagsTuner.save_final_config
(self, configuration)
called at the end of tuning
called at the end of tuning
[ "called", "at", "the", "end", "of", "tuning" ]
def save_final_config(self, configuration): """called at the end of tuning""" print("Best flags written to gccflags_final_config.{json,cmd}") self.manipulator().save_to_file(configuration.data, 'gccflags_final_config.json') with open('gccflags_final_config.cmd', 'w') as fd: fd.write(self.make_command(configuration.data))
[ "def", "save_final_config", "(", "self", ",", "configuration", ")", ":", "print", "(", "\"Best flags written to gccflags_final_config.{json,cmd}\"", ")", "self", ".", "manipulator", "(", ")", ".", "save_to_file", "(", "configuration", ".", "data", ",", "'gccflags_final_config.json'", ")", "with", "open", "(", "'gccflags_final_config.cmd'", ",", "'w'", ")", "as", "fd", ":", "fd", ".", "write", "(", "self", ".", "make_command", "(", "configuration", ".", "data", ")", ")" ]
https://github.com/jansel/opentuner/blob/070c5cef6d933eb760a2f9cd5cd08c95f27aee75/examples/gccflags/gccflags.py#L345-L351
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/whoosh/codec/whoosh3.py
python
_lenfield
(fieldname)
return "_%s_len" % fieldname
[]
def _lenfield(fieldname): return "_%s_len" % fieldname
[ "def", "_lenfield", "(", "fieldname", ")", ":", "return", "\"_%s_len\"", "%", "fieldname" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/codec/whoosh3.py#L150-L151
ninja-ide/ninja-ide
87d91131bd19fdc3dcfd91eb97ad1e41c49c60c0
ninja_ide/gui/explorer/tabs/tree_projects_widget.py
python
ProjectTreeColumn._add_file_to_project
(self, path)
Add the file at 'path' to the project the user chooses here.
Add the file at 'path' to the project the user chooses here.
[ "Add", "the", "file", "at", "path", "to", "the", "project", "the", "user", "chooses", "here", "." ]
def _add_file_to_project(self, path): """Add the file at 'path' to the project the user chooses here.""" if self._projects_area.count() > 0: path_project = [self.current_project] _add_to_project = add_to_project.AddToProject(path_project, self) _add_to_project.exec_() if not _add_to_project.path_selected: return main_container = IDE.get_service('main_container') if not main_container: return editorWidget = main_container.get_current_editor() if not editorWidget.file_path: name = QInputDialog.getText( None, translations.TR_ADD_FILE_TO_PROJECT, translations.TR_FILENAME + ": ")[0] if not name: QMessageBox.information( self, translations.TR_INVALID_FILENAME, translations.TR_INVALID_FILENAME_ENTER_A_FILENAME) return else: name = file_manager.get_basename(editorWidget.file_path) new_path = file_manager.create_path( _add_to_project.path_selected, name) ide_srv = IDE.get_service("ide") old_file = ide_srv.get_or_create_nfile(path) new_file = old_file.save(editorWidget.text(), new_path) # FIXME: Make this file replace the original in the open tab else: pass
[ "def", "_add_file_to_project", "(", "self", ",", "path", ")", ":", "if", "self", ".", "_projects_area", ".", "count", "(", ")", ">", "0", ":", "path_project", "=", "[", "self", ".", "current_project", "]", "_add_to_project", "=", "add_to_project", ".", "AddToProject", "(", "path_project", ",", "self", ")", "_add_to_project", ".", "exec_", "(", ")", "if", "not", "_add_to_project", ".", "path_selected", ":", "return", "main_container", "=", "IDE", ".", "get_service", "(", "'main_container'", ")", "if", "not", "main_container", ":", "return", "editorWidget", "=", "main_container", ".", "get_current_editor", "(", ")", "if", "not", "editorWidget", ".", "file_path", ":", "name", "=", "QInputDialog", ".", "getText", "(", "None", ",", "translations", ".", "TR_ADD_FILE_TO_PROJECT", ",", "translations", ".", "TR_FILENAME", "+", "\": \"", ")", "[", "0", "]", "if", "not", "name", ":", "QMessageBox", ".", "information", "(", "self", ",", "translations", ".", "TR_INVALID_FILENAME", ",", "translations", ".", "TR_INVALID_FILENAME_ENTER_A_FILENAME", ")", "return", "else", ":", "name", "=", "file_manager", ".", "get_basename", "(", "editorWidget", ".", "file_path", ")", "new_path", "=", "file_manager", ".", "create_path", "(", "_add_to_project", ".", "path_selected", ",", "name", ")", "ide_srv", "=", "IDE", ".", "get_service", "(", "\"ide\"", ")", "old_file", "=", "ide_srv", ".", "get_or_create_nfile", "(", "path", ")", "new_file", "=", "old_file", ".", "save", "(", "editorWidget", ".", "text", "(", ")", ",", "new_path", ")", "# FIXME: Make this file replace the original in the open tab", "else", ":", "pass" ]
https://github.com/ninja-ide/ninja-ide/blob/87d91131bd19fdc3dcfd91eb97ad1e41c49c60c0/ninja_ide/gui/explorer/tabs/tree_projects_widget.py#L200-L232
FederatedAI/FATE
32540492623568ecd1afcb367360133616e02fa3
python/federatedml/statistic/data_overview.py
python
with_weight
(data_instances)
return False
[]
def with_weight(data_instances): first_entry = data_instances.first()[1] if isinstance(first_entry, Instance) and first_entry.weight is not None: return True return False
[ "def", "with_weight", "(", "data_instances", ")", ":", "first_entry", "=", "data_instances", ".", "first", "(", ")", "[", "1", "]", "if", "isinstance", "(", "first_entry", ",", "Instance", ")", "and", "first_entry", ".", "weight", "is", "not", "None", ":", "return", "True", "return", "False" ]
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/statistic/data_overview.py#L180-L184
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/urllib3/util/selectors.py
python
BaseSelector._key_from_fd
(self, fd)
Return the key associated with a given file descriptor. Return None if it is not found.
Return the key associated with a given file descriptor. Return None if it is not found.
[ "Return", "the", "key", "associated", "with", "a", "given", "file", "descriptor", ".", "Return", "None", "if", "it", "is", "not", "found", "." ]
def _key_from_fd(self, fd): """ Return the key associated with a given file descriptor. Return None if it is not found. """ try: return self._fd_to_key[fd] except KeyError: return None
[ "def", "_key_from_fd", "(", "self", ",", "fd", ")", ":", "try", ":", "return", "self", ".", "_fd_to_key", "[", "fd", "]", "except", "KeyError", ":", "return", "None" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/urllib3/util/selectors.py#L270-L276
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/ssl.py
python
_inet_paton
(ipname)
Try to convert an IP address to packed binary form Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 support.
Try to convert an IP address to packed binary form
[ "Try", "to", "convert", "an", "IP", "address", "to", "packed", "binary", "form" ]
def _inet_paton(ipname): """Try to convert an IP address to packed binary form Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 support. """ # inet_aton() also accepts strings like '1', '127.1', some also trailing # data like '127.0.0.1 whatever'. try: addr = _socket.inet_aton(ipname) except OSError: # not an IPv4 address pass else: if _socket.inet_ntoa(addr) == ipname: # only accept injective ipnames return addr else: # refuse for short IPv4 notation and additional trailing data raise ValueError( "{!r} is not a quad-dotted IPv4 address.".format(ipname) ) try: return _socket.inet_pton(_socket.AF_INET6, ipname) except OSError: raise ValueError("{!r} is neither an IPv4 nor an IP6 " "address.".format(ipname)) except AttributeError: # AF_INET6 not available pass raise ValueError("{!r} is not an IPv4 address.".format(ipname))
[ "def", "_inet_paton", "(", "ipname", ")", ":", "# inet_aton() also accepts strings like '1', '127.1', some also trailing", "# data like '127.0.0.1 whatever'.", "try", ":", "addr", "=", "_socket", ".", "inet_aton", "(", "ipname", ")", "except", "OSError", ":", "# not an IPv4 address", "pass", "else", ":", "if", "_socket", ".", "inet_ntoa", "(", "addr", ")", "==", "ipname", ":", "# only accept injective ipnames", "return", "addr", "else", ":", "# refuse for short IPv4 notation and additional trailing data", "raise", "ValueError", "(", "\"{!r} is not a quad-dotted IPv4 address.\"", ".", "format", "(", "ipname", ")", ")", "try", ":", "return", "_socket", ".", "inet_pton", "(", "_socket", ".", "AF_INET6", ",", "ipname", ")", "except", "OSError", ":", "raise", "ValueError", "(", "\"{!r} is neither an IPv4 nor an IP6 \"", "\"address.\"", ".", "format", "(", "ipname", ")", ")", "except", "AttributeError", ":", "# AF_INET6 not available", "pass", "raise", "ValueError", "(", "\"{!r} is not an IPv4 address.\"", ".", "format", "(", "ipname", ")", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/ssl.py#L324-L356
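Two details above are worth isolating: inet_aton is deliberately permissive (it accepts shorthand such as '127.1' and trailing data), so the round trip through inet_ntoa is what enforces the strict quad-dotted form, and AF_INET6 may be missing entirely on some platforms. A small stdlib sketch of both checks:

import socket

def is_strict_ipv4(name):
    try:
        packed = socket.inet_aton(name)
    except OSError:
        return False
    # Reject shorthand like '127.1': it packs fine but round-trips differently.
    return socket.inet_ntoa(packed) == name

def is_ipv6(name):
    try:
        socket.inet_pton(socket.AF_INET6, name)
        return True
    except (OSError, AttributeError):   # AF_INET6 may be unavailable
        return False

assert is_strict_ipv4("127.0.0.1") and not is_strict_ipv4("127.1")
assert is_ipv6("::1") and not is_ipv6("127.0.0.1")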
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/cpdp/v20190820/models.py
python
ApplyDeclareData.__init__
(self)
r""" :param MerchantId: 商户号 :type MerchantId: str :param TransactionId: 第三方指令编号 :type TransactionId: str :param Status: 受理状态 :type Status: str :param DeclareId: 申报流水号 :type DeclareId: str :param OriginalDeclareId: 原申报流水号 注意:此字段可能返回 null,表示取不到有效值。 :type OriginalDeclareId: str :param PayerId: 付款人ID :type PayerId: str
r""" :param MerchantId: 商户号 :type MerchantId: str :param TransactionId: 第三方指令编号 :type TransactionId: str :param Status: 受理状态 :type Status: str :param DeclareId: 申报流水号 :type DeclareId: str :param OriginalDeclareId: 原申报流水号 注意:此字段可能返回 null,表示取不到有效值。 :type OriginalDeclareId: str :param PayerId: 付款人ID :type PayerId: str
[ "r", ":", "param", "MerchantId", ":", "商户号", ":", "type", "MerchantId", ":", "str", ":", "param", "TransactionId", ":", "第三方指令编号", ":", "type", "TransactionId", ":", "str", ":", "param", "Status", ":", "受理状态", ":", "type", "Status", ":", "str", ":", "param", "DeclareId", ":", "申报流水号", ":", "type", "DeclareId", ":", "str", ":", "param", "OriginalDeclareId", ":", "原申报流水号", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "OriginalDeclareId", ":", "str", ":", "param", "PayerId", ":", "付款人ID", ":", "type", "PayerId", ":", "str" ]
def __init__(self): r""" :param MerchantId: 商户号 :type MerchantId: str :param TransactionId: 第三方指令编号 :type TransactionId: str :param Status: 受理状态 :type Status: str :param DeclareId: 申报流水号 :type DeclareId: str :param OriginalDeclareId: 原申报流水号 注意:此字段可能返回 null,表示取不到有效值。 :type OriginalDeclareId: str :param PayerId: 付款人ID :type PayerId: str """ self.MerchantId = None self.TransactionId = None self.Status = None self.DeclareId = None self.OriginalDeclareId = None self.PayerId = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "MerchantId", "=", "None", "self", ".", "TransactionId", "=", "None", "self", ".", "Status", "=", "None", "self", ".", "DeclareId", "=", "None", "self", ".", "OriginalDeclareId", "=", "None", "self", ".", "PayerId", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/cpdp/v20190820/models.py#L1052-L1073
sqlalchemy/sqlalchemy
eb716884a4abcabae84a6aaba105568e925b7d27
lib/sqlalchemy/orm/attributes.py
python
History.has_changes
(self)
return bool(self.added or self.deleted)
Return True if this :class:`.History` has changes.
Return True if this :class:`.History` has changes.
[ "Return", "True", "if", "this", ":", "class", ":", ".", "History", "has", "changes", "." ]
def has_changes(self): """Return True if this :class:`.History` has changes.""" return bool(self.added or self.deleted)
[ "def", "has_changes", "(", "self", ")", ":", "return", "bool", "(", "self", ".", "added", "or", "self", ".", "deleted", ")" ]
https://github.com/sqlalchemy/sqlalchemy/blob/eb716884a4abcabae84a6aaba105568e925b7d27/lib/sqlalchemy/orm/attributes.py#L1828-L1831
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/graphs/generators/random.py
python
growing_subtrees
(T, k)
return S
r""" Return a list of the vertex sets of ``n`` randomly chosen subtrees of ``T``. For a tree of order `n`, the collection contains `n` subtrees with maximum order `k` and average order `\frac{k + 1}{2}`. This method is part of :meth:`~sage.graphs.generators.random.RandomChordalGraph`. ALGORITHM: For each subtree `T_i`, the algorithm picks a size `k_i` randomly from `[1,k]`. Then a random node of `T` is chosen as the first node of `T_i`. In each of the subsequent `k_i - 1` iterations, it picks a random node in the neighborhood of `T_i` and adds it to `T_i`. See [SHET2018]_ for more details. INPUT: - ``T`` -- a tree - ``k`` -- a strictly positive integer; maximum size of a subtree EXAMPLES:: sage: from sage.graphs.generators.random import growing_subtrees sage: T = graphs.RandomTree(10) sage: S = growing_subtrees(T, 5) sage: len(S) 10
r""" Return a list of the vertex sets of ``n`` randomly chosen subtrees of ``T``.
[ "r", "Return", "a", "list", "of", "the", "vertex", "sets", "of", "n", "randomly", "chosen", "subtrees", "of", "T", "." ]
def growing_subtrees(T, k): r""" Return a list of the vertex sets of ``n`` randomly chosen subtrees of ``T``. For a tree of order `n`, the collection contains `n` subtrees with maximum order `k` and average order `\frac{k + 1}{2}`. This method is part of :meth:`~sage.graphs.generators.random.RandomChordalGraph`. ALGORITHM: For each subtree `T_i`, the algorithm picks a size `k_i` randomly from `[1,k]`. Then a random node of `T` is chosen as the first node of `T_i`. In each of the subsequent `k_i - 1` iterations, it picks a random node in the neighborhood of `T_i` and adds it to `T_i`. See [SHET2018]_ for more details. INPUT: - ``T`` -- a tree - ``k`` -- a strictly positive integer; maximum size of a subtree EXAMPLES:: sage: from sage.graphs.generators.random import growing_subtrees sage: T = graphs.RandomTree(10) sage: S = growing_subtrees(T, 5) sage: len(S) 10 """ from sage.misc.prandom import choice n = T.order() S = [] for _ in range(n): ki = randint(1, k) if ki == n: Vi = frozenset(T) else: x = T.random_vertex() Ti = set([x]) neighbors = set(T.neighbor_iterator(x)) for j in range(ki - 1): # Select a random neighbor z outside of Ti and add it to Ti z = choice(tuple(neighbors)) Ti.add(z) neighbors.update(y for y in T.neighbor_iterator(z) if y not in Ti) Vi = frozenset(Ti) S.append(Vi) return S
[ "def", "growing_subtrees", "(", "T", ",", "k", ")", ":", "from", "sage", ".", "misc", ".", "prandom", "import", "choice", "n", "=", "T", ".", "order", "(", ")", "S", "=", "[", "]", "for", "_", "in", "range", "(", "n", ")", ":", "ki", "=", "randint", "(", "1", ",", "k", ")", "if", "ki", "==", "n", ":", "Vi", "=", "frozenset", "(", "T", ")", "else", ":", "x", "=", "T", ".", "random_vertex", "(", ")", "Ti", "=", "set", "(", "[", "x", "]", ")", "neighbors", "=", "set", "(", "T", ".", "neighbor_iterator", "(", "x", ")", ")", "for", "j", "in", "range", "(", "ki", "-", "1", ")", ":", "# Select a random neighbor z outside of Ti and add it to Ti", "z", "=", "choice", "(", "tuple", "(", "neighbors", ")", ")", "Ti", ".", "add", "(", "z", ")", "neighbors", ".", "update", "(", "y", "for", "y", "in", "T", ".", "neighbor_iterator", "(", "z", ")", "if", "y", "not", "in", "Ti", ")", "Vi", "=", "frozenset", "(", "Ti", ")", "S", ".", "append", "(", "Vi", ")", "return", "S" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/graphs/generators/random.py#L857-L909
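The subtree-growing step generalizes beyond Sage: keep a frontier of neighbors of the current subtree and repeatedly absorb a random frontier node, which preserves connectedness by construction. A library-free sketch over an adjacency-dict tree (a fixed path graph stands in for Sage's RandomTree):

import random

def grow_subtree(adj, k):
    nodes = list(adj)
    ki = random.randint(1, k)          # target size, drawn from [1, k]
    x = random.choice(nodes)           # random starting node
    subtree = {x}
    frontier = set(adj[x])
    for _ in range(ki - 1):
        if not frontier:               # whole tree absorbed
            break
        z = random.choice(tuple(frontier))
        subtree.add(z)
        frontier.discard(z)
        # Only neighbors of the subtree enter the frontier, so the
        # grown set always induces a connected subtree.
        frontier.update(y for y in adj[z] if y not in subtree)
    return frozenset(subtree)

# Path graph 0-1-2-3-4 as adjacency lists.
adj = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3]}
subtrees = [grow_subtree(adj, 3) for _ in range(5)]
assert all(1 <= len(s) <= 3 for s in subtrees)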
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/whoosh/src/whoosh/matching.py
python
FilterMatcher.__init__
(self, child, ids, exclude=False, boost=1.0)
:param child: the child matcher. :param ids: a set of IDs to filter by. :param exclude: by default, only IDs from the wrapped matcher that are **in** the set are used. If this argument is True, only IDs from the wrapped matcher that are **not in** the set are used.
:param child: the child matcher. :param ids: a set of IDs to filter by. :param exclude: by default, only IDs from the wrapped matcher that are **in** the set are used. If this argument is True, only IDs from the wrapped matcher that are **not in** the set are used.
[ ":", "param", "child", ":", "the", "child", "matcher", ".", ":", "param", "ids", ":", "a", "set", "of", "IDs", "to", "filter", "by", ".", ":", "param", "exclude", ":", "by", "default", "only", "IDs", "from", "the", "wrapped", "matcher", "that", "are", "**", "in", "**", "the", "set", "are", "used", ".", "If", "this", "argument", "is", "True", "only", "IDs", "from", "the", "wrapped", "matcher", "that", "are", "**", "not", "in", "**", "the", "set", "are", "used", "." ]
def __init__(self, child, ids, exclude=False, boost=1.0): """ :param child: the child matcher. :param ids: a set of IDs to filter by. :param exclude: by default, only IDs from the wrapped matcher that are **in** the set are used. If this argument is True, only IDs from the wrapped matcher that are **not in** the set are used. """ super(FilterMatcher, self).__init__(child) self._ids = ids self._exclude = exclude self.boost = boost self._find_next()
[ "def", "__init__", "(", "self", ",", "child", ",", "ids", ",", "exclude", "=", "False", ",", "boost", "=", "1.0", ")", ":", "super", "(", "FilterMatcher", ",", "self", ")", ".", "__init__", "(", "child", ")", "self", ".", "_ids", "=", "ids", "self", ".", "_exclude", "=", "exclude", "self", ".", "boost", "=", "boost", "self", ".", "_find_next", "(", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/whoosh/src/whoosh/matching.py#L737-L750
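The exclude flag documented in the FilterMatcher record above simply inverts the membership test; here is a hedged stand-alone illustration of just that semantics (filter_ids is a hypothetical helper, not part of Whoosh):

    def filter_ids(child_ids, ids, exclude=False):
        """Yield IDs that are in `ids`, or, with exclude=True, not in it."""
        for doc_id in child_ids:
            if (doc_id in ids) != exclude:      # XOR of membership and the flag
                yield doc_id

    docs = [1, 2, 3, 4, 5]
    print(list(filter_ids(docs, {2, 4})))                 # [2, 4]
    print(list(filter_ids(docs, {2, 4}, exclude=True)))   # [1, 3, 5]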
libertysoft3/saidit
271c7d03adb369f82921d811360b00812e42da24
r2/r2/controllers/api.py
python
ApiController.POST_subreddit_stylesheet
(self, form, jquery, stylesheet_contents = '', prevstyle='', op='save', reason=None)
Update a subreddit's stylesheet. `op` should be `save` to update the contents of the stylesheet.
Update a subreddit's stylesheet.
[ "Update", "a", "subreddit", "s", "stylesheet", "." ]
def POST_subreddit_stylesheet(self, form, jquery, stylesheet_contents = '', prevstyle='', op='save', reason=None): """Update a subreddit's stylesheet. `op` should be `save` to update the contents of the stylesheet. """ if g.css_killswitch: return abort(403, 'forbidden') css_errors, parsed = c.site.parse_css(stylesheet_contents) # The hook passes errors back by setting them on the form. hooks.get_hook('subreddit.css.validate').call( request=request, form=form, op=op, stylesheet_contents=stylesheet_contents, parsed_stylesheet=parsed, css_errors=css_errors, subreddit=c.site, user=c.user ) if css_errors: error_items = [CssError(x).render(style='html') for x in css_errors] form.set_text(".status", _('validation errors')) form.set_html(".errors ul", ''.join(error_items)) form.find('.errors').show() c.errors.add(errors.BAD_CSS, field="stylesheet_contents") form.has_errors("stylesheet_contents", errors.BAD_CSS) return else: form.find('.errors').hide() form.set_html(".errors ul", '') # Don't allow users in timeout to modify the stylesheet VNotInTimeout().run(action_name="editsettings", details_text="%s_stylesheet" % op, target=c.site) if op == 'save' and not form.has_error(): wr = c.site.change_css(stylesheet_contents, parsed, reason=reason) form.find('.errors').hide() form.set_text(".status", _('saved')) form.set_html(".errors ul", "") jquery.apply_stylesheet(parsed) if op == 'preview': # try to find a link to use, otherwise give up and # return links = SubredditStylesheet.find_preview_links(c.site) if links: jquery('#preview-table').show() # do a regular link jquery('#preview_link_normal').html( SubredditStylesheet.rendered_link( links, media='off', compress=False)) # now do one with media jquery('#preview_link_media').html( SubredditStylesheet.rendered_link( links, media='on', compress=False)) # do a compressed link jquery('#preview_link_compressed').html( SubredditStylesheet.rendered_link( links, media='off', compress=True)) # do a stickied link jquery('#preview_link_stickied').html( SubredditStylesheet.rendered_link( links, media='off', compress=False, stickied=True)) # and do a comment comments = SubredditStylesheet.find_preview_comments(c.site) if comments: jquery('#preview_comment').html( SubredditStylesheet.rendered_comment(comments)) jquery('#preview_comment_gilded').html( SubredditStylesheet.rendered_comment( comments, gilded=True))
[ "def", "POST_subreddit_stylesheet", "(", "self", ",", "form", ",", "jquery", ",", "stylesheet_contents", "=", "''", ",", "prevstyle", "=", "''", ",", "op", "=", "'save'", ",", "reason", "=", "None", ")", ":", "if", "g", ".", "css_killswitch", ":", "return", "abort", "(", "403", ",", "'forbidden'", ")", "css_errors", ",", "parsed", "=", "c", ".", "site", ".", "parse_css", "(", "stylesheet_contents", ")", "# The hook passes errors back by setting them on the form.", "hooks", ".", "get_hook", "(", "'subreddit.css.validate'", ")", ".", "call", "(", "request", "=", "request", ",", "form", "=", "form", ",", "op", "=", "op", ",", "stylesheet_contents", "=", "stylesheet_contents", ",", "parsed_stylesheet", "=", "parsed", ",", "css_errors", "=", "css_errors", ",", "subreddit", "=", "c", ".", "site", ",", "user", "=", "c", ".", "user", ")", "if", "css_errors", ":", "error_items", "=", "[", "CssError", "(", "x", ")", ".", "render", "(", "style", "=", "'html'", ")", "for", "x", "in", "css_errors", "]", "form", ".", "set_text", "(", "\".status\"", ",", "_", "(", "'validation errors'", ")", ")", "form", ".", "set_html", "(", "\".errors ul\"", ",", "''", ".", "join", "(", "error_items", ")", ")", "form", ".", "find", "(", "'.errors'", ")", ".", "show", "(", ")", "c", ".", "errors", ".", "add", "(", "errors", ".", "BAD_CSS", ",", "field", "=", "\"stylesheet_contents\"", ")", "form", ".", "has_errors", "(", "\"stylesheet_contents\"", ",", "errors", ".", "BAD_CSS", ")", "return", "else", ":", "form", ".", "find", "(", "'.errors'", ")", ".", "hide", "(", ")", "form", ".", "set_html", "(", "\".errors ul\"", ",", "''", ")", "# Don't allow users in timeout to modify the stylesheet", "VNotInTimeout", "(", ")", ".", "run", "(", "action_name", "=", "\"editsettings\"", ",", "details_text", "=", "\"%s_stylesheet\"", "%", "op", ",", "target", "=", "c", ".", "site", ")", "if", "op", "==", "'save'", "and", "not", "form", ".", "has_error", "(", ")", ":", "wr", "=", "c", ".", "site", ".", "change_css", "(", "stylesheet_contents", ",", "parsed", ",", "reason", "=", "reason", ")", "form", ".", "find", "(", "'.errors'", ")", ".", "hide", "(", ")", "form", ".", "set_text", "(", "\".status\"", ",", "_", "(", "'saved'", ")", ")", "form", ".", "set_html", "(", "\".errors ul\"", ",", "\"\"", ")", "jquery", ".", "apply_stylesheet", "(", "parsed", ")", "if", "op", "==", "'preview'", ":", "# try to find a link to use, otherwise give up and", "# return", "links", "=", "SubredditStylesheet", ".", "find_preview_links", "(", "c", ".", "site", ")", "if", "links", ":", "jquery", "(", "'#preview-table'", ")", ".", "show", "(", ")", "# do a regular link", "jquery", "(", "'#preview_link_normal'", ")", ".", "html", "(", "SubredditStylesheet", ".", "rendered_link", "(", "links", ",", "media", "=", "'off'", ",", "compress", "=", "False", ")", ")", "# now do one with media", "jquery", "(", "'#preview_link_media'", ")", ".", "html", "(", "SubredditStylesheet", ".", "rendered_link", "(", "links", ",", "media", "=", "'on'", ",", "compress", "=", "False", ")", ")", "# do a compressed link", "jquery", "(", "'#preview_link_compressed'", ")", ".", "html", "(", "SubredditStylesheet", ".", "rendered_link", "(", "links", ",", "media", "=", "'off'", ",", "compress", "=", "True", ")", ")", "# do a stickied link", "jquery", "(", "'#preview_link_stickied'", ")", ".", "html", "(", "SubredditStylesheet", ".", "rendered_link", "(", "links", ",", "media", "=", "'off'", ",", "compress", "=", "False", ",", "stickied", "=", "True", ")", ")", "# and do a comment", 
"comments", "=", "SubredditStylesheet", ".", "find_preview_comments", "(", "c", ".", "site", ")", "if", "comments", ":", "jquery", "(", "'#preview_comment'", ")", ".", "html", "(", "SubredditStylesheet", ".", "rendered_comment", "(", "comments", ")", ")", "jquery", "(", "'#preview_comment_gilded'", ")", ".", "html", "(", "SubredditStylesheet", ".", "rendered_comment", "(", "comments", ",", "gilded", "=", "True", ")", ")" ]
https://github.com/libertysoft3/saidit/blob/271c7d03adb369f82921d811360b00812e42da24/r2/r2/controllers/api.py#L2417-L2498
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1_service_list.py
python
V1ServiceList.__ne__
(self, other)
return self.to_dict() != other.to_dict()
Returns true if both objects are not equal
Returns true if both objects are not equal
[ "Returns", "true", "if", "both", "objects", "are", "not", "equal" ]
def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ServiceList): return True return self.to_dict() != other.to_dict()
[ "def", "__ne__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "V1ServiceList", ")", ":", "return", "True", "return", "self", ".", "to_dict", "(", ")", "!=", "other", ".", "to_dict", "(", ")" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_service_list.py#L200-L205
CharlesBlonde/libpurecoollink
a91362c57a0bc4126279c8c51c407dd713b08e10
libpurecoollink/dyson_360_eye.py
python
Dyson360Eye.on_message
(client, userdata, msg)
Set function Callback when message received.
Set function Callback when message received.
[ "Set", "function", "Callback", "when", "message", "received", "." ]
def on_message(client, userdata, msg): # pylint: disable=unused-argument """Set function Callback when message received.""" payload = msg.payload.decode("utf-8") device_msg = None if Dyson360EyeState.is_state_message(payload): device_msg = Dyson360EyeState(payload) if not userdata.device_available: userdata.state_data_available() userdata.state = device_msg elif Dyson360EyeMapGlobal.is_map_global(payload): device_msg = Dyson360EyeMapGlobal(payload) elif Dyson360EyeTelemetryData.is_telemetry_data(payload): device_msg = Dyson360EyeTelemetryData(payload) elif Dyson360EyeMapGrid.is_map_grid(payload): device_msg = Dyson360EyeMapGrid(payload) elif Dyson360EyeMapData.is_map_data(payload): device_msg = Dyson360EyeMapData(payload) elif Dyson360Goodbye.is_goodbye_message(payload): device_msg = Dyson360Goodbye(payload) else: _LOGGER.warning(payload) if device_msg: Dyson360Eye.call_callback_functions(userdata.callback_message, device_msg)
[ "def", "on_message", "(", "client", ",", "userdata", ",", "msg", ")", ":", "# pylint: disable=unused-argument", "payload", "=", "msg", ".", "payload", ".", "decode", "(", "\"utf-8\"", ")", "device_msg", "=", "None", "if", "Dyson360EyeState", ".", "is_state_message", "(", "payload", ")", ":", "device_msg", "=", "Dyson360EyeState", "(", "payload", ")", "if", "not", "userdata", ".", "device_available", ":", "userdata", ".", "state_data_available", "(", ")", "userdata", ".", "state", "=", "device_msg", "elif", "Dyson360EyeMapGlobal", ".", "is_map_global", "(", "payload", ")", ":", "device_msg", "=", "Dyson360EyeMapGlobal", "(", "payload", ")", "elif", "Dyson360EyeTelemetryData", ".", "is_telemetry_data", "(", "payload", ")", ":", "device_msg", "=", "Dyson360EyeTelemetryData", "(", "payload", ")", "elif", "Dyson360EyeMapGrid", ".", "is_map_grid", "(", "payload", ")", ":", "device_msg", "=", "Dyson360EyeMapGrid", "(", "payload", ")", "elif", "Dyson360EyeMapData", ".", "is_map_data", "(", "payload", ")", ":", "device_msg", "=", "Dyson360EyeMapData", "(", "payload", ")", "elif", "Dyson360Goodbye", ".", "is_goodbye_message", "(", "payload", ")", ":", "device_msg", "=", "Dyson360Goodbye", "(", "payload", ")", "else", ":", "_LOGGER", ".", "warning", "(", "payload", ")", "if", "device_msg", ":", "Dyson360Eye", ".", "call_callback_functions", "(", "userdata", ".", "callback_message", ",", "device_msg", ")" ]
https://github.com/CharlesBlonde/libpurecoollink/blob/a91362c57a0bc4126279c8c51c407dd713b08e10/libpurecoollink/dyson_360_eye.py#L109-L134
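The on_message handler above is an if/elif chain of is_* predicates that pick a message class; the same dispatch can be written table-driven, as sketched below. The predicates here are toys (the real ones inspect the MQTT JSON payload), and classify is a hypothetical name, not part of libpurecoollink:

    def is_state(payload):                   # stand-in for Dyson360EyeState.is_state_message
        return payload.startswith("STATE")

    def is_goodbye(payload):                 # stand-in for Dyson360Goodbye.is_goodbye_message
        return payload.startswith("GOODBYE")

    HANDLERS = [(is_state, "state"), (is_goodbye, "goodbye")]

    def classify(payload):
        for predicate, kind in HANDLERS:     # first matching predicate wins
            if predicate(payload):
                return kind
        return None                          # unknown payloads are only logged upstream

    print(classify("STATE ..."))             # state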
wizyoung/googletranslate.popclipext
a3c465685a5a75213e2ec8517eb98d336984bc50
src/httpx/_utils.py
python
peek_filelike_length
(stream: typing.IO)
Given a file-like stream object, return its length in number of bytes without reading it into memory.
Given a file-like stream object, return its length in number of bytes without reading it into memory.
[ "Given", "a", "file", "-", "like", "stream", "object", "return", "its", "length", "in", "number", "of", "bytes", "without", "reading", "it", "into", "memory", "." ]
def peek_filelike_length(stream: typing.IO) -> int: """ Given a file-like stream object, return its length in number of bytes without reading it into memory. """ try: # Is it an actual file? fd = stream.fileno() except OSError: # No... Maybe it's something that supports random access, like `io.BytesIO`? try: # Assuming so, go to end of stream to figure out its length, # then put it back in place. offset = stream.tell() length = stream.seek(0, os.SEEK_END) stream.seek(offset) except OSError: # Not even that? Sorry, we're doomed... raise else: return length else: # Yup, seems to be an actual file. return os.fstat(fd).st_size
[ "def", "peek_filelike_length", "(", "stream", ":", "typing", ".", "IO", ")", "->", "int", ":", "try", ":", "# Is it an actual file?", "fd", "=", "stream", ".", "fileno", "(", ")", "except", "OSError", ":", "# No... Maybe it's something that supports random access, like `io.BytesIO`?", "try", ":", "# Assuming so, go to end of stream to figure out its length,", "# then put it back in place.", "offset", "=", "stream", ".", "tell", "(", ")", "length", "=", "stream", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "stream", ".", "seek", "(", "offset", ")", "except", "OSError", ":", "# Not even that? Sorry, we're doomed...", "raise", "else", ":", "return", "length", "else", ":", "# Yup, seems to be an actual file.", "return", "os", ".", "fstat", "(", "fd", ")", ".", "st_size" ]
https://github.com/wizyoung/googletranslate.popclipext/blob/a3c465685a5a75213e2ec8517eb98d336984bc50/src/httpx/_utils.py#L324-L347
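A quick usage sketch covering both branches of peek_filelike_length above: an in-memory io.BytesIO (measured via seek/tell, with the position restored) and a real file (measured via os.fstat). The import reaches into httpx's private _utils module, exactly as the record's path suggests, so treat it as version-dependent:

    import io
    import tempfile
    from httpx._utils import peek_filelike_length  # private module; may move between versions

    buf = io.BytesIO(b"hello world")
    buf.seek(3)
    print(peek_filelike_length(buf))   # 11 -- found by seeking to the end
    print(buf.tell())                  # 3  -- original position was restored

    with tempfile.TemporaryFile() as f:
        f.write(b"x" * 1024)
        f.flush()
        print(peek_filelike_length(f)) # 1024 -- taken from os.fstat on the descriptor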
facelessuser/ColorHelper
cfed17c35dbae4db49a14165ef222407c48a3014
ch_preview.py
python
ColorHelperListener.should_update
(self, view)
return force_update
Check if an update should be performed.
Check if an update should be performed.
[ "Check", "if", "an", "update", "should", "be", "performed", "." ]
def should_update(self, view): """Check if an update should be performed.""" force_update = False rules = view.settings().get('color_helper.scan', None) if rules: last_updated = rules.get('last_updated', None) if last_updated is None or last_updated < ch_last_updated: force_update = True file_name = view.file_name() ext = os.path.splitext(file_name)[1].lower() if file_name is not None else None old_ext = rules.get('current_ext') if ext != old_ext: force_update = True syntax = os.path.splitext(view.settings().get('syntax').replace('Packages/', '', 1))[0] old_syntax = rules.get("current_syntax") if old_syntax is None or old_syntax != syntax: force_update = True else: force_update = True return force_update
[ "def", "should_update", "(", "self", ",", "view", ")", ":", "force_update", "=", "False", "rules", "=", "view", ".", "settings", "(", ")", ".", "get", "(", "'color_helper.scan'", ",", "None", ")", "if", "rules", ":", "last_updated", "=", "rules", ".", "get", "(", "'last_updated'", ",", "None", ")", "if", "last_updated", "is", "None", "or", "last_updated", "<", "ch_last_updated", ":", "force_update", "=", "True", "file_name", "=", "view", ".", "file_name", "(", ")", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "[", "1", "]", ".", "lower", "(", ")", "if", "file_name", "is", "not", "None", "else", "None", "old_ext", "=", "rules", ".", "get", "(", "'current_ext'", ")", "if", "ext", "!=", "old_ext", ":", "force_update", "=", "True", "syntax", "=", "os", ".", "path", ".", "splitext", "(", "view", ".", "settings", "(", ")", ".", "get", "(", "'syntax'", ")", ".", "replace", "(", "'Packages/'", ",", "''", ",", "1", ")", ")", "[", "0", "]", "old_syntax", "=", "rules", ".", "get", "(", "\"current_syntax\"", ")", "if", "old_syntax", "is", "None", "or", "old_syntax", "!=", "syntax", ":", "force_update", "=", "True", "else", ":", "force_update", "=", "True", "return", "force_update" ]
https://github.com/facelessuser/ColorHelper/blob/cfed17c35dbae4db49a14165ef222407c48a3014/ch_preview.py#L780-L800
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/numbers.py
python
Real.__le__
(self, other)
self <= other
self <= other
[ "self", "<", "=", "other" ]
def __le__(self, other): """self <= other""" raise NotImplementedError
[ "def", "__le__", "(", "self", ",", "other", ")", ":", "raise", "NotImplementedError" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/numbers.py#L241-L243
angr/angr-management
60ae5fa483e74810fb3a3da8d37b00034d7fefab
angrmanagement/plugins/base_plugin.py
python
BasePlugin.on_workspace_initialized
(self, workspace: 'Workspace')
A handler that is called right after a workspace is initialized.
A handler that is called right after a workspace is initialized.
[ "A", "handler", "that", "is", "called", "right", "after", "a", "workspace", "is", "initialized", "." ]
def on_workspace_initialized(self, workspace: 'Workspace'): """ A handler that is called right after a workspace is initialized. """
[ "def", "on_workspace_initialized", "(", "self", ",", "workspace", ":", "'Workspace'", ")", ":" ]
https://github.com/angr/angr-management/blob/60ae5fa483e74810fb3a3da8d37b00034d7fefab/angrmanagement/plugins/base_plugin.py#L61-L64
cisco/mindmeld
809c36112e9ea8019fe29d54d136ca14eb4fd8db
mindmeld/resource_loader.py
python
Hasher._get_algorithm
(self)
return self._algorithm
Getter for algorithm property. Returns: str: the hashing algorithm
Getter for algorithm property.
[ "Getter", "for", "algorithm", "property", "." ]
def _get_algorithm(self): """Getter for algorithm property. Returns: str: the hashing algorithm """ return self._algorithm
[ "def", "_get_algorithm", "(", "self", ")", ":", "return", "self", ".", "_algorithm" ]
https://github.com/cisco/mindmeld/blob/809c36112e9ea8019fe29d54d136ca14eb4fd8db/mindmeld/resource_loader.py#L956-L962
Nuitka/Nuitka
39262276993757fa4e299f497654065600453fc9
nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Taskmaster.py
python
Task.failed
(self)
Default action when a task fails: stop the build. Note: Although this function is normally invoked on nodes in the executing state, it might also be invoked on up-to-date nodes when using Configure().
Default action when a task fails: stop the build.
[ "Default", "action", "when", "a", "task", "fails", ":", "stop", "the", "build", "." ]
def failed(self): """ Default action when a task fails: stop the build. Note: Although this function is normally invoked on nodes in the executing state, it might also be invoked on up-to-date nodes when using Configure(). """ self.fail_stop()
[ "def", "failed", "(", "self", ")", ":", "self", ".", "fail_stop", "(", ")" ]
https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Taskmaster.py#L303-L311
tensorlayer/tensorlayer
cb4eb896dd063e650ef22533ed6fa6056a71cad5
tensorlayer/layers/normalization.py
python
LocalResponseNorm.forward
(self, inputs)
return outputs
inputs : :class:`tf.Tensor` The input tensor, which must have a 4D shape.
inputs : :class:`tf.Tensor` The input tensor, which must have a 4D shape.
[ "inputs", ":", ":", "class", ":", "tf", ".", "Tensor", "The", "input", "tensor", "which", "must", "have", "a", "4D", "shape", "." ]
def forward(self, inputs): """ inputs : :class:`tf.Tensor` The input tensor, which must have a 4D shape. """ outputs = tf.nn.lrn(inputs, depth_radius=self.depth_radius, bias=self.bias, alpha=self.alpha, beta=self.beta) return outputs
[ "def", "forward", "(", "self", ",", "inputs", ")", ":", "outputs", "=", "tf", ".", "nn", ".", "lrn", "(", "inputs", ",", "depth_radius", "=", "self", ".", "depth_radius", ",", "bias", "=", "self", ".", "bias", ",", "alpha", "=", "self", ".", "alpha", ",", "beta", "=", "self", ".", "beta", ")", "return", "outputs" ]
https://github.com/tensorlayer/tensorlayer/blob/cb4eb896dd063e650ef22533ed6fa6056a71cad5/tensorlayer/layers/normalization.py#L73-L79
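For context, tf.nn.lrn normalizes each activation by a power of a sum of squares over neighboring channels: output = input / (bias + alpha * sqr_sum) ** beta, with sqr_sum taken over a window of depth_radius on either side of each channel. A minimal standalone check of the call used above, assuming TensorFlow 2.x is installed:

    import tensorflow as tf

    x = tf.random.normal([1, 4, 4, 8])   # NHWC; normalization runs over the last axis
    y = tf.nn.lrn(x, depth_radius=2, bias=1.0, alpha=1e-4, beta=0.75)
    print(y.shape)                       # (1, 4, 4, 8) -- shape is preserved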
calmevtime/DCTNet
bd7c669b478e47fde230119045133d10e135de97
segmentation/mmdet/core/bbox/samplers/base_sampler.py
python
BaseSampler.sample
(self, assign_result, bboxes, gt_bboxes, gt_labels=None, **kwargs)
return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags)
Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: :obj:`SamplingResult`: Sampling result.
Sample positive and negative bboxes.
[ "Sample", "positive", "and", "negative", "bboxes", "." ]
def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: :obj:`SamplingResult`: Sampling result. """ bboxes = bboxes[:, :4] gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals: bboxes = torch.cat([gt_bboxes, bboxes], dim=0) assign_result.add_gt_(gt_labels) gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs) # We found that sampled indices have duplicated items occasionally. # (may be a bug of PyTorch) pos_inds = pos_inds.unique() num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes=bboxes, **kwargs) neg_inds = neg_inds.unique() return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags)
[ "def", "sample", "(", "self", ",", "assign_result", ",", "bboxes", ",", "gt_bboxes", ",", "gt_labels", "=", "None", ",", "*", "*", "kwargs", ")", ":", "bboxes", "=", "bboxes", "[", ":", ",", ":", "4", "]", "gt_flags", "=", "bboxes", ".", "new_zeros", "(", "(", "bboxes", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "torch", ".", "uint8", ")", "if", "self", ".", "add_gt_as_proposals", ":", "bboxes", "=", "torch", ".", "cat", "(", "[", "gt_bboxes", ",", "bboxes", "]", ",", "dim", "=", "0", ")", "assign_result", ".", "add_gt_", "(", "gt_labels", ")", "gt_ones", "=", "bboxes", ".", "new_ones", "(", "gt_bboxes", ".", "shape", "[", "0", "]", ",", "dtype", "=", "torch", ".", "uint8", ")", "gt_flags", "=", "torch", ".", "cat", "(", "[", "gt_ones", ",", "gt_flags", "]", ")", "num_expected_pos", "=", "int", "(", "self", ".", "num", "*", "self", ".", "pos_fraction", ")", "pos_inds", "=", "self", ".", "pos_sampler", ".", "_sample_pos", "(", "assign_result", ",", "num_expected_pos", ",", "bboxes", "=", "bboxes", ",", "*", "*", "kwargs", ")", "# We found that sampled indices have duplicated items occasionally.", "# (may be a bug of PyTorch)", "pos_inds", "=", "pos_inds", ".", "unique", "(", ")", "num_sampled_pos", "=", "pos_inds", ".", "numel", "(", ")", "num_expected_neg", "=", "self", ".", "num", "-", "num_sampled_pos", "if", "self", ".", "neg_pos_ub", ">=", "0", ":", "_pos", "=", "max", "(", "1", ",", "num_sampled_pos", ")", "neg_upper_bound", "=", "int", "(", "self", ".", "neg_pos_ub", "*", "_pos", ")", "if", "num_expected_neg", ">", "neg_upper_bound", ":", "num_expected_neg", "=", "neg_upper_bound", "neg_inds", "=", "self", ".", "neg_sampler", ".", "_sample_neg", "(", "assign_result", ",", "num_expected_neg", ",", "bboxes", "=", "bboxes", ",", "*", "*", "kwargs", ")", "neg_inds", "=", "neg_inds", ".", "unique", "(", ")", "return", "SamplingResult", "(", "pos_inds", ",", "neg_inds", ",", "bboxes", ",", "gt_bboxes", ",", "assign_result", ",", "gt_flags", ")" ]
https://github.com/calmevtime/DCTNet/blob/bd7c669b478e47fde230119045133d10e135de97/segmentation/mmdet/core/bbox/samplers/base_sampler.py#L31-L78
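The subtle part of BaseSampler.sample above is the budget arithmetic: positives are capped by pos_fraction, negatives fill the remainder, and neg_pos_ub optionally caps the negative:positive ratio. A torch-free sketch of just that arithmetic (sampling_budget is a hypothetical helper, not part of mmdet):

    def sampling_budget(num, pos_fraction, num_pos_available, neg_pos_ub=-1):
        """Return (num_pos, num_neg) targets mirroring the logic above."""
        num_expected_pos = int(num * pos_fraction)
        num_pos = min(num_expected_pos, num_pos_available)  # sampler may find fewer positives
        num_neg = num - num_pos                             # negatives fill the rest
        if neg_pos_ub >= 0:                                 # optional neg:pos ratio cap
            num_neg = min(num_neg, neg_pos_ub * max(1, num_pos))
        return num_pos, num_neg

    print(sampling_budget(256, 0.5, num_pos_available=10))                # (10, 246)
    print(sampling_budget(256, 0.5, num_pos_available=10, neg_pos_ub=3))  # (10, 30)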
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/pyplot.py
python
colormaps
()
return sorted(cm.cmap_d.keys())
Matplotlib provides a number of colormaps, and others can be added using :func:`~matplotlib.cm.register_cmap`. This function documents the built-in colormaps, and will also return a list of all registered colormaps if called. You can set the colormap for an image, pcolor, scatter, etc, using a keyword argument:: imshow(X, cmap=cm.hot) or using the :func:`set_cmap` function:: imshow(X) pyplot.set_cmap('hot') pyplot.set_cmap('jet') In interactive mode, :func:`set_cmap` will update the colormap post-hoc, allowing you to see which one works best for your data. All built-in colormaps can be reversed by appending ``_r``: For instance, ``gray_r`` is the reverse of ``gray``. There are several common color schemes used in visualization: Sequential schemes for unipolar data that progresses from low to high Diverging schemes for bipolar data that emphasizes positive or negative deviations from a central value Cyclic schemes meant for plotting values that wrap around at the endpoints, such as phase angle, wind direction, or time of day Qualitative schemes for nominal data that has no inherent ordering, where color is used only to distinguish categories The base colormaps are derived from those of the same name provided with Matlab: ========= ======================================================= Colormap Description ========= ======================================================= autumn sequential linearly-increasing shades of red-orange-yellow bone sequential increasing black-white color map with a tinge of blue, to emulate X-ray film cool linearly-decreasing shades of cyan-magenta copper sequential increasing shades of black-copper flag repetitive red-white-blue-black pattern (not cyclic at endpoints) gray sequential linearly-increasing black-to-white grayscale hot sequential black-red-yellow-white, to emulate blackbody radiation from an object at increasing temperatures hsv cyclic red-yellow-green-cyan-blue-magenta-red, formed by changing the hue component in the HSV color space jet a spectral map with dark endpoints, blue-cyan-yellow-red; based on a fluid-jet simulation by NCSA [#]_ pink sequential increasing pastel black-pink-white, meant for sepia tone colorization of photographs prism repetitive red-yellow-green-blue-purple-...-green pattern (not cyclic at endpoints) spring linearly-increasing shades of magenta-yellow summer sequential linearly-increasing shades of green-yellow winter linearly-increasing shades of blue-green ========= ======================================================= For the above list only, you can also set the colormap using the corresponding pylab shortcut interface function, similar to Matlab:: imshow(X) hot() jet() The next set of palettes are from the `Yorick scientific visualisation package <http://yorick.sourceforge.net/index.php>`_, an evolution of the GIST package, both by David H. Munro: ============ ======================================================= Colormap Description ============ ======================================================= gist_earth mapmaker's colors from dark blue deep ocean to green lowlands to brown highlands to white mountains gist_heat sequential increasing black-red-orange-white, to emulate blackbody radiation from an iron bar as it grows hotter gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white colormap from National Center for Atmospheric Research [#]_ gist_rainbow runs through the colors in spectral order from red to violet at full saturation (like *hsv* but not cyclic) gist_stern "Stern special" color table from Interactive Data Language software ============ ======================================================= The following colormaps are based on the `ColorBrewer <http://colorbrewer.org>`_ color specifications and designs developed by Cynthia Brewer: ColorBrewer Diverging (luminance is highest at the midpoint, and decreases towards differently-colored endpoints): ======== =================================== Colormap Description ======== =================================== BrBG brown, white, blue-green PiYG pink, white, yellow-green PRGn purple, white, green PuOr orange, white, purple RdBu red, white, blue RdGy red, white, gray RdYlBu red, yellow, blue RdYlGn red, yellow, green Spectral red, orange, yellow, green, blue ======== =================================== ColorBrewer Sequential (luminance decreases monotonically): ======== ==================================== Colormap Description ======== ==================================== Blues white to dark blue BuGn white, light blue, dark green BuPu white, light blue, dark purple GnBu white, light green, dark blue Greens white to dark green Greys white to black (not linear) Oranges white, orange, dark brown OrRd white, orange, dark red PuBu white, light purple, dark blue PuBuGn white, light purple, dark green PuRd white, light purple, dark red Purples white to dark purple RdPu white, pink, dark purple Reds white to dark red YlGn light yellow, dark green YlGnBu light yellow, light green, dark blue YlOrBr light yellow, orange, dark brown YlOrRd light yellow, orange, dark red ======== ==================================== ColorBrewer Qualitative: (For plotting nominal data, :class:`ListedColormap` should be used, not :class:`LinearSegmentedColormap`. Different sets of colors are recommended for different numbers of categories. These continuous versions of the qualitative schemes may be removed or converted in the future.) * Accent * Dark2 * Paired * Pastel1 * Pastel2 * Set1 * Set2 * Set3 Other miscellaneous schemes: ============= ======================================================= Colormap Description ============= ======================================================= afmhot sequential black-orange-yellow-white blackbody spectrum, commonly used in atomic force microscopy brg blue-red-green bwr diverging blue-white-red coolwarm diverging blue-gray-red, meant to avoid issues with 3D shading, color blindness, and ordering of colors [#]_ CMRmap "Default colormaps on color images often reproduce to confusing grayscale images. The proposed colormap maintains an aesthetically pleasing color image that automatically reproduces to a monotonic grayscale with discrete, quantifiable saturation levels." [#]_ cubehelix Unlike most other color schemes cubehelix was designed by D.A. Green to be monotonically increasing in terms of perceived brightness. Also, when printed on a black and white postscript printer, the scheme results in a greyscale with monotonically increasing brightness. This color scheme is named cubehelix because the r,g,b values produced can be visualised as a squashed helix around the diagonal in the r,g,b color cube. gnuplot gnuplot's traditional pm3d scheme (black-blue-red-yellow) gnuplot2 sequential color printable as gray (black-blue-violet-yellow-white) ocean green-blue-white rainbow spectral purple-blue-green-yellow-orange-red colormap with diverging luminance seismic diverging blue-white-red nipy_spectral black-purple-blue-green-yellow-red-white spectrum, originally from the Neuroimaging in Python project terrain mapmaker's colors, blue-green-yellow-brown-white, originally from IGOR Pro ============= ======================================================= The following colormaps are redundant and may be removed in future versions. It's recommended to use the names in the descriptions instead, which produce identical output: ========= ======================================================= Colormap Description ========= ======================================================= gist_gray identical to *gray* gist_yarg identical to *gray_r* binary identical to *gray_r* spectral identical to *nipy_spectral* [#]_ ========= ======================================================= .. rubric:: Footnotes .. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor choice for scientific visualization by many researchers: `Rainbow Color Map (Still) Considered Harmful <http://www.jwave.vt.edu/%7Erkriz/Projects/create_color_table/color_07.pdf>`_ .. [#] Resembles "BkBlAqGrYeOrReViWh200" from NCAR Command Language. See `Color Table Gallery <http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_ .. [#] See `Diverging Color Maps for Scientific Visualization <http://www.cs.unm.edu/~kmorel/documents/ColorMaps/>`_ by Kenneth Moreland. .. [#] See `A Color Map for Effective Black-and-White Rendering of Color-Scale Images <http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_ by Carey Rappaport .. [#] Changed to distinguish from ColorBrewer's *Spectral* map. :func:`spectral` still works, but ``set_cmap('nipy_spectral')`` is recommended for clarity.
Matplotlib provides a number of colormaps, and others can be added using :func:`~matplotlib.cm.register_cmap`. This function documents the built-in colormaps, and will also return a list of all registered colormaps if called.
[ "Matplotlib", "provides", "a", "number", "of", "colormaps", "and", "others", "can", "be", "added", "using", ":", "func", ":", "~matplotlib", ".", "cm", ".", "register_cmap", ".", "This", "function", "documents", "the", "built", "-", "in", "colormaps", "and", "will", "also", "return", "a", "list", "of", "all", "registered", "colormaps", "if", "called", "." ]
def colormaps(): """ Matplotlib provides a number of colormaps, and others can be added using :func:`~matplotlib.cm.register_cmap`. This function documents the built-in colormaps, and will also return a list of all registered colormaps if called. You can set the colormap for an image, pcolor, scatter, etc, using a keyword argument:: imshow(X, cmap=cm.hot) or using the :func:`set_cmap` function:: imshow(X) pyplot.set_cmap('hot') pyplot.set_cmap('jet') In interactive mode, :func:`set_cmap` will update the colormap post-hoc, allowing you to see which one works best for your data. All built-in colormaps can be reversed by appending ``_r``: For instance, ``gray_r`` is the reverse of ``gray``. There are several common color schemes used in visualization: Sequential schemes for unipolar data that progresses from low to high Diverging schemes for bipolar data that emphasizes positive or negative deviations from a central value Cyclic schemes meant for plotting values that wrap around at the endpoints, such as phase angle, wind direction, or time of day Qualitative schemes for nominal data that has no inherent ordering, where color is used only to distinguish categories The base colormaps are derived from those of the same name provided with Matlab: ========= ======================================================= Colormap Description ========= ======================================================= autumn sequential linearly-increasing shades of red-orange-yellow bone sequential increasing black-white color map with a tinge of blue, to emulate X-ray film cool linearly-decreasing shades of cyan-magenta copper sequential increasing shades of black-copper flag repetitive red-white-blue-black pattern (not cyclic at endpoints) gray sequential linearly-increasing black-to-white grayscale hot sequential black-red-yellow-white, to emulate blackbody radiation from an object at increasing temperatures hsv cyclic red-yellow-green-cyan-blue-magenta-red, formed by changing the hue component in the HSV color space jet a spectral map with dark endpoints, blue-cyan-yellow-red; based on a fluid-jet simulation by NCSA [#]_ pink sequential increasing pastel black-pink-white, meant for sepia tone colorization of photographs prism repetitive red-yellow-green-blue-purple-...-green pattern (not cyclic at endpoints) spring linearly-increasing shades of magenta-yellow summer sequential linearly-increasing shades of green-yellow winter linearly-increasing shades of blue-green ========= ======================================================= For the above list only, you can also set the colormap using the corresponding pylab shortcut interface function, similar to Matlab:: imshow(X) hot() jet() The next set of palettes are from the `Yorick scientific visualisation package <http://yorick.sourceforge.net/index.php>`_, an evolution of the GIST package, both by David H. Munro: ============ ======================================================= Colormap Description ============ ======================================================= gist_earth mapmaker's colors from dark blue deep ocean to green lowlands to brown highlands to white mountains gist_heat sequential increasing black-red-orange-white, to emulate blackbody radiation from an iron bar as it grows hotter gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white colormap from National Center for Atmospheric Research [#]_ gist_rainbow runs through the colors in spectral order from red to violet at full saturation (like *hsv* but not cyclic) gist_stern "Stern special" color table from Interactive Data Language software ============ ======================================================= The following colormaps are based on the `ColorBrewer <http://colorbrewer.org>`_ color specifications and designs developed by Cynthia Brewer: ColorBrewer Diverging (luminance is highest at the midpoint, and decreases towards differently-colored endpoints): ======== =================================== Colormap Description ======== =================================== BrBG brown, white, blue-green PiYG pink, white, yellow-green PRGn purple, white, green PuOr orange, white, purple RdBu red, white, blue RdGy red, white, gray RdYlBu red, yellow, blue RdYlGn red, yellow, green Spectral red, orange, yellow, green, blue ======== =================================== ColorBrewer Sequential (luminance decreases monotonically): ======== ==================================== Colormap Description ======== ==================================== Blues white to dark blue BuGn white, light blue, dark green BuPu white, light blue, dark purple GnBu white, light green, dark blue Greens white to dark green Greys white to black (not linear) Oranges white, orange, dark brown OrRd white, orange, dark red PuBu white, light purple, dark blue PuBuGn white, light purple, dark green PuRd white, light purple, dark red Purples white to dark purple RdPu white, pink, dark purple Reds white to dark red YlGn light yellow, dark green YlGnBu light yellow, light green, dark blue YlOrBr light yellow, orange, dark brown YlOrRd light yellow, orange, dark red ======== ==================================== ColorBrewer Qualitative: (For plotting nominal data, :class:`ListedColormap` should be used, not :class:`LinearSegmentedColormap`. Different sets of colors are recommended for different numbers of categories. These continuous versions of the qualitative schemes may be removed or converted in the future.) * Accent * Dark2 * Paired * Pastel1 * Pastel2 * Set1 * Set2 * Set3 Other miscellaneous schemes: ============= ======================================================= Colormap Description ============= ======================================================= afmhot sequential black-orange-yellow-white blackbody spectrum, commonly used in atomic force microscopy brg blue-red-green bwr diverging blue-white-red coolwarm diverging blue-gray-red, meant to avoid issues with 3D shading, color blindness, and ordering of colors [#]_ CMRmap "Default colormaps on color images often reproduce to confusing grayscale images. The proposed colormap maintains an aesthetically pleasing color image that automatically reproduces to a monotonic grayscale with discrete, quantifiable saturation levels." [#]_ cubehelix Unlike most other color schemes cubehelix was designed by D.A. Green to be monotonically increasing in terms of perceived brightness. Also, when printed on a black and white postscript printer, the scheme results in a greyscale with monotonically increasing brightness. This color scheme is named cubehelix because the r,g,b values produced can be visualised as a squashed helix around the diagonal in the r,g,b color cube. gnuplot gnuplot's traditional pm3d scheme (black-blue-red-yellow) gnuplot2 sequential color printable as gray (black-blue-violet-yellow-white) ocean green-blue-white rainbow spectral purple-blue-green-yellow-orange-red colormap with diverging luminance seismic diverging blue-white-red nipy_spectral black-purple-blue-green-yellow-red-white spectrum, originally from the Neuroimaging in Python project terrain mapmaker's colors, blue-green-yellow-brown-white, originally from IGOR Pro ============= ======================================================= The following colormaps are redundant and may be removed in future versions. It's recommended to use the names in the descriptions instead, which produce identical output: ========= ======================================================= Colormap Description ========= ======================================================= gist_gray identical to *gray* gist_yarg identical to *gray_r* binary identical to *gray_r* spectral identical to *nipy_spectral* [#]_ ========= ======================================================= .. rubric:: Footnotes .. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor choice for scientific visualization by many researchers: `Rainbow Color Map (Still) Considered Harmful <http://www.jwave.vt.edu/%7Erkriz/Projects/create_color_table/color_07.pdf>`_ .. [#] Resembles "BkBlAqGrYeOrReViWh200" from NCAR Command Language. See `Color Table Gallery <http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_ .. [#] See `Diverging Color Maps for Scientific Visualization <http://www.cs.unm.edu/~kmorel/documents/ColorMaps/>`_ by Kenneth Moreland. .. [#] See `A Color Map for Effective Black-and-White Rendering of Color-Scale Images <http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_ by Carey Rappaport .. [#] Changed to distinguish from ColorBrewer's *Spectral* map. :func:`spectral` still works, but ``set_cmap('nipy_spectral')`` is recommended for clarity. """ return sorted(cm.cmap_d.keys())
[ "def", "colormaps", "(", ")", ":", "return", "sorted", "(", "cm", ".", "cmap_d", ".", "keys", "(", ")", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/pyplot.py#L1820-L2054
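A minimal usage sketch of the interface the colormaps docstring above describes. Note the docstring predates modern Matplotlib: cm.cmap_d has since been removed, and in recent versions the matplotlib.colormaps registry plays the same role, so that substitution is an assumption about the installed version:

    import numpy as np
    import matplotlib
    import matplotlib.pyplot as plt

    print(sorted(matplotlib.colormaps)[:5])   # registered colormap names

    X = np.random.rand(16, 16)
    plt.imshow(X, cmap='hot')                 # per-call colormap
    plt.set_cmap('gray_r')                    # post-hoc change; note the _r reversal suffix
    plt.colorbar()
    plt.show()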
deepmind/dm_control
806a10e896e7c887635328bfa8352604ad0fedae
dm_control/entities/manipulators/kinova/jaco_arm.py
python
JacoArm.joints
(self)
return self._joints
List of joint elements belonging to the arm.
List of joint elements belonging to the arm.
[ "List", "of", "joint", "elements", "belonging", "to", "the", "arm", "." ]
def joints(self): """List of joint elements belonging to the arm.""" return self._joints
[ "def", "joints", "(", "self", ")", ":", "return", "self", ".", "_joints" ]
https://github.com/deepmind/dm_control/blob/806a10e896e7c887635328bfa8352604ad0fedae/dm_control/entities/manipulators/kinova/jaco_arm.py#L77-L79
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/httplib2/__init__.py
python
Http.add_certificate
(self, key, cert, domain)
Add a key and cert that will be used any time a request requires authentication.
Add a key and cert that will be used any time a request requires authentication.
[ "Add", "a", "key", "and", "cert", "that", "will", "be", "used", "any", "time", "a", "request", "requires", "authentication", "." ]
def add_certificate(self, key, cert, domain): """Add a key and cert that will be used any time a request requires authentication.""" self.certificates.add(key, cert, domain)
[ "def", "add_certificate", "(", "self", ",", "key", ",", "cert", ",", "domain", ")", ":", "self", ".", "certificates", ".", "add", "(", "key", ",", "cert", ",", "domain", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/httplib2/__init__.py#L1237-L1240
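A short usage sketch for add_certificate above; the PEM paths and domain are placeholders. httplib2 presents the key/cert pair when a request to the given domain demands TLS client authentication (an empty domain string applies the pair to all hosts):

    import httplib2

    h = httplib2.Http()
    h.add_certificate('/path/to/client.key', '/path/to/client.crt', 'api.example.com')
    resp, content = h.request('https://api.example.com/', 'GET')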
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/CPython/27/Lib/logging/__init__.py
python
Filterer.filter
(self, record)
return rv
Determine if a record is loggable by consulting all the filters. The default is to allow the record to be logged; any filter can veto this and the record is then dropped. Returns a zero value if a record is to be dropped, else non-zero.
Determine if a record is loggable by consulting all the filters.
[ "Determine", "if", "a", "record", "is", "loggable", "by", "consulting", "all", "the", "filters", "." ]
def filter(self, record): """ Determine if a record is loggable by consulting all the filters. The default is to allow the record to be logged; any filter can veto this and the record is then dropped. Returns a zero value if a record is to be dropped, else non-zero. """ rv = 1 for f in self.filters: if not f.filter(record): rv = 0 break return rv
[ "def", "filter", "(", "self", ",", "record", ")", ":", "rv", "=", "1", "for", "f", "in", "self", ".", "filters", ":", "if", "not", "f", ".", "filter", "(", "record", ")", ":", "rv", "=", "0", "break", "return", "rv" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/CPython/27/Lib/logging/__init__.py#L594-L607
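The veto semantics above, where any filter returning a falsy value drops the record, are exactly what the standard logging module still exposes, so a self-contained demonstration needs nothing beyond the stdlib:

    import logging

    class DropDebug(logging.Filter):
        def filter(self, record):
            # Returning a falsy value vetoes the record; any one filter can do this.
            return record.levelno > logging.DEBUG

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("demo")
    logger.addFilter(DropDebug())
    logger.debug("vetoed by the filter")   # never emitted
    logger.info("passes all filters")      # emitted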
memray/seq2seq-keyphrase
9145c63ebdc4c3bc431f8091dc52547a46804012
emolga/models/covc_encdec.py
python
AutoEncoder.__init__
(self, config, n_rng, rng, mode='Evaluation')
[]
def __init__(self, config, n_rng, rng, mode='Evaluation'): super(RNNLM, self).__init__() self.config = config self.n_rng = n_rng # numpy random stream self.rng = rng # Theano random stream self.mode = mode self.name = 'vae'
[ "def", "__init__", "(", "self", ",", "config", ",", "n_rng", ",", "rng", ",", "mode", "=", "'Evaluation'", ")", ":", "super", "(", "RNNLM", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "config", "=", "config", "self", ".", "n_rng", "=", "n_rng", "# numpy random stream", "self", ".", "rng", "=", "rng", "# Theano random stream", "self", ".", "mode", "=", "mode", "self", ".", "name", "=", "'vae'" ]
https://github.com/memray/seq2seq-keyphrase/blob/9145c63ebdc4c3bc431f8091dc52547a46804012/emolga/models/covc_encdec.py#L1712-L1721
pm4py/pm4py-core
7807b09a088b02199cd0149d724d0e28793971bf
pm4py/write.py
python
write_xes
(log: EventLog, file_path: str)
Exports an XES log Parameters -------------- log Event log file_path Destination path Returns ------------- void
Exports an XES log
[ "Exports", "an", "XES", "log" ]
def write_xes(log: EventLog, file_path: str) -> None: """ Exports an XES log Parameters -------------- log Event log file_path Destination path Returns ------------- void """ general_checks_classical_event_log(log) from pm4py.objects.log.exporter.xes import exporter as xes_exporter xes_exporter.apply(log, file_path)
[ "def", "write_xes", "(", "log", ":", "EventLog", ",", "file_path", ":", "str", ")", "->", "None", ":", "general_checks_classical_event_log", "(", "log", ")", "from", "pm4py", ".", "objects", ".", "log", ".", "exporter", ".", "xes", "import", "exporter", "as", "xes_exporter", "xes_exporter", ".", "apply", "(", "log", ",", "file_path", ")" ]
https://github.com/pm4py/pm4py-core/blob/7807b09a088b02199cd0149d724d0e28793971bf/pm4py/write.py#L29-L46
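A round-trip usage sketch for write_xes above, assuming a pm4py of the same vintage; read_xes is its import counterpart and both file paths are placeholders:

    import pm4py

    log = pm4py.read_xes("input.xes")    # EventLog parsed from an existing file
    pm4py.write_xes(log, "output.xes")   # export it back to disk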
spesmilo/electrum
bdbd59300fbd35b01605e66145458e5f396108e8
electrum/lnpeer.py
python
close_and_cleanup
(self)
[]
def close_and_cleanup(self): # note: This method might get called multiple times! # E.g. if you call close_and_cleanup() to cause a disconnection from the peer, # it will get called a second time in handle_disconnect(). try: if self.transport: self.transport.close() except: pass self.lnworker.peer_closed(self) self.got_disconnected.set()
[ "def", "close_and_cleanup", "(", "self", ")", ":", "# note: This method might get called multiple times!", "# E.g. if you call close_and_cleanup() to cause a disconnection from the peer,", "# it will get called a second time in handle_disconnect().", "try", ":", "if", "self", ".", "transport", ":", "self", ".", "transport", ".", "close", "(", ")", "except", ":", "pass", "self", ".", "lnworker", ".", "peer_closed", "(", "self", ")", "self", ".", "got_disconnected", ".", "set", "(", ")" ]
https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/lnpeer.py#L493-L503
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/rings/asymptotic/term_monoid.py
python
GenericTerm._eq_
(self, other)
return self.growth == other.growth
r""" Return whether this generic term is equal to ``other``. INPUT: - ``other`` -- an asymptotic term. OUTPUT: A boolean. .. NOTE:: This method gets called by the coercion framework, so it can be assumed that this asymptotic term is from the same parent as ``other``. EXAMPLES:: sage: from sage.rings.asymptotic.growth_group import GrowthGroup sage: from sage.rings.asymptotic.term_monoid import (GenericTermMonoid, ....: ExactTermMonoid, OTermMonoid) sage: from sage.rings.asymptotic.term_monoid import DefaultTermMonoidFactory as TermMonoid sage: GT = GenericTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: ET = ExactTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), ZZ) sage: OT = OTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: g = GT.an_element(); e = ET.an_element(); o = OT.an_element() sage: g, e, o (Generic Term with growth x, x, O(x)) sage: e == e^2 # indirect doctest False sage: e == ET(x, coefficient=1) # indirect doctest True sage: o == OT(x^2) # indirect doctest False TESTS:: sage: T = GenericTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: t = T.an_element() sage: t == t True :: sage: OT = OTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: t = OT.an_element(); t O(x) sage: t == OT(x) # indirect doctest True sage: t == OT(x^2) # indirect doctest False
r""" Return whether this generic term is equal to ``other``.
[ "r", "Return", "whether", "this", "generic", "term", "is", "equal", "to", "other", "." ]
def _eq_(self, other): r""" Return whether this generic term is equal to ``other``. INPUT: - ``other`` -- an asymptotic term. OUTPUT: A boolean. .. NOTE:: This method gets called by the coercion framework, so it can be assumed that this asymptotic term is from the same parent as ``other``. EXAMPLES:: sage: from sage.rings.asymptotic.growth_group import GrowthGroup sage: from sage.rings.asymptotic.term_monoid import (GenericTermMonoid, ....: ExactTermMonoid, OTermMonoid) sage: from sage.rings.asymptotic.term_monoid import DefaultTermMonoidFactory as TermMonoid sage: GT = GenericTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: ET = ExactTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), ZZ) sage: OT = OTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: g = GT.an_element(); e = ET.an_element(); o = OT.an_element() sage: g, e, o (Generic Term with growth x, x, O(x)) sage: e == e^2 # indirect doctest False sage: e == ET(x, coefficient=1) # indirect doctest True sage: o == OT(x^2) # indirect doctest False TESTS:: sage: T = GenericTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: t = T.an_element() sage: t == t True :: sage: OT = OTermMonoid(TermMonoid, GrowthGroup('x^ZZ'), QQ) sage: t = OT.an_element(); t O(x) sage: t == OT(x) # indirect doctest True sage: t == OT(x^2) # indirect doctest False """ return self.growth == other.growth
[ "def", "_eq_", "(", "self", ",", "other", ")", ":", "return", "self", ".", "growth", "==", "other", ".", "growth" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/asymptotic/term_monoid.py#L1019-L1074
mesonbuild/meson
a22d0f9a0a787df70ce79b05d0c45de90a970048
mesonbuild/compilers/d.py
python
DmdLikeCompilerMixin._get_crt_args
(self, crt_val: str, buildtype: str)
[]
def _get_crt_args(self, crt_val: str, buildtype: str) -> T.List[str]: if not self.info.is_windows(): return [] if crt_val in self.mscrt_args: return self.mscrt_args[crt_val] assert crt_val in ['from_buildtype', 'static_from_buildtype'] dbg = 'mdd' rel = 'md' if crt_val == 'static_from_buildtype': dbg = 'mtd' rel = 'mt' # Match what build type flags used to do. if buildtype == 'plain': return [] elif buildtype == 'debug': return self.mscrt_args[dbg] elif buildtype == 'debugoptimized': return self.mscrt_args[rel] elif buildtype == 'release': return self.mscrt_args[rel] elif buildtype == 'minsize': return self.mscrt_args[rel] else: assert buildtype == 'custom' raise EnvironmentException('Requested C runtime based on buildtype, but buildtype is "custom".')
[ "def", "_get_crt_args", "(", "self", ",", "crt_val", ":", "str", ",", "buildtype", ":", "str", ")", "->", "T", ".", "List", "[", "str", "]", ":", "if", "not", "self", ".", "info", ".", "is_windows", "(", ")", ":", "return", "[", "]", "if", "crt_val", "in", "self", ".", "mscrt_args", ":", "return", "self", ".", "mscrt_args", "[", "crt_val", "]", "assert", "crt_val", "in", "[", "'from_buildtype'", ",", "'static_from_buildtype'", "]", "dbg", "=", "'mdd'", "rel", "=", "'md'", "if", "crt_val", "==", "'static_from_buildtype'", ":", "dbg", "=", "'mtd'", "rel", "=", "'mt'", "# Match what build type flags used to do.", "if", "buildtype", "==", "'plain'", ":", "return", "[", "]", "elif", "buildtype", "==", "'debug'", ":", "return", "self", ".", "mscrt_args", "[", "dbg", "]", "elif", "buildtype", "==", "'debugoptimized'", ":", "return", "self", ".", "mscrt_args", "[", "rel", "]", "elif", "buildtype", "==", "'release'", ":", "return", "self", ".", "mscrt_args", "[", "rel", "]", "elif", "buildtype", "==", "'minsize'", ":", "return", "self", ".", "mscrt_args", "[", "rel", "]", "else", ":", "assert", "buildtype", "==", "'custom'", "raise", "EnvironmentException", "(", "'Requested C runtime based on buildtype, but buildtype is \"custom\".'", ")" ]
https://github.com/mesonbuild/meson/blob/a22d0f9a0a787df70ce79b05d0c45de90a970048/mesonbuild/compilers/d.py#L441-L468
Delta-ML/delta
31dfebc8f20b7cb282b62f291ff25a87e403cc86
delta/utils/postprocess/base_postproc.py
python
PostProcABC.call
(self)
Implementation function that subclasses must override.
Implementation function that subclasses must override.
[ "Implementation", "function", "that", "subclasses", "must", "override", "." ]
def call(self): ''' Implementation function that subclasses must override. ''' raise NotImplementedError()
[ "def", "call", "(", "self", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/Delta-ML/delta/blob/31dfebc8f20b7cb282b62f291ff25a87e403cc86/delta/utils/postprocess/base_postproc.py#L28-L30
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/sisyphus/media_player.py
python
SisyphusPlayer.media_position_updated_at
(self)
return self._table.active_track_remaining_time_as_of
Return the last time we got a position update.
Return the last time we got a position update.
[ "Return", "the", "last", "time", "we", "got", "a", "position", "update", "." ]
def media_position_updated_at(self): """Return the last time we got a position update.""" return self._table.active_track_remaining_time_as_of
[ "def", "media_position_updated_at", "(", "self", ")", ":", "return", "self", ".", "_table", ".", "active_track_remaining_time_as_of" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/sisyphus/media_player.py#L150-L152
PowerScript/KatanaFramework
0f6ad90a88de865d58ec26941cb4460501e75496
lib/MySQLdb/connections.py
python
Connection.show_warnings
(self)
return warnings
Return detailed information about warnings as a sequence of tuples of (Level, Code, Message). This is only supported in MySQL-4.1 and up. If your server is an earlier version, an empty sequence is returned.
Return detailed information about warnings as a sequence of tuples of (Level, Code, Message). This is only supported in MySQL-4.1 and up. If your server is an earlier version, an empty sequence is returned.
[ "Return", "detailed", "information", "about", "warnings", "as", "a", "sequence", "of", "tuples", "of", "(", "Level", "Code", "Message", ")", ".", "This", "is", "only", "supported", "in", "MySQL", "-", "4", ".", "1", "and", "up", ".", "If", "your", "server", "is", "an", "earlier", "version", "an", "empty", "sequence", "is", "returned", "." ]
def show_warnings(self): """Return detailed information about warnings as a sequence of tuples of (Level, Code, Message). This is only supported in MySQL-4.1 and up. If your server is an earlier version, an empty sequence is returned.""" if self._server_version < (4,1): return () self.query("SHOW WARNINGS") r = self.store_result() warnings = r.fetch_row(0) return warnings
[ "def", "show_warnings", "(", "self", ")", ":", "if", "self", ".", "_server_version", "<", "(", "4", ",", "1", ")", ":", "return", "(", ")", "self", ".", "query", "(", "\"SHOW WARNINGS\"", ")", "r", "=", "self", ".", "store_result", "(", ")", "warnings", "=", "r", ".", "fetch_row", "(", "0", ")", "return", "warnings" ]
https://github.com/PowerScript/KatanaFramework/blob/0f6ad90a88de865d58ec26941cb4460501e75496/lib/MySQLdb/connections.py#L311-L320
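A usage sketch for show_warnings above; the connection parameters are placeholders, and the exact warning text depends on the server's sql_mode. A statement such as a division by zero typically leaves a (Level, Code, Message) tuple behind:

    import MySQLdb

    conn = MySQLdb.connect(host="localhost", user="demo", passwd="demo", db="test")
    cur = conn.cursor()
    cur.execute("SELECT 1/0")              # returns NULL and usually raises a warning
    for level, code, message in conn.show_warnings():
        print(level, code, message)        # e.g. Warning 1365 Division by 0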
Fizzadar/pyinfra
ff0913d6a172966760b63fe59e55dff9ea852e0d
pyinfra/api/command.py
python
FileDownloadCommand.__repr__
(self)
return 'FileDownloadCommand({0}, {1})'.format(self.src, self.dest)
[]
def __repr__(self): return 'FileDownloadCommand({0}, {1})'.format(self.src, self.dest)
[ "def", "__repr__", "(", "self", ")", ":", "return", "'FileDownloadCommand({0}, {1})'", ".", "format", "(", "self", ".", "src", ",", "self", ".", "dest", ")" ]
https://github.com/Fizzadar/pyinfra/blob/ff0913d6a172966760b63fe59e55dff9ea852e0d/pyinfra/api/command.py#L155-L156
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/asyncio/trsock.py
python
TransportSocket.send
(self, *args, **kwargs)
return self._sock.send(*args, **kwargs)
[]
def send(self, *args, **kwargs): self._na('send() method') return self._sock.send(*args, **kwargs)
[ "def", "send", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_na", "(", "'send() method'", ")", "return", "self", ".", "_sock", ".", "send", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/asyncio/trsock.py#L145-L147
PaloAltoNetworks/pan-os-python
30f6cd9e29d0e3c2549d46c722f6dcb507acd437
panos/base.py
python
PanDevice.update_connection_method
(self)
return self._xapi_private
Regenerate the xapi object used to connect to the device This is only necessary if the API key, password, hostname, or other connectivity information in this object has changed. In this case, the xapi object used to communicate with the firewall must be regenerated to use the new connectivity information. The new xapi is stored in the PanDevice object and returned. Returns: XapiWrapper: The xapi object which is also stored in self.xapi.
Regenerate the xapi object used to connect to the device
[ "Regenerate", "the", "xapi", "object", "used", "to", "connect", "to", "the", "device" ]
def update_connection_method(self): """Regenerate the xapi object used to connect to the device This is only necessary if the API key, password, hostname, or other connectivity information in this object has changed. In this case, the xapi object used to communicate with the firewall must be regenerated to use the new connectivity information. The new xapi is stored in the PanDevice object and returned. Returns: XapiWrapper: The xapi object which is also stored in self.xapi. """ self._xapi_private = self.generate_xapi() return self._xapi_private
[ "def", "update_connection_method", "(", "self", ")", ":", "self", ".", "_xapi_private", "=", "self", ".", "generate_xapi", "(", ")", "return", "self", ".", "_xapi_private" ]
https://github.com/PaloAltoNetworks/pan-os-python/blob/30f6cd9e29d0e3c2549d46c722f6dcb507acd437/panos/base.py#L3832-L3847
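update_connection_method only matters after connectivity details change on an existing object. A hedged sketch against pan-os-python's Firewall (the address and credentials are placeholders, and actually rebuilding the connection requires a reachable device):

from panos.firewall import Firewall

fw = Firewall('192.0.2.1', api_username='admin', api_password='secret')
fw.hostname = '192.0.2.2'              # connectivity info changed, so the
xapi = fw.update_connection_method()   # cached xapi wrapper must be rebuilt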
nilearn/nilearn
9edba4471747efacf21260bf470a346307f52706
nilearn/interfaces/fmriprep/load_confounds_utils.py
python
MissingConfound.__init__
(self, params=None, keywords=None)
Set missing parameters and keywords.
Set missing parameters and keywords.
[ "Set", "missing", "parameters", "and", "keywords", "." ]
def __init__(self, params=None, keywords=None): """Set missing parameters and keywords.""" self.params = params or [] self.keywords = keywords or []
[ "def", "__init__", "(", "self", ",", "params", "=", "None", ",", "keywords", "=", "None", ")", ":", "self", ".", "params", "=", "params", "or", "[", "]", "self", ".", "keywords", "=", "keywords", "or", "[", "]" ]
https://github.com/nilearn/nilearn/blob/9edba4471747efacf21260bf470a346307f52706/nilearn/interfaces/fmriprep/load_confounds_utils.py#L246-L249
MushroomRL/mushroom-rl
a0eaa2cf8001e433419234a9fc48b64170e3f61c
mushroom_rl/utils/parameters.py
python
Parameter._compute
(self, *idx, **kwargs)
return self._initial_value
Returns: The value of the parameter in the provided index.
Returns: The value of the parameter in the provided index.
[ "Returns", ":", "The", "value", "of", "the", "parameter", "in", "the", "provided", "index", "." ]
def _compute(self, *idx, **kwargs): """ Returns: The value of the parameter in the provided index. """ return self._initial_value
[ "def", "_compute", "(", "self", ",", "*", "idx", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_initial_value" ]
https://github.com/MushroomRL/mushroom-rl/blob/a0eaa2cf8001e433419234a9fc48b64170e3f61c/mushroom_rl/utils/parameters.py#L83-L89
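The base _compute just echoes the initial value; parameter schedules come from overriding it. A simplified self-contained sketch of the pattern (this flattens mushroom_rl's real class hierarchy; LinearDecayParameter is illustrative):

class Parameter:
    def __init__(self, value):
        self._initial_value = value

    def __call__(self, *idx, **kwargs):
        return self._compute(*idx, **kwargs)

    def _compute(self, *idx, **kwargs):
        return self._initial_value  # constant by default

class LinearDecayParameter(Parameter):
    def __init__(self, value, decay_per_call):
        super().__init__(value)
        self._decay, self._n = decay_per_call, 0

    def _compute(self, *idx, **kwargs):
        current = max(0.0, self._initial_value - self._decay * self._n)
        self._n += 1
        return current

eps = LinearDecayParameter(1.0, 0.1)
print([round(eps(), 1) for _ in range(3)])  # [1.0, 0.9, 0.8]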
coherence-project/Coherence
88016204c7778bf0d3ad1ae331b4d8fd725dd2af
coherence/extern/youtubedl/youtubedl.py
python
YoutubeIE.report_video_info_webpage_download
(self, video_id)
Report attempt to download video info webpage.
Report attempt to download video info webpage.
[ "Report", "attempt", "to", "download", "video", "info", "webpage", "." ]
def report_video_info_webpage_download(self, video_id): """Report attempt to download video info webpage.""" self._downloader.to_stdout(u'[youtube] %s: Downloading video info webpage' % video_id)
[ "def", "report_video_info_webpage_download", "(", "self", ",", "video_id", ")", ":", "self", ".", "_downloader", ".", "to_stdout", "(", "u'[youtube] %s: Downloading video info webpage'", "%", "video_id", ")" ]
https://github.com/coherence-project/Coherence/blob/88016204c7778bf0d3ad1ae331b4d8fd725dd2af/coherence/extern/youtubedl/youtubedl.py#L559-L561
Eniac-Xie/faster-rcnn-resnet
aba743e8404b47fc9bcccba4920846d1068c7e3c
lib/roi_data_layer/layer.py
python
RoIDataLayer.reshape
(self, bottom, top)
Reshaping happens during the call to forward.
Reshaping happens during the call to forward.
[ "Reshaping", "happens", "during", "the", "call", "to", "forward", "." ]
def reshape(self, bottom, top): """Reshaping happens during the call to forward.""" pass
[ "def", "reshape", "(", "self", ",", "bottom", ",", "top", ")", ":", "pass" ]
https://github.com/Eniac-Xie/faster-rcnn-resnet/blob/aba743e8404b47fc9bcccba4920846d1068c7e3c/lib/roi_data_layer/layer.py#L161-L163
TheAlgorithms/Python
9af2eef9b3761bf51580dedfb6fa7136ca0c5c2c
data_structures/binary_tree/binary_tree_traversals.py
python
zigzag
(root: Node | None)
return output
ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively.
ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively.
[ "ZigZag", "traverse", ":", "Returns", "a", "list", "of", "nodes", "value", "from", "left", "to", "right", "and", "right", "to", "left", "alternatively", "." ]
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]: """ ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively. """ if root is None: return [] output: list[Sequence[Node | None]] = [] flag = 0 height_tree = height(root) for h in range(1, height_tree + 1): if not flag: output.append(get_nodes_from_left_to_right(root, h)) flag = 1 else: output.append(get_nodes_from_right_to_left(root, h)) flag = 0 return output
[ "def", "zigzag", "(", "root", ":", "Node", "|", "None", ")", "->", "Sequence", "[", "Node", "|", "None", "]", "|", "list", "[", "Any", "]", ":", "if", "root", "is", "None", ":", "return", "[", "]", "output", ":", "list", "[", "Sequence", "[", "Node", "|", "None", "]", "]", "=", "[", "]", "flag", "=", "0", "height_tree", "=", "height", "(", "root", ")", "for", "h", "in", "range", "(", "1", ",", "height_tree", "+", "1", ")", ":", "if", "not", "flag", ":", "output", ".", "append", "(", "get_nodes_from_left_to_right", "(", "root", ",", "h", ")", ")", "flag", "=", "1", "else", ":", "output", ".", "append", "(", "get_nodes_from_right_to_left", "(", "root", ",", "h", ")", ")", "flag", "=", "0", "return", "output" ]
https://github.com/TheAlgorithms/Python/blob/9af2eef9b3761bf51580dedfb6fa7136ca0c5c2c/data_structures/binary_tree/binary_tree_traversals.py#L126-L147
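zigzag walks the tree level by level, flipping direction on each level. The repo builds each level via height() plus two per-level helpers; a self-contained sketch of the same traversal using an explicit queue instead (Node here is a minimal stand-in):

from collections import deque
from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: "Node | None" = None
    right: "Node | None" = None

def zigzag_levels(root):
    if root is None:
        return []
    output, queue, left_to_right = [], deque([root]), True
    while queue:
        level = [node.data for node in queue]
        output.append(level if left_to_right else level[::-1])
        left_to_right = not left_to_right
        for _ in range(len(level)):           # replace this level with the next
            node = queue.popleft()
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
    return output

root = Node(1, Node(2, Node(4), Node(5)), Node(3))
print(zigzag_levels(root))  # [[1], [3, 2], [4, 5]]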
BigBrotherBot/big-brother-bot
848823c71413c86e7f1ff9584f43e08d40a7f2c0
b3/tools/debug/statlib/stats.py
python
azmap
(scores, compare, dimension=0)
return (scores - mns) / sstd
Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0 of the compare array. Usage: azs(scores, compare, dimension=0)
Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0 of the compare array. Usage: azs(scores, compare, dimension=0)
[ "Returns", "an", "array", "of", "z", "-", "scores", "the", "shape", "of", "scores", "(", "e", ".", "g", ".", "[", "x", "y", "]", ")", "compared", "to", "array", "passed", "to", "compare", "(", "e", ".", "g", ".", "[", "time", "x", "y", "]", ")", ".", "Assumes", "collapsing", "over", "dim", "0", "of", "the", "compare", "array", ".", "Usage", ":", "azs", "(", "scores", "compare", "dimension", "=", "0", ")" ]
def azmap (scores, compare, dimension=0): """ Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0 of the compare array. Usage: azs(scores, compare, dimension=0) """ mns = amean(compare,dimension) sstd = asamplestdev(compare,0) return (scores - mns) / sstd
[ "def", "azmap", "(", "scores", ",", "compare", ",", "dimension", "=", "0", ")", ":", "mns", "=", "amean", "(", "compare", ",", "dimension", ")", "sstd", "=", "asamplestdev", "(", "compare", ",", "0", ")", "return", "(", "scores", "-", "mns", ")", "/", "sstd" ]
https://github.com/BigBrotherBot/big-brother-bot/blob/848823c71413c86e7f1ff9584f43e08d40a7f2c0/b3/tools/debug/statlib/stats.py#L2916-L2926
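azmap standardizes scores against the mean and stdev of compare collapsed over dim 0. A NumPy sketch of the same computation (ddof=0 is assumed to match asamplestdev's divide-by-N convention; note the original hard-codes axis 0 for the stdev even when dimension differs):

import numpy as np

def zmap(scores, compare, axis=0):
    mns = compare.mean(axis=axis)
    sstd = compare.std(axis=axis, ddof=0)  # divide-by-N, like asamplestdev
    return (scores - mns) / sstd

rng = np.random.default_rng(0)
compare = rng.normal(5.0, 2.0, size=(100, 3))  # e.g. [time, x]
print(zmap(compare[0], compare))               # z-scores of the first row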
ns2250225/py-mt4
60b6b98f80923289dc6a73542e4b31e34abc030c
pythonicMT4.py
python
zmq_python.get_data
(self, symbol, timeframe, start_bar, end_bar)
return price_arr
only start_bar and end_bar as int
only start_bar and end_bar as int
[ "only", "start_bar", "and", "end_bar", "as", "int" ]
def get_data(self, symbol, timeframe, start_bar, end_bar): ''' only start_bar and end_bar as int ''' self.data = "DATA|"+ symbol+"|"+"PERIOD_"+timeframe+"|"+str(start_bar)+"|"+str(end_bar+1) self.remote_send(self.reqSocket, self.data) prices= self.remote_pull(self.pullSocket) prices_str= str(prices) price_lst= prices_str.split(sep='|')[1:-1] price_lst= [float(i) for i in price_lst] price_lst= price_lst[::-1] price_arr= np.array(price_lst) return price_arr
[ "def", "get_data", "(", "self", ",", "symbol", ",", "timeframe", ",", "start_bar", ",", "end_bar", ")", ":", "self", ".", "data", "=", "\"DATA|\"", "+", "symbol", "+", "\"|\"", "+", "\"PERIOD_\"", "+", "timeframe", "+", "\"|\"", "+", "str", "(", "start_bar", ")", "+", "\"|\"", "+", "str", "(", "end_bar", "+", "1", ")", "self", ".", "remote_send", "(", "self", ".", "reqSocket", ",", "self", ".", "data", ")", "prices", "=", "self", ".", "remote_pull", "(", "self", ".", "pullSocket", ")", "prices_str", "=", "str", "(", "prices", ")", "price_lst", "=", "prices_str", ".", "split", "(", "sep", "=", "'|'", ")", "[", "1", ":", "-", "1", "]", "price_lst", "=", "[", "float", "(", "i", ")", "for", "i", "in", "price_lst", "]", "price_lst", "=", "price_lst", "[", ":", ":", "-", "1", "]", "price_arr", "=", "np", ".", "array", "(", "price_lst", ")", "return", "price_arr" ]
https://github.com/ns2250225/py-mt4/blob/60b6b98f80923289dc6a73542e4b31e34abc030c/pythonicMT4.py#L42-L54
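get_data speaks a simple pipe-delimited protocol: the request is "DATA|<symbol>|PERIOD_<timeframe>|<start>|<end+1>", and the reply is parsed by splitting on '|', dropping the first and last fields, and reversing so bars run oldest-first. A sketch of just the parsing half, runnable without ZeroMQ (the reply format is inferred from the slicing above):

import numpy as np

def parse_price_reply(reply: str) -> np.ndarray:
    fields = reply.split('|')[1:-1]                     # drop leading tag and trailing field
    return np.array([float(p) for p in fields][::-1])   # oldest bar first

print(parse_price_reply('DATA|1.1012|1.1008|1.1005|'))  # [1.1005 1.1008 1.1012]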
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
thirdparty_libs/django/db/models/sql/query.py
python
Query.reset_refcounts
(self, to_counts)
This method will reset reference counts for aliases so that they match the value passed in :param to_counts:.
This method will reset reference counts for aliases so that they match the value passed in :param to_counts:.
[ "This", "method", "will", "reset", "reference", "counts", "for", "aliases", "so", "that", "they", "match", "the", "value", "passed", "in", ":", "param", "to_counts", ":", "." ]
def reset_refcounts(self, to_counts): """ This method will reset reference counts for aliases so that they match the value passed in :param to_counts:. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount)
[ "def", "reset_refcounts", "(", "self", ",", "to_counts", ")", ":", "for", "alias", ",", "cur_refcount", "in", "self", ".", "alias_refcount", ".", "copy", "(", ")", ".", "items", "(", ")", ":", "unref_amount", "=", "cur_refcount", "-", "to_counts", ".", "get", "(", "alias", ",", "0", ")", "self", ".", "unref_alias", "(", "alias", ",", "unref_amount", ")" ]
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/django/db/models/sql/query.py#L733-L740
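reset_refcounts winds each alias's count back down to a saved snapshot by unref-ing the difference. A toy sketch of the same bookkeeping (ToyQuery and its methods are stand-ins for Django's Query internals):

class ToyQuery:
    def __init__(self):
        self.alias_refcount = {}

    def ref_alias(self, alias):
        self.alias_refcount[alias] = self.alias_refcount.get(alias, 0) + 1

    def unref_alias(self, alias, amount=1):
        self.alias_refcount[alias] -= amount

    def reset_refcounts(self, to_counts):
        for alias, cur in self.alias_refcount.copy().items():
            self.unref_alias(alias, cur - to_counts.get(alias, 0))

q = ToyQuery()
q.ref_alias('T1'); q.ref_alias('T1'); q.ref_alias('T2')
snapshot = dict(q.alias_refcount)   # {'T1': 2, 'T2': 1}
q.ref_alias('T1')                   # temporary extra reference
q.reset_refcounts(snapshot)
print(q.alias_refcount)             # back to {'T1': 2, 'T2': 1}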
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/reporting_task_dto.py
python
ReportingTaskDTO.scheduling_period
(self)
return self._scheduling_period
Gets the scheduling_period of this ReportingTaskDTO. The frequency with which to schedule the reporting task. The format of the value will depend on the value of the schedulingStrategy. :return: The scheduling_period of this ReportingTaskDTO. :rtype: str
Gets the scheduling_period of this ReportingTaskDTO. The frequency with which to schedule the reporting task. The format of the value will depend on the value of the schedulingStrategy.
[ "Gets", "the", "scheduling_period", "of", "this", "ReportingTaskDTO", ".", "The", "frequency", "with", "which", "to", "schedule", "the", "reporting", "task", ".", "The", "format", "of", "the", "value", "will", "depend", "on", "the", "value", "of", "the", "schedulingStrategy", "." ]
def scheduling_period(self): """ Gets the scheduling_period of this ReportingTaskDTO. The frequency with which to schedule the reporting task. The format of the value will depend on the value of the schedulingStrategy. :return: The scheduling_period of this ReportingTaskDTO. :rtype: str """ return self._scheduling_period
[ "def", "scheduling_period", "(", "self", ")", ":", "return", "self", ".", "_scheduling_period" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/reporting_task_dto.py#L472-L480
RasaHQ/rasa
54823b68c1297849ba7ae841a4246193cd1223a1
rasa/shared/core/events.py
python
StoryExported.__init__
( self, path: Optional[Text] = None, timestamp: Optional[float] = None, metadata: Optional[Dict[Text, Any]] = None, )
Creates an event about story exporting. Args: path: Path to which the story was exported. timestamp: When the event was created. metadata: Additional event metadata.
Creates an event about story exporting.
[ "Creates", "an", "event", "about", "story", "exporting", "." ]
def __init__( self, path: Optional[Text] = None, timestamp: Optional[float] = None, metadata: Optional[Dict[Text, Any]] = None, ) -> None: """Creates an event about story exporting. Args: path: Path to which the story was exported. timestamp: When the event was created. metadata: Additional event metadata. """ self.path = path super().__init__(timestamp, metadata)
[ "def", "__init__", "(", "self", ",", "path", ":", "Optional", "[", "Text", "]", "=", "None", ",", "timestamp", ":", "Optional", "[", "float", "]", "=", "None", ",", "metadata", ":", "Optional", "[", "Dict", "[", "Text", ",", "Any", "]", "]", "=", "None", ",", ")", "->", "None", ":", "self", ".", "path", "=", "path", "super", "(", ")", ".", "__init__", "(", "timestamp", ",", "metadata", ")" ]
https://github.com/RasaHQ/rasa/blob/54823b68c1297849ba7ae841a4246193cd1223a1/rasa/shared/core/events.py#L1330-L1344
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/cosmology/core.py
python
wpwaCDM.wp
(self)
return self._wp
Dark energy equation of state at the pivot redshift zp
Dark energy equation of state at the pivot redshift zp
[ "Dark", "energy", "equation", "of", "state", "at", "the", "pivot", "redshift", "zp" ]
def wp(self): """ Dark energy equation of state at the pivot redshift zp""" return self._wp
[ "def", "wp", "(", "self", ")", ":", "return", "self", ".", "_wp" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/cosmology/core.py#L2999-L3001
number5/cloud-init
19948dbaf40309355e1a2dbef116efb0ce66245c
cloudinit/distros/debian.py
python
read_system_locale
(sys_path=LOCALE_CONF_FN, keyname="LANG")
return sys_val
Read system default locale setting, if present
Read system default locale setting, if present
[ "Read", "system", "default", "locale", "setting", "if", "present" ]
def read_system_locale(sys_path=LOCALE_CONF_FN, keyname="LANG"): """Read system default locale setting, if present""" sys_val = "" if not sys_path: raise ValueError("Invalid path: %s" % sys_path) if os.path.exists(sys_path): locale_content = util.load_file(sys_path) sys_defaults = util.load_shell_content(locale_content) sys_val = sys_defaults.get(keyname, "") return sys_val
[ "def", "read_system_locale", "(", "sys_path", "=", "LOCALE_CONF_FN", ",", "keyname", "=", "\"LANG\"", ")", ":", "sys_val", "=", "\"\"", "if", "not", "sys_path", ":", "raise", "ValueError", "(", "\"Invalid path: %s\"", "%", "sys_path", ")", "if", "os", ".", "path", ".", "exists", "(", "sys_path", ")", ":", "locale_content", "=", "util", ".", "load_file", "(", "sys_path", ")", "sys_defaults", "=", "util", ".", "load_shell_content", "(", "locale_content", ")", "sys_val", "=", "sys_defaults", ".", "get", "(", "keyname", ",", "\"\"", ")", "return", "sys_val" ]
https://github.com/number5/cloud-init/blob/19948dbaf40309355e1a2dbef116efb0ce66245c/cloudinit/distros/debian.py#L348-L359
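read_system_locale shell-parses a key=value file and pulls a single key out of it. A self-contained sketch of the same behavior, with shlex standing in for cloud-init's util.load_shell_content:

import os
import shlex
import tempfile

def read_locale(path, keyname='LANG'):
    if not path:
        raise ValueError('Invalid path: %s' % path)
    if not os.path.exists(path):
        return ''
    with open(path) as f:
        tokens = shlex.split(f.read(), comments=True)   # strips quotes and # comments
    pairs = dict(t.partition('=')[::2] for t in tokens if '=' in t)
    return pairs.get(keyname, '')

with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as f:
    f.write('# system defaults\nLANG="en_US.UTF-8"\nLC_ALL="C"\n')
print(read_locale(f.name))  # en_US.UTF-8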
spotify/luigi
c3b66f4a5fa7eaa52f9a72eb6704b1049035c789
luigi/contrib/rdbms.py
python
CopyToTable.init_copy
(self, connection)
Override to perform custom queries. Any code here will be performed in the same transaction as the main copy, just prior to copying data. Example use cases include truncating the table or removing all data older than X in the database to keep a rolling window of data available in the table.
Override to perform custom queries.
[ "Override", "to", "perform", "custom", "queries", "." ]
def init_copy(self, connection): """ Override to perform custom queries. Any code here will be performed in the same transaction as the main copy, just prior to copying data. Example use cases include truncating the table or removing all data older than X in the database to keep a rolling window of data available in the table. """ # TODO: remove this after sufficient time so most people using the # clear_table attribute will have noticed it doesn't work anymore if hasattr(self, "clear_table"): raise Exception("The clear_table attribute has been removed. Override init_copy instead!") if self.enable_metadata_columns: self._add_metadata_columns(connection.cursor())
[ "def", "init_copy", "(", "self", ",", "connection", ")", ":", "# TODO: remove this after sufficient time so most people using the", "# clear_table attribute will have noticed it doesn't work anymore", "if", "hasattr", "(", "self", ",", "\"clear_table\"", ")", ":", "raise", "Exception", "(", "\"The clear_table attribute has been removed. Override init_copy instead!\"", ")", "if", "self", ".", "enable_metadata_columns", ":", "self", ".", "_add_metadata_columns", "(", "connection", ".", "cursor", "(", ")", ")" ]
https://github.com/spotify/luigi/blob/c3b66f4a5fa7eaa52f9a72eb6704b1049035c789/luigi/contrib/rdbms.py#L237-L252
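init_copy is the sanctioned hook for setup SQL inside the copy transaction; truncating for a full reload is the canonical override. A hedged sketch against luigi.contrib.postgres (the connection details, table, and columns are placeholders):

from luigi.contrib import postgres

class ReloadEvents(postgres.CopyToTable):
    host, database, user, password = 'localhost', 'demo', 'demo', 'demo'
    table = 'events'
    columns = [('id', 'INT'), ('name', 'TEXT')]

    def rows(self):
        yield (1, 'example')

    def init_copy(self, connection):
        # runs in the same transaction as the copy, just before the data lands
        connection.cursor().execute('TRUNCATE TABLE {}'.format(self.table))
        super().init_copy(connection)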
dfirtrack/dfirtrack
d405e9e2520f3f95372883e6543fa2aa7dac0121
dfirtrack_main/exporter/markdown/write_report.py
python
write_headline
(django_report, system)
write headline
write headline
[ "write", "headline" ]
def write_headline(django_report, system): """write headline""" django_report.write("# " + system.system_name + "\n") emptyline(django_report)
[ "def", "write_headline", "(", "django_report", ",", "system", ")", ":", "django_report", ".", "write", "(", "\"# \"", "+", "system", ".", "system_name", "+", "\"\\n\"", ")", "emptyline", "(", "django_report", ")" ]
https://github.com/dfirtrack/dfirtrack/blob/d405e9e2520f3f95372883e6543fa2aa7dac0121/dfirtrack_main/exporter/markdown/write_report.py#L9-L13