Dataset schema (column: type, observed range):

repo: string (length 7-55)
path: string (length 4-223)
url: string (length 87-315)
code: string (length 75-104k)
code_tokens: list
docstring: string (length 1-46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
avg_line_len: float64 (7.91-980)
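The records below list these fields one value per line, in the column order above. For orientation, here is a minimal sketch of how such records could be read back if they were exported as JSON Lines; the file name code_docstring_pairs.jsonl and the JSONL layout are illustrative assumptions, not part of this dump.

import json

def iter_records(path):
    """Yield one dict per row, with keys matching the columns listed above."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

# Hypothetical usage: print a few identifying fields from each record.
for rec in iter_records("code_docstring_pairs.jsonl"):  # assumed file name
    print(rec["repo"], rec["path"], rec["partition"])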
IBMStreams/pypi.streamsx
streamsx/rest.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest.py#L309-L342
def _get_vcap_services(vcap_services=None): """Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If `vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable. Args: vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file. vcap_services (dict): Return the dict as is. Returns: dict: A dict representation of the VCAP Services information. Raises: ValueError: * if `vcap_services` nor VCAP_SERVICES environment variable are specified. * cannot parse `vcap_services` as a JSON string nor as a filename. """ vcap_services = vcap_services or os.environ.get('VCAP_SERVICES') if not vcap_services: raise ValueError( "VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'") # If it was passed to config as a dict, simply return it if isinstance(vcap_services, dict): return vcap_services try: # Otherwise, if it's a string, try to load it as json vcap_services = json.loads(vcap_services) except json.JSONDecodeError: # If that doesn't work, attempt to open it as a file path to the json config. try: with open(vcap_services) as vcap_json_data: vcap_services = json.load(vcap_json_data) except: raise ValueError("VCAP_SERVICES information is not JSON or a file containing JSON:", vcap_services) return vcap_services
[ "def", "_get_vcap_services", "(", "vcap_services", "=", "None", ")", ":", "vcap_services", "=", "vcap_services", "or", "os", ".", "environ", ".", "get", "(", "'VCAP_SERVICES'", ")", "if", "not", "vcap_services", ":", "raise", "ValueError", "(", "\"VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'\"", ")", "# If it was passed to config as a dict, simply return it", "if", "isinstance", "(", "vcap_services", ",", "dict", ")", ":", "return", "vcap_services", "try", ":", "# Otherwise, if it's a string, try to load it as json", "vcap_services", "=", "json", ".", "loads", "(", "vcap_services", ")", "except", "json", ".", "JSONDecodeError", ":", "# If that doesn't work, attempt to open it as a file path to the json config.", "try", ":", "with", "open", "(", "vcap_services", ")", "as", "vcap_json_data", ":", "vcap_services", "=", "json", ".", "load", "(", "vcap_json_data", ")", "except", ":", "raise", "ValueError", "(", "\"VCAP_SERVICES information is not JSON or a file containing JSON:\"", ",", "vcap_services", ")", "return", "vcap_services" ]
Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If `vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable. Args: vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file. vcap_services (dict): Return the dict as is. Returns: dict: A dict representation of the VCAP Services information. Raises: ValueError: * if `vcap_services` nor VCAP_SERVICES environment variable are specified. * cannot parse `vcap_services` as a JSON string nor as a filename.
[ "Retrieves", "the", "VCAP", "Services", "information", "from", "the", "ConfigParams", ".", "VCAP_SERVICES", "field", "in", "the", "config", "object", ".", "If", "vcap_services", "is", "not", "specified", "it", "takes", "the", "information", "from", "VCAP_SERVICES", "environment", "variable", "." ]
python
train
46.058824
thomasvandoren/bugzscout-py
bugzscout/ext/cli.py
https://github.com/thomasvandoren/bugzscout-py/blob/514528e958a97e0e7b36870037c5c69661511824/bugzscout/ext/cli.py#L63-L94
def _parse_args(): """Parse and return command line arguments.""" parser = argparse.ArgumentParser( description=__doc__, formatter_class=_CliFormatter) parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose output.') fb_group = parser.add_argument_group('FogBugz arguments') fb_group.add_argument( '-u', '--url', help=( 'URL for bugzscout requests to be sent. Should be something ' 'like .../scoutSubmit.asp.')) fb_group.add_argument( '--user', help='User to designate when submitting via bugzscout.') fb_group.add_argument( '--project', help='Fogbugz project to file cases under.') fb_group.add_argument( '--area', help='Fogbugz area to file cases under.') error_group = parser.add_argument_group('error arguments') error_group.add_argument('-e', '--extra', help='Extra data to send with error.') error_group.add_argument('--default-message', help='Set default message if case is new.') error_group.add_argument('description', help=('Description of error. Will be matched ' 'against existing cases.')) parser.set_defaults(**_defaults()) return parser.parse_args()
[ "def", "_parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "__doc__", ",", "formatter_class", "=", "_CliFormatter", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Enable verbose output.'", ")", "fb_group", "=", "parser", ".", "add_argument_group", "(", "'FogBugz arguments'", ")", "fb_group", ".", "add_argument", "(", "'-u'", ",", "'--url'", ",", "help", "=", "(", "'URL for bugzscout requests to be sent. Should be something '", "'like .../scoutSubmit.asp.'", ")", ")", "fb_group", ".", "add_argument", "(", "'--user'", ",", "help", "=", "'User to designate when submitting via bugzscout.'", ")", "fb_group", ".", "add_argument", "(", "'--project'", ",", "help", "=", "'Fogbugz project to file cases under.'", ")", "fb_group", ".", "add_argument", "(", "'--area'", ",", "help", "=", "'Fogbugz area to file cases under.'", ")", "error_group", "=", "parser", ".", "add_argument_group", "(", "'error arguments'", ")", "error_group", ".", "add_argument", "(", "'-e'", ",", "'--extra'", ",", "help", "=", "'Extra data to send with error.'", ")", "error_group", ".", "add_argument", "(", "'--default-message'", ",", "help", "=", "'Set default message if case is new.'", ")", "error_group", ".", "add_argument", "(", "'description'", ",", "help", "=", "(", "'Description of error. Will be matched '", "'against existing cases.'", ")", ")", "parser", ".", "set_defaults", "(", "*", "*", "_defaults", "(", ")", ")", "return", "parser", ".", "parse_args", "(", ")" ]
Parse and return command line arguments.
[ "Parse", "and", "return", "command", "line", "arguments", "." ]
python
train
41.78125
angr/claripy
claripy/vsa/discrete_strided_interval_set.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/vsa/discrete_strided_interval_set.py#L536-L552
def _union_with_dsis(self, dsis): """ Union with another DiscreteStridedIntervalSet. :param dsis: :return: """ copied = self.copy() for a in dsis._si_set: copied = copied.union(a) if isinstance(copied, DiscreteStridedIntervalSet): copied._update_bounds(dsis) return copied.normalize()
[ "def", "_union_with_dsis", "(", "self", ",", "dsis", ")", ":", "copied", "=", "self", ".", "copy", "(", ")", "for", "a", "in", "dsis", ".", "_si_set", ":", "copied", "=", "copied", ".", "union", "(", "a", ")", "if", "isinstance", "(", "copied", ",", "DiscreteStridedIntervalSet", ")", ":", "copied", ".", "_update_bounds", "(", "dsis", ")", "return", "copied", ".", "normalize", "(", ")" ]
Union with another DiscreteStridedIntervalSet. :param dsis: :return:
[ "Union", "with", "another", "DiscreteStridedIntervalSet", "." ]
python
train
21.705882
MaxStrange/AudioSegment
audiosegment.py
https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/audiosegment.py#L1142-L1151
def silent(duration=1000, frame_rate=11025): """ Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence. :param duration: The duration of the returned object in ms. :param frame_rate: The samples per second of the returned object. :returns: AudioSegment object filled with pure digital silence. """ seg = pydub.AudioSegment.silent(duration=duration, frame_rate=frame_rate) return AudioSegment(seg, "")
[ "def", "silent", "(", "duration", "=", "1000", ",", "frame_rate", "=", "11025", ")", ":", "seg", "=", "pydub", ".", "AudioSegment", ".", "silent", "(", "duration", "=", "duration", ",", "frame_rate", "=", "frame_rate", ")", "return", "AudioSegment", "(", "seg", ",", "\"\"", ")" ]
Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence. :param duration: The duration of the returned object in ms. :param frame_rate: The samples per second of the returned object. :returns: AudioSegment object filled with pure digital silence.
[ "Creates", "an", "AudioSegment", "object", "of", "the", "specified", "duration", "/", "frame_rate", "filled", "with", "digital", "silence", "." ]
python
test
46.6
alfred82santa/dirty-models
dirty_models/model_types.py
https://github.com/alfred82santa/dirty-models/blob/354becdb751b21f673515eae928c256c7e923c50/dirty_models/model_types.py#L395-L426
def get_attrs_by_path(self, field_path, stop_first=False): """ It returns list of values looked up by field path. Field path is dot-formatted string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as wildcard. :type field_path: list or None. :param stop_first: Stop iteration on first value looked up. Default: False. :type stop_first: bool :return: value """ index_list, next_field = self._get_indexes_by_path(field_path) values = [] for idx in index_list: if next_field: try: res = self[idx].get_attrs_by_path(next_field, stop_first=stop_first) if res is None: continue values.extend(res) if stop_first and len(values): break except AttributeError: pass else: if stop_first: return [self[idx], ] values.append(self[idx]) return values if len(values) else None
[ "def", "get_attrs_by_path", "(", "self", ",", "field_path", ",", "stop_first", "=", "False", ")", ":", "index_list", ",", "next_field", "=", "self", ".", "_get_indexes_by_path", "(", "field_path", ")", "values", "=", "[", "]", "for", "idx", "in", "index_list", ":", "if", "next_field", ":", "try", ":", "res", "=", "self", "[", "idx", "]", ".", "get_attrs_by_path", "(", "next_field", ",", "stop_first", "=", "stop_first", ")", "if", "res", "is", "None", ":", "continue", "values", ".", "extend", "(", "res", ")", "if", "stop_first", "and", "len", "(", "values", ")", ":", "break", "except", "AttributeError", ":", "pass", "else", ":", "if", "stop_first", ":", "return", "[", "self", "[", "idx", "]", ",", "]", "values", ".", "append", "(", "self", "[", "idx", "]", ")", "return", "values", "if", "len", "(", "values", ")", "else", "None" ]
It returns list of values looked up by field path. Field path is dot-formatted string path: ``parent_field.child_field``. :param field_path: field path. It allows ``*`` as wildcard. :type field_path: list or None. :param stop_first: Stop iteration on first value looked up. Default: False. :type stop_first: bool :return: value
[ "It", "returns", "list", "of", "values", "looked", "up", "by", "field", "path", ".", "Field", "path", "is", "dot", "-", "formatted", "string", "path", ":", "parent_field", ".", "child_field", "." ]
python
train
35.28125
keon/algorithms
algorithms/arrays/three_sum.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/arrays/three_sum.py#L18-L48
def three_sum(array): """ :param array: List[int] :return: Set[ Tuple[int, int, int] ] """ res = set() array.sort() for i in range(len(array) - 2): if i > 0 and array[i] == array[i - 1]: continue l, r = i + 1, len(array) - 1 while l < r: s = array[i] + array[l] + array[r] if s > 0: r -= 1 elif s < 0: l += 1 else: # found three sum res.add((array[i], array[l], array[r])) # remove duplicates while l < r and array[l] == array[l + 1]: l += 1 while l < r and array[r] == array[r - 1]: r -= 1 l += 1 r -= 1 return res
[ "def", "three_sum", "(", "array", ")", ":", "res", "=", "set", "(", ")", "array", ".", "sort", "(", ")", "for", "i", "in", "range", "(", "len", "(", "array", ")", "-", "2", ")", ":", "if", "i", ">", "0", "and", "array", "[", "i", "]", "==", "array", "[", "i", "-", "1", "]", ":", "continue", "l", ",", "r", "=", "i", "+", "1", ",", "len", "(", "array", ")", "-", "1", "while", "l", "<", "r", ":", "s", "=", "array", "[", "i", "]", "+", "array", "[", "l", "]", "+", "array", "[", "r", "]", "if", "s", ">", "0", ":", "r", "-=", "1", "elif", "s", "<", "0", ":", "l", "+=", "1", "else", ":", "# found three sum", "res", ".", "add", "(", "(", "array", "[", "i", "]", ",", "array", "[", "l", "]", ",", "array", "[", "r", "]", ")", ")", "# remove duplicates", "while", "l", "<", "r", "and", "array", "[", "l", "]", "==", "array", "[", "l", "+", "1", "]", ":", "l", "+=", "1", "while", "l", "<", "r", "and", "array", "[", "r", "]", "==", "array", "[", "r", "-", "1", "]", ":", "r", "-=", "1", "l", "+=", "1", "r", "-=", "1", "return", "res" ]
:param array: List[int] :return: Set[ Tuple[int, int, int] ]
[ ":", "param", "array", ":", "List", "[", "int", "]", ":", "return", ":", "Set", "[", "Tuple", "[", "int", "int", "int", "]", "]" ]
python
train
25.419355
Tanganelli/CoAPthon3
coapthon/caching/cache.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/caching/cache.py#L89-L113
def search_response(self, request): """ creates a key from the request and searches the cache with it :param request: :return CacheElement: returns None if there's a cache miss """ logger.debug("Cache Search Response") if self.cache.is_empty() is True: logger.debug("Empty Cache") return None """ create a new cache key from the request """ if self.mode == defines.FORWARD_PROXY: search_key = CacheKey(request) else: search_key = ReverseCacheKey(request) response = self.cache.get(search_key) return response
[ "def", "search_response", "(", "self", ",", "request", ")", ":", "logger", ".", "debug", "(", "\"Cache Search Response\"", ")", "if", "self", ".", "cache", ".", "is_empty", "(", ")", "is", "True", ":", "logger", ".", "debug", "(", "\"Empty Cache\"", ")", "return", "None", "\"\"\"\n create a new cache key from the request\n \"\"\"", "if", "self", ".", "mode", "==", "defines", ".", "FORWARD_PROXY", ":", "search_key", "=", "CacheKey", "(", "request", ")", "else", ":", "search_key", "=", "ReverseCacheKey", "(", "request", ")", "response", "=", "self", ".", "cache", ".", "get", "(", "search_key", ")", "return", "response" ]
creates a key from the request and searches the cache with it :param request: :return CacheElement: returns None if there's a cache miss
[ "creates", "a", "key", "from", "the", "request", "and", "searches", "the", "cache", "with", "it" ]
python
train
26
vertexproject/synapse
synapse/lib/certdir.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/certdir.py#L729-L745
def isUserCert(self, name): ''' Checks if a user certificate exists. Args: name (str): The name of the user keypair. Examples: Check if the user cert "myuser" exists: exists = cdir.isUserCert('myuser') Returns: bool: True if the certificate is present, False otherwise. ''' crtpath = self._getPathJoin('users', '%s.crt' % name) return os.path.isfile(crtpath)
[ "def", "isUserCert", "(", "self", ",", "name", ")", ":", "crtpath", "=", "self", ".", "_getPathJoin", "(", "'users'", ",", "'%s.crt'", "%", "name", ")", "return", "os", ".", "path", ".", "isfile", "(", "crtpath", ")" ]
Checks if a user certificate exists. Args: name (str): The name of the user keypair. Examples: Check if the user cert "myuser" exists: exists = cdir.isUserCert('myuser') Returns: bool: True if the certificate is present, False otherwise.
[ "Checks", "if", "a", "user", "certificate", "exists", "." ]
python
train
27.176471
SwoopSearch/pyaddress
address/address.py
https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L348-L375
def check_apartment_number(self, token): """ Finds apartment, unit, #, etc, regardless of spot in string. This needs to come after everything else has been ruled out, because it has a lot of false positives. """ apartment_regexes = [r'#\w+ & \w+', '#\w+ rm \w+', "#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+', r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*', r'style\s\w{1,2}', r'\d{1,4}/\d{1,4}', r'\d{1,4}', r'\w{1,2}'] for regex in apartment_regexes: if re.match(regex, token.lower()): self.apartment = self._clean(token) return True # if self.apartment is None and re.match(apartment_regex_number, token.lower()): ## print "Apt regex" # self.apartment = token # return True ## If we come on apt or apartment and already have an apartment number, add apt or apartment to the front if self.apartment and token.lower() in ['apt', 'apartment']: # print "Apt in a_n" self.apartment = self._clean(token + ' ' + self.apartment) return True if not self.street_suffix and not self.street and not self.apartment: # print "Searching for unmatched term: ", token, token.lower(), if re.match(r'\d?\w?', token.lower()): self.apartment = self._clean(token) return True return False
[ "def", "check_apartment_number", "(", "self", ",", "token", ")", ":", "apartment_regexes", "=", "[", "r'#\\w+ & \\w+'", ",", "'#\\w+ rm \\w+'", ",", "\"#\\w+-\\w\"", ",", "r'apt #{0,1}\\w+'", ",", "r'apartment #{0,1}\\w+'", ",", "r'#\\w+'", ",", "r'# \\w+'", ",", "r'rm \\w+'", ",", "r'unit #?\\w+'", ",", "r'units #?\\w+'", ",", "r'- #{0,1}\\w+'", ",", "r'no\\s?\\d+\\w*'", ",", "r'style\\s\\w{1,2}'", ",", "r'\\d{1,4}/\\d{1,4}'", ",", "r'\\d{1,4}'", ",", "r'\\w{1,2}'", "]", "for", "regex", "in", "apartment_regexes", ":", "if", "re", ".", "match", "(", "regex", ",", "token", ".", "lower", "(", ")", ")", ":", "self", ".", "apartment", "=", "self", ".", "_clean", "(", "token", ")", "return", "True", "# if self.apartment is None and re.match(apartment_regex_number, token.lower()):", "## print \"Apt regex\"", "# self.apartment = token", "# return True", "## If we come on apt or apartment and already have an apartment number, add apt or apartment to the front", "if", "self", ".", "apartment", "and", "token", ".", "lower", "(", ")", "in", "[", "'apt'", ",", "'apartment'", "]", ":", "# print \"Apt in a_n\"", "self", ".", "apartment", "=", "self", ".", "_clean", "(", "token", "+", "' '", "+", "self", ".", "apartment", ")", "return", "True", "if", "not", "self", ".", "street_suffix", "and", "not", "self", ".", "street", "and", "not", "self", ".", "apartment", ":", "# print \"Searching for unmatched term: \", token, token.lower(),", "if", "re", ".", "match", "(", "r'\\d?\\w?'", ",", "token", ".", "lower", "(", ")", ")", ":", "self", ".", "apartment", "=", "self", ".", "_clean", "(", "token", ")", "return", "True", "return", "False" ]
Finds apartment, unit, #, etc, regardless of spot in string. This needs to come after everything else has been ruled out, because it has a lot of false positives.
[ "Finds", "apartment", "unit", "#", "etc", "regardless", "of", "spot", "in", "string", ".", "This", "needs", "to", "come", "after", "everything", "else", "has", "been", "ruled", "out", "because", "it", "has", "a", "lot", "of", "false", "positives", "." ]
python
train
56.214286
tensorflow/tensor2tensor
tensor2tensor/layers/modalities.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/modalities.py#L552-L566
def video_bitwise_bottom(x, model_hparams, vocab_size): """Bottom transformation for embedding video bitwise.""" pixel_embedding_size = 64 inputs = x with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE): common_layers.summarize_video(inputs, "bottom") # Embed bitwise. assert vocab_size == 256 embedded = discretization.int_to_bit_embed(inputs, 8, pixel_embedding_size) # Project. return tf.layers.dense( embedded, model_hparams.hidden_size, name="merge_pixel_embedded_frames")
[ "def", "video_bitwise_bottom", "(", "x", ",", "model_hparams", ",", "vocab_size", ")", ":", "pixel_embedding_size", "=", "64", "inputs", "=", "x", "with", "tf", ".", "variable_scope", "(", "\"video_modality_bitwise\"", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "common_layers", ".", "summarize_video", "(", "inputs", ",", "\"bottom\"", ")", "# Embed bitwise.", "assert", "vocab_size", "==", "256", "embedded", "=", "discretization", ".", "int_to_bit_embed", "(", "inputs", ",", "8", ",", "pixel_embedding_size", ")", "# Project.", "return", "tf", ".", "layers", ".", "dense", "(", "embedded", ",", "model_hparams", ".", "hidden_size", ",", "name", "=", "\"merge_pixel_embedded_frames\"", ")" ]
Bottom transformation for embedding video bitwise.
[ "Bottom", "transformation", "for", "embedding", "video", "bitwise", "." ]
python
train
38.866667
MoseleyBioinformaticsLab/nmrstarlib
nmrstarlib/nmrstarlib.py
https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/nmrstarlib.py#L399-L423
def _build_loop(self, lexer): """Build saveframe loop. :param lexer: instance of lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Fields and values of the loop. :rtype: :py:class:`tuple` """ fields = [] values = [] token = next(lexer) while token[0] == u"_": fields.append(token[1:]) token = next(lexer) while token != u"stop_": values.append(token) token = next(lexer) assert float(len(values) / len(fields)).is_integer(), \ "Error in loop construction: number of fields must be equal to number of values." values = [OrderedDict(zip(fields, values[i:i + len(fields)])) for i in range(0, len(values), len(fields))] return fields, values
[ "def", "_build_loop", "(", "self", ",", "lexer", ")", ":", "fields", "=", "[", "]", "values", "=", "[", "]", "token", "=", "next", "(", "lexer", ")", "while", "token", "[", "0", "]", "==", "u\"_\"", ":", "fields", ".", "append", "(", "token", "[", "1", ":", "]", ")", "token", "=", "next", "(", "lexer", ")", "while", "token", "!=", "u\"stop_\"", ":", "values", ".", "append", "(", "token", ")", "token", "=", "next", "(", "lexer", ")", "assert", "float", "(", "len", "(", "values", ")", "/", "len", "(", "fields", ")", ")", ".", "is_integer", "(", ")", ",", "\"Error in loop construction: number of fields must be equal to number of values.\"", "values", "=", "[", "OrderedDict", "(", "zip", "(", "fields", ",", "values", "[", "i", ":", "i", "+", "len", "(", "fields", ")", "]", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "values", ")", ",", "len", "(", "fields", ")", ")", "]", "return", "fields", ",", "values" ]
Build saveframe loop. :param lexer: instance of lexical analyzer. :type lexer: :func:`~nmrstarlib.bmrblex.bmrblex` :return: Fields and values of the loop. :rtype: :py:class:`tuple`
[ "Build", "saveframe", "loop", "." ]
python
train
32.64
veltzer/pylogconf
config/helpers.py
https://github.com/veltzer/pylogconf/blob/a3e230a073380b43b5d5096f40bb37ae28f3e430/config/helpers.py#L24-L35
def find_packages(path: str) -> List[str]: """ A better version of find_packages than what setuptools offers This function needs to be deterministic. :param path: :return: """ ret = [] for root, _dir, files in os.walk(path): if '__init__.py' in files: ret.append(root.replace("/", ".")) return sorted(ret)
[ "def", "find_packages", "(", "path", ":", "str", ")", "->", "List", "[", "str", "]", ":", "ret", "=", "[", "]", "for", "root", ",", "_dir", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "if", "'__init__.py'", "in", "files", ":", "ret", ".", "append", "(", "root", ".", "replace", "(", "\"/\"", ",", "\".\"", ")", ")", "return", "sorted", "(", "ret", ")" ]
A better version of find_packages than what setuptools offers This function needs to be deterministic. :param path: :return:
[ "A", "better", "version", "of", "find_packages", "than", "what", "setuptools", "offers", "This", "function", "needs", "to", "be", "deterministic", ".", ":", "param", "path", ":", ":", "return", ":" ]
python
train
29.166667
Robpol86/sphinxcontrib-versioning
sphinxcontrib/versioning/git.py
https://github.com/Robpol86/sphinxcontrib-versioning/blob/920edec0ac764081b583a2ecf4e6952762b9dbf2/sphinxcontrib/versioning/git.py#L181-L209
def list_remote(local_root): """Get remote branch/tag latest SHAs. :raise GitError: When git ls-remote fails. :param str local_root: Local path to git root directory. :return: List of tuples containing strings. Each tuple is sha, name, kind. :rtype: list """ command = ['git', 'ls-remote', '--heads', '--tags'] try: output = run_command(local_root, command) except CalledProcessError as exc: raise GitError('Git failed to list remote refs.', exc.output) # Dereference annotated tags if any. No need to fetch annotations. if '^{}' in output: parsed = list() for group in (m.groupdict() for m in RE_REMOTE.finditer(output)): dereferenced, name, kind = group['name'].endswith('^{}'), group['name'][:-3], group['kind'] if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']: parsed[-1]['sha'] = group['sha'] else: parsed.append(group) else: parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)] return [[i['sha'], i['name'], i['kind']] for i in parsed]
[ "def", "list_remote", "(", "local_root", ")", ":", "command", "=", "[", "'git'", ",", "'ls-remote'", ",", "'--heads'", ",", "'--tags'", "]", "try", ":", "output", "=", "run_command", "(", "local_root", ",", "command", ")", "except", "CalledProcessError", "as", "exc", ":", "raise", "GitError", "(", "'Git failed to list remote refs.'", ",", "exc", ".", "output", ")", "# Dereference annotated tags if any. No need to fetch annotations.", "if", "'^{}'", "in", "output", ":", "parsed", "=", "list", "(", ")", "for", "group", "in", "(", "m", ".", "groupdict", "(", ")", "for", "m", "in", "RE_REMOTE", ".", "finditer", "(", "output", ")", ")", ":", "dereferenced", ",", "name", ",", "kind", "=", "group", "[", "'name'", "]", ".", "endswith", "(", "'^{}'", ")", ",", "group", "[", "'name'", "]", "[", ":", "-", "3", "]", ",", "group", "[", "'kind'", "]", "if", "dereferenced", "and", "parsed", "and", "kind", "==", "parsed", "[", "-", "1", "]", "[", "'kind'", "]", "==", "'tags'", "and", "name", "==", "parsed", "[", "-", "1", "]", "[", "'name'", "]", ":", "parsed", "[", "-", "1", "]", "[", "'sha'", "]", "=", "group", "[", "'sha'", "]", "else", ":", "parsed", ".", "append", "(", "group", ")", "else", ":", "parsed", "=", "[", "m", ".", "groupdict", "(", ")", "for", "m", "in", "RE_REMOTE", ".", "finditer", "(", "output", ")", "]", "return", "[", "[", "i", "[", "'sha'", "]", ",", "i", "[", "'name'", "]", ",", "i", "[", "'kind'", "]", "]", "for", "i", "in", "parsed", "]" ]
Get remote branch/tag latest SHAs. :raise GitError: When git ls-remote fails. :param str local_root: Local path to git root directory. :return: List of tuples containing strings. Each tuple is sha, name, kind. :rtype: list
[ "Get", "remote", "branch", "/", "tag", "latest", "SHAs", "." ]
python
train
39.172414
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/storage_api.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/storage_api.py#L374-L419
def read(self, size=-1): """Read data from RAW file. Args: size: Number of bytes to read as integer. Actual number of bytes read is always equal to size unless EOF is reached. If size is negative or unspecified, read the entire file. Returns: data read as str. Raises: IOError: When this buffer is closed. """ self._check_open() if not self._remaining(): return '' data_list = [] while True: remaining = self._buffer.remaining() if size >= 0 and size < remaining: data_list.append(self._buffer.read(size)) self._offset += size break else: size -= remaining self._offset += remaining data_list.append(self._buffer.read()) if self._buffer_future is None: if size < 0 or size >= self._remaining(): needs = self._remaining() else: needs = size data_list.extend(self._get_segments(self._offset, needs)) self._offset += needs break if self._buffer_future: self._buffer.reset(self._buffer_future.get_result()) self._buffer_future = None if self._buffer_future is None: self._request_next_buffer() return ''.join(data_list)
[ "def", "read", "(", "self", ",", "size", "=", "-", "1", ")", ":", "self", ".", "_check_open", "(", ")", "if", "not", "self", ".", "_remaining", "(", ")", ":", "return", "''", "data_list", "=", "[", "]", "while", "True", ":", "remaining", "=", "self", ".", "_buffer", ".", "remaining", "(", ")", "if", "size", ">=", "0", "and", "size", "<", "remaining", ":", "data_list", ".", "append", "(", "self", ".", "_buffer", ".", "read", "(", "size", ")", ")", "self", ".", "_offset", "+=", "size", "break", "else", ":", "size", "-=", "remaining", "self", ".", "_offset", "+=", "remaining", "data_list", ".", "append", "(", "self", ".", "_buffer", ".", "read", "(", ")", ")", "if", "self", ".", "_buffer_future", "is", "None", ":", "if", "size", "<", "0", "or", "size", ">=", "self", ".", "_remaining", "(", ")", ":", "needs", "=", "self", ".", "_remaining", "(", ")", "else", ":", "needs", "=", "size", "data_list", ".", "extend", "(", "self", ".", "_get_segments", "(", "self", ".", "_offset", ",", "needs", ")", ")", "self", ".", "_offset", "+=", "needs", "break", "if", "self", ".", "_buffer_future", ":", "self", ".", "_buffer", ".", "reset", "(", "self", ".", "_buffer_future", ".", "get_result", "(", ")", ")", "self", ".", "_buffer_future", "=", "None", "if", "self", ".", "_buffer_future", "is", "None", ":", "self", ".", "_request_next_buffer", "(", ")", "return", "''", ".", "join", "(", "data_list", ")" ]
Read data from RAW file. Args: size: Number of bytes to read as integer. Actual number of bytes read is always equal to size unless EOF is reached. If size is negative or unspecified, read the entire file. Returns: data read as str. Raises: IOError: When this buffer is closed.
[ "Read", "data", "from", "RAW", "file", "." ]
python
train
27.021739
PyCQA/pylint
pylint/checkers/base.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/base.py#L2236-L2243
def register(linter): """required method to auto register this checker""" linter.register_checker(BasicErrorChecker(linter)) linter.register_checker(BasicChecker(linter)) linter.register_checker(NameChecker(linter)) linter.register_checker(DocStringChecker(linter)) linter.register_checker(PassChecker(linter)) linter.register_checker(ComparisonChecker(linter))
[ "def", "register", "(", "linter", ")", ":", "linter", ".", "register_checker", "(", "BasicErrorChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "BasicChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "NameChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "DocStringChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "PassChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "ComparisonChecker", "(", "linter", ")", ")" ]
required method to auto register this checker
[ "required", "method", "to", "auto", "register", "this", "checker" ]
python
test
47.75
SBRG/ssbio
ssbio/protein/structure/structprop.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L620-L666
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False): """Use NGLviewer to display a structure in a Jupyter notebook Args: only_chains (str, list): Chain ID or IDs to display opacity (float): Opacity of the structure recolor (bool): If structure should be cleaned and recolored to silver gui (bool): If the NGLview GUI should show up Returns: NGLviewer object """ # TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly if ssbio.utils.is_ipynb(): import nglview as nv else: raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment') if not self.structure_file: raise ValueError("Structure file not loaded") only_chains = ssbio.utils.force_list(only_chains) to_show_chains = '( ' for c in only_chains: to_show_chains += ':{} or'.format(c) to_show_chains = to_show_chains.strip(' or ') to_show_chains += ' )' if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz': view = nv.NGLWidget() view.add_component(self.structure_path) else: view = nv.show_structure_file(self.structure_path, gui=gui) if recolor: view.clear_representations() if only_chains: view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity) else: view.add_cartoon(selection='protein', color='silver', opacity=opacity) elif only_chains: view.clear_representations() view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity) return view
[ "def", "view_structure", "(", "self", ",", "only_chains", "=", "None", ",", "opacity", "=", "1.0", ",", "recolor", "=", "False", ",", "gui", "=", "False", ")", ":", "# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly", "if", "ssbio", ".", "utils", ".", "is_ipynb", "(", ")", ":", "import", "nglview", "as", "nv", "else", ":", "raise", "EnvironmentError", "(", "'Unable to display structure - not running in a Jupyter notebook environment'", ")", "if", "not", "self", ".", "structure_file", ":", "raise", "ValueError", "(", "\"Structure file not loaded\"", ")", "only_chains", "=", "ssbio", ".", "utils", ".", "force_list", "(", "only_chains", ")", "to_show_chains", "=", "'( '", "for", "c", "in", "only_chains", ":", "to_show_chains", "+=", "':{} or'", ".", "format", "(", "c", ")", "to_show_chains", "=", "to_show_chains", ".", "strip", "(", "' or '", ")", "to_show_chains", "+=", "' )'", "if", "self", ".", "file_type", "==", "'mmtf'", "or", "self", ".", "file_type", "==", "'mmtf.gz'", ":", "view", "=", "nv", ".", "NGLWidget", "(", ")", "view", ".", "add_component", "(", "self", ".", "structure_path", ")", "else", ":", "view", "=", "nv", ".", "show_structure_file", "(", "self", ".", "structure_path", ",", "gui", "=", "gui", ")", "if", "recolor", ":", "view", ".", "clear_representations", "(", ")", "if", "only_chains", ":", "view", ".", "add_cartoon", "(", "selection", "=", "'{} and (not hydrogen)'", ".", "format", "(", "to_show_chains", ")", ",", "color", "=", "'silver'", ",", "opacity", "=", "opacity", ")", "else", ":", "view", ".", "add_cartoon", "(", "selection", "=", "'protein'", ",", "color", "=", "'silver'", ",", "opacity", "=", "opacity", ")", "elif", "only_chains", ":", "view", ".", "clear_representations", "(", ")", "view", ".", "add_cartoon", "(", "selection", "=", "'{} and (not hydrogen)'", ".", "format", "(", "to_show_chains", ")", ",", "color", "=", "'silver'", ",", "opacity", "=", "opacity", ")", "return", "view" ]
Use NGLviewer to display a structure in a Jupyter notebook Args: only_chains (str, list): Chain ID or IDs to display opacity (float): Opacity of the structure recolor (bool): If structure should be cleaned and recolored to silver gui (bool): If the NGLview GUI should show up Returns: NGLviewer object
[ "Use", "NGLviewer", "to", "display", "a", "structure", "in", "a", "Jupyter", "notebook" ]
python
train
39.659574
AguaClara/aguaclara
aguaclara/design/cdc.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/cdc.py#L102-L110
def _len_tube(Flow, Diam, HeadLoss, conc_chem, temp, en_chem, KMinor): """Length of tube required to get desired head loss at maximum flow based on the Hagen-Poiseuille equation.""" num1 = pc.gravity.magnitude * HeadLoss * np.pi * (Diam**4) denom1 = 128 * viscosity_kinematic_chem(conc_chem, temp, en_chem) * Flow num2 = Flow * KMinor denom2 = 16 * np.pi * viscosity_kinematic_chem(conc_chem, temp, en_chem) len = ((num1/denom1) - (num2/denom2)) return len.magnitude
[ "def", "_len_tube", "(", "Flow", ",", "Diam", ",", "HeadLoss", ",", "conc_chem", ",", "temp", ",", "en_chem", ",", "KMinor", ")", ":", "num1", "=", "pc", ".", "gravity", ".", "magnitude", "*", "HeadLoss", "*", "np", ".", "pi", "*", "(", "Diam", "**", "4", ")", "denom1", "=", "128", "*", "viscosity_kinematic_chem", "(", "conc_chem", ",", "temp", ",", "en_chem", ")", "*", "Flow", "num2", "=", "Flow", "*", "KMinor", "denom2", "=", "16", "*", "np", ".", "pi", "*", "viscosity_kinematic_chem", "(", "conc_chem", ",", "temp", ",", "en_chem", ")", "len", "=", "(", "(", "num1", "/", "denom1", ")", "-", "(", "num2", "/", "denom2", ")", ")", "return", "len", ".", "magnitude" ]
Length of tube required to get desired head loss at maximum flow based on the Hagen-Poiseuille equation.
[ "Length", "of", "tube", "required", "to", "get", "desired", "head", "loss", "at", "maximum", "flow", "based", "on", "the", "Hagen", "-", "Poiseuille", "equation", "." ]
python
train
54.444444
avihad/twistes
twistes/client.py
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/client.py#L63-L79
def get(self, index, id, fields=None, doc_type=EsConst.ALL_VALUES, **query_params): """ Retrieve specific record by id `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_ :param index: the index name to query :param id: the id of the record :param fields: the fields you what to fetch from the record (str separated by comma's) :param doc_type: the doc type to search in :param query_params: params :return: """ if fields: query_params[EsConst.FIELDS] = fields path = self._es_parser.make_path(index, doc_type, id) result = yield self._perform_request(HttpMethod.GET, path, params=query_params) returnValue(result)
[ "def", "get", "(", "self", ",", "index", ",", "id", ",", "fields", "=", "None", ",", "doc_type", "=", "EsConst", ".", "ALL_VALUES", ",", "*", "*", "query_params", ")", ":", "if", "fields", ":", "query_params", "[", "EsConst", ".", "FIELDS", "]", "=", "fields", "path", "=", "self", ".", "_es_parser", ".", "make_path", "(", "index", ",", "doc_type", ",", "id", ")", "result", "=", "yield", "self", ".", "_perform_request", "(", "HttpMethod", ".", "GET", ",", "path", ",", "params", "=", "query_params", ")", "returnValue", "(", "result", ")" ]
Retrieve specific record by id `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_ :param index: the index name to query :param id: the id of the record :param fields: the fields you what to fetch from the record (str separated by comma's) :param doc_type: the doc type to search in :param query_params: params :return:
[ "Retrieve", "specific", "record", "by", "id", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "docs", "-", "get", ".", "html", ">", "_", ":", "param", "index", ":", "the", "index", "name", "to", "query", ":", "param", "id", ":", "the", "id", "of", "the", "record", ":", "param", "fields", ":", "the", "fields", "you", "what", "to", "fetch", "from", "the", "record", "(", "str", "separated", "by", "comma", "s", ")", ":", "param", "doc_type", ":", "the", "doc", "type", "to", "search", "in", ":", "param", "query_params", ":", "params", ":", "return", ":" ]
python
train
44.294118
tensorflow/cleverhans
cleverhans/utils.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L167-L173
def pair_visual(*args, **kwargs): """Deprecation wrapper""" warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. " "cleverhans.utils.pair_visual may be removed on or after " "2019-04-24.") from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual return new_pair_visual(*args, **kwargs)
[ "def", "pair_visual", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"`pair_visual` has moved to `cleverhans.plot.pyplot_image`. \"", "\"cleverhans.utils.pair_visual may be removed on or after \"", "\"2019-04-24.\"", ")", "from", "cleverhans", ".", "plot", ".", "pyplot_image", "import", "pair_visual", "as", "new_pair_visual", "return", "new_pair_visual", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Deprecation wrapper
[ "Deprecation", "wrapper" ]
python
train
50.714286
google/grr
grr/server/grr_response_server/gui/wsgiapp.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/wsgiapp.py#L226-L269
def _HandleHomepage(self, request): """Renders GRR home page by rendering base.html Jinja template.""" _ = request env = jinja2.Environment( loader=jinja2.FileSystemLoader(config.CONFIG["AdminUI.template_root"]), autoescape=True) create_time = psutil.Process(os.getpid()).create_time() context = { "heading": config.CONFIG["AdminUI.heading"], "report_url": config.CONFIG["AdminUI.report_url"], "help_url": config.CONFIG["AdminUI.help_url"], "timestamp": utils.SmartStr(create_time), "use_precompiled_js": config.CONFIG["AdminUI.use_precompiled_js"], # Used in conjunction with FirebaseWebAuthManager. "firebase_api_key": config.CONFIG["AdminUI.firebase_api_key"], "firebase_auth_domain": config.CONFIG["AdminUI.firebase_auth_domain"], "firebase_auth_provider": config.CONFIG["AdminUI.firebase_auth_provider"], "grr_version": config.CONFIG["Source.version_string"] } template = env.get_template("base.html") response = werkzeug_wrappers.Response( template.render(context), mimetype="text/html") # For a redirect-based Firebase authentication scheme we won't have any # user information at this point - therefore checking if the user is # present. try: StoreCSRFCookie(request.user, response) except RequestHasNoUser: pass return response
[ "def", "_HandleHomepage", "(", "self", ",", "request", ")", ":", "_", "=", "request", "env", "=", "jinja2", ".", "Environment", "(", "loader", "=", "jinja2", ".", "FileSystemLoader", "(", "config", ".", "CONFIG", "[", "\"AdminUI.template_root\"", "]", ")", ",", "autoescape", "=", "True", ")", "create_time", "=", "psutil", ".", "Process", "(", "os", ".", "getpid", "(", ")", ")", ".", "create_time", "(", ")", "context", "=", "{", "\"heading\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.heading\"", "]", ",", "\"report_url\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.report_url\"", "]", ",", "\"help_url\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.help_url\"", "]", ",", "\"timestamp\"", ":", "utils", ".", "SmartStr", "(", "create_time", ")", ",", "\"use_precompiled_js\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.use_precompiled_js\"", "]", ",", "# Used in conjunction with FirebaseWebAuthManager.", "\"firebase_api_key\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.firebase_api_key\"", "]", ",", "\"firebase_auth_domain\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.firebase_auth_domain\"", "]", ",", "\"firebase_auth_provider\"", ":", "config", ".", "CONFIG", "[", "\"AdminUI.firebase_auth_provider\"", "]", ",", "\"grr_version\"", ":", "config", ".", "CONFIG", "[", "\"Source.version_string\"", "]", "}", "template", "=", "env", ".", "get_template", "(", "\"base.html\"", ")", "response", "=", "werkzeug_wrappers", ".", "Response", "(", "template", ".", "render", "(", "context", ")", ",", "mimetype", "=", "\"text/html\"", ")", "# For a redirect-based Firebase authentication scheme we won't have any", "# user information at this point - therefore checking if the user is", "# present.", "try", ":", "StoreCSRFCookie", "(", "request", ".", "user", ",", "response", ")", "except", "RequestHasNoUser", ":", "pass", "return", "response" ]
Renders GRR home page by rendering base.html Jinja template.
[ "Renders", "GRR", "home", "page", "by", "rendering", "base", ".", "html", "Jinja", "template", "." ]
python
train
33.590909
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L19375-L19388
def environment_schedule_unset(self, name): """Schedules unsetting (removing) an environment variable when creating the next guest process. This affects the :py:func:`IGuestSession.environment_changes` attribute. in name of type str Name of the environment variable to unset. This cannot be empty nor can it contain any equal signs. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") self._call("environmentScheduleUnset", in_p=[name])
[ "def", "environment_schedule_unset", "(", "self", ",", "name", ")", ":", "if", "not", "isinstance", "(", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"name can only be an instance of type basestring\"", ")", "self", ".", "_call", "(", "\"environmentScheduleUnset\"", ",", "in_p", "=", "[", "name", "]", ")" ]
Schedules unsetting (removing) an environment variable when creating the next guest process. This affects the :py:func:`IGuestSession.environment_changes` attribute. in name of type str Name of the environment variable to unset. This cannot be empty nor can it contain any equal signs.
[ "Schedules", "unsetting", "(", "removing", ")", "an", "environment", "variable", "when", "creating", "the", "next", "guest", "process", ".", "This", "affects", "the", ":", "py", ":", "func", ":", "IGuestSession", ".", "environment_changes", "attribute", "." ]
python
train
42.642857
harlowja/fasteners
fasteners/lock.py
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L205-L238
def write_lock(self): """Context manager that grants a write lock. Will wait until no active readers. Blocks readers after acquiring. Guaranteed for locks to be processed in fair order (FIFO). Raises a ``RuntimeError`` if an active reader attempts to acquire a lock. """ me = self._current_thread() i_am_writer = self.is_writer(check_pending=False) if self.is_reader() and not i_am_writer: raise RuntimeError("Reader %s to writer privilege" " escalation not allowed" % me) if i_am_writer: # Already the writer; this allows for basic reentrancy. yield self else: with self._cond: self._pending_writers.append(me) while True: # No readers, and no active writer, am I next?? if len(self._readers) == 0 and self._writer is None: if self._pending_writers[0] == me: self._writer = self._pending_writers.popleft() break self._cond.wait() try: yield self finally: with self._cond: self._writer = None self._cond.notify_all()
[ "def", "write_lock", "(", "self", ")", ":", "me", "=", "self", ".", "_current_thread", "(", ")", "i_am_writer", "=", "self", ".", "is_writer", "(", "check_pending", "=", "False", ")", "if", "self", ".", "is_reader", "(", ")", "and", "not", "i_am_writer", ":", "raise", "RuntimeError", "(", "\"Reader %s to writer privilege\"", "\" escalation not allowed\"", "%", "me", ")", "if", "i_am_writer", ":", "# Already the writer; this allows for basic reentrancy.", "yield", "self", "else", ":", "with", "self", ".", "_cond", ":", "self", ".", "_pending_writers", ".", "append", "(", "me", ")", "while", "True", ":", "# No readers, and no active writer, am I next??", "if", "len", "(", "self", ".", "_readers", ")", "==", "0", "and", "self", ".", "_writer", "is", "None", ":", "if", "self", ".", "_pending_writers", "[", "0", "]", "==", "me", ":", "self", ".", "_writer", "=", "self", ".", "_pending_writers", ".", "popleft", "(", ")", "break", "self", ".", "_cond", ".", "wait", "(", ")", "try", ":", "yield", "self", "finally", ":", "with", "self", ".", "_cond", ":", "self", ".", "_writer", "=", "None", "self", ".", "_cond", ".", "notify_all", "(", ")" ]
Context manager that grants a write lock. Will wait until no active readers. Blocks readers after acquiring. Guaranteed for locks to be processed in fair order (FIFO). Raises a ``RuntimeError`` if an active reader attempts to acquire a lock.
[ "Context", "manager", "that", "grants", "a", "write", "lock", "." ]
python
train
38.852941
saltstack/salt
salt/modules/boto_elasticache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L390-L400
def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)]
[ "def", "list_cache_subnet_groups", "(", "name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "return", "[", "g", "[", "'CacheSubnetGroupName'", "]", "for", "g", "in", "get_all_cache_subnet_groups", "(", "name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "]" ]
Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1
[ "Return", "a", "list", "of", "all", "cache", "subnet", "group", "names" ]
python
train
35.636364
videntity/django-djmongo
djmongo/console/utils.py
https://github.com/videntity/django-djmongo/blob/7534e0981a2bc12634cf3f1ed03353623dc57565/djmongo/console/utils.py#L89-L108
def create_mongo_db(database_name, collection_name, initial_document): """Create a new database and collection by inserting one document.""" response_dict = {} try: mongodb_client_url = getattr(settings, 'MONGODB_CLIENT', 'mongodb://localhost:27017/') mc = MongoClient(mongodb_client_url,document_class=OrderedDict) db = mc[str(database_name)] collection = db[str(collection_name)] d = json.loads(initial_document, object_pairs_hook=OrderedDict) collection.save(d) except: # error connecting to mongodb response_dict['error'] = str(sys.exc_info()) return response_dict
[ "def", "create_mongo_db", "(", "database_name", ",", "collection_name", ",", "initial_document", ")", ":", "response_dict", "=", "{", "}", "try", ":", "mongodb_client_url", "=", "getattr", "(", "settings", ",", "'MONGODB_CLIENT'", ",", "'mongodb://localhost:27017/'", ")", "mc", "=", "MongoClient", "(", "mongodb_client_url", ",", "document_class", "=", "OrderedDict", ")", "db", "=", "mc", "[", "str", "(", "database_name", ")", "]", "collection", "=", "db", "[", "str", "(", "collection_name", ")", "]", "d", "=", "json", ".", "loads", "(", "initial_document", ",", "object_pairs_hook", "=", "OrderedDict", ")", "collection", ".", "save", "(", "d", ")", "except", ":", "# error connecting to mongodb", "response_dict", "[", "'error'", "]", "=", "str", "(", "sys", ".", "exc_info", "(", ")", ")", "return", "response_dict" ]
Create a new database and collection by inserting one document.
[ "Create", "a", "new", "database", "and", "collection", "by", "inserting", "one", "document", "." ]
python
train
33.55
fastai/fastai
fastai/train.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/train.py#L144-L147
def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid): "Create an instance of `ClassificationInterpretation`" preds = learn.get_preds(ds_type=ds_type, with_loss=True) return cls(learn, *preds)
[ "def", "from_learner", "(", "cls", ",", "learn", ":", "Learner", ",", "ds_type", ":", "DatasetType", "=", "DatasetType", ".", "Valid", ")", ":", "preds", "=", "learn", ".", "get_preds", "(", "ds_type", "=", "ds_type", ",", "with_loss", "=", "True", ")", "return", "cls", "(", "learn", ",", "*", "preds", ")" ]
Create an instance of `ClassificationInterpretation`
[ "Create", "an", "instance", "of", "ClassificationInterpretation" ]
python
train
59.25
pycontribs/pyrax
pyrax/cloudmonitoring.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudmonitoring.py#L720-L731
def update_entity(self, entity, agent=None, metadata=None): """ Updates the specified entity's values with the supplied parameters. """ body = {} if agent: body["agent_id"] = utils.get_id(agent) if metadata: body["metadata"] = metadata if body: uri = "/%s/%s" % (self.uri_base, utils.get_id(entity)) resp, body = self.api.method_put(uri, body=body)
[ "def", "update_entity", "(", "self", ",", "entity", ",", "agent", "=", "None", ",", "metadata", "=", "None", ")", ":", "body", "=", "{", "}", "if", "agent", ":", "body", "[", "\"agent_id\"", "]", "=", "utils", ".", "get_id", "(", "agent", ")", "if", "metadata", ":", "body", "[", "\"metadata\"", "]", "=", "metadata", "if", "body", ":", "uri", "=", "\"/%s/%s\"", "%", "(", "self", ".", "uri_base", ",", "utils", ".", "get_id", "(", "entity", ")", ")", "resp", ",", "body", "=", "self", ".", "api", ".", "method_put", "(", "uri", ",", "body", "=", "body", ")" ]
Updates the specified entity's values with the supplied parameters.
[ "Updates", "the", "specified", "entity", "s", "values", "with", "the", "supplied", "parameters", "." ]
python
train
36.75
oscarbranson/latools
latools/latools.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L2490-L2525
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None): """ Remove 'fragments' from the calculated filter Parameters ---------- threshold : int Contiguous data regions that contain this number or fewer points are considered 'fragments' mode : str Specifies wither to 'include' or 'exclude' the identified fragments. filt : bool or filt string Which filter to apply the defragmenter to. Defaults to True samples : array_like or None Which samples to apply this filter to. If None, applies to all samples. subset : str or number The subset of samples (defined by make_subset) you want to apply the filter to. Returns ------- None """ if samples is not None: subset = self.make_subset(samples) samples = self._get_samples(subset) for s in samples: f = self.data[s].filt.grab_filt(filt) self.data[s].filt.add(name='defrag_{:s}_{:.0f}'.format(mode, threshold), filt=filters.defrag(f, threshold, mode), info='Defrag {:s} filter with threshold {:.0f}'.format(mode, threshold), params=(threshold, mode, filt, samples, subset))
[ "def", "filter_defragment", "(", "self", ",", "threshold", ",", "mode", "=", "'include'", ",", "filt", "=", "True", ",", "samples", "=", "None", ",", "subset", "=", "None", ")", ":", "if", "samples", "is", "not", "None", ":", "subset", "=", "self", ".", "make_subset", "(", "samples", ")", "samples", "=", "self", ".", "_get_samples", "(", "subset", ")", "for", "s", "in", "samples", ":", "f", "=", "self", ".", "data", "[", "s", "]", ".", "filt", ".", "grab_filt", "(", "filt", ")", "self", ".", "data", "[", "s", "]", ".", "filt", ".", "add", "(", "name", "=", "'defrag_{:s}_{:.0f}'", ".", "format", "(", "mode", ",", "threshold", ")", ",", "filt", "=", "filters", ".", "defrag", "(", "f", ",", "threshold", ",", "mode", ")", ",", "info", "=", "'Defrag {:s} filter with threshold {:.0f}'", ".", "format", "(", "mode", ",", "threshold", ")", ",", "params", "=", "(", "threshold", ",", "mode", ",", "filt", ",", "samples", ",", "subset", ")", ")" ]
Remove 'fragments' from the calculated filter Parameters ---------- threshold : int Contiguous data regions that contain this number or fewer points are considered 'fragments' mode : str Specifies wither to 'include' or 'exclude' the identified fragments. filt : bool or filt string Which filter to apply the defragmenter to. Defaults to True samples : array_like or None Which samples to apply this filter to. If None, applies to all samples. subset : str or number The subset of samples (defined by make_subset) you want to apply the filter to. Returns ------- None
[ "Remove", "fragments", "from", "the", "calculated", "filter" ]
python
test
39.083333
alex-kostirin/pyatomac
atomac/ooldtp/__init__.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ooldtp/__init__.py#L570-L583
def deregisterevent(self, event_name): """ Remove callback of registered event @param event_name: Event name in at-spi format. @type event_name: string @return: 1 if registration was successful, 0 if not. @rtype: integer """ if event_name in self._pollEvents._callback: del self._pollEvents._callback[event_name] return self._remote_deregisterevent(event_name)
[ "def", "deregisterevent", "(", "self", ",", "event_name", ")", ":", "if", "event_name", "in", "self", ".", "_pollEvents", ".", "_callback", ":", "del", "self", ".", "_pollEvents", ".", "_callback", "[", "event_name", "]", "return", "self", ".", "_remote_deregisterevent", "(", "event_name", ")" ]
Remove callback of registered event @param event_name: Event name in at-spi format. @type event_name: string @return: 1 if registration was successful, 0 if not. @rtype: integer
[ "Remove", "callback", "of", "registered", "event" ]
python
valid
31
materialsproject/pymatgen
pymatgen/electronic_structure/boltztrap.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/boltztrap.py#L1696-L1707
def get_carrier_concentration(self): """ gives the carrier concentration (in cm^-3) Returns a dictionary {temp:[]} with an array of carrier concentration (in cm^-3) at each temperature The array relates to each step of electron chemical potential """ return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]] for temp in self._carrier_conc}
[ "def", "get_carrier_concentration", "(", "self", ")", ":", "return", "{", "temp", ":", "[", "1e24", "*", "i", "/", "self", ".", "vol", "for", "i", "in", "self", ".", "_carrier_conc", "[", "temp", "]", "]", "for", "temp", "in", "self", ".", "_carrier_conc", "}" ]
gives the carrier concentration (in cm^-3)

Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature.
The array relates to each step of electron chemical potential
[ "gives", "the", "carrier", "concentration", "(", "in", "cm^", "-", "3", ")" ]
python
train
36.25
fedora-infra/datanommer
datanommer.models/alembic/versions/1d4feffd78fe_add_historic_user_an.py
https://github.com/fedora-infra/datanommer/blob/4a20e216bb404b14f76c7065518fd081e989764d/datanommer.models/alembic/versions/1d4feffd78fe_add_historic_user_an.py#L48-L61
def _page(q, chunk=1000): """ Quick utility to page a query, 1000 items at a time. We need this so we don't OOM (out of memory) ourselves loading the world. """ offset = 0 while True: r = False for elem in q.limit(chunk).offset(offset): r = True yield elem offset += chunk if not r: break
[ "def", "_page", "(", "q", ",", "chunk", "=", "1000", ")", ":", "offset", "=", "0", "while", "True", ":", "r", "=", "False", "for", "elem", "in", "q", ".", "limit", "(", "chunk", ")", ".", "offset", "(", "offset", ")", ":", "r", "=", "True", "yield", "elem", "offset", "+=", "chunk", "if", "not", "r", ":", "break" ]
Quick utility to page a query, 1000 items at a time. We need this so we don't OOM (out of memory) ourselves loading the world.
[ "Quick", "utility", "to", "page", "a", "query", "1000", "items", "at", "a", "time", ".", "We", "need", "this", "so", "we", "don", "t", "OOM", "(", "out", "of", "memory", ")", "ourselves", "loading", "the", "world", "." ]
python
train
26
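The limit/offset paging loop in _page above is a general pattern; the sketch below replays it over a plain Python sequence, with slicing standing in for the SQLAlchemy .limit().offset() calls, purely as an illustration.

def paged(items, chunk=1000):
    """Yield items one chunk at a time, stopping once a chunk comes back empty."""
    offset = 0
    while True:
        got_any = False
        for elem in items[offset:offset + chunk]:  # stand-in for q.limit(chunk).offset(offset)
            got_any = True
            yield elem
        offset += chunk
        if not got_any:
            break

print(sum(paged(range(10000), chunk=1000)))  # 49995000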
angr/angr
angr/state_plugins/symbolic_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/symbolic_memory.py#L1234-L1247
def permissions(self, addr, permissions=None): """ Retrieve the permissions of the page at address `addr`. :param addr: address to get the page permissions :param permissions: Integer or BVV to optionally set page permissions to :return: AST representing the permissions on the page """ out = self.mem.permissions(addr, permissions) # if unicorn is in play and we've marked a page writable, it must be uncached if permissions is not None and self.state.solver.is_true(permissions & 2 == 2): if self.state.has_plugin('unicorn'): self.state.unicorn.uncache_page(addr) return out
[ "def", "permissions", "(", "self", ",", "addr", ",", "permissions", "=", "None", ")", ":", "out", "=", "self", ".", "mem", ".", "permissions", "(", "addr", ",", "permissions", ")", "# if unicorn is in play and we've marked a page writable, it must be uncached", "if", "permissions", "is", "not", "None", "and", "self", ".", "state", ".", "solver", ".", "is_true", "(", "permissions", "&", "2", "==", "2", ")", ":", "if", "self", ".", "state", ".", "has_plugin", "(", "'unicorn'", ")", ":", "self", ".", "state", ".", "unicorn", ".", "uncache_page", "(", "addr", ")", "return", "out" ]
Retrieve the permissions of the page at address `addr`. :param addr: address to get the page permissions :param permissions: Integer or BVV to optionally set page permissions to :return: AST representing the permissions on the page
[ "Retrieve", "the", "permissions", "of", "the", "page", "at", "address", "addr", "." ]
python
train
49.285714
learningequality/ricecooker
ricecooker/managers/tree.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/managers/tree.py#L281-L300
def commit_channel(self, channel_id): """ commit_channel: commits channel to Kolibri Studio Args: channel_id (str): channel's id on Kolibri Studio Returns: channel id and link to uploadedchannel """ payload = { "channel_id":channel_id, "stage": config.STAGE, } response = config.SESSION.post(config.finish_channel_url(), data=json.dumps(payload)) if response.status_code != 200: config.LOGGER.error("\n\nCould not activate channel: {}\n".format(response._content.decode('utf-8'))) if response.status_code == 403: config.LOGGER.error("Channel can be viewed at {}\n\n".format(config.open_channel_url(channel_id, staging=True))) sys.exit() response.raise_for_status() new_channel = json.loads(response._content.decode("utf-8")) channel_link = config.open_channel_url(new_channel['new_channel']) return channel_id, channel_link
[ "def", "commit_channel", "(", "self", ",", "channel_id", ")", ":", "payload", "=", "{", "\"channel_id\"", ":", "channel_id", ",", "\"stage\"", ":", "config", ".", "STAGE", ",", "}", "response", "=", "config", ".", "SESSION", ".", "post", "(", "config", ".", "finish_channel_url", "(", ")", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ")", "if", "response", ".", "status_code", "!=", "200", ":", "config", ".", "LOGGER", ".", "error", "(", "\"\\n\\nCould not activate channel: {}\\n\"", ".", "format", "(", "response", ".", "_content", ".", "decode", "(", "'utf-8'", ")", ")", ")", "if", "response", ".", "status_code", "==", "403", ":", "config", ".", "LOGGER", ".", "error", "(", "\"Channel can be viewed at {}\\n\\n\"", ".", "format", "(", "config", ".", "open_channel_url", "(", "channel_id", ",", "staging", "=", "True", ")", ")", ")", "sys", ".", "exit", "(", ")", "response", ".", "raise_for_status", "(", ")", "new_channel", "=", "json", ".", "loads", "(", "response", ".", "_content", ".", "decode", "(", "\"utf-8\"", ")", ")", "channel_link", "=", "config", ".", "open_channel_url", "(", "new_channel", "[", "'new_channel'", "]", ")", "return", "channel_id", ",", "channel_link" ]
commit_channel: commits channel to Kolibri Studio
Args: channel_id (str): channel's id on Kolibri Studio
Returns: channel id and link to uploaded channel
[ "commit_channel", ":", "commits", "channel", "to", "Kolibri", "Studio", "Args", ":", "channel_id", "(", "str", ")", ":", "channel", "s", "id", "on", "Kolibri", "Studio", "Returns", ":", "channel", "id", "and", "link", "to", "uploadedchannel" ]
python
train
50.2
cs01/gdbgui
gdbgui/backend.py
https://github.com/cs01/gdbgui/blob/5367f87554f8f7c671d1f4596c133bf1303154f0/gdbgui/backend.py#L369-L382
def send_msg_to_clients(client_ids, msg, error=False): """Send message to all clients""" if error: stream = "stderr" else: stream = "stdout" response = [{"message": None, "type": "console", "payload": msg, "stream": stream}] for client_id in client_ids: logger.info("emiting message to websocket client id " + client_id) socketio.emit( "gdb_response", response, namespace="/gdb_listener", room=client_id )
[ "def", "send_msg_to_clients", "(", "client_ids", ",", "msg", ",", "error", "=", "False", ")", ":", "if", "error", ":", "stream", "=", "\"stderr\"", "else", ":", "stream", "=", "\"stdout\"", "response", "=", "[", "{", "\"message\"", ":", "None", ",", "\"type\"", ":", "\"console\"", ",", "\"payload\"", ":", "msg", ",", "\"stream\"", ":", "stream", "}", "]", "for", "client_id", "in", "client_ids", ":", "logger", ".", "info", "(", "\"emiting message to websocket client id \"", "+", "client_id", ")", "socketio", ".", "emit", "(", "\"gdb_response\"", ",", "response", ",", "namespace", "=", "\"/gdb_listener\"", ",", "room", "=", "client_id", ")" ]
Send message to all clients
[ "Send", "message", "to", "all", "clients" ]
python
train
33.285714
senaite/senaite.core
bika/lims/content/abstractanalysis.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/abstractanalysis.py#L748-L767
def getAllowedInstruments(self): """Returns the allowed instruments for this analysis, either if the instrument was assigned directly (by using "Allows instrument entry of results") or indirectly via Method (by using "Allows manual entry of results") in Analysis Service edit view. :return: A list of instruments allowed for this Analysis :rtype: list of instruments """ service = self.getAnalysisService() if not service: return [] instruments = [] if self.getInstrumentEntryOfResults(): instruments = service.getInstruments() if self.getManualEntryOfResults(): for meth in self.getAllowedMethods(): instruments += meth.getInstruments() return list(set(instruments))
[ "def", "getAllowedInstruments", "(", "self", ")", ":", "service", "=", "self", ".", "getAnalysisService", "(", ")", "if", "not", "service", ":", "return", "[", "]", "instruments", "=", "[", "]", "if", "self", ".", "getInstrumentEntryOfResults", "(", ")", ":", "instruments", "=", "service", ".", "getInstruments", "(", ")", "if", "self", ".", "getManualEntryOfResults", "(", ")", ":", "for", "meth", "in", "self", ".", "getAllowedMethods", "(", ")", ":", "instruments", "+=", "meth", ".", "getInstruments", "(", ")", "return", "list", "(", "set", "(", "instruments", ")", ")" ]
Returns the allowed instruments for this analysis, either if the instrument was assigned directly (by using "Allows instrument entry of results") or indirectly via Method (by using "Allows manual entry of results") in Analysis Service edit view. :return: A list of instruments allowed for this Analysis :rtype: list of instruments
[ "Returns", "the", "allowed", "instruments", "for", "this", "analysis", "either", "if", "the", "instrument", "was", "assigned", "directly", "(", "by", "using", "Allows", "instrument", "entry", "of", "results", ")", "or", "indirectly", "via", "Method", "(", "by", "using", "Allows", "manual", "entry", "of", "results", ")", "in", "Analysis", "Service", "edit", "view", ".", ":", "return", ":", "A", "list", "of", "instruments", "allowed", "for", "this", "Analysis", ":", "rtype", ":", "list", "of", "instruments" ]
python
train
40.3
jpscaletti/solution
solution/fields/color.py
https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/fields/color.py#L74-L87
def normalize_rgb(r, g, b, a): """Transform a rgb[a] color to #hex[a]. """ r = int(r, 10) g = int(g, 10) b = int(b, 10) if a: a = float(a) * 256 if r > 255 or g > 255 or b > 255 or (a and a > 255): return None color = '#%02x%02x%02x' % (r, g, b) if a: color += '%02x' % int(a) return color
[ "def", "normalize_rgb", "(", "r", ",", "g", ",", "b", ",", "a", ")", ":", "r", "=", "int", "(", "r", ",", "10", ")", "g", "=", "int", "(", "g", ",", "10", ")", "b", "=", "int", "(", "b", ",", "10", ")", "if", "a", ":", "a", "=", "float", "(", "a", ")", "*", "256", "if", "r", ">", "255", "or", "g", ">", "255", "or", "b", ">", "255", "or", "(", "a", "and", "a", ">", "255", ")", ":", "return", "None", "color", "=", "'#%02x%02x%02x'", "%", "(", "r", ",", "g", ",", "b", ")", "if", "a", ":", "color", "+=", "'%02x'", "%", "int", "(", "a", ")", "return", "color" ]
Transform a rgb[a] color to #hex[a].
[ "Transform", "a", "rgb", "[", "a", "]", "color", "to", "#hex", "[", "a", "]", "." ]
python
train
24.285714
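Assuming the solution package is installed, normalize_rgb can be exercised directly; the expected values in the comments follow from the code shown (channels are parsed from strings, and alpha is scaled by 256 before being appended as a hex byte).

from solution.fields.color import normalize_rgb

print(normalize_rgb('255', '136', '0', None))   # '#ff8800'
print(normalize_rgb('255', '136', '0', '0.5'))  # '#ff880080' (0.5 * 256 -> 0x80)
print(normalize_rgb('300', '0', '0', None))     # None, channel out of range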
horazont/aioxmpp
aioxmpp/pep/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/pep/service.py#L326-L338
def close(self): """ Unclaim the PEP node and unregister the registered features. It is not necessary to call close if this claim is managed by :class:`~aioxmpp.pep.register_pep_node`. """ if self._closed: return self._closed = True self._pep_service._unclaim(self.node_namespace) self._unregister()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "return", "self", ".", "_closed", "=", "True", "self", ".", "_pep_service", ".", "_unclaim", "(", "self", ".", "node_namespace", ")", "self", ".", "_unregister", "(", ")" ]
Unclaim the PEP node and unregister the registered features. It is not necessary to call close if this claim is managed by :class:`~aioxmpp.pep.register_pep_node`.
[ "Unclaim", "the", "PEP", "node", "and", "unregister", "the", "registered", "features", "." ]
python
train
28.692308
bitesofcode/projexui
projexui/widgets/xloaderwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xloaderwidget.py#L202-L207
def incrementSub(self, amount=1): """ Increments the sub-progress bar by amount. """ self._subProgressBar.setValue(self.subValue() + amount) QApplication.instance().processEvents()
[ "def", "incrementSub", "(", "self", ",", "amount", "=", "1", ")", ":", "self", ".", "_subProgressBar", ".", "setValue", "(", "self", ".", "subValue", "(", ")", "+", "amount", ")", "QApplication", ".", "instance", "(", ")", ".", "processEvents", "(", ")" ]
Increments the sub-progress bar by amount.
[ "Increments", "the", "sub", "-", "progress", "bar", "by", "amount", "." ]
python
train
36
erget/StereoVision
stereovision/stereo_cameras.py
https://github.com/erget/StereoVision/blob/1adff45e291362f52188e0fd0211265845a4461a/stereovision/stereo_cameras.py#L79-L88
def get_frames_singleimage(self): """ Get current left and right frames from a single image, by splitting the image in half. """ frame = self.captures[0].read()[1] height, width, colors = frame.shape left_frame = frame[:, :width/2, :] right_frame = frame[:, width/2:, :] return [left_frame, right_frame]
[ "def", "get_frames_singleimage", "(", "self", ")", ":", "frame", "=", "self", ".", "captures", "[", "0", "]", ".", "read", "(", ")", "[", "1", "]", "height", ",", "width", ",", "colors", "=", "frame", ".", "shape", "left_frame", "=", "frame", "[", ":", ",", ":", "width", "/", "2", ",", ":", "]", "right_frame", "=", "frame", "[", ":", ",", "width", "/", "2", ":", ",", ":", "]", "return", "[", "left_frame", ",", "right_frame", "]" ]
Get current left and right frames from a single image, by splitting the image in half.
[ "Get", "current", "left", "and", "right", "frames", "from", "a", "single", "image", "by", "splitting", "the", "image", "in", "half", "." ]
python
train
36.6
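The side-by-side split above is plain array slicing; this sketch reproduces it on a dummy numpy image and uses floor division for the midpoint, since the width/2 index in the original only works on Python 2.

import numpy as np

frame = np.zeros((480, 1280, 3), dtype=np.uint8)  # stand-in for self.captures[0].read()[1]
height, width, colors = frame.shape

left_frame = frame[:, :width // 2, :]
right_frame = frame[:, width // 2:, :]

print(left_frame.shape, right_frame.shape)  # (480, 640, 3) (480, 640, 3)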
vtraag/leidenalg
src/VertexPartition.py
https://github.com/vtraag/leidenalg/blob/a9e15116973a81048edf02ef7cf800d54debe1cc/src/VertexPartition.py#L376-L384
def weight_from_comm(self, v, comm): """ The total number of edges (or sum of weights) to node ``v`` from community ``comm``. See Also -------- :func:`~VertexPartition.MutableVertexPartition.weight_to_comm` """ return _c_leiden._MutableVertexPartition_weight_from_comm(self._partition, v, comm)
[ "def", "weight_from_comm", "(", "self", ",", "v", ",", "comm", ")", ":", "return", "_c_leiden", ".", "_MutableVertexPartition_weight_from_comm", "(", "self", ".", "_partition", ",", "v", ",", "comm", ")" ]
The total number of edges (or sum of weights) to node ``v`` from community ``comm``. See Also -------- :func:`~VertexPartition.MutableVertexPartition.weight_to_comm`
[ "The", "total", "number", "of", "edges", "(", "or", "sum", "of", "weights", ")", "to", "node", "v", "from", "community", "comm", "." ]
python
train
35
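A hedged usage sketch for weight_from_comm: it assumes python-igraph and leidenalg are installed and uses leidenalg's find_partition with ModularityVertexPartition on a built-in test graph; the vertex and community indices are arbitrary.

import igraph as ig
import leidenalg as la

graph = ig.Graph.Famous('Zachary')  # small built-in test graph
partition = la.find_partition(graph, la.ModularityVertexPartition)

# Total edge weight running to vertex 0 from community 0.
print(partition.weight_from_comm(0, 0))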
kolypto/py-good
good/schema/compiler.py
https://github.com/kolypto/py-good/blob/192ef19e79f6fd95c1cbd7c378a3074c7ad7a6d4/good/schema/compiler.py#L331-L338
def _compile_schema(self, schema): """ Compile another schema """ assert self.matcher == schema.matcher self.name = schema.name self.compiled_type = schema.compiled_type return schema.compiled
[ "def", "_compile_schema", "(", "self", ",", "schema", ")", ":", "assert", "self", ".", "matcher", "==", "schema", ".", "matcher", "self", ".", "name", "=", "schema", ".", "name", "self", ".", "compiled_type", "=", "schema", ".", "compiled_type", "return", "schema", ".", "compiled" ]
Compile another schema
[ "Compile", "another", "schema" ]
python
train
28.375
automl/HpBandSter
hpbandster/core/base_iteration.py
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/core/base_iteration.py#L105-L139
def register_result(self, job, skip_sanity_checks=False): """ function to register the result of a job This function is called from HB_master, don't call this from your script. """ if self.is_finished: raise RuntimeError("This HB iteration is finished, you can't register more results!") config_id = job.id config = job.kwargs['config'] budget = job.kwargs['budget'] timestamps = job.timestamps result = job.result exception = job.exception d = self.data[config_id] if not skip_sanity_checks: assert d.config == config, 'Configurations differ!' assert d.status == 'RUNNING', "Configuration wasn't scheduled for a run." assert d.budget == budget, 'Budgets differ (%f != %f)!'%(self.data[config_id]['budget'], budget) d.time_stamps[budget] = timestamps d.results[budget] = result if (not job.result is None) and np.isfinite(result['loss']): d.status = 'REVIEW' else: d.status = 'CRASHED' d.exceptions[budget] = exception self.num_running -= 1
[ "def", "register_result", "(", "self", ",", "job", ",", "skip_sanity_checks", "=", "False", ")", ":", "if", "self", ".", "is_finished", ":", "raise", "RuntimeError", "(", "\"This HB iteration is finished, you can't register more results!\"", ")", "config_id", "=", "job", ".", "id", "config", "=", "job", ".", "kwargs", "[", "'config'", "]", "budget", "=", "job", ".", "kwargs", "[", "'budget'", "]", "timestamps", "=", "job", ".", "timestamps", "result", "=", "job", ".", "result", "exception", "=", "job", ".", "exception", "d", "=", "self", ".", "data", "[", "config_id", "]", "if", "not", "skip_sanity_checks", ":", "assert", "d", ".", "config", "==", "config", ",", "'Configurations differ!'", "assert", "d", ".", "status", "==", "'RUNNING'", ",", "\"Configuration wasn't scheduled for a run.\"", "assert", "d", ".", "budget", "==", "budget", ",", "'Budgets differ (%f != %f)!'", "%", "(", "self", ".", "data", "[", "config_id", "]", "[", "'budget'", "]", ",", "budget", ")", "d", ".", "time_stamps", "[", "budget", "]", "=", "timestamps", "d", ".", "results", "[", "budget", "]", "=", "result", "if", "(", "not", "job", ".", "result", "is", "None", ")", "and", "np", ".", "isfinite", "(", "result", "[", "'loss'", "]", ")", ":", "d", ".", "status", "=", "'REVIEW'", "else", ":", "d", ".", "status", "=", "'CRASHED'", "d", ".", "exceptions", "[", "budget", "]", "=", "exception", "self", ".", "num_running", "-=", "1" ]
function to register the result of a job This function is called from HB_master, don't call this from your script.
[ "function", "to", "register", "the", "result", "of", "a", "job" ]
python
train
27.885714
erdc/RAPIDpy
RAPIDpy/rapid.py
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/rapid.py#L632-L800
def run(self, rapid_namelist_file=""): """ Run RAPID program and generate file based on inputs This will generate your rapid_namelist file and run RAPID from wherever you call this script (your working directory). Parameters ---------- rapid_namelist_file: str, optional Path of namelist file to use in the simulation. It will be updated with any parameters added to the RAPID manager. Linux Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_executable_location='~/work/rapid/src/rapid' use_all_processors=True, ) rapid_manager.update_parameters( rapid_connect_file='../rapid-io/input/rapid_connect.csv', Vlat_file='../rapid-io/input/m3_riv.nc', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', k_file='../rapid-io/input/k.csv', x_file='../rapid-io/input/x.csv', Qout_file='../rapid-io/output/Qout.nc', ) rapid_manager.update_reach_number_data() rapid_manager.update_simulation_runtime() rapid_manager.run( rapid_namelist_file='../rapid-io/input/rapid_namelist') Linux Reservoir Forcing Flows Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_executable_location='~/work/rapid/src/rapid', num_processors=4, IS_for_tot=4, IS_for_use=4, for_tot_id_file='../rapid-io/input/dam_id.csv', for_use_id_file='../rapid-io/input/dam_id.csv', Qfor_file='../rapid-io/input/qout_dams.csv', ZS_dtF=86400, BS_opt_for=True, ) rapid_manager.run( rapid_namelist_file='../rapid-io/input/rapid_namelist_regular') Windows with Cygwin Example: .. code:: python from RAPIDpy import RAPID from os import path rapid_exe_path = 'C:/cygwin64/home/username/rapid/run/rapid', rapid_manager = RAPID( rapid_executable_location=rapid_exe_path, cygwin_bin_location='C:/cygwin64/bin', use_all_processors=True, ZS_TauR=24*3600, ZS_dtR=15*60, ZS_TauM=365*24*3600, ZS_dtM=24*3600 ) rapid_input = 'C:/cygwin64/home/username/rapid-io/input' rapid_output = 'C:/cygwin64/home/username/rapid-io/output' rapid_manager.update_parameters( rapid_connect_file=path.join(rapid_input, 'rapid_connect.csv'), Vlat_file=path.join(rapid_input, 'm3_riv.nc'), riv_bas_id_file=path.join(rapid_input, 'riv_bas_id.csv'), k_file=path.join(rapid_input, 'k.csv'), x_file=path.join(rapid_input, 'x.csv'), Qout_file=path.join(rapid_output, 'Qout.nc'), ) rapid_manager.update_reach_number_data() rapid_manager.update_simulation_runtime() rapid_manager.run() """ if not self._rapid_executable_location: log("Missing rapid_executable_location. 
" "Please set before running this function ...", "ERROR") time_start = datetime.datetime.utcnow() temp_rapid_namelist_file = os.path.join(os.getcwd(), "rapid_namelist") if not rapid_namelist_file or not os.path.exists(rapid_namelist_file): # generate input file if it does not exist self.generate_namelist_file(temp_rapid_namelist_file) else: # update existing file self.update_namelist_file(rapid_namelist_file, temp_rapid_namelist_file) local_rapid_executable_location = \ os.path.join(os.path.dirname(temp_rapid_namelist_file), "rapid_exe_symlink") def rapid_cleanup(*args): """ Cleans up the rapid files generated by the process """ for arg in args: # remove files try: os.remove(arg) except OSError: pass # create link to RAPID if needed temp_link_to_rapid = "" # pylint: disable=no-member if self._rapid_executable_location != \ local_rapid_executable_location: rapid_cleanup(local_rapid_executable_location) if os.name == "nt": self._create_symlink_cygwin(self._rapid_executable_location, local_rapid_executable_location) else: os.symlink(self._rapid_executable_location, local_rapid_executable_location) temp_link_to_rapid = local_rapid_executable_location # run RAPID log("Running RAPID ...", "INFO") if os.name == "nt": local_rapid_executable_location = \ self._get_cygwin_path(local_rapid_executable_location) # htcondor will not allow mpiexec for single processor jobs # this was added for that purpose run_rapid_command = [local_rapid_executable_location, "-ksp_type", self._ksp_type] if self._num_processors > 1: run_rapid_command = [self._mpiexec_command, "-n", str(self._num_processors)] \ + run_rapid_command process = Popen(run_rapid_command, stdout=PIPE, stderr=PIPE, shell=False) out, err = process.communicate() if err: rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file) raise Exception(err) else: log('RAPID output:', "INFO") for line in out.split(b'\n'): print(line) rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file) log("Time to run RAPID: %s" % (datetime.datetime.utcnow()-time_start), "INFO")
[ "def", "run", "(", "self", ",", "rapid_namelist_file", "=", "\"\"", ")", ":", "if", "not", "self", ".", "_rapid_executable_location", ":", "log", "(", "\"Missing rapid_executable_location. \"", "\"Please set before running this function ...\"", ",", "\"ERROR\"", ")", "time_start", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "temp_rapid_namelist_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "\"rapid_namelist\"", ")", "if", "not", "rapid_namelist_file", "or", "not", "os", ".", "path", ".", "exists", "(", "rapid_namelist_file", ")", ":", "# generate input file if it does not exist", "self", ".", "generate_namelist_file", "(", "temp_rapid_namelist_file", ")", "else", ":", "# update existing file", "self", ".", "update_namelist_file", "(", "rapid_namelist_file", ",", "temp_rapid_namelist_file", ")", "local_rapid_executable_location", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "temp_rapid_namelist_file", ")", ",", "\"rapid_exe_symlink\"", ")", "def", "rapid_cleanup", "(", "*", "args", ")", ":", "\"\"\"\n Cleans up the rapid files generated by the process\n \"\"\"", "for", "arg", "in", "args", ":", "# remove files", "try", ":", "os", ".", "remove", "(", "arg", ")", "except", "OSError", ":", "pass", "# create link to RAPID if needed", "temp_link_to_rapid", "=", "\"\"", "# pylint: disable=no-member", "if", "self", ".", "_rapid_executable_location", "!=", "local_rapid_executable_location", ":", "rapid_cleanup", "(", "local_rapid_executable_location", ")", "if", "os", ".", "name", "==", "\"nt\"", ":", "self", ".", "_create_symlink_cygwin", "(", "self", ".", "_rapid_executable_location", ",", "local_rapid_executable_location", ")", "else", ":", "os", ".", "symlink", "(", "self", ".", "_rapid_executable_location", ",", "local_rapid_executable_location", ")", "temp_link_to_rapid", "=", "local_rapid_executable_location", "# run RAPID", "log", "(", "\"Running RAPID ...\"", ",", "\"INFO\"", ")", "if", "os", ".", "name", "==", "\"nt\"", ":", "local_rapid_executable_location", "=", "self", ".", "_get_cygwin_path", "(", "local_rapid_executable_location", ")", "# htcondor will not allow mpiexec for single processor jobs", "# this was added for that purpose", "run_rapid_command", "=", "[", "local_rapid_executable_location", ",", "\"-ksp_type\"", ",", "self", ".", "_ksp_type", "]", "if", "self", ".", "_num_processors", ">", "1", ":", "run_rapid_command", "=", "[", "self", ".", "_mpiexec_command", ",", "\"-n\"", ",", "str", "(", "self", ".", "_num_processors", ")", "]", "+", "run_rapid_command", "process", "=", "Popen", "(", "run_rapid_command", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "shell", "=", "False", ")", "out", ",", "err", "=", "process", ".", "communicate", "(", ")", "if", "err", ":", "rapid_cleanup", "(", "temp_link_to_rapid", ",", "temp_rapid_namelist_file", ")", "raise", "Exception", "(", "err", ")", "else", ":", "log", "(", "'RAPID output:'", ",", "\"INFO\"", ")", "for", "line", "in", "out", ".", "split", "(", "b'\\n'", ")", ":", "print", "(", "line", ")", "rapid_cleanup", "(", "temp_link_to_rapid", ",", "temp_rapid_namelist_file", ")", "log", "(", "\"Time to run RAPID: %s\"", "%", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "-", "time_start", ")", ",", "\"INFO\"", ")" ]
Run RAPID program and generate file based on inputs This will generate your rapid_namelist file and run RAPID from wherever you call this script (your working directory). Parameters ---------- rapid_namelist_file: str, optional Path of namelist file to use in the simulation. It will be updated with any parameters added to the RAPID manager. Linux Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_executable_location='~/work/rapid/src/rapid' use_all_processors=True, ) rapid_manager.update_parameters( rapid_connect_file='../rapid-io/input/rapid_connect.csv', Vlat_file='../rapid-io/input/m3_riv.nc', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', k_file='../rapid-io/input/k.csv', x_file='../rapid-io/input/x.csv', Qout_file='../rapid-io/output/Qout.nc', ) rapid_manager.update_reach_number_data() rapid_manager.update_simulation_runtime() rapid_manager.run( rapid_namelist_file='../rapid-io/input/rapid_namelist') Linux Reservoir Forcing Flows Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_executable_location='~/work/rapid/src/rapid', num_processors=4, IS_for_tot=4, IS_for_use=4, for_tot_id_file='../rapid-io/input/dam_id.csv', for_use_id_file='../rapid-io/input/dam_id.csv', Qfor_file='../rapid-io/input/qout_dams.csv', ZS_dtF=86400, BS_opt_for=True, ) rapid_manager.run( rapid_namelist_file='../rapid-io/input/rapid_namelist_regular') Windows with Cygwin Example: .. code:: python from RAPIDpy import RAPID from os import path rapid_exe_path = 'C:/cygwin64/home/username/rapid/run/rapid', rapid_manager = RAPID( rapid_executable_location=rapid_exe_path, cygwin_bin_location='C:/cygwin64/bin', use_all_processors=True, ZS_TauR=24*3600, ZS_dtR=15*60, ZS_TauM=365*24*3600, ZS_dtM=24*3600 ) rapid_input = 'C:/cygwin64/home/username/rapid-io/input' rapid_output = 'C:/cygwin64/home/username/rapid-io/output' rapid_manager.update_parameters( rapid_connect_file=path.join(rapid_input, 'rapid_connect.csv'), Vlat_file=path.join(rapid_input, 'm3_riv.nc'), riv_bas_id_file=path.join(rapid_input, 'riv_bas_id.csv'), k_file=path.join(rapid_input, 'k.csv'), x_file=path.join(rapid_input, 'x.csv'), Qout_file=path.join(rapid_output, 'Qout.nc'), ) rapid_manager.update_reach_number_data() rapid_manager.update_simulation_runtime() rapid_manager.run()
[ "Run", "RAPID", "program", "and", "generate", "file", "based", "on", "inputs", "This", "will", "generate", "your", "rapid_namelist", "file", "and", "run", "RAPID", "from", "wherever", "you", "call", "this", "script", "(", "your", "working", "directory", ")", "." ]
python
train
36.686391
inveniosoftware/invenio-stats
invenio_stats/ext.py
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/ext.py#L189-L192
def publish(self, event_type, events): """Publish events.""" assert event_type in self.events current_queues.queues['stats-{}'.format(event_type)].publish(events)
[ "def", "publish", "(", "self", ",", "event_type", ",", "events", ")", ":", "assert", "event_type", "in", "self", ".", "events", "current_queues", ".", "queues", "[", "'stats-{}'", ".", "format", "(", "event_type", ")", "]", ".", "publish", "(", "events", ")" ]
Publish events.
[ "Publish", "events", "." ]
python
valid
45.75
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L794-L802
def _SkipGroup(buffer, pos, end): """Skip sub-group. Returns the new position.""" while 1: (tag_bytes, pos) = ReadTag(buffer, pos) new_pos = SkipField(buffer, pos, end, tag_bytes) if new_pos == -1: return pos pos = new_pos
[ "def", "_SkipGroup", "(", "buffer", ",", "pos", ",", "end", ")", ":", "while", "1", ":", "(", "tag_bytes", ",", "pos", ")", "=", "ReadTag", "(", "buffer", ",", "pos", ")", "new_pos", "=", "SkipField", "(", "buffer", ",", "pos", ",", "end", ",", "tag_bytes", ")", "if", "new_pos", "==", "-", "1", ":", "return", "pos", "pos", "=", "new_pos" ]
Skip sub-group. Returns the new position.
[ "Skip", "sub", "-", "group", ".", "Returns", "the", "new", "position", "." ]
python
train
26.888889
moremoban/moban
setup.py
https://github.com/moremoban/moban/blob/5d1674ae461b065a9a54fe89c445cbf6d3cd63c0/setup.py#L157-L163
def read(afile): """Read a file into setup""" the_relative_file = os.path.join(HERE, afile) with codecs.open(the_relative_file, 'r', 'utf-8') as opened_file: content = filter_out_test_code(opened_file) content = "".join(list(content)) return content
[ "def", "read", "(", "afile", ")", ":", "the_relative_file", "=", "os", ".", "path", ".", "join", "(", "HERE", ",", "afile", ")", "with", "codecs", ".", "open", "(", "the_relative_file", ",", "'r'", ",", "'utf-8'", ")", "as", "opened_file", ":", "content", "=", "filter_out_test_code", "(", "opened_file", ")", "content", "=", "\"\"", ".", "join", "(", "list", "(", "content", ")", ")", "return", "content" ]
Read a file into setup
[ "Read", "a", "file", "into", "setup" ]
python
train
39.857143
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L199-L213
def _get_host_details(self): """Get the system details.""" # Assuming only one system present as part of collection, # as we are dealing with iLO's here. status, headers, system = self._rest_get('/rest/v1/Systems/1') if status < 300: stype = self._get_type(system) if stype not in ['ComputerSystem.0', 'ComputerSystem.1']: msg = "%s is not a valid system type " % stype raise exception.IloError(msg) else: msg = self._get_extended_error(system) raise exception.IloError(msg) return system
[ "def", "_get_host_details", "(", "self", ")", ":", "# Assuming only one system present as part of collection,", "# as we are dealing with iLO's here.", "status", ",", "headers", ",", "system", "=", "self", ".", "_rest_get", "(", "'/rest/v1/Systems/1'", ")", "if", "status", "<", "300", ":", "stype", "=", "self", ".", "_get_type", "(", "system", ")", "if", "stype", "not", "in", "[", "'ComputerSystem.0'", ",", "'ComputerSystem.1'", "]", ":", "msg", "=", "\"%s is not a valid system type \"", "%", "stype", "raise", "exception", ".", "IloError", "(", "msg", ")", "else", ":", "msg", "=", "self", ".", "_get_extended_error", "(", "system", ")", "raise", "exception", ".", "IloError", "(", "msg", ")", "return", "system" ]
Get the system details.
[ "Get", "the", "system", "details", "." ]
python
train
40.733333
picklepete/pyicloud
pyicloud/base.py
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/base.py#L195-L230
def authenticate(self): """ Handles authentication, and persists the X-APPLE-WEB-KB cookie so that subsequent logins will not cause additional e-mails from Apple. """ logger.info("Authenticating as %s", self.user['apple_id']) data = dict(self.user) # We authenticate every time, so "remember me" is not needed data.update({'extended_login': False}) try: req = self.session.post( self._base_login_url, params=self.params, data=json.dumps(data) ) except PyiCloudAPIResponseError as error: msg = 'Invalid email/password combination.' raise PyiCloudFailedLoginException(msg, error) resp = req.json() self.params.update({'dsid': resp['dsInfo']['dsid']}) if not os.path.exists(self._cookie_directory): os.mkdir(self._cookie_directory) self.session.cookies.save() logger.debug("Cookies saved to %s", self._get_cookiejar_path()) self.data = resp self.webservices = self.data['webservices'] logger.info("Authentication completed successfully") logger.debug(self.params)
[ "def", "authenticate", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Authenticating as %s\"", ",", "self", ".", "user", "[", "'apple_id'", "]", ")", "data", "=", "dict", "(", "self", ".", "user", ")", "# We authenticate every time, so \"remember me\" is not needed", "data", ".", "update", "(", "{", "'extended_login'", ":", "False", "}", ")", "try", ":", "req", "=", "self", ".", "session", ".", "post", "(", "self", ".", "_base_login_url", ",", "params", "=", "self", ".", "params", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "except", "PyiCloudAPIResponseError", "as", "error", ":", "msg", "=", "'Invalid email/password combination.'", "raise", "PyiCloudFailedLoginException", "(", "msg", ",", "error", ")", "resp", "=", "req", ".", "json", "(", ")", "self", ".", "params", ".", "update", "(", "{", "'dsid'", ":", "resp", "[", "'dsInfo'", "]", "[", "'dsid'", "]", "}", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_cookie_directory", ")", ":", "os", ".", "mkdir", "(", "self", ".", "_cookie_directory", ")", "self", ".", "session", ".", "cookies", ".", "save", "(", ")", "logger", ".", "debug", "(", "\"Cookies saved to %s\"", ",", "self", ".", "_get_cookiejar_path", "(", ")", ")", "self", ".", "data", "=", "resp", "self", ".", "webservices", "=", "self", ".", "data", "[", "'webservices'", "]", "logger", ".", "info", "(", "\"Authentication completed successfully\"", ")", "logger", ".", "debug", "(", "self", ".", "params", ")" ]
Handles authentication, and persists the X-APPLE-WEB-KB cookie so that subsequent logins will not cause additional e-mails from Apple.
[ "Handles", "authentication", "and", "persists", "the", "X", "-", "APPLE", "-", "WEB", "-", "KB", "cookie", "so", "that", "subsequent", "logins", "will", "not", "cause", "additional", "e", "-", "mails", "from", "Apple", "." ]
python
train
33.138889
tommyod/streprogen
streprogen/program.py
https://github.com/tommyod/streprogen/blob/21b903618e8b2d398bceb394d18d7c74ca984def/streprogen/program.py#L715-L732
def to_html(self, table_width=5): """Write the program information to HTML code, which can be saved, printed and brought to the gym. Parameters ---------- table_width The table with of the HTML code. Returns ------- string HTML code. """ env = self.jinja2_environment template = env.get_template(self.TEMPLATE_NAMES['html']) return template.render(program=self, table_width=table_width)
[ "def", "to_html", "(", "self", ",", "table_width", "=", "5", ")", ":", "env", "=", "self", ".", "jinja2_environment", "template", "=", "env", ".", "get_template", "(", "self", ".", "TEMPLATE_NAMES", "[", "'html'", "]", ")", "return", "template", ".", "render", "(", "program", "=", "self", ",", "table_width", "=", "table_width", ")" ]
Write the program information to HTML code, which
can be saved, printed and brought to the gym.

Parameters
----------
table_width
    The table width of the HTML code.


Returns
-------
string
    HTML code.
[ "Write", "the", "program", "information", "to", "HTML", "code", "which", "can", "be", "saved", "printed", "and", "brought", "to", "the", "gym", "." ]
python
train
27.333333
dbcli/athenacli
athenacli/packages/special/iocommands.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/special/iocommands.py#L123-L154
def open_external_editor(filename=None, sql=None): """Open external editor, wait for the user to type in their query, return the query. :return: list with one tuple, query as first element. """ message = None filename = filename.strip().split(' ', 1)[0] if filename else None sql = sql or '' MARKER = '# Type your query above this line.\n' # Populate the editor buffer with the partial sql (if available) and a # placeholder comment. query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER), filename=filename, extension='.sql') if filename: try: with open(filename, encoding='utf-8') as f: query = f.read() except IOError: message = 'Error reading file: %s.' % filename if query is not None: query = query.split(MARKER, 1)[0].rstrip('\n') else: # Don't return None for the caller to deal with. # Empty string is ok. query = sql return (query, message)
[ "def", "open_external_editor", "(", "filename", "=", "None", ",", "sql", "=", "None", ")", ":", "message", "=", "None", "filename", "=", "filename", ".", "strip", "(", ")", ".", "split", "(", "' '", ",", "1", ")", "[", "0", "]", "if", "filename", "else", "None", "sql", "=", "sql", "or", "''", "MARKER", "=", "'# Type your query above this line.\\n'", "# Populate the editor buffer with the partial sql (if available) and a", "# placeholder comment.", "query", "=", "click", ".", "edit", "(", "u'{sql}\\n\\n{marker}'", ".", "format", "(", "sql", "=", "sql", ",", "marker", "=", "MARKER", ")", ",", "filename", "=", "filename", ",", "extension", "=", "'.sql'", ")", "if", "filename", ":", "try", ":", "with", "open", "(", "filename", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "query", "=", "f", ".", "read", "(", ")", "except", "IOError", ":", "message", "=", "'Error reading file: %s.'", "%", "filename", "if", "query", "is", "not", "None", ":", "query", "=", "query", ".", "split", "(", "MARKER", ",", "1", ")", "[", "0", "]", ".", "rstrip", "(", "'\\n'", ")", "else", ":", "# Don't return None for the caller to deal with.", "# Empty string is ok.", "query", "=", "sql", "return", "(", "query", ",", "message", ")" ]
Open external editor, wait for the user to type in their query, return the query. :return: list with one tuple, query as first element.
[ "Open", "external", "editor", "wait", "for", "the", "user", "to", "type", "in", "their", "query", "return", "the", "query", ".", ":", "return", ":", "list", "with", "one", "tuple", "query", "as", "first", "element", "." ]
python
train
31.625
Julius2342/pyvlx
old_api/pyvlx/devices.py
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/devices.py#L79-L82
def load_blind(self, item): """Load blind from JSON.""" blind = Blind.from_config(self.pyvlx, item) self.add(blind)
[ "def", "load_blind", "(", "self", ",", "item", ")", ":", "blind", "=", "Blind", ".", "from_config", "(", "self", ".", "pyvlx", ",", "item", ")", "self", ".", "add", "(", "blind", ")" ]
Load blind from JSON.
[ "Load", "blind", "from", "JSON", "." ]
python
train
34
davidfokkema/artist
artist/multi_plot.py
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L559-L576
def set_scalebar_for_all(self, row_column_list=None, location='lower right'): """Show marker area scale for subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :param location: the location of the label inside the plot. May be one of 'center', 'upper right', 'lower right', 'upper left', 'lower left'. """ if row_column_list is None: for subplot in self.subplots: subplot.set_scalebar(location) else: for row, column in row_column_list: subplot = self.get_subplot_at(row, column) subplot.set_scalebar(location)
[ "def", "set_scalebar_for_all", "(", "self", ",", "row_column_list", "=", "None", ",", "location", "=", "'lower right'", ")", ":", "if", "row_column_list", "is", "None", ":", "for", "subplot", "in", "self", ".", "subplots", ":", "subplot", ".", "set_scalebar", "(", "location", ")", "else", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_scalebar", "(", "location", ")" ]
Show marker area scale for subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :param location: the location of the label inside the plot. May be one of 'center', 'upper right', 'lower right', 'upper left', 'lower left'.
[ "Show", "marker", "area", "scale", "for", "subplots", "." ]
python
train
42.388889
spacetelescope/drizzlepac
drizzlepac/util.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/util.py#L329-L343
def endStep(self,key): """ Record the end time for the step. If key==None, simply record ptime as end time for class to represent the overall runtime since the initialization of the class. """ ptime = _ptime() if key is not None: self.steps[key]['end'] = ptime self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1] self.end = ptime print('==== Processing Step ',key,' finished at ',ptime[0]) print('')
[ "def", "endStep", "(", "self", ",", "key", ")", ":", "ptime", "=", "_ptime", "(", ")", "if", "key", "is", "not", "None", ":", "self", ".", "steps", "[", "key", "]", "[", "'end'", "]", "=", "ptime", "self", ".", "steps", "[", "key", "]", "[", "'elapsed'", "]", "=", "ptime", "[", "1", "]", "-", "self", ".", "steps", "[", "key", "]", "[", "'start'", "]", "[", "1", "]", "self", ".", "end", "=", "ptime", "print", "(", "'==== Processing Step '", ",", "key", ",", "' finished at '", ",", "ptime", "[", "0", "]", ")", "print", "(", "''", ")" ]
Record the end time for the step. If key==None, simply record ptime as end time for class to represent the overall runtime since the initialization of the class.
[ "Record", "the", "end", "time", "for", "the", "step", "." ]
python
train
33.8
iotaledger/iota.lib.py
iota/api.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/api.py#L732-L789
def get_new_addresses( self, index=0, count=1, security_level=AddressGenerator.DEFAULT_SECURITY_LEVEL, checksum=False, ): # type: (int, Optional[int], int, bool) -> dict """ Generates one or more new addresses from the seed. :param index: The key index of the first new address to generate (must be >= 1). :param count: Number of addresses to generate (must be >= 1). .. tip:: This is more efficient than calling ``get_new_address`` inside a loop. If ``None``, this method will progressively generate addresses and scan the Tangle until it finds one that has no transactions referencing it. :param security_level: Number of iterations to use when generating new addresses. Larger values take longer, but the resulting signatures are more secure. This value must be between 1 and 3, inclusive. :param checksum: Specify whether to return the address with the checksum. Defaults to ``False``. :return: Dict with the following structure:: { 'addresses': List[Address], Always a list, even if only one address was generated. } References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress """ return extended.GetNewAddressesCommand(self.adapter)( count=count, index=index, securityLevel=security_level, checksum=checksum, seed=self.seed, )
[ "def", "get_new_addresses", "(", "self", ",", "index", "=", "0", ",", "count", "=", "1", ",", "security_level", "=", "AddressGenerator", ".", "DEFAULT_SECURITY_LEVEL", ",", "checksum", "=", "False", ",", ")", ":", "# type: (int, Optional[int], int, bool) -> dict", "return", "extended", ".", "GetNewAddressesCommand", "(", "self", ".", "adapter", ")", "(", "count", "=", "count", ",", "index", "=", "index", ",", "securityLevel", "=", "security_level", ",", "checksum", "=", "checksum", ",", "seed", "=", "self", ".", "seed", ",", ")" ]
Generates one or more new addresses from the seed. :param index: The key index of the first new address to generate (must be >= 1). :param count: Number of addresses to generate (must be >= 1). .. tip:: This is more efficient than calling ``get_new_address`` inside a loop. If ``None``, this method will progressively generate addresses and scan the Tangle until it finds one that has no transactions referencing it. :param security_level: Number of iterations to use when generating new addresses. Larger values take longer, but the resulting signatures are more secure. This value must be between 1 and 3, inclusive. :param checksum: Specify whether to return the address with the checksum. Defaults to ``False``. :return: Dict with the following structure:: { 'addresses': List[Address], Always a list, even if only one address was generated. } References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress
[ "Generates", "one", "or", "more", "new", "addresses", "from", "the", "seed", "." ]
python
test
29.758621
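A hedged usage sketch for get_new_addresses: the node URI is a placeholder, the all-9s seed is a throwaway value for illustration only, and a reachable IRI node is assumed; the result carries the generated addresses under the 'addresses' key, as the docstring states.

from iota import Iota

api = Iota('https://nodes.example.org:443', seed=b'9' * 81)  # placeholder node URI and throwaway seed

result = api.get_new_addresses(index=0, count=2, security_level=2, checksum=True)
for address in result['addresses']:
    print(address)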
tkem/uritools
uritools/defrag.py
https://github.com/tkem/uritools/blob/e77ba4acd937b68da9850138563debd4c925ef9f/uritools/defrag.py#L33-L41
def uridefrag(uristring): """Remove an existing fragment component from a URI reference string. """ if isinstance(uristring, bytes): parts = uristring.partition(b'#') else: parts = uristring.partition(u'#') return DefragResult(parts[0], parts[2] if parts[1] else None)
[ "def", "uridefrag", "(", "uristring", ")", ":", "if", "isinstance", "(", "uristring", ",", "bytes", ")", ":", "parts", "=", "uristring", ".", "partition", "(", "b'#'", ")", "else", ":", "parts", "=", "uristring", ".", "partition", "(", "u'#'", ")", "return", "DefragResult", "(", "parts", "[", "0", "]", ",", "parts", "[", "2", "]", "if", "parts", "[", "1", "]", "else", "None", ")" ]
Remove an existing fragment component from a URI reference string.
[ "Remove", "an", "existing", "fragment", "component", "from", "a", "URI", "reference", "string", "." ]
python
train
33
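Usage follows directly from the partition logic shown above; uridefrag returns a two-element DefragResult holding the fragment-free URI and the fragment (None when no '#' is present). The example assumes the uritools package is installed.

from uritools import uridefrag

base, fragment = uridefrag('http://example.com/docs#section-2')
print(base)      # http://example.com/docs
print(fragment)  # section-2

print(uridefrag('http://example.com/docs')[1])  # None, no fragment component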
project-rig/rig
rig/place_and_route/route/ner.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L517-L577
def route(vertices_resources, nets, machine, constraints, placements, allocations={}, core_resource=Cores, radius=20): """Routing algorithm based on Neighbour Exploring Routing (NER). Algorithm refrence: J. Navaridas et al. SpiNNaker: Enhanced multicast routing, Parallel Computing (2014). http://dx.doi.org/10.1016/j.parco.2015.01.002 This algorithm attempts to use NER to generate routing trees for all nets and routes around broken links using A* graph search. If the system is fully connected, this algorithm will always succeed though no consideration of congestion or routing-table usage is attempted. Parameters ---------- radius : int Radius of area to search from each node. 20 is arbitrarily selected in the paper and shown to be acceptable in practice. If set to zero, this method is becomes longest dimension first routing. """ wrap_around = machine.has_wrap_around_links() # Vertices constrained to route to a specific link. {vertex: route} route_to_endpoint = {} for constraint in constraints: if isinstance(constraint, RouteEndpointConstraint): route_to_endpoint[constraint.vertex] = constraint.route routes = {} for net in nets: # Generate routing tree (assuming a perfect machine) root, lookup = ner_net(placements[net.source], set(placements[sink] for sink in net.sinks), machine.width, machine.height, wrap_around, radius) # Fix routes to avoid dead chips/links if route_has_dead_links(root, machine): root, lookup = avoid_dead_links(root, machine, wrap_around) # Add the sinks in the net to the RoutingTree for sink in net.sinks: tree_node = lookup[placements[sink]] if sink in route_to_endpoint: # Sinks with route-to-endpoint constraints must be routed # in the according directions. tree_node.children.append((route_to_endpoint[sink], sink)) else: cores = allocations.get(sink, {}).get(core_resource, None) if cores is not None: # Sinks with the core_resource resource specified must be # routed to that set of cores. for core in range(cores.start, cores.stop): tree_node.children.append((Routes.core(core), sink)) else: # Sinks without that resource are simply included without # an associated route tree_node.children.append((None, sink)) routes[net] = root return routes
[ "def", "route", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "placements", ",", "allocations", "=", "{", "}", ",", "core_resource", "=", "Cores", ",", "radius", "=", "20", ")", ":", "wrap_around", "=", "machine", ".", "has_wrap_around_links", "(", ")", "# Vertices constrained to route to a specific link. {vertex: route}", "route_to_endpoint", "=", "{", "}", "for", "constraint", "in", "constraints", ":", "if", "isinstance", "(", "constraint", ",", "RouteEndpointConstraint", ")", ":", "route_to_endpoint", "[", "constraint", ".", "vertex", "]", "=", "constraint", ".", "route", "routes", "=", "{", "}", "for", "net", "in", "nets", ":", "# Generate routing tree (assuming a perfect machine)", "root", ",", "lookup", "=", "ner_net", "(", "placements", "[", "net", ".", "source", "]", ",", "set", "(", "placements", "[", "sink", "]", "for", "sink", "in", "net", ".", "sinks", ")", ",", "machine", ".", "width", ",", "machine", ".", "height", ",", "wrap_around", ",", "radius", ")", "# Fix routes to avoid dead chips/links", "if", "route_has_dead_links", "(", "root", ",", "machine", ")", ":", "root", ",", "lookup", "=", "avoid_dead_links", "(", "root", ",", "machine", ",", "wrap_around", ")", "# Add the sinks in the net to the RoutingTree", "for", "sink", "in", "net", ".", "sinks", ":", "tree_node", "=", "lookup", "[", "placements", "[", "sink", "]", "]", "if", "sink", "in", "route_to_endpoint", ":", "# Sinks with route-to-endpoint constraints must be routed", "# in the according directions.", "tree_node", ".", "children", ".", "append", "(", "(", "route_to_endpoint", "[", "sink", "]", ",", "sink", ")", ")", "else", ":", "cores", "=", "allocations", ".", "get", "(", "sink", ",", "{", "}", ")", ".", "get", "(", "core_resource", ",", "None", ")", "if", "cores", "is", "not", "None", ":", "# Sinks with the core_resource resource specified must be", "# routed to that set of cores.", "for", "core", "in", "range", "(", "cores", ".", "start", ",", "cores", ".", "stop", ")", ":", "tree_node", ".", "children", ".", "append", "(", "(", "Routes", ".", "core", "(", "core", ")", ",", "sink", ")", ")", "else", ":", "# Sinks without that resource are simply included without", "# an associated route", "tree_node", ".", "children", ".", "append", "(", "(", "None", ",", "sink", ")", ")", "routes", "[", "net", "]", "=", "root", "return", "routes" ]
Routing algorithm based on Neighbour Exploring Routing (NER).

Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast routing,
Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002

This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.

Parameters
----------
radius : int
    Radius of area to search from each node. 20 is arbitrarily selected in
    the paper and shown to be acceptable in practice. If set to zero, this
    method becomes longest dimension first routing.
[ "Routing", "algorithm", "based", "on", "Neighbour", "Exploring", "Routing", "(", "NER", ")", "." ]
python
train
44.442623
voxpupuli/pypuppetdb
pypuppetdb/types.py
https://github.com/voxpupuli/pypuppetdb/blob/cedeecf48014b4ad5b8e2513ca8230c814f45603/pypuppetdb/types.py#L454-L459
def facts(self, **kwargs): """Get all facts of this node. Additional arguments may also be specified that will be passed to the query function. """ return self.__api.facts(query=EqualsOperator("certname", self.name), **kwargs)
[ "def", "facts", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__api", ".", "facts", "(", "query", "=", "EqualsOperator", "(", "\"certname\"", ",", "self", ".", "name", ")", ",", "*", "*", "kwargs", ")" ]
Get all facts of this node. Additional arguments may also be specified that will be passed to the query function.
[ "Get", "all", "facts", "of", "this", "node", ".", "Additional", "arguments", "may", "also", "be", "specified", "that", "will", "be", "passed", "to", "the", "query", "function", "." ]
python
valid
47.5
Knio/dominate
dominate/util.py
https://github.com/Knio/dominate/blob/1eb88f9fd797658eef83568a548e2ef9b546807d/dominate/util.py#L44-L51
def system(cmd, data=None): ''' pipes the output of a program ''' import subprocess s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE) out, err = s.communicate(data) return out.decode('utf8')
[ "def", "system", "(", "cmd", ",", "data", "=", "None", ")", ":", "import", "subprocess", "s", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "s", ".", "communicate", "(", "data", ")", "return", "out", ".", "decode", "(", "'utf8'", ")" ]
pipes the output of a program
[ "pipes", "the", "output", "of", "a", "program" ]
python
valid
29
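A quick usage sketch for the helper above, assuming the dominate package is installed and a POSIX shell is available: the command runs through the shell, optional input bytes are piped to stdin, and stdout comes back decoded as a string.

from dominate.util import system

print(system('echo hello'))          # 'hello\n'
print(system('tr a-z A-Z', b'abc'))  # 'ABC', data is piped to the command's stdin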
quantopian/zipline
zipline/data/history_loader.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/history_loader.py#L48-L63
def load_pricing_adjustments(self, columns, dts, assets): """ Returns ------- adjustments : list[dict[int -> Adjustment]] A list, where each element corresponds to the `columns`, of mappings from index to adjustment objects to apply at that index. """ out = [None] * len(columns) for i, column in enumerate(columns): adjs = {} for asset in assets: adjs.update(self._get_adjustments_in_range( asset, dts, column)) out[i] = adjs return out
[ "def", "load_pricing_adjustments", "(", "self", ",", "columns", ",", "dts", ",", "assets", ")", ":", "out", "=", "[", "None", "]", "*", "len", "(", "columns", ")", "for", "i", ",", "column", "in", "enumerate", "(", "columns", ")", ":", "adjs", "=", "{", "}", "for", "asset", "in", "assets", ":", "adjs", ".", "update", "(", "self", ".", "_get_adjustments_in_range", "(", "asset", ",", "dts", ",", "column", ")", ")", "out", "[", "i", "]", "=", "adjs", "return", "out" ]
Returns ------- adjustments : list[dict[int -> Adjustment]] A list, where each element corresponds to the `columns`, of mappings from index to adjustment objects to apply at that index.
[ "Returns", "-------", "adjustments", ":", "list", "[", "dict", "[", "int", "-", ">", "Adjustment", "]]", "A", "list", "where", "each", "element", "corresponds", "to", "the", "columns", "of", "mappings", "from", "index", "to", "adjustment", "objects", "to", "apply", "at", "that", "index", "." ]
python
train
36.375
Clinical-Genomics/scout
scout/adapter/mongo/variant.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/variant.py#L525-L588
def get_region_vcf(self, case_obj, chrom=None, start=None, end=None, gene_obj=None, variant_type='clinical', category='snv', rank_threshold=None): """Produce a reduced vcf with variants from the specified coordinates This is used for the alignment viewer. Args: case_obj(dict): A case from the scout database variant_type(str): 'clinical' or 'research'. Default: 'clinical' category(str): 'snv' or 'sv'. Default: 'snv' rank_threshold(float): Only load variants above this score. Default: 5 chrom(str): Load variants from a certain chromosome start(int): Specify the start position end(int): Specify the end position gene_obj(dict): A gene object from the database Returns: file_name(str): Path to the temporary file """ rank_threshold = rank_threshold or -100 variant_file = None if variant_type == 'clinical': if category == 'snv': variant_file = case_obj['vcf_files'].get('vcf_snv') elif category == 'sv': variant_file = case_obj['vcf_files'].get('vcf_sv') elif category == 'str': variant_file = case_obj['vcf_files'].get('vcf_str') elif variant_type == 'research': if category == 'snv': variant_file = case_obj['vcf_files'].get('vcf_snv_research') elif category == 'sv': variant_file = case_obj['vcf_files'].get('vcf_sv_research') if not variant_file: raise SyntaxError("Vcf file does not seem to exist") vcf_obj = VCF(variant_file) region = "" if gene_obj: chrom = gene_obj['chromosome'] start = gene_obj['start'] end = gene_obj['end'] if chrom: if (start and end): region = "{0}:{1}-{2}".format(chrom, start, end) else: region = "{0}".format(chrom) else: rank_threshold = rank_threshold or 5 with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp: file_name = str(pathlib.Path(temp.name)) for header_line in vcf_obj.raw_header.split('\n'): if len(header_line) > 3: temp.write(header_line + '\n') for variant in vcf_obj(region): temp.write(str(variant)) return file_name
[ "def", "get_region_vcf", "(", "self", ",", "case_obj", ",", "chrom", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "gene_obj", "=", "None", ",", "variant_type", "=", "'clinical'", ",", "category", "=", "'snv'", ",", "rank_threshold", "=", "None", ")", ":", "rank_threshold", "=", "rank_threshold", "or", "-", "100", "variant_file", "=", "None", "if", "variant_type", "==", "'clinical'", ":", "if", "category", "==", "'snv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_snv'", ")", "elif", "category", "==", "'sv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_sv'", ")", "elif", "category", "==", "'str'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_str'", ")", "elif", "variant_type", "==", "'research'", ":", "if", "category", "==", "'snv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_snv_research'", ")", "elif", "category", "==", "'sv'", ":", "variant_file", "=", "case_obj", "[", "'vcf_files'", "]", ".", "get", "(", "'vcf_sv_research'", ")", "if", "not", "variant_file", ":", "raise", "SyntaxError", "(", "\"Vcf file does not seem to exist\"", ")", "vcf_obj", "=", "VCF", "(", "variant_file", ")", "region", "=", "\"\"", "if", "gene_obj", ":", "chrom", "=", "gene_obj", "[", "'chromosome'", "]", "start", "=", "gene_obj", "[", "'start'", "]", "end", "=", "gene_obj", "[", "'end'", "]", "if", "chrom", ":", "if", "(", "start", "and", "end", ")", ":", "region", "=", "\"{0}:{1}-{2}\"", ".", "format", "(", "chrom", ",", "start", ",", "end", ")", "else", ":", "region", "=", "\"{0}\"", ".", "format", "(", "chrom", ")", "else", ":", "rank_threshold", "=", "rank_threshold", "or", "5", "with", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w'", ",", "delete", "=", "False", ")", "as", "temp", ":", "file_name", "=", "str", "(", "pathlib", ".", "Path", "(", "temp", ".", "name", ")", ")", "for", "header_line", "in", "vcf_obj", ".", "raw_header", ".", "split", "(", "'\\n'", ")", ":", "if", "len", "(", "header_line", ")", ">", "3", ":", "temp", ".", "write", "(", "header_line", "+", "'\\n'", ")", "for", "variant", "in", "vcf_obj", "(", "region", ")", ":", "temp", ".", "write", "(", "str", "(", "variant", ")", ")", "return", "file_name" ]
Produce a reduced vcf with variants from the specified coordinates This is used for the alignment viewer. Args: case_obj(dict): A case from the scout database variant_type(str): 'clinical' or 'research'. Default: 'clinical' category(str): 'snv' or 'sv'. Default: 'snv' rank_threshold(float): Only load variants above this score. Default: 5 chrom(str): Load variants from a certain chromosome start(int): Specify the start position end(int): Specify the end position gene_obj(dict): A gene object from the database Returns: file_name(str): Path to the temporary file
[ "Produce", "a", "reduced", "vcf", "with", "variants", "from", "the", "specified", "coordinates", "This", "is", "used", "for", "the", "alignment", "viewer", "." ]
python
test
38.5625
hootnot/oanda-api-v20
oandapyV20/oandapyV20.py
https://github.com/hootnot/oanda-api-v20/blob/f300734238a6a3958e07e60456155fbc53748aa8/oandapyV20/oandapyV20.py#L262-L324
def request(self, endpoint): """Perform a request for the APIRequest instance 'endpoint'. Parameters ---------- endpoint : APIRequest The endpoint parameter contains an instance of an APIRequest containing the endpoint, method and optionally other parameters or body data. Raises ------ V20Error in case of HTTP response code >= 400 """ method = endpoint.method method = method.lower() params = None try: params = getattr(endpoint, "params") except AttributeError: # request does not have params params = {} headers = {} if hasattr(endpoint, "HEADERS"): headers = getattr(endpoint, "HEADERS") request_args = {} if method == 'get': request_args['params'] = params elif hasattr(endpoint, "data") and endpoint.data: request_args['json'] = endpoint.data # if any parameter for request then merge them request_args.update(self._request_params) # which API to access ? if not (hasattr(endpoint, "STREAM") and getattr(endpoint, "STREAM") is True): url = "{}/{}".format( TRADING_ENVIRONMENTS[self.environment]["api"], endpoint) response = self.__request(method, url, request_args, headers=headers) content = response.content.decode('utf-8') content = json.loads(content) # update endpoint endpoint.response = content endpoint.status_code = response.status_code return content else: url = "{}/{}".format( TRADING_ENVIRONMENTS[self.environment]["stream"], endpoint) endpoint.response = self.__stream_request(method, url, request_args, headers=headers) return endpoint.response
[ "def", "request", "(", "self", ",", "endpoint", ")", ":", "method", "=", "endpoint", ".", "method", "method", "=", "method", ".", "lower", "(", ")", "params", "=", "None", "try", ":", "params", "=", "getattr", "(", "endpoint", ",", "\"params\"", ")", "except", "AttributeError", ":", "# request does not have params", "params", "=", "{", "}", "headers", "=", "{", "}", "if", "hasattr", "(", "endpoint", ",", "\"HEADERS\"", ")", ":", "headers", "=", "getattr", "(", "endpoint", ",", "\"HEADERS\"", ")", "request_args", "=", "{", "}", "if", "method", "==", "'get'", ":", "request_args", "[", "'params'", "]", "=", "params", "elif", "hasattr", "(", "endpoint", ",", "\"data\"", ")", "and", "endpoint", ".", "data", ":", "request_args", "[", "'json'", "]", "=", "endpoint", ".", "data", "# if any parameter for request then merge them", "request_args", ".", "update", "(", "self", ".", "_request_params", ")", "# which API to access ?", "if", "not", "(", "hasattr", "(", "endpoint", ",", "\"STREAM\"", ")", "and", "getattr", "(", "endpoint", ",", "\"STREAM\"", ")", "is", "True", ")", ":", "url", "=", "\"{}/{}\"", ".", "format", "(", "TRADING_ENVIRONMENTS", "[", "self", ".", "environment", "]", "[", "\"api\"", "]", ",", "endpoint", ")", "response", "=", "self", ".", "__request", "(", "method", ",", "url", ",", "request_args", ",", "headers", "=", "headers", ")", "content", "=", "response", ".", "content", ".", "decode", "(", "'utf-8'", ")", "content", "=", "json", ".", "loads", "(", "content", ")", "# update endpoint", "endpoint", ".", "response", "=", "content", "endpoint", ".", "status_code", "=", "response", ".", "status_code", "return", "content", "else", ":", "url", "=", "\"{}/{}\"", ".", "format", "(", "TRADING_ENVIRONMENTS", "[", "self", ".", "environment", "]", "[", "\"stream\"", "]", ",", "endpoint", ")", "endpoint", ".", "response", "=", "self", ".", "__stream_request", "(", "method", ",", "url", ",", "request_args", ",", "headers", "=", "headers", ")", "return", "endpoint", ".", "response" ]
Perform a request for the APIRequest instance 'endpoint'. Parameters ---------- endpoint : APIRequest The endpoint parameter contains an instance of an APIRequest containing the endpoint, method and optionally other parameters or body data. Raises ------ V20Error in case of HTTP response code >= 400
[ "Perform", "a", "request", "for", "the", "APIRequest", "instance", "endpoint", "." ]
python
train
33.68254
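A hedged sketch of driving request() through the public API class from the record above; the access token and account ID are placeholders:

import oandapyV20
from oandapyV20.endpoints.accounts import AccountDetails

client = oandapyV20.API(access_token="<token>", environment="practice")
r = AccountDetails(accountID="101-004-1234567-001")
rv = client.request(r)     # parsed JSON is returned and also stored on r.response
print(rv)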
csvsoundsystem/pytreasuryio
treasuryio/tweetbot.py
https://github.com/csvsoundsystem/pytreasuryio/blob/728caf815d16cd2f3548d8b67c84313de76f9be7/treasuryio/tweetbot.py#L16-L41
def tweet(tweet_text_func): ''' A decorator to make a function Tweet Parameters - `tweet_text_func` is a function that takes no parameters and returns a tweetable string For example:: @tweet def total_deposits_this_week(): # ... @tweet def not_an_interesting_tweet(): return 'This tweet is not data-driven.' ''' def tweet_func(): api = _connect_to_twitter() tweet = tweet_text_func() print "Tweeting: %s" % tweet api.update_status(tweet) return tweet return tweet_func
[ "def", "tweet", "(", "tweet_text_func", ")", ":", "def", "tweet_func", "(", ")", ":", "api", "=", "_connect_to_twitter", "(", ")", "tweet", "=", "tweet_text_func", "(", ")", "print", "\"Tweeting: %s\"", "%", "tweet", "api", ".", "update_status", "(", "tweet", ")", "return", "tweet", "return", "tweet_func" ]
A decorator to make a function Tweet Parameters - `tweet_text_func` is a function that takes no parameters and returns a tweetable string For example:: @tweet def total_deposits_this_week(): # ... @tweet def not_an_interesting_tweet(): return 'This tweet is not data-driven.'
[ "A", "decorator", "to", "make", "a", "function", "Tweet" ]
python
train
22.192308
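A small sketch mirroring the decorator's own docstring example; the tweet text is made up, and _connect_to_twitter still needs valid credentials configured:

from treasuryio.tweetbot import tweet

@tweet
def total_deposits_this_week():
    return 'Treasury deposits this week: $1.23 billion'

# calling the wrapped function connects to Twitter and posts the returned string
total_deposits_this_week()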
python-openxml/python-docx
docx/oxml/text/run.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/text/run.py#L145-L160
def add_char(self, char): """ Process the next character of input through the translation finite state maching (FSM). There are two possible states, buffer pending and not pending, but those are hidden behind the ``.flush()`` method which must be called at the end of text to ensure any pending ``<w:t>`` element is written. """ if char == '\t': self.flush() self._r.add_tab() elif char in '\r\n': self.flush() self._r.add_br() else: self._bfr.append(char)
[ "def", "add_char", "(", "self", ",", "char", ")", ":", "if", "char", "==", "'\\t'", ":", "self", ".", "flush", "(", ")", "self", ".", "_r", ".", "add_tab", "(", ")", "elif", "char", "in", "'\\r\\n'", ":", "self", ".", "flush", "(", ")", "self", ".", "_r", ".", "add_br", "(", ")", "else", ":", "self", ".", "_bfr", ".", "append", "(", "char", ")" ]
Process the next character of input through the translation finite state maching (FSM). There are two possible states, buffer pending and not pending, but those are hidden behind the ``.flush()`` method which must be called at the end of text to ensure any pending ``<w:t>`` element is written.
[ "Process", "the", "next", "character", "of", "input", "through", "the", "translation", "finite", "state", "maching", "(", "FSM", ")", ".", "There", "are", "two", "possible", "states", "buffer", "pending", "and", "not", "pending", "but", "those", "are", "hidden", "behind", "the", ".", "flush", "()", "method", "which", "must", "be", "called", "at", "the", "end", "of", "text", "to", "ensure", "any", "pending", "<w", ":", "t", ">", "element", "is", "written", "." ]
python
train
36.3125
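A standalone illustration of the same buffer/flush state machine, decoupled from python-docx's run element; the RunBuilder class and the <TAB>/<BR> placeholders are inventions for this sketch:

class RunBuilder:
    def __init__(self):
        self._bfr = []          # pending literal characters
        self.parts = []

    def flush(self):
        if self._bfr:
            self.parts.append(''.join(self._bfr))
            self._bfr = []

    def add_char(self, char):
        if char == '\t':
            self.flush()
            self.parts.append('<TAB>')
        elif char in '\r\n':
            self.flush()
            self.parts.append('<BR>')
        else:
            self._bfr.append(char)

rb = RunBuilder()
for c in 'foo\tbar\n':
    rb.add_char(c)
rb.flush()
print(rb.parts)                 # ['foo', '<TAB>', 'bar', '<BR>']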
markovmodel/PyEMMA
pyemma/coordinates/api.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/api.py#L439-L524
def pipeline(stages, run=True, stride=1, chunksize=None): r""" Data analysis pipeline. Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it (unless prevented). If this function takes too long, consider loading data in memory. Alternatively if the data is to large to be loaded into memory make use of the stride parameter. Parameters ---------- stages : data input or list of pipeline stages If given a single pipeline stage this must be a data input constructed by :py:func:`source`. If a list of pipelining stages are given, the first stage must be a data input constructed by :py:func:`source`. run : bool, optional, default = True If True, the pipeline will be parametrized immediately with the given stages. If only an input stage is given, the run flag has no effect at this time. True also means that the pipeline will be immediately re-parametrized when further stages are added to it. *Attention* True means this function may take a long time to compute. If False, the pipeline will be passive, i.e. it will not do any computations before you call parametrize() stride : int, optional, default = 1 If set to 1, all input data will be used throughout the pipeline to parametrize its stages. Note that this could cause the parametrization step to be very slow for large data sets. Since molecular dynamics data is usually correlated at short timescales, it is often sufficient to parametrize the pipeline at a longer stride. See also stride option in the output functions of the pipeline. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Returns ------- pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` A pipeline object that is able to conduct big data analysis with limited memory in streaming mode. Examples -------- >>> import numpy as np >>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline Create some random data and cluster centers: >>> data = np.random.random((1000, 3)) >>> centers = data[np.random.choice(1000, 10)] >>> reader = source(data) Define a TICA transformation with lag time 10: >>> tica_obj = tica(lag=10) Assign any input to given centers: >>> assign = assign_to_centers(centers=centers) >>> pipe = pipeline([reader, tica_obj, assign]) >>> pipe.parametrize() .. autoclass:: pyemma.coordinates.pipelines.Pipeline :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :attributes: """ from pyemma.coordinates.pipelines import Pipeline if not isinstance(stages, list): stages = [stages] p = Pipeline(stages, param_stride=stride, chunksize=chunksize) if run: p.parametrize() return p
[ "def", "pipeline", "(", "stages", ",", "run", "=", "True", ",", "stride", "=", "1", ",", "chunksize", "=", "None", ")", ":", "from", "pyemma", ".", "coordinates", ".", "pipelines", "import", "Pipeline", "if", "not", "isinstance", "(", "stages", ",", "list", ")", ":", "stages", "=", "[", "stages", "]", "p", "=", "Pipeline", "(", "stages", ",", "param_stride", "=", "stride", ",", "chunksize", "=", "chunksize", ")", "if", "run", ":", "p", ".", "parametrize", "(", ")", "return", "p" ]
r""" Data analysis pipeline. Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it (unless prevented). If this function takes too long, consider loading data in memory. Alternatively if the data is to large to be loaded into memory make use of the stride parameter. Parameters ---------- stages : data input or list of pipeline stages If given a single pipeline stage this must be a data input constructed by :py:func:`source`. If a list of pipelining stages are given, the first stage must be a data input constructed by :py:func:`source`. run : bool, optional, default = True If True, the pipeline will be parametrized immediately with the given stages. If only an input stage is given, the run flag has no effect at this time. True also means that the pipeline will be immediately re-parametrized when further stages are added to it. *Attention* True means this function may take a long time to compute. If False, the pipeline will be passive, i.e. it will not do any computations before you call parametrize() stride : int, optional, default = 1 If set to 1, all input data will be used throughout the pipeline to parametrize its stages. Note that this could cause the parametrization step to be very slow for large data sets. Since molecular dynamics data is usually correlated at short timescales, it is often sufficient to parametrize the pipeline at a longer stride. See also stride option in the output functions of the pipeline. chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Returns ------- pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` A pipeline object that is able to conduct big data analysis with limited memory in streaming mode. Examples -------- >>> import numpy as np >>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline Create some random data and cluster centers: >>> data = np.random.random((1000, 3)) >>> centers = data[np.random.choice(1000, 10)] >>> reader = source(data) Define a TICA transformation with lag time 10: >>> tica_obj = tica(lag=10) Assign any input to given centers: >>> assign = assign_to_centers(centers=centers) >>> pipe = pipeline([reader, tica_obj, assign]) >>> pipe.parametrize() .. autoclass:: pyemma.coordinates.pipelines.Pipeline :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.coordinates.pipelines.Pipeline :attributes:
[ "r", "Data", "analysis", "pipeline", "." ]
python
train
38.418605
PMBio/limix-backup
limix/varDecomp/varianceDecomposition.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/varDecomp/varianceDecomposition.py#L385-L395
def _det_inference(self): """ Internal method for determining the inference method """ # 2 random effects with complete design -> gp2KronSum # TODO: add check for low-rankness, use GP3KronSumLR and GP2KronSumLR when possible if (self.n_randEffs==2) and (~sp.isnan(self.Y).any()): rv = 'GP2KronSum' else: rv = 'GP' return rv
[ "def", "_det_inference", "(", "self", ")", ":", "# 2 random effects with complete design -> gp2KronSum", "# TODO: add check for low-rankness, use GP3KronSumLR and GP2KronSumLR when possible", "if", "(", "self", ".", "n_randEffs", "==", "2", ")", "and", "(", "~", "sp", ".", "isnan", "(", "self", ".", "Y", ")", ".", "any", "(", ")", ")", ":", "rv", "=", "'GP2KronSum'", "else", ":", "rv", "=", "'GP'", "return", "rv" ]
Internal method for determining the inference method
[ "Internal", "method", "for", "determining", "the", "inference", "method" ]
python
train
36.454545
rjdkmr/do_x3dna
dnaMD/dnaMD/dnaEY.py
https://github.com/rjdkmr/do_x3dna/blob/fe910335eefcada76737f9e7cd6f25036cd32ab6/dnaMD/dnaMD/dnaEY.py#L1269-L1412
def getLocalDeformationEnergy(self, bp, complexDna, freeDnaFrames=None, boundDnaFrames=None, helical=False, unit='kT', which='all', outFile=None): r"""Deformation energy of the input DNA using local elastic properties The deformation energy of a base-step/s for probe DNA object with reference to the same base-step/s DNA present in the current DNA object. The deformation free energy is calculated using elastic matrix as follows .. math:: G = \frac{1}{2}\mathbf{xKx^T} When ``helical='False'`` .. math:: \mathbf{K} = \mathbf{K}_{base-step} .. math:: \mathbf{x} = \begin{bmatrix} (Dx_{i}-Dx_0) & (Dy_i - Dy_0) & (Dz_i - Dz_0) & (\tau_i - \tau_0) & (\rho_i - \rho_0) & (\omega_i - \omega_0) \end{bmatrix} When ``helical='True'`` .. math:: \mathbf{K} = \mathbf{K}_{helical-base-step} .. math:: \mathbf{x} = \begin{bmatrix} (dx_{i}-dx_0) & (dy_i - dy_0) & (h_i - h_0) & (\eta_i - \eta_0) & (\theta_i - \theta_0) & (\Omega_i - \Omega_0) \end{bmatrix} .. currentmodule:: dnaMD Parameters ---------- bp : list List of two base-steps forming the DNA segment. For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered. complexDna : :class:`dnaMD.DNA` Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated. freeDnaFrames : list To select a trajectory segment of current (free) DNA data. List of two trajectory frames between which parameters will be extracted. It can be used to select portions of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be considered. boundDnaFrames : list To select a trajectory segment of input (bound) DNA data. List of two trajectory frames between which parameters will be extracted. It can be used to select portions of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be considered. helical : bool If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise, by default, elastic matrix for **base-step** parameters are calculated. unit : str Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``. which : str or list For which motions (degrees of freedom), energy should be calculated. It should be either a list containing terms listed below or"all" for all energy terms. Following keywords are available: * ``'full'`` : Use entire elastic matrix -- all parameters with their coupling * ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling * ``'shift'`` or ``'x-disp'`` * ``'slide'`` or ``'y-idsp'`` * ``'rise'`` or ``'h-rise'`` * ``'tilt'`` or ``'inclination'`` * ``'roll'`` or ``'tip'`` * ``'twist'`` or ``'h-twist'`` outFile : str Output file in csv format. Returns ------- time : numpy.ndarray 1D array containing time values. energy : dict of numpy.ndarray Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA. 
""" if helical: energyTerms = ['full', 'diag', 'x-disp', 'y-disp', 'h-rise', 'inclination', 'tip', 'h-twist'] else: energyTerms = ['full', 'diag', 'shift', 'slide', 'rise', 'tilt', 'roll', 'twist'] if isinstance(which, str): if which != 'all': raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms)) else: which = energyTerms elif isinstance(which, list): for key in which: if key not in energyTerms: raise ValueError('{0} is not a supported keyword.\n Use from the following list: \n{1}'.format( which, energyTerms)) else: raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms)) means, esMatrix = self.calculateLocalElasticity(bp, frames=freeDnaFrames, helical=helical, unit=unit) time, array = self.extractLocalParameters(complexDna, bp, frames=boundDnaFrames, helical=helical) # Initialize energy dictionary energyOut = OrderedDict() for key in which: energyOut[key] = [] for i in range(array[0].shape[0]): vec = array[:, i] diff = vec - means for key in which: t_energy = self._calcLocalEnergy(diff, esMatrix, key) energyOut[key].append(t_energy) for key in which: energyOut[key] = np.asarray(energyOut[key]) # Write output file if outFile is not None: with open(outFile, 'w') as fout: fout.write('#Time') for name in which: fout.write(', {0}'.format(name)) fout.write('\n') for t in range(len(time)): fout.write('{0:.3f}'.format(time[t])) for name in which: fout.write(', {0:.5f}'.format(energyOut[name][t])) fout.write('\n') return time, energyOut
[ "def", "getLocalDeformationEnergy", "(", "self", ",", "bp", ",", "complexDna", ",", "freeDnaFrames", "=", "None", ",", "boundDnaFrames", "=", "None", ",", "helical", "=", "False", ",", "unit", "=", "'kT'", ",", "which", "=", "'all'", ",", "outFile", "=", "None", ")", ":", "if", "helical", ":", "energyTerms", "=", "[", "'full'", ",", "'diag'", ",", "'x-disp'", ",", "'y-disp'", ",", "'h-rise'", ",", "'inclination'", ",", "'tip'", ",", "'h-twist'", "]", "else", ":", "energyTerms", "=", "[", "'full'", ",", "'diag'", ",", "'shift'", ",", "'slide'", ",", "'rise'", ",", "'tilt'", ",", "'roll'", ",", "'twist'", "]", "if", "isinstance", "(", "which", ",", "str", ")", ":", "if", "which", "!=", "'all'", ":", "raise", "ValueError", "(", "'Either use \"all\" or use list of terms from this {0} list \\n.'", ".", "format", "(", "energyTerms", ")", ")", "else", ":", "which", "=", "energyTerms", "elif", "isinstance", "(", "which", ",", "list", ")", ":", "for", "key", "in", "which", ":", "if", "key", "not", "in", "energyTerms", ":", "raise", "ValueError", "(", "'{0} is not a supported keyword.\\n Use from the following list: \\n{1}'", ".", "format", "(", "which", ",", "energyTerms", ")", ")", "else", ":", "raise", "ValueError", "(", "'Either use \"all\" or use list of terms from this {0} list \\n.'", ".", "format", "(", "energyTerms", ")", ")", "means", ",", "esMatrix", "=", "self", ".", "calculateLocalElasticity", "(", "bp", ",", "frames", "=", "freeDnaFrames", ",", "helical", "=", "helical", ",", "unit", "=", "unit", ")", "time", ",", "array", "=", "self", ".", "extractLocalParameters", "(", "complexDna", ",", "bp", ",", "frames", "=", "boundDnaFrames", ",", "helical", "=", "helical", ")", "# Initialize energy dictionary", "energyOut", "=", "OrderedDict", "(", ")", "for", "key", "in", "which", ":", "energyOut", "[", "key", "]", "=", "[", "]", "for", "i", "in", "range", "(", "array", "[", "0", "]", ".", "shape", "[", "0", "]", ")", ":", "vec", "=", "array", "[", ":", ",", "i", "]", "diff", "=", "vec", "-", "means", "for", "key", "in", "which", ":", "t_energy", "=", "self", ".", "_calcLocalEnergy", "(", "diff", ",", "esMatrix", ",", "key", ")", "energyOut", "[", "key", "]", ".", "append", "(", "t_energy", ")", "for", "key", "in", "which", ":", "energyOut", "[", "key", "]", "=", "np", ".", "asarray", "(", "energyOut", "[", "key", "]", ")", "# Write output file", "if", "outFile", "is", "not", "None", ":", "with", "open", "(", "outFile", ",", "'w'", ")", "as", "fout", ":", "fout", ".", "write", "(", "'#Time'", ")", "for", "name", "in", "which", ":", "fout", ".", "write", "(", "', {0}'", ".", "format", "(", "name", ")", ")", "fout", ".", "write", "(", "'\\n'", ")", "for", "t", "in", "range", "(", "len", "(", "time", ")", ")", ":", "fout", ".", "write", "(", "'{0:.3f}'", ".", "format", "(", "time", "[", "t", "]", ")", ")", "for", "name", "in", "which", ":", "fout", ".", "write", "(", "', {0:.5f}'", ".", "format", "(", "energyOut", "[", "name", "]", "[", "t", "]", ")", ")", "fout", ".", "write", "(", "'\\n'", ")", "return", "time", ",", "energyOut" ]
r"""Deformation energy of the input DNA using local elastic properties The deformation energy of a base-step/s for probe DNA object with reference to the same base-step/s DNA present in the current DNA object. The deformation free energy is calculated using elastic matrix as follows .. math:: G = \frac{1}{2}\mathbf{xKx^T} When ``helical='False'`` .. math:: \mathbf{K} = \mathbf{K}_{base-step} .. math:: \mathbf{x} = \begin{bmatrix} (Dx_{i}-Dx_0) & (Dy_i - Dy_0) & (Dz_i - Dz_0) & (\tau_i - \tau_0) & (\rho_i - \rho_0) & (\omega_i - \omega_0) \end{bmatrix} When ``helical='True'`` .. math:: \mathbf{K} = \mathbf{K}_{helical-base-step} .. math:: \mathbf{x} = \begin{bmatrix} (dx_{i}-dx_0) & (dy_i - dy_0) & (h_i - h_0) & (\eta_i - \eta_0) & (\theta_i - \theta_0) & (\Omega_i - \Omega_0) \end{bmatrix} .. currentmodule:: dnaMD Parameters ---------- bp : list List of two base-steps forming the DNA segment. For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered. complexDna : :class:`dnaMD.DNA` Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated. freeDnaFrames : list To select a trajectory segment of current (free) DNA data. List of two trajectory frames between which parameters will be extracted. It can be used to select portions of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be considered. boundDnaFrames : list To select a trajectory segment of input (bound) DNA data. List of two trajectory frames between which parameters will be extracted. It can be used to select portions of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be considered. helical : bool If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise, by default, elastic matrix for **base-step** parameters are calculated. unit : str Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``. which : str or list For which motions (degrees of freedom), energy should be calculated. It should be either a list containing terms listed below or"all" for all energy terms. Following keywords are available: * ``'full'`` : Use entire elastic matrix -- all parameters with their coupling * ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling * ``'shift'`` or ``'x-disp'`` * ``'slide'`` or ``'y-idsp'`` * ``'rise'`` or ``'h-rise'`` * ``'tilt'`` or ``'inclination'`` * ``'roll'`` or ``'tip'`` * ``'twist'`` or ``'h-twist'`` outFile : str Output file in csv format. Returns ------- time : numpy.ndarray 1D array containing time values. energy : dict of numpy.ndarray Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA.
[ "r", "Deformation", "energy", "of", "the", "input", "DNA", "using", "local", "elastic", "properties" ]
python
train
39.930556
bcbio/bcbio-nextgen
bcbio/variation/population.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/population.py#L389-L417
def prep_db_parallel(samples, parallel_fn): """Prepares gemini databases in parallel, handling jointly called populations. """ batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls) to_process = [] has_batches = False for (name, caller), info in batch_groups.items(): fnames = [x[0] for x in info] to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras]) has_batches = True for name, caller, data, fname in singles: to_process.append([[fname], (str(name), caller, False), [data], extras]) output = parallel_fn("prep_gemini_db", to_process) out_fetch = {} for batch_id, out_file in output: out_fetch[tuple(batch_id)] = out_file out = [] for batch_name, data in out_retrieve: out_variants = [] for vrn in data["variants"]: use_population = vrn.pop("population", True) if use_population: vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])] out_variants.append(vrn) data["variants"] = out_variants out.append([data]) for x in extras: out.append([x]) return out
[ "def", "prep_db_parallel", "(", "samples", ",", "parallel_fn", ")", ":", "batch_groups", ",", "singles", ",", "out_retrieve", ",", "extras", "=", "_group_by_batches", "(", "samples", ",", "_has_variant_calls", ")", "to_process", "=", "[", "]", "has_batches", "=", "False", "for", "(", "name", ",", "caller", ")", ",", "info", "in", "batch_groups", ".", "items", "(", ")", ":", "fnames", "=", "[", "x", "[", "0", "]", "for", "x", "in", "info", "]", "to_process", ".", "append", "(", "[", "fnames", ",", "(", "str", "(", "name", ")", ",", "caller", ",", "True", ")", ",", "[", "x", "[", "1", "]", "for", "x", "in", "info", "]", ",", "extras", "]", ")", "has_batches", "=", "True", "for", "name", ",", "caller", ",", "data", ",", "fname", "in", "singles", ":", "to_process", ".", "append", "(", "[", "[", "fname", "]", ",", "(", "str", "(", "name", ")", ",", "caller", ",", "False", ")", ",", "[", "data", "]", ",", "extras", "]", ")", "output", "=", "parallel_fn", "(", "\"prep_gemini_db\"", ",", "to_process", ")", "out_fetch", "=", "{", "}", "for", "batch_id", ",", "out_file", "in", "output", ":", "out_fetch", "[", "tuple", "(", "batch_id", ")", "]", "=", "out_file", "out", "=", "[", "]", "for", "batch_name", ",", "data", "in", "out_retrieve", ":", "out_variants", "=", "[", "]", "for", "vrn", "in", "data", "[", "\"variants\"", "]", ":", "use_population", "=", "vrn", ".", "pop", "(", "\"population\"", ",", "True", ")", "if", "use_population", ":", "vrn", "[", "\"population\"", "]", "=", "out_fetch", "[", "(", "batch_name", ",", "vrn", "[", "\"variantcaller\"", "]", ")", "]", "out_variants", ".", "append", "(", "vrn", ")", "data", "[", "\"variants\"", "]", "=", "out_variants", "out", ".", "append", "(", "[", "data", "]", ")", "for", "x", "in", "extras", ":", "out", ".", "append", "(", "[", "x", "]", ")", "return", "out" ]
Prepares gemini databases in parallel, handling jointly called populations.
[ "Prepares", "gemini", "databases", "in", "parallel", "handling", "jointly", "called", "populations", "." ]
python
train
41.241379
mitsei/dlkit
dlkit/handcar/learning/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/objects.py#L575-L586
def get_assessment_ids(self): """Gets the Ids of any assessments associated with this activity. return: (osid.id.IdList) - list of assessment Ids raise: IllegalState - is_assessment_based_activity() is false compliance: mandatory - This method must be implemented. """ if not self.is_assessment_based_activity(): raise IllegalState() else: return [Id(a) for a in self._my_map['assessmentIds']]
[ "def", "get_assessment_ids", "(", "self", ")", ":", "if", "not", "self", ".", "is_assessment_based_activity", "(", ")", ":", "raise", "IllegalState", "(", ")", "else", ":", "return", "[", "Id", "(", "a", ")", "for", "a", "in", "self", ".", "_my_map", "[", "'assessmentIds'", "]", "]" ]
Gets the Ids of any assessments associated with this activity. return: (osid.id.IdList) - list of assessment Ids raise: IllegalState - is_assessment_based_activity() is false compliance: mandatory - This method must be implemented.
[ "Gets", "the", "Ids", "of", "any", "assessments", "associated", "with", "this", "activity", "." ]
python
train
38.75
Pixelapse/pyglass
pyglass/api.py
https://github.com/Pixelapse/pyglass/blob/83cd0ff2b0b7cdaf4ec6f54559a626e67455cd33/pyglass/api.py#L9-L23
def preview(src_path): ''' Generates a preview of src_path in the requested format. :returns: A list of preview paths, one for each page. ''' previews = [] if sketch.is_sketchfile(src_path): previews = sketch.preview(src_path) if not previews: previews = quicklook.preview(src_path) previews = [safely_decode(preview) for preview in previews] return previews
[ "def", "preview", "(", "src_path", ")", ":", "previews", "=", "[", "]", "if", "sketch", ".", "is_sketchfile", "(", "src_path", ")", ":", "previews", "=", "sketch", ".", "preview", "(", "src_path", ")", "if", "not", "previews", ":", "previews", "=", "quicklook", ".", "preview", "(", "src_path", ")", "previews", "=", "[", "safely_decode", "(", "preview", ")", "for", "preview", "in", "previews", "]", "return", "previews" ]
Generates a preview of src_path in the requested format. :returns: A list of preview paths, one for each page.
[ "Generates", "a", "preview", "of", "src_path", "in", "the", "requested", "format", ".", ":", "returns", ":", "A", "list", "of", "preview", "paths", "one", "for", "each", "page", "." ]
python
train
24.8
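A hedged usage sketch of the preview() function above; the file path is a placeholder and generating previews requires macOS Quick Look (or Sketch) to be available:

from pyglass.api import preview

page_paths = preview('/tmp/design.sketch')
for i, page in enumerate(page_paths):
    print(i, page)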
projectatomic/atomic-reactor
atomic_reactor/rpm_util.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/rpm_util.py#L36-L95
def parse_rpm_output(output, tags=None, separator=';'): """ Parse output of the rpm query. :param output: list, decoded output (str) from the rpm subprocess :param tags: list, str fields used for query output :return: list, dicts describing each rpm package """ if tags is None: tags = image_component_rpm_tags def field(tag): """ Get a field value by name """ try: value = fields[tags.index(tag)] except ValueError: return None if value == '(none)': return None return value components = [] sigmarker = 'Key ID ' for rpm in output: fields = rpm.rstrip('\n').split(separator) if len(fields) < len(tags): continue signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig') if signature: parts = signature.split(sigmarker, 1) if len(parts) > 1: signature = parts[1] component_rpm = { 'type': 'rpm', 'name': field('NAME'), 'version': field('VERSION'), 'release': field('RELEASE'), 'arch': field('ARCH'), 'sigmd5': field('SIGMD5'), 'signature': signature, } # Special handling for epoch as it must be an integer or None epoch = field('EPOCH') if epoch is not None: epoch = int(epoch) component_rpm['epoch'] = epoch if component_rpm['name'] != 'gpg-pubkey': components.append(component_rpm) return components
[ "def", "parse_rpm_output", "(", "output", ",", "tags", "=", "None", ",", "separator", "=", "';'", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "image_component_rpm_tags", "def", "field", "(", "tag", ")", ":", "\"\"\"\n Get a field value by name\n \"\"\"", "try", ":", "value", "=", "fields", "[", "tags", ".", "index", "(", "tag", ")", "]", "except", "ValueError", ":", "return", "None", "if", "value", "==", "'(none)'", ":", "return", "None", "return", "value", "components", "=", "[", "]", "sigmarker", "=", "'Key ID '", "for", "rpm", "in", "output", ":", "fields", "=", "rpm", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "separator", ")", "if", "len", "(", "fields", ")", "<", "len", "(", "tags", ")", ":", "continue", "signature", "=", "field", "(", "'SIGPGP:pgpsig'", ")", "or", "field", "(", "'SIGGPG:pgpsig'", ")", "if", "signature", ":", "parts", "=", "signature", ".", "split", "(", "sigmarker", ",", "1", ")", "if", "len", "(", "parts", ")", ">", "1", ":", "signature", "=", "parts", "[", "1", "]", "component_rpm", "=", "{", "'type'", ":", "'rpm'", ",", "'name'", ":", "field", "(", "'NAME'", ")", ",", "'version'", ":", "field", "(", "'VERSION'", ")", ",", "'release'", ":", "field", "(", "'RELEASE'", ")", ",", "'arch'", ":", "field", "(", "'ARCH'", ")", ",", "'sigmd5'", ":", "field", "(", "'SIGMD5'", ")", ",", "'signature'", ":", "signature", ",", "}", "# Special handling for epoch as it must be an integer or None", "epoch", "=", "field", "(", "'EPOCH'", ")", "if", "epoch", "is", "not", "None", ":", "epoch", "=", "int", "(", "epoch", ")", "component_rpm", "[", "'epoch'", "]", "=", "epoch", "if", "component_rpm", "[", "'name'", "]", "!=", "'gpg-pubkey'", ":", "components", ".", "append", "(", "component_rpm", ")", "return", "components" ]
Parse output of the rpm query. :param output: list, decoded output (str) from the rpm subprocess :param tags: list, str fields used for query output :return: list, dicts describing each rpm package
[ "Parse", "output", "of", "the", "rpm", "query", "." ]
python
train
25.9
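A sketch of feeding one rpm query line into parse_rpm_output; the explicit tags list and the sample record are assumptions chosen to match the fields the function reads:

from atomic_reactor.rpm_util import parse_rpm_output

tags = ['NAME', 'VERSION', 'RELEASE', 'ARCH', 'EPOCH',
        'SIGMD5', 'SIGPGP:pgpsig', 'SIGGPG:pgpsig']
output = ['bash;4.4.19;10.el8;x86_64;(none);d41d8cd98f00;RSA/SHA256, Key ID 199e2f91fd431d51;(none)\n']

components = parse_rpm_output(output, tags=tags)
# -> [{'type': 'rpm', 'name': 'bash', 'version': '4.4.19', 'release': '10.el8',
#      'arch': 'x86_64', 'sigmd5': 'd41d8cd98f00', 'signature': '199e2f91fd431d51',
#      'epoch': None}]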
coldfix/udiskie
udiskie/mount.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/mount.py#L611-L628
async def delete(self, device, remove=True): """ Detach the loop device. :param device: device object, block device path or mount path :param bool remove: whether to unmount the partition etc. :returns: whether the loop device is deleted """ device = self._find_device(device) if not self.is_handleable(device) or not device.is_loop: self._log.warn(_('not deleting {0}: unhandled device', device)) return False if remove: await self.auto_remove(device, force=True) self._log.debug(_('deleting {0}', device)) await device.delete() self._log.info(_('deleted {0}', device)) return True
[ "async", "def", "delete", "(", "self", ",", "device", ",", "remove", "=", "True", ")", ":", "device", "=", "self", ".", "_find_device", "(", "device", ")", "if", "not", "self", ".", "is_handleable", "(", "device", ")", "or", "not", "device", ".", "is_loop", ":", "self", ".", "_log", ".", "warn", "(", "_", "(", "'not deleting {0}: unhandled device'", ",", "device", ")", ")", "return", "False", "if", "remove", ":", "await", "self", ".", "auto_remove", "(", "device", ",", "force", "=", "True", ")", "self", ".", "_log", ".", "debug", "(", "_", "(", "'deleting {0}'", ",", "device", ")", ")", "await", "device", ".", "delete", "(", ")", "self", ".", "_log", ".", "info", "(", "_", "(", "'deleted {0}'", ",", "device", ")", ")", "return", "True" ]
Detach the loop device. :param device: device object, block device path or mount path :param bool remove: whether to unmount the partition etc. :returns: whether the loop device is deleted
[ "Detach", "the", "loop", "device", "." ]
python
train
39.222222
wharris/dougrain
dougrain/builder.py
https://github.com/wharris/dougrain/blob/45062a1562fc34793e40c6253a93aa91eb4cf855/dougrain/builder.py#L106-L151
def add_link(self, rel, target, wrap=False, **kwargs): """Adds a link to the document. This method adds a link to the given ``target`` to the document with the given ``rel``. If one or more links are already present for that link relationship type, the new link will be added to the existing links for that link relationship type. Unlike ``dougrain.Document.add_link``, this method does not detect equivalence between relationship types with different representations. If ``target`` is a string, a link is added with ``target`` as its ``href`` property and other properties from the keyword arguments. If ``target`` is a ``dougrain.Document`` object, a link is added with ``target``'s URL as its ``href`` property and other property from the keyword arguments. If ``target`` is a ``Builder`` object, a link is added with ``target``'s URL as its ``href`` property and other property from the keyword arguments. This method returns self, allowing it to be chained with additional method calls. Arguments: - ``rel``: a string specifying the link relationship type of the link. It should be a well-known link relation name from the IANA registry (http://www.iana.org/assignments/link-relations/link-relations.xml), a full URI, or a CURIE. - ``target``: the destination of the link. - ``wrap``: Defaults to False, but if True, specifies that the link object should be initally wrapped in a JSON array even if it is the first link for the given ``rel``. """ if isinstance(target, bytes): target = target.decode('utf-8') if isinstance(target, str) or isinstance(target, unicode): new_link = dict(href=target, **kwargs) else: new_link = dict(href=target.url(), **kwargs) self._add_rel('_links', rel, new_link, wrap) return self
[ "def", "add_link", "(", "self", ",", "rel", ",", "target", ",", "wrap", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "target", ",", "bytes", ")", ":", "target", "=", "target", ".", "decode", "(", "'utf-8'", ")", "if", "isinstance", "(", "target", ",", "str", ")", "or", "isinstance", "(", "target", ",", "unicode", ")", ":", "new_link", "=", "dict", "(", "href", "=", "target", ",", "*", "*", "kwargs", ")", "else", ":", "new_link", "=", "dict", "(", "href", "=", "target", ".", "url", "(", ")", ",", "*", "*", "kwargs", ")", "self", ".", "_add_rel", "(", "'_links'", ",", "rel", ",", "new_link", ",", "wrap", ")", "return", "self" ]
Adds a link to the document. This method adds a link to the given ``target`` to the document with the given ``rel``. If one or more links are already present for that link relationship type, the new link will be added to the existing links for that link relationship type. Unlike ``dougrain.Document.add_link``, this method does not detect equivalence between relationship types with different representations. If ``target`` is a string, a link is added with ``target`` as its ``href`` property and other properties from the keyword arguments. If ``target`` is a ``dougrain.Document`` object, a link is added with ``target``'s URL as its ``href`` property and other property from the keyword arguments. If ``target`` is a ``Builder`` object, a link is added with ``target``'s URL as its ``href`` property and other property from the keyword arguments. This method returns self, allowing it to be chained with additional method calls. Arguments: - ``rel``: a string specifying the link relationship type of the link. It should be a well-known link relation name from the IANA registry (http://www.iana.org/assignments/link-relations/link-relations.xml), a full URI, or a CURIE. - ``target``: the destination of the link. - ``wrap``: Defaults to False, but if True, specifies that the link object should be initally wrapped in a JSON array even if it is the first link for the given ``rel``.
[ "Adds", "a", "link", "to", "the", "document", "." ]
python
train
43.173913
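A hedged sketch of chaining add_link() calls on a Builder; the URLs are placeholders and the package-level Builder import is assumed from dougrain's documented usage:

from dougrain import Builder

order = Builder("http://example.com/orders/1")
order.add_link("next", "http://example.com/orders/2") \
     .add_link("customer", "http://example.com/customers/7", title="Alice")
# both relations now live under the builder's '_links' object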
vsjha18/nsetools
utils.py
https://github.com/vsjha18/nsetools/blob/c306b568471701c19195d2f17e112cc92022d3e0/utils.py#L29-L44
def byte_adaptor(fbuffer): """ provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer """ if six.PY3: strings = fbuffer.read().decode('latin-1') fbuffer = six.StringIO(strings) return fbuffer else: return fbuffer
[ "def", "byte_adaptor", "(", "fbuffer", ")", ":", "if", "six", ".", "PY3", ":", "strings", "=", "fbuffer", ".", "read", "(", ")", ".", "decode", "(", "'latin-1'", ")", "fbuffer", "=", "six", ".", "StringIO", "(", "strings", ")", "return", "fbuffer", "else", ":", "return", "fbuffer" ]
provides py3 compatibility by converting byte based file stream to string based file stream Arguments: fbuffer: file like objects containing bytes Returns: string buffer
[ "provides", "py3", "compatibility", "by", "converting", "byte", "based", "file", "stream", "to", "string", "based", "file", "stream" ]
python
train
24.375
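A minimal sketch of byte_adaptor with an in-memory byte stream standing in for a urllib response; the import path assumes the utils module above is installed as part of nsetools:

from io import BytesIO
from nsetools.utils import byte_adaptor

raw = BytesIO(b'symbol,lastPrice\nABC,123.45\n')
text_stream = byte_adaptor(raw)     # on Python 3 this returns a StringIO of decoded text
print(text_stream.read())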
openstack/horizon
openstack_dashboard/api/neutron.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L471-L480
def list_by_instance(self, instance_id): """Gets security groups of an instance. :returns: List of SecurityGroup objects associated with the instance """ ports = port_list(self.request, device_id=instance_id) sg_ids = [] for p in ports: sg_ids += p.security_groups return self._list(id=set(sg_ids)) if sg_ids else []
[ "def", "list_by_instance", "(", "self", ",", "instance_id", ")", ":", "ports", "=", "port_list", "(", "self", ".", "request", ",", "device_id", "=", "instance_id", ")", "sg_ids", "=", "[", "]", "for", "p", "in", "ports", ":", "sg_ids", "+=", "p", ".", "security_groups", "return", "self", ".", "_list", "(", "id", "=", "set", "(", "sg_ids", ")", ")", "if", "sg_ids", "else", "[", "]" ]
Gets security groups of an instance. :returns: List of SecurityGroup objects associated with the instance
[ "Gets", "security", "groups", "of", "an", "instance", "." ]
python
train
37.6
rosscdh/hellosign
hellosign/hellosign.py
https://github.com/rosscdh/hellosign/blob/4061c2733fa9f1b6ebefa99bed69df8373eb93b3/hellosign/hellosign.py#L43-L51
def add_doc(self, doc): """ Simple dict of {'name': '@filename.pdf'}""" if isinstance(doc, HelloDoc) and doc.validate(): self.docs.append(doc) else: if not doc.validate(): raise Exception("HelloDoc Errors %s" % (doc.errors,)) else: raise Exception("add_doc doc must be an instance of class HelloDoc")
[ "def", "add_doc", "(", "self", ",", "doc", ")", ":", "if", "isinstance", "(", "doc", ",", "HelloDoc", ")", "and", "doc", ".", "validate", "(", ")", ":", "self", ".", "docs", ".", "append", "(", "doc", ")", "else", ":", "if", "not", "doc", ".", "validate", "(", ")", ":", "raise", "Exception", "(", "\"HelloDoc Errors %s\"", "%", "(", "doc", ".", "errors", ",", ")", ")", "else", ":", "raise", "Exception", "(", "\"add_doc doc must be an instance of class HelloDoc\"", ")" ]
Simple dict of {'name': '@filename.pdf'}
[ "Simple", "dict", "of", "{", "name", ":" ]
python
train
42.666667
portfors-lab/sparkle
sparkle/data/open.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/data/open.py#L5-L29
def open_acqdata(filename, user='unknown', filemode='w-'): """Opens and returns the correct AcquisitionData object according to filename extention. Supported extentions: * .hdf5, .h5 for sparkle data * .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>` examples (if data file already exists):: data = open_acqdata('myexperiment.hdf5', filemode='r') print data.dataset_names() for batlab data:: data = open('mouse666.raw', filemode='r') print data.dataset_names() """ if filename.lower().endswith((".hdf5", ".h5")): return HDF5Data(filename, user, filemode) elif filename.lower().endswith((".pst", ".raw")): return BatlabData(filename, user, filemode) else: print "File format not supported: ", filename
[ "def", "open_acqdata", "(", "filename", ",", "user", "=", "'unknown'", ",", "filemode", "=", "'w-'", ")", ":", "if", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "(", "\".hdf5\"", ",", "\".h5\"", ")", ")", ":", "return", "HDF5Data", "(", "filename", ",", "user", ",", "filemode", ")", "elif", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "(", "\".pst\"", ",", "\".raw\"", ")", ")", ":", "return", "BatlabData", "(", "filename", ",", "user", ",", "filemode", ")", "else", ":", "print", "\"File format not supported: \"", ",", "filename" ]
Opens and returns the correct AcquisitionData object according to filename extention. Supported extentions: * .hdf5, .h5 for sparkle data * .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>` examples (if data file already exists):: data = open_acqdata('myexperiment.hdf5', filemode='r') print data.dataset_names() for batlab data:: data = open('mouse666.raw', filemode='r') print data.dataset_names()
[ "Opens", "and", "returns", "the", "correct", "AcquisitionData", "object", "according", "to", "filename", "extention", "." ]
python
train
38.52
molmod/molmod
molmod/molecules.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/molecules.py#L278-L292
def compute_rotsym(self, threshold=1e-3*angstrom): """Compute the rotational symmetry number. Optional argument: | ``threshold`` -- only when a rotation results in an rmsd below the given threshold, the rotation is considered to transform the molecule onto itself. """ # Generate a graph with a more permissive threshold for bond lengths: # (is convenient in case of transition state geometries) graph = MolecularGraph.from_geometry(self, scaling=1.5) try: return compute_rotsym(self, graph, threshold) except ValueError: raise ValueError("The rotational symmetry number can only be computed when the graph is fully connected.")
[ "def", "compute_rotsym", "(", "self", ",", "threshold", "=", "1e-3", "*", "angstrom", ")", ":", "# Generate a graph with a more permissive threshold for bond lengths:", "# (is convenient in case of transition state geometries)", "graph", "=", "MolecularGraph", ".", "from_geometry", "(", "self", ",", "scaling", "=", "1.5", ")", "try", ":", "return", "compute_rotsym", "(", "self", ",", "graph", ",", "threshold", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"The rotational symmetry number can only be computed when the graph is fully connected.\"", ")" ]
Compute the rotational symmetry number. Optional argument: | ``threshold`` -- only when a rotation results in an rmsd below the given threshold, the rotation is considered to transform the molecule onto itself.
[ "Compute", "the", "rotational", "symmetry", "number", "." ]
python
train
52.4
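A hedged usage sketch of compute_rotsym(); 'benzene.xyz' is a placeholder geometry file readable by molmod:

from molmod import Molecule

mol = Molecule.from_file('benzene.xyz')
print(mol.compute_rotsym())     # e.g. 12 for a clean benzene geometry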
imjoey/pyhaproxy
pyhaproxy/parse.py
https://github.com/imjoey/pyhaproxy/blob/4f0904acfc6bdb29ba6104ce2f6724c0330441d3/pyhaproxy/parse.py#L57-L69
def build_global(self, global_node): """parse `global` section, and return the config.Global Args: global_node (TreeNode): `global` section treenode Returns: config.Global: an object """ config_block_lines = self.__build_config_block( global_node.config_block) return config.Global(config_block=config_block_lines)
[ "def", "build_global", "(", "self", ",", "global_node", ")", ":", "config_block_lines", "=", "self", ".", "__build_config_block", "(", "global_node", ".", "config_block", ")", "return", "config", ".", "Global", "(", "config_block", "=", "config_block_lines", ")" ]
parse `global` section, and return the config.Global Args: global_node (TreeNode): `global` section treenode Returns: config.Global: an object
[ "parse", "global", "section", "and", "return", "the", "config", ".", "Global" ]
python
train
30
thebigmunch/gmusicapi-wrapper
gmusicapi_wrapper/musicmanager.py
https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/musicmanager.py#L265-L369
def upload(self, filepaths, enable_matching=False, transcode_quality='320k', delete_on_success=False): """Upload local songs to Google Music. Parameters: filepaths (list or str): Filepath(s) to upload. enable_matching (bool): If ``True`` attempt to use `scan and match <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__. This requieres ffmpeg or avconv. transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality <http://trac.ffmpeg.org/wiki/Encode/MP3#VBREncoding>'__. If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality <http://trac.ffmpeg.org/wiki/Encode/MP3#CBREncoding>'__. Default: ``320k`` delete_on_success (bool): Delete successfully uploaded local files. Default: ``False`` Returns: A list of result dictionaries. :: [ {'result': 'uploaded', 'filepath': <filepath>, 'id': <song_id>}, # uploaded {'result': 'matched', 'filepath': <filepath>, 'id': <song_id>}, # matched {'result': 'error', 'filepath': <filepath>, 'message': <error_message>}, # error {'result': 'not_uploaded', 'filepath': <filepath>, 'id': <song_id>, 'message': <reason_message>}, # not_uploaded ALREADY_EXISTS {'result': 'not_uploaded', 'filepath': <filepath>, 'message': <reason_message>} # not_uploaded ] """ filenum = 0 total = len(filepaths) results = [] errors = {} pad = len(str(total)) exist_strings = ["ALREADY_EXISTS", "this song is already uploaded"] for result in self._upload(filepaths, enable_matching=enable_matching, transcode_quality=transcode_quality): filepath = filepaths[filenum] filenum += 1 uploaded, matched, not_uploaded, error = result if uploaded: logger.info( "({num:>{pad}}/{total}) Successfully uploaded -- {file} ({song_id})".format( num=filenum, pad=pad, total=total, file=filepath, song_id=uploaded[filepath] ) ) results.append({'result': 'uploaded', 'filepath': filepath, 'id': uploaded[filepath]}) elif matched: logger.info( "({num:>{pad}}/{total}) Successfully scanned and matched -- {file} ({song_id})".format( num=filenum, pad=pad, total=total, file=filepath, song_id=matched[filepath] ) ) results.append({'result': 'matched', 'filepath': filepath, 'id': matched[filepath]}) elif error: logger.warning("({num:>{pad}}/{total}) Error on upload -- {file}".format(num=filenum, pad=pad, total=total, file=filepath)) results.append({'result': 'error', 'filepath': filepath, 'message': error[filepath]}) errors.update(error) else: if any(exist_string in not_uploaded[filepath] for exist_string in exist_strings): response = "ALREADY EXISTS" song_id = GM_ID_RE.search(not_uploaded[filepath]).group(0) logger.info( "({num:>{pad}}/{total}) Failed to upload -- {file} ({song_id}) | {response}".format( num=filenum, pad=pad, total=total, file=filepath, response=response, song_id=song_id ) ) results.append({'result': 'not_uploaded', 'filepath': filepath, 'id': song_id, 'message': not_uploaded[filepath]}) else: response = not_uploaded[filepath] logger.info( "({num:>{pad}}/{total}) Failed to upload -- {file} | {response}".format( num=filenum, pad=pad, total=total, file=filepath, response=response ) ) results.append({'result': 'not_uploaded', 'filepath': filepath, 'message': not_uploaded[filepath]}) success = (uploaded or matched) or (not_uploaded and 'ALREADY_EXISTS' in not_uploaded[filepath]) if success and delete_on_success: try: os.remove(filepath) except (OSError, PermissionError): logger.warning("Failed to remove {} after successful upload".format(filepath)) if errors: logger.info("\n\nThe following errors occurred:\n") for filepath, e 
in errors.items(): logger.info("{file} | {error}".format(file=filepath, error=e)) logger.info("\nThese filepaths may need to be synced again.\n") return results
[ "def", "upload", "(", "self", ",", "filepaths", ",", "enable_matching", "=", "False", ",", "transcode_quality", "=", "'320k'", ",", "delete_on_success", "=", "False", ")", ":", "filenum", "=", "0", "total", "=", "len", "(", "filepaths", ")", "results", "=", "[", "]", "errors", "=", "{", "}", "pad", "=", "len", "(", "str", "(", "total", ")", ")", "exist_strings", "=", "[", "\"ALREADY_EXISTS\"", ",", "\"this song is already uploaded\"", "]", "for", "result", "in", "self", ".", "_upload", "(", "filepaths", ",", "enable_matching", "=", "enable_matching", ",", "transcode_quality", "=", "transcode_quality", ")", ":", "filepath", "=", "filepaths", "[", "filenum", "]", "filenum", "+=", "1", "uploaded", ",", "matched", ",", "not_uploaded", ",", "error", "=", "result", "if", "uploaded", ":", "logger", ".", "info", "(", "\"({num:>{pad}}/{total}) Successfully uploaded -- {file} ({song_id})\"", ".", "format", "(", "num", "=", "filenum", ",", "pad", "=", "pad", ",", "total", "=", "total", ",", "file", "=", "filepath", ",", "song_id", "=", "uploaded", "[", "filepath", "]", ")", ")", "results", ".", "append", "(", "{", "'result'", ":", "'uploaded'", ",", "'filepath'", ":", "filepath", ",", "'id'", ":", "uploaded", "[", "filepath", "]", "}", ")", "elif", "matched", ":", "logger", ".", "info", "(", "\"({num:>{pad}}/{total}) Successfully scanned and matched -- {file} ({song_id})\"", ".", "format", "(", "num", "=", "filenum", ",", "pad", "=", "pad", ",", "total", "=", "total", ",", "file", "=", "filepath", ",", "song_id", "=", "matched", "[", "filepath", "]", ")", ")", "results", ".", "append", "(", "{", "'result'", ":", "'matched'", ",", "'filepath'", ":", "filepath", ",", "'id'", ":", "matched", "[", "filepath", "]", "}", ")", "elif", "error", ":", "logger", ".", "warning", "(", "\"({num:>{pad}}/{total}) Error on upload -- {file}\"", ".", "format", "(", "num", "=", "filenum", ",", "pad", "=", "pad", ",", "total", "=", "total", ",", "file", "=", "filepath", ")", ")", "results", ".", "append", "(", "{", "'result'", ":", "'error'", ",", "'filepath'", ":", "filepath", ",", "'message'", ":", "error", "[", "filepath", "]", "}", ")", "errors", ".", "update", "(", "error", ")", "else", ":", "if", "any", "(", "exist_string", "in", "not_uploaded", "[", "filepath", "]", "for", "exist_string", "in", "exist_strings", ")", ":", "response", "=", "\"ALREADY EXISTS\"", "song_id", "=", "GM_ID_RE", ".", "search", "(", "not_uploaded", "[", "filepath", "]", ")", ".", "group", "(", "0", ")", "logger", ".", "info", "(", "\"({num:>{pad}}/{total}) Failed to upload -- {file} ({song_id}) | {response}\"", ".", "format", "(", "num", "=", "filenum", ",", "pad", "=", "pad", ",", "total", "=", "total", ",", "file", "=", "filepath", ",", "response", "=", "response", ",", "song_id", "=", "song_id", ")", ")", "results", ".", "append", "(", "{", "'result'", ":", "'not_uploaded'", ",", "'filepath'", ":", "filepath", ",", "'id'", ":", "song_id", ",", "'message'", ":", "not_uploaded", "[", "filepath", "]", "}", ")", "else", ":", "response", "=", "not_uploaded", "[", "filepath", "]", "logger", ".", "info", "(", "\"({num:>{pad}}/{total}) Failed to upload -- {file} | {response}\"", ".", "format", "(", "num", "=", "filenum", ",", "pad", "=", "pad", ",", "total", "=", "total", ",", "file", "=", "filepath", ",", "response", "=", "response", ")", ")", "results", ".", "append", "(", "{", "'result'", ":", "'not_uploaded'", ",", "'filepath'", ":", "filepath", ",", "'message'", ":", "not_uploaded", "[", "filepath", "]", "}", ")", "success", "=", "(", 
"uploaded", "or", "matched", ")", "or", "(", "not_uploaded", "and", "'ALREADY_EXISTS'", "in", "not_uploaded", "[", "filepath", "]", ")", "if", "success", "and", "delete_on_success", ":", "try", ":", "os", ".", "remove", "(", "filepath", ")", "except", "(", "OSError", ",", "PermissionError", ")", ":", "logger", ".", "warning", "(", "\"Failed to remove {} after successful upload\"", ".", "format", "(", "filepath", ")", ")", "if", "errors", ":", "logger", ".", "info", "(", "\"\\n\\nThe following errors occurred:\\n\"", ")", "for", "filepath", ",", "e", "in", "errors", ".", "items", "(", ")", ":", "logger", ".", "info", "(", "\"{file} | {error}\"", ".", "format", "(", "file", "=", "filepath", ",", "error", "=", "e", ")", ")", "logger", ".", "info", "(", "\"\\nThese filepaths may need to be synced again.\\n\"", ")", "return", "results" ]
Upload local songs to Google Music. Parameters: filepaths (list or str): Filepath(s) to upload. enable_matching (bool): If ``True`` attempt to use `scan and match <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__. This requires ffmpeg or avconv. transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality <http://trac.ffmpeg.org/wiki/Encode/MP3#VBREncoding>`__. If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality <http://trac.ffmpeg.org/wiki/Encode/MP3#CBREncoding>`__. Default: ``320k`` delete_on_success (bool): Delete successfully uploaded local files. Default: ``False`` Returns: A list of result dictionaries. :: [ {'result': 'uploaded', 'filepath': <filepath>, 'id': <song_id>}, # uploaded {'result': 'matched', 'filepath': <filepath>, 'id': <song_id>}, # matched {'result': 'error', 'filepath': <filepath>, 'message': <error_message>}, # error {'result': 'not_uploaded', 'filepath': <filepath>, 'id': <song_id>, 'message': <reason_message>}, # not_uploaded ALREADY_EXISTS {'result': 'not_uploaded', 'filepath': <filepath>, 'message': <reason_message>} # not_uploaded ]
[ "Upload", "local", "songs", "to", "Google", "Music", "." ]
python
valid
37.685714
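A minimal usage sketch for the upload() method documented in the record above. The wrapper class, its import path, and the login() step are assumptions for illustration; only upload()'s parameters and result keys come from the record itself.

```python
# Hypothetical driver for upload(); MusicManagerWrapper and login() are assumed,
# the parameters and result-dict keys follow the docstring above.
from gmusicapi_wrapper import MusicManagerWrapper  # assumed import path

mmw = MusicManagerWrapper()
mmw.login()  # assumed authentication step

results = mmw.upload(
    ["/music/track1.mp3", "/music/track2.flac"],
    enable_matching=True,       # requires ffmpeg or avconv
    transcode_quality="320k",   # str -> CBR -b:a; an int would select VBR -q:a
    delete_on_success=False,
)

# Each result dict carries 'result' and 'filepath', plus 'id' and/or 'message'.
for r in results:
    print(r["result"], r["filepath"], r.get("id") or r.get("message", ""))
```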
msikma/kanaconv
kanaconv/utils.py
https://github.com/msikma/kanaconv/blob/194f142e616ab5dd6d13a687b96b9f8abd1b4ea8/kanaconv/utils.py#L72-L95
def _switch_charset_list(characters, target=''): ''' Switches the character set of a list. If a character does not have an equivalent in the target script (e.g. ヹ when converting to hiragana), the original character is kept. ''' # Copy the list to avoid modifying the existing one. characters = characters[:] offset = block_offset * offsets[target]['direction'] for n in range(len(characters)): chars = list(characters[n]) for m in range(len(chars)): char = chars[m] char_offset = ord(char) + offset # Verify that the offset character is within the valid range. if in_range(char_offset, target): chars[m] = chr(char_offset) else: chars[m] = char characters[n] = ''.join(chars) return characters
[ "def", "_switch_charset_list", "(", "characters", ",", "target", "=", "''", ")", ":", "# Copy the list to avoid modifying the existing one.", "characters", "=", "characters", "[", ":", "]", "offset", "=", "block_offset", "*", "offsets", "[", "target", "]", "[", "'direction'", "]", "for", "n", "in", "range", "(", "len", "(", "characters", ")", ")", ":", "chars", "=", "list", "(", "characters", "[", "n", "]", ")", "for", "m", "in", "range", "(", "len", "(", "chars", ")", ")", ":", "char", "=", "chars", "[", "m", "]", "char_offset", "=", "ord", "(", "char", ")", "+", "offset", "# Verify that the offset character is within the valid range.", "if", "in_range", "(", "char_offset", ",", "target", ")", ":", "chars", "[", "m", "]", "=", "chr", "(", "char_offset", ")", "else", ":", "chars", "[", "m", "]", "=", "char", "characters", "[", "n", "]", "=", "''", ".", "join", "(", "chars", ")", "return", "characters" ]
Switches the character set of a list. If a character does not have an equivalent in the target script (e.g. ヹ when converting to hiragana), the original character is kept.
[ "Switches", "the", "character", "set", "of", "a", "list", ".", "If", "a", "character", "does", "not", "have", "an", "equivalent", "in", "the", "target", "script", "(", "e", ".", "g", ".", "ヹ", "when", "converting", "to", "hiragana", ")", "the", "original", "character", "is", "kept", "." ]
python
train
34.583333
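The helper in the record above depends on module-level `block_offset`, `offsets` and `in_range`. Below is a standalone sketch of the same code-point-offset technique with assumed stand-in values (the hiragana and katakana blocks sit 0x60 code points apart); it is not kanaconv's actual configuration.

```python
# Standalone illustration of the offset trick used by _switch_charset_list().
# block_offset, offsets and in_range are assumed stand-ins for kanaconv's own.
block_offset = 0x60  # distance between the hiragana and katakana Unicode blocks
offsets = {'hiragana': {'direction': -1}, 'katakana': {'direction': 1}}

def in_range(codepoint, target):
    # Rough block boundaries for the two kana scripts.
    lo, hi = (0x3041, 0x3096) if target == 'hiragana' else (0x30A1, 0x30F6)
    return lo <= codepoint <= hi

def switch_charset_list(characters, target=''):
    characters = characters[:]  # copy to avoid modifying the caller's list
    offset = block_offset * offsets[target]['direction']
    for n, item in enumerate(characters):
        chars = list(item)
        for m, char in enumerate(chars):
            shifted = ord(char) + offset
            chars[m] = chr(shifted) if in_range(shifted, target) else char
        characters[n] = ''.join(chars)
    return characters

# ヹ has no hiragana equivalent, so it is kept as-is.
print(switch_charset_list(['カナ', 'ヹ'], target='hiragana'))  # ['かな', 'ヹ']
```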
SMTG-UCL/sumo
sumo/plotting/dos_plotter.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/plotting/dos_plotter.py#L190-L320
def get_plot(self, subplot=False, width=None, height=None, xmin=-6., xmax=6., yscale=1, colours=None, plot_total=True, legend_on=True, num_columns=2, legend_frame_on=False, legend_cutoff=3, xlabel='Energy (eV)', ylabel='Arb. units', zero_to_efermi=True, dpi=400, fonts=None, plt=None, style=None, no_base_style=False): """Get a :obj:`matplotlib.pyplot` object of the density of states. Args: subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. width (:obj:`float`, optional): The width of the plot. height (:obj:`float`, optional): The height of the plot. xmin (:obj:`float`, optional): The minimum energy on the x-axis. xmax (:obj:`float`, optional): The maximum energy on the x-axis. yscale (:obj:`float`, optional): Scaling factor for the y-axis. colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, series of rgb value, or any other format supported by matplotlib. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults to ``True``. num_columns (:obj:`int`, optional): The number of columns in the legend. legend_frame_on (:obj:`bool`, optional): Plot a frame around the graph legend. Defaults to ``False``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy) ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS) zero_to_efermi (:obj:`bool`, optional): Normalise the plot such that the Fermi level is set as 0 eV. dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for the image. fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a a single font, specified as a :obj:`str`, or several fonts, specified as a :obj:`list` of :obj:`str`. plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot` object to use for plotting. style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib style specifications, to be composed on top of Sumo base style. no_base_style (:obj:`bool`, optional): Prevent use of sumo base style. This can make alternative styles behave more predictably. Returns: :obj:`matplotlib.pyplot`: The density of states plot. 
""" plot_data = self.dos_plot_data(yscale=yscale, xmin=xmin, xmax=xmax, colours=colours, plot_total=plot_total, legend_cutoff=legend_cutoff, subplot=subplot, zero_to_efermi=zero_to_efermi) if subplot: nplots = len(plot_data['lines']) plt = pretty_subplot(nplots, 1, width=width, height=height, dpi=dpi, plt=plt) else: plt = pretty_plot(width=width, height=height, dpi=dpi, plt=plt) mask = plot_data['mask'] energies = plot_data['energies'][mask] fig = plt.gcf() lines = plot_data['lines'] spins = [Spin.up] if len(lines[0][0]['dens']) == 1 else \ [Spin.up, Spin.down] for i, line_set in enumerate(plot_data['lines']): if subplot: ax = fig.axes[i] else: ax = plt.gca() for line, spin in itertools.product(line_set, spins): if spin == Spin.up: label = line['label'] densities = line['dens'][spin][mask] elif spin == Spin.down: label = "" densities = -line['dens'][spin][mask] ax.fill_between(energies, densities, lw=0, facecolor=line['colour'], alpha=line['alpha']) ax.plot(energies, densities, label=label, color=line['colour']) ax.set_ylim(plot_data['ymin'], plot_data['ymax']) ax.set_xlim(xmin, xmax) ax.tick_params(axis='y', labelleft='off') ax.yaxis.set_minor_locator(AutoMinorLocator(2)) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) loc = 'upper right' if subplot else 'best' ncol = 1 if subplot else num_columns if legend_on: ax.legend(loc=loc, frameon=legend_frame_on, ncol=ncol) # no add axis labels and sort out ticks if subplot: ax.set_xlabel(xlabel) fig.subplots_adjust(hspace=0) plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False) if 'axes.labelcolor' in matplotlib.rcParams: ylabelcolor = matplotlib.rcParams['axes.labelcolor'] else: ylabelcolor = None fig.text(0.08, 0.5, ylabel, ha='left', color=ylabelcolor, va='center', rotation='vertical', transform=ax.transAxes) else: ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) return plt
[ "def", "get_plot", "(", "self", ",", "subplot", "=", "False", ",", "width", "=", "None", ",", "height", "=", "None", ",", "xmin", "=", "-", "6.", ",", "xmax", "=", "6.", ",", "yscale", "=", "1", ",", "colours", "=", "None", ",", "plot_total", "=", "True", ",", "legend_on", "=", "True", ",", "num_columns", "=", "2", ",", "legend_frame_on", "=", "False", ",", "legend_cutoff", "=", "3", ",", "xlabel", "=", "'Energy (eV)'", ",", "ylabel", "=", "'Arb. units'", ",", "zero_to_efermi", "=", "True", ",", "dpi", "=", "400", ",", "fonts", "=", "None", ",", "plt", "=", "None", ",", "style", "=", "None", ",", "no_base_style", "=", "False", ")", ":", "plot_data", "=", "self", ".", "dos_plot_data", "(", "yscale", "=", "yscale", ",", "xmin", "=", "xmin", ",", "xmax", "=", "xmax", ",", "colours", "=", "colours", ",", "plot_total", "=", "plot_total", ",", "legend_cutoff", "=", "legend_cutoff", ",", "subplot", "=", "subplot", ",", "zero_to_efermi", "=", "zero_to_efermi", ")", "if", "subplot", ":", "nplots", "=", "len", "(", "plot_data", "[", "'lines'", "]", ")", "plt", "=", "pretty_subplot", "(", "nplots", ",", "1", ",", "width", "=", "width", ",", "height", "=", "height", ",", "dpi", "=", "dpi", ",", "plt", "=", "plt", ")", "else", ":", "plt", "=", "pretty_plot", "(", "width", "=", "width", ",", "height", "=", "height", ",", "dpi", "=", "dpi", ",", "plt", "=", "plt", ")", "mask", "=", "plot_data", "[", "'mask'", "]", "energies", "=", "plot_data", "[", "'energies'", "]", "[", "mask", "]", "fig", "=", "plt", ".", "gcf", "(", ")", "lines", "=", "plot_data", "[", "'lines'", "]", "spins", "=", "[", "Spin", ".", "up", "]", "if", "len", "(", "lines", "[", "0", "]", "[", "0", "]", "[", "'dens'", "]", ")", "==", "1", "else", "[", "Spin", ".", "up", ",", "Spin", ".", "down", "]", "for", "i", ",", "line_set", "in", "enumerate", "(", "plot_data", "[", "'lines'", "]", ")", ":", "if", "subplot", ":", "ax", "=", "fig", ".", "axes", "[", "i", "]", "else", ":", "ax", "=", "plt", ".", "gca", "(", ")", "for", "line", ",", "spin", "in", "itertools", ".", "product", "(", "line_set", ",", "spins", ")", ":", "if", "spin", "==", "Spin", ".", "up", ":", "label", "=", "line", "[", "'label'", "]", "densities", "=", "line", "[", "'dens'", "]", "[", "spin", "]", "[", "mask", "]", "elif", "spin", "==", "Spin", ".", "down", ":", "label", "=", "\"\"", "densities", "=", "-", "line", "[", "'dens'", "]", "[", "spin", "]", "[", "mask", "]", "ax", ".", "fill_between", "(", "energies", ",", "densities", ",", "lw", "=", "0", ",", "facecolor", "=", "line", "[", "'colour'", "]", ",", "alpha", "=", "line", "[", "'alpha'", "]", ")", "ax", ".", "plot", "(", "energies", ",", "densities", ",", "label", "=", "label", ",", "color", "=", "line", "[", "'colour'", "]", ")", "ax", ".", "set_ylim", "(", "plot_data", "[", "'ymin'", "]", ",", "plot_data", "[", "'ymax'", "]", ")", "ax", ".", "set_xlim", "(", "xmin", ",", "xmax", ")", "ax", ".", "tick_params", "(", "axis", "=", "'y'", ",", "labelleft", "=", "'off'", ")", "ax", ".", "yaxis", ".", "set_minor_locator", "(", "AutoMinorLocator", "(", "2", ")", ")", "ax", ".", "xaxis", ".", "set_minor_locator", "(", "AutoMinorLocator", "(", "2", ")", ")", "loc", "=", "'upper right'", "if", "subplot", "else", "'best'", "ncol", "=", "1", "if", "subplot", "else", "num_columns", "if", "legend_on", ":", "ax", ".", "legend", "(", "loc", "=", "loc", ",", "frameon", "=", "legend_frame_on", ",", "ncol", "=", "ncol", ")", "# no add axis labels and sort out ticks", "if", "subplot", ":", "ax", ".", "set_xlabel", "(", "xlabel", 
")", "fig", ".", "subplots_adjust", "(", "hspace", "=", "0", ")", "plt", ".", "setp", "(", "[", "a", ".", "get_xticklabels", "(", ")", "for", "a", "in", "fig", ".", "axes", "[", ":", "-", "1", "]", "]", ",", "visible", "=", "False", ")", "if", "'axes.labelcolor'", "in", "matplotlib", ".", "rcParams", ":", "ylabelcolor", "=", "matplotlib", ".", "rcParams", "[", "'axes.labelcolor'", "]", "else", ":", "ylabelcolor", "=", "None", "fig", ".", "text", "(", "0.08", ",", "0.5", ",", "ylabel", ",", "ha", "=", "'left'", ",", "color", "=", "ylabelcolor", ",", "va", "=", "'center'", ",", "rotation", "=", "'vertical'", ",", "transform", "=", "ax", ".", "transAxes", ")", "else", ":", "ax", ".", "set_xlabel", "(", "xlabel", ")", "ax", ".", "set_ylabel", "(", "ylabel", ")", "return", "plt" ]
Get a :obj:`matplotlib.pyplot` object of the density of states. Args: subplot (:obj:`bool`, optional): Plot the density of states for each element on separate subplots. Defaults to ``False``. width (:obj:`float`, optional): The width of the plot. height (:obj:`float`, optional): The height of the plot. xmin (:obj:`float`, optional): The minimum energy on the x-axis. xmax (:obj:`float`, optional): The maximum energy on the x-axis. yscale (:obj:`float`, optional): Scaling factor for the y-axis. colours (:obj:`dict`, optional): Use custom colours for specific element and orbital combinations. Specified as a :obj:`dict` of :obj:`dict` of the colours. For example:: { 'Sn': {'s': 'r', 'p': 'b'}, 'O': {'s': '#000000'} } The colour can be a hex code, series of rgb values, or any other format supported by matplotlib. plot_total (:obj:`bool`, optional): Plot the total density of states. Defaults to ``True``. legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults to ``True``. num_columns (:obj:`int`, optional): The number of columns in the legend. legend_frame_on (:obj:`bool`, optional): Plot a frame around the graph legend. Defaults to ``False``. legend_cutoff (:obj:`float`, optional): The cut-off (in % of the maximum density of states within the plotting range) for an elemental orbital to be labelled in the legend. This prevents the legend from containing labels for orbitals that have very little contribution in the plotting range. xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy) ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS) zero_to_efermi (:obj:`bool`, optional): Normalise the plot such that the Fermi level is set as 0 eV. dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for the image. fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a single font, specified as a :obj:`str`, or several fonts, specified as a :obj:`list` of :obj:`str`. plt (:obj:`matplotlib.pyplot`, optional): A :obj:`matplotlib.pyplot` object to use for plotting. style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib style specifications, to be composed on top of Sumo base style. no_base_style (:obj:`bool`, optional): Prevent use of sumo base style. This can make alternative styles behave more predictably. Returns: :obj:`matplotlib.pyplot`: The density of states plot.
[ "Get", "a", ":", "obj", ":", "matplotlib", ".", "pyplot", "object", "of", "the", "density", "of", "states", "." ]
python
train
46.984733
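A hedged usage sketch for get_plot(). The plotter object is assumed to have been constructed elsewhere (the class constructor is not part of this record); only parameters documented above are passed.

```python
# Hypothetical call of the get_plot() method documented above. `dos_plotter`
# stands for an already-constructed plotter; how it is built (e.g. from a
# pymatgen Dos object) is outside this record.
plt = dos_plotter.get_plot(
    subplot=True,          # one panel per element
    xmin=-8.0, xmax=4.0,   # energy window in eV
    yscale=1,
    legend_cutoff=3,       # hide orbitals below 3% of the max DOS in range
    zero_to_efermi=True,
)
plt.savefig("dos.png", dpi=400, bbox_inches="tight")
```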
ktbyers/netmiko
netmiko/paloalto/paloalto_panos.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/paloalto/paloalto_panos.py#L141-L158
def strip_context_items(self, a_string): """Strip PaloAlto-specific output. PaloAlto will also put a configuration context: [edit] This method removes those lines. """ strings_to_strip = [r"\[edit.*\]"] response_list = a_string.split(self.RESPONSE_RETURN) last_line = response_list[-1] for pattern in strings_to_strip: if re.search(pattern, last_line): return self.RESPONSE_RETURN.join(response_list[:-1]) return a_string
[ "def", "strip_context_items", "(", "self", ",", "a_string", ")", ":", "strings_to_strip", "=", "[", "r\"\\[edit.*\\]\"", "]", "response_list", "=", "a_string", ".", "split", "(", "self", ".", "RESPONSE_RETURN", ")", "last_line", "=", "response_list", "[", "-", "1", "]", "for", "pattern", "in", "strings_to_strip", ":", "if", "re", ".", "search", "(", "pattern", ",", "last_line", ")", ":", "return", "self", ".", "RESPONSE_RETURN", ".", "join", "(", "response_list", "[", ":", "-", "1", "]", ")", "return", "a_string" ]
Strip PaloAlto-specific output. PaloAlto will also put a configuration context: [edit] This method removes those lines.
[ "Strip", "PaloAlto", "-", "specific", "output", "." ]
python
train
28.722222
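To make the stripping behaviour concrete, here is a standalone sketch of the same logic with an assumed RESPONSE_RETURN of "\n" (in netmiko this is an attribute of the connection class).

```python
import re

RESPONSE_RETURN = "\n"  # assumed value of the class attribute used above

def strip_context_items(a_string):
    # Drop a trailing PAN-OS configuration-context line such as "[edit]".
    strings_to_strip = [r"\[edit.*\]"]
    response_list = a_string.split(RESPONSE_RETURN)
    last_line = response_list[-1]
    for pattern in strings_to_strip:
        if re.search(pattern, last_line):
            return RESPONSE_RETURN.join(response_list[:-1])
    return a_string

output = "set deviceconfig system hostname fw01\n[edit]"
print(strip_context_items(output))  # -> "set deviceconfig system hostname fw01"
```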
codelv/enaml-native-cli
enamlnativecli/main.py
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L868-L881
def find_packages(path): """ Find all java files matching the "*Package.java" pattern within the given enaml package directory relative to the java source path. """ matches = [] root = join(path, 'src', 'main', 'java') for folder, dirnames, filenames in os.walk(root): for filename in fnmatch.filter(filenames, '*Package.java'): #: Open and make sure it's an EnamlPackage somewhere with open(join(folder, filename)) as f: if "implements EnamlPackage" in f.read(): package = os.path.relpath(folder, root) matches.append(os.path.join(package, filename)) return matches
[ "def", "find_packages", "(", "path", ")", ":", "matches", "=", "[", "]", "root", "=", "join", "(", "path", ",", "'src'", ",", "'main'", ",", "'java'", ")", "for", "folder", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "root", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "'*Package.java'", ")", ":", "#: Open and make sure it's an EnamlPackage somewhere", "with", "open", "(", "join", "(", "folder", ",", "filename", ")", ")", "as", "f", ":", "if", "\"implements EnamlPackage\"", "in", "f", ".", "read", "(", ")", ":", "package", "=", "os", ".", "path", ".", "relpath", "(", "folder", ",", "root", ")", "matches", ".", "append", "(", "os", ".", "path", ".", "join", "(", "package", ",", "filename", ")", ")", "return", "matches" ]
Find all java files matching the "*Package.java" pattern within the given enaml package directory relative to the java source path.
[ "Find", "all", "java", "files", "matching", "the", "*", "Package", ".", "java", "pattern", "within", "the", "given", "enaml", "package", "directory", "relative", "to", "the", "java", "source", "path", "." ]
python
train
51.5
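A hypothetical invocation of find_packages() from the record above; the directory layout is made up for illustration, and the function is assumed to be importable from the CLI module named in the record's path.

```python
# Hypothetical call of find_packages(); the checkout path below is an assumption.
from enamlnativecli.main import find_packages  # assumed import, per the record's path

android_dir = "venv/android/enaml-native-maps"  # assumed package checkout
for pkg in find_packages(android_dir):
    # Each entry is a *Package.java path relative to src/main/java whose source
    # contains "implements EnamlPackage".
    print(pkg)
```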
tensorflow/cleverhans
cleverhans/train.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/train.py#L38-L274
def train(sess, loss, x_train, y_train, init_all=False, evaluate=None, feed=None, args=None, rng=None, var_list=None, fprop_args=None, optimizer=None, devices=None, x_batch_preprocessor=None, use_ema=False, ema_decay=.998, run_canary=None, loss_threshold=1e5, dataset_train=None, dataset_size=None): """ Run (optionally multi-replica, synchronous) training to minimize `loss` :param sess: TF session to use when training the graph :param loss: tensor, the loss to minimize :param x_train: numpy array with training inputs or tf Dataset :param y_train: numpy array with training outputs or tf Dataset :param init_all: (boolean) If set to true, all TF variables in the session are (re)initialized, otherwise only previously uninitialized variables are initialized before training. :param evaluate: function that is run after each training iteration (typically to display the test/validation accuracy). :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Should contain `nb_epochs`, `learning_rate`, `batch_size` :param rng: Instance of numpy.random.RandomState :param var_list: Optional list of parameters to train. :param fprop_args: dict, extra arguments to pass to fprop (loss and model). :param optimizer: Optimizer to be used for training :param devices: list of device names to use for training If None, defaults to: all GPUs, if GPUs are available all devices, if no GPUs are available :param x_batch_preprocessor: callable Takes a single tensor containing an x_train batch as input Returns a single tensor containing an x_train batch as output Called to preprocess the data before passing the data to the Loss :param use_ema: bool If true, uses an exponential moving average of the model parameters :param ema_decay: float or callable The decay parameter for EMA, if EMA is used If a callable rather than a float, this is a callable that takes the epoch and batch as arguments and returns the ema_decay for the current batch. :param loss_threshold: float Raise an exception if the loss exceeds this value. This is intended to rapidly detect numerical problems. Sometimes the loss may legitimately be higher than this value. In such cases, raise the value. If needed it can be np.inf. :param dataset_train: tf Dataset instance. Used as a replacement for x_train, y_train for faster performance. :param dataset_size: integer, the size of the dataset_train. :return: True if model trained """ # Check whether the hardware is working correctly canary.run_canary() if run_canary is not None: warnings.warn("The `run_canary` argument is deprecated. The canary " "is now much cheaper and thus runs all the time. The " "canary now uses its own loss function so it is not " "necessary to turn off the canary when training with " " a stochastic loss. Simply quit passing `run_canary`." 
"Passing `run_canary` may become an error on or after " "2019-10-16.") args = _ArgsWrapper(args or {}) fprop_args = fprop_args or {} # Check that necessary arguments were given (see doc above) # Be sure to support 0 epochs for debugging purposes if args.nb_epochs is None: raise ValueError("`args` must specify number of epochs") if optimizer is None: if args.learning_rate is None: raise ValueError("Learning rate was not given in args dict") assert args.batch_size, "Batch size was not given in args dict" if rng is None: rng = np.random.RandomState() if optimizer is None: optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) else: if not isinstance(optimizer, tf.train.Optimizer): raise ValueError("optimizer object must be from a child class of " "tf.train.Optimizer") grads = [] xs = [] preprocessed_xs = [] ys = [] if dataset_train is not None: assert x_train is None and y_train is None and x_batch_preprocessor is None if dataset_size is None: raise ValueError("You must provide a dataset size") data_iterator = dataset_train.make_one_shot_iterator().get_next() x_train, y_train = sess.run(data_iterator) devices = infer_devices(devices) for device in devices: with tf.device(device): x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:]) y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:]) xs.append(x) ys.append(y) if x_batch_preprocessor is not None: x = x_batch_preprocessor(x) # We need to keep track of these so that the canary can feed # preprocessed values. If the canary had to feed raw values, # stochastic preprocessing could make the canary fail. preprocessed_xs.append(x) loss_value = loss.fprop(x, y, **fprop_args) grads.append(optimizer.compute_gradients( loss_value, var_list=var_list)) num_devices = len(devices) print("num_devices: ", num_devices) grad = avg_grads(grads) # Trigger update operations within the default graph (such as batch_norm). 
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): train_step = optimizer.apply_gradients(grad) epoch_tf = tf.placeholder(tf.int32, []) batch_tf = tf.placeholder(tf.int32, []) if use_ema: if callable(ema_decay): ema_decay = ema_decay(epoch_tf, batch_tf) ema = tf.train.ExponentialMovingAverage(decay=ema_decay) with tf.control_dependencies([train_step]): train_step = ema.apply(var_list) # Get pointers to the EMA's running average variables avg_params = [ema.average(param) for param in var_list] # Make temporary buffers used for swapping the live and running average # parameters tmp_params = [tf.Variable(param, trainable=False) for param in var_list] # Define the swapping operation param_to_tmp = [tf.assign(tmp, param) for tmp, param in safe_zip(tmp_params, var_list)] with tf.control_dependencies(param_to_tmp): avg_to_param = [tf.assign(param, avg) for param, avg in safe_zip(var_list, avg_params)] with tf.control_dependencies(avg_to_param): tmp_to_avg = [tf.assign(avg, tmp) for avg, tmp in safe_zip(avg_params, tmp_params)] swap = tmp_to_avg batch_size = args.batch_size assert batch_size % num_devices == 0 device_batch_size = batch_size // num_devices if init_all: sess.run(tf.global_variables_initializer()) else: initialize_uninitialized_global_variables(sess) for epoch in xrange(args.nb_epochs): if dataset_train is not None: nb_batches = int(math.ceil(float(dataset_size) / batch_size)) else: # Indices to shuffle training set index_shuf = list(range(len(x_train))) # Randomly repeat a few training examples each epoch to avoid # having a too-small batch while len(index_shuf) % batch_size != 0: index_shuf.append(rng.randint(len(x_train))) nb_batches = len(index_shuf) // batch_size rng.shuffle(index_shuf) # Shuffling here versus inside the loop doesn't seem to affect # timing very much, but shuffling here makes the code slightly # easier to read x_train_shuffled = x_train[index_shuf] y_train_shuffled = y_train[index_shuf] prev = time.time() for batch in range(nb_batches): if dataset_train is not None: x_train_shuffled, y_train_shuffled = sess.run(data_iterator) start, end = 0, batch_size else: # Compute batch start and end indices start = batch * batch_size end = (batch + 1) * batch_size # Perform one training step diff = end - start assert diff == batch_size feed_dict = {epoch_tf: epoch, batch_tf: batch} for dev_idx in xrange(num_devices): cur_start = start + dev_idx * device_batch_size cur_end = start + (dev_idx + 1) * device_batch_size feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end] feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end] if cur_end != end and dataset_train is None: msg = ("batch_size (%d) must be a multiple of num_devices " "(%d).\nCUDA_VISIBLE_DEVICES: %s" "\ndevices: %s") args = (batch_size, num_devices, os.environ['CUDA_VISIBLE_DEVICES'], str(devices)) raise ValueError(msg % args) if feed is not None: feed_dict.update(feed) _, loss_numpy = sess.run( [train_step, loss_value], feed_dict=feed_dict) if np.abs(loss_numpy) > loss_threshold: raise ValueError("Extreme loss during training: ", loss_numpy) if np.isnan(loss_numpy) or np.isinf(loss_numpy): raise ValueError("NaN/Inf loss during training") assert (dataset_train is not None or end == len(index_shuf)) # Check that all examples were used cur = time.time() _logger.info("Epoch " + str(epoch) + " took " + str(cur - prev) + " seconds") if evaluate is not None: if use_ema: # Before running evaluation, load the running average # parameters into the live slot, so we can see how well # the 
EMA parameters are performing sess.run(swap) evaluate() if use_ema: # Swap the parameters back, so that we continue training # on the live parameters sess.run(swap) if use_ema: # When training is done, swap the running average parameters into # the live slot, so that we use them when we deploy the model sess.run(swap) return True
[ "def", "train", "(", "sess", ",", "loss", ",", "x_train", ",", "y_train", ",", "init_all", "=", "False", ",", "evaluate", "=", "None", ",", "feed", "=", "None", ",", "args", "=", "None", ",", "rng", "=", "None", ",", "var_list", "=", "None", ",", "fprop_args", "=", "None", ",", "optimizer", "=", "None", ",", "devices", "=", "None", ",", "x_batch_preprocessor", "=", "None", ",", "use_ema", "=", "False", ",", "ema_decay", "=", ".998", ",", "run_canary", "=", "None", ",", "loss_threshold", "=", "1e5", ",", "dataset_train", "=", "None", ",", "dataset_size", "=", "None", ")", ":", "# Check whether the hardware is working correctly", "canary", ".", "run_canary", "(", ")", "if", "run_canary", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"The `run_canary` argument is deprecated. The canary \"", "\"is now much cheaper and thus runs all the time. The \"", "\"canary now uses its own loss function so it is not \"", "\"necessary to turn off the canary when training with \"", "\" a stochastic loss. Simply quit passing `run_canary`.\"", "\"Passing `run_canary` may become an error on or after \"", "\"2019-10-16.\"", ")", "args", "=", "_ArgsWrapper", "(", "args", "or", "{", "}", ")", "fprop_args", "=", "fprop_args", "or", "{", "}", "# Check that necessary arguments were given (see doc above)", "# Be sure to support 0 epochs for debugging purposes", "if", "args", ".", "nb_epochs", "is", "None", ":", "raise", "ValueError", "(", "\"`args` must specify number of epochs\"", ")", "if", "optimizer", "is", "None", ":", "if", "args", ".", "learning_rate", "is", "None", ":", "raise", "ValueError", "(", "\"Learning rate was not given in args dict\"", ")", "assert", "args", ".", "batch_size", ",", "\"Batch size was not given in args dict\"", "if", "rng", "is", "None", ":", "rng", "=", "np", ".", "random", ".", "RandomState", "(", ")", "if", "optimizer", "is", "None", ":", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "(", "learning_rate", "=", "args", ".", "learning_rate", ")", "else", ":", "if", "not", "isinstance", "(", "optimizer", ",", "tf", ".", "train", ".", "Optimizer", ")", ":", "raise", "ValueError", "(", "\"optimizer object must be from a child class of \"", "\"tf.train.Optimizer\"", ")", "grads", "=", "[", "]", "xs", "=", "[", "]", "preprocessed_xs", "=", "[", "]", "ys", "=", "[", "]", "if", "dataset_train", "is", "not", "None", ":", "assert", "x_train", "is", "None", "and", "y_train", "is", "None", "and", "x_batch_preprocessor", "is", "None", "if", "dataset_size", "is", "None", ":", "raise", "ValueError", "(", "\"You must provide a dataset size\"", ")", "data_iterator", "=", "dataset_train", ".", "make_one_shot_iterator", "(", ")", ".", "get_next", "(", ")", "x_train", ",", "y_train", "=", "sess", ".", "run", "(", "data_iterator", ")", "devices", "=", "infer_devices", "(", "devices", ")", "for", "device", "in", "devices", ":", "with", "tf", ".", "device", "(", "device", ")", ":", "x", "=", "tf", ".", "placeholder", "(", "x_train", ".", "dtype", ",", "(", "None", ",", ")", "+", "x_train", ".", "shape", "[", "1", ":", "]", ")", "y", "=", "tf", ".", "placeholder", "(", "y_train", ".", "dtype", ",", "(", "None", ",", ")", "+", "y_train", ".", "shape", "[", "1", ":", "]", ")", "xs", ".", "append", "(", "x", ")", "ys", ".", "append", "(", "y", ")", "if", "x_batch_preprocessor", "is", "not", "None", ":", "x", "=", "x_batch_preprocessor", "(", "x", ")", "# We need to keep track of these so that the canary can feed", "# preprocessed values. 
If the canary had to feed raw values,", "# stochastic preprocessing could make the canary fail.", "preprocessed_xs", ".", "append", "(", "x", ")", "loss_value", "=", "loss", ".", "fprop", "(", "x", ",", "y", ",", "*", "*", "fprop_args", ")", "grads", ".", "append", "(", "optimizer", ".", "compute_gradients", "(", "loss_value", ",", "var_list", "=", "var_list", ")", ")", "num_devices", "=", "len", "(", "devices", ")", "print", "(", "\"num_devices: \"", ",", "num_devices", ")", "grad", "=", "avg_grads", "(", "grads", ")", "# Trigger update operations within the default graph (such as batch_norm).", "with", "tf", ".", "control_dependencies", "(", "tf", ".", "get_collection", "(", "tf", ".", "GraphKeys", ".", "UPDATE_OPS", ")", ")", ":", "train_step", "=", "optimizer", ".", "apply_gradients", "(", "grad", ")", "epoch_tf", "=", "tf", ".", "placeholder", "(", "tf", ".", "int32", ",", "[", "]", ")", "batch_tf", "=", "tf", ".", "placeholder", "(", "tf", ".", "int32", ",", "[", "]", ")", "if", "use_ema", ":", "if", "callable", "(", "ema_decay", ")", ":", "ema_decay", "=", "ema_decay", "(", "epoch_tf", ",", "batch_tf", ")", "ema", "=", "tf", ".", "train", ".", "ExponentialMovingAverage", "(", "decay", "=", "ema_decay", ")", "with", "tf", ".", "control_dependencies", "(", "[", "train_step", "]", ")", ":", "train_step", "=", "ema", ".", "apply", "(", "var_list", ")", "# Get pointers to the EMA's running average variables", "avg_params", "=", "[", "ema", ".", "average", "(", "param", ")", "for", "param", "in", "var_list", "]", "# Make temporary buffers used for swapping the live and running average", "# parameters", "tmp_params", "=", "[", "tf", ".", "Variable", "(", "param", ",", "trainable", "=", "False", ")", "for", "param", "in", "var_list", "]", "# Define the swapping operation", "param_to_tmp", "=", "[", "tf", ".", "assign", "(", "tmp", ",", "param", ")", "for", "tmp", ",", "param", "in", "safe_zip", "(", "tmp_params", ",", "var_list", ")", "]", "with", "tf", ".", "control_dependencies", "(", "param_to_tmp", ")", ":", "avg_to_param", "=", "[", "tf", ".", "assign", "(", "param", ",", "avg", ")", "for", "param", ",", "avg", "in", "safe_zip", "(", "var_list", ",", "avg_params", ")", "]", "with", "tf", ".", "control_dependencies", "(", "avg_to_param", ")", ":", "tmp_to_avg", "=", "[", "tf", ".", "assign", "(", "avg", ",", "tmp", ")", "for", "avg", ",", "tmp", "in", "safe_zip", "(", "avg_params", ",", "tmp_params", ")", "]", "swap", "=", "tmp_to_avg", "batch_size", "=", "args", ".", "batch_size", "assert", "batch_size", "%", "num_devices", "==", "0", "device_batch_size", "=", "batch_size", "//", "num_devices", "if", "init_all", ":", "sess", ".", "run", "(", "tf", ".", "global_variables_initializer", "(", ")", ")", "else", ":", "initialize_uninitialized_global_variables", "(", "sess", ")", "for", "epoch", "in", "xrange", "(", "args", ".", "nb_epochs", ")", ":", "if", "dataset_train", "is", "not", "None", ":", "nb_batches", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "dataset_size", ")", "/", "batch_size", ")", ")", "else", ":", "# Indices to shuffle training set", "index_shuf", "=", "list", "(", "range", "(", "len", "(", "x_train", ")", ")", ")", "# Randomly repeat a few training examples each epoch to avoid", "# having a too-small batch", "while", "len", "(", "index_shuf", ")", "%", "batch_size", "!=", "0", ":", "index_shuf", ".", "append", "(", "rng", ".", "randint", "(", "len", "(", "x_train", ")", ")", ")", "nb_batches", "=", "len", "(", "index_shuf", ")", "//", "batch_size", "rng", 
".", "shuffle", "(", "index_shuf", ")", "# Shuffling here versus inside the loop doesn't seem to affect", "# timing very much, but shuffling here makes the code slightly", "# easier to read", "x_train_shuffled", "=", "x_train", "[", "index_shuf", "]", "y_train_shuffled", "=", "y_train", "[", "index_shuf", "]", "prev", "=", "time", ".", "time", "(", ")", "for", "batch", "in", "range", "(", "nb_batches", ")", ":", "if", "dataset_train", "is", "not", "None", ":", "x_train_shuffled", ",", "y_train_shuffled", "=", "sess", ".", "run", "(", "data_iterator", ")", "start", ",", "end", "=", "0", ",", "batch_size", "else", ":", "# Compute batch start and end indices", "start", "=", "batch", "*", "batch_size", "end", "=", "(", "batch", "+", "1", ")", "*", "batch_size", "# Perform one training step", "diff", "=", "end", "-", "start", "assert", "diff", "==", "batch_size", "feed_dict", "=", "{", "epoch_tf", ":", "epoch", ",", "batch_tf", ":", "batch", "}", "for", "dev_idx", "in", "xrange", "(", "num_devices", ")", ":", "cur_start", "=", "start", "+", "dev_idx", "*", "device_batch_size", "cur_end", "=", "start", "+", "(", "dev_idx", "+", "1", ")", "*", "device_batch_size", "feed_dict", "[", "xs", "[", "dev_idx", "]", "]", "=", "x_train_shuffled", "[", "cur_start", ":", "cur_end", "]", "feed_dict", "[", "ys", "[", "dev_idx", "]", "]", "=", "y_train_shuffled", "[", "cur_start", ":", "cur_end", "]", "if", "cur_end", "!=", "end", "and", "dataset_train", "is", "None", ":", "msg", "=", "(", "\"batch_size (%d) must be a multiple of num_devices \"", "\"(%d).\\nCUDA_VISIBLE_DEVICES: %s\"", "\"\\ndevices: %s\"", ")", "args", "=", "(", "batch_size", ",", "num_devices", ",", "os", ".", "environ", "[", "'CUDA_VISIBLE_DEVICES'", "]", ",", "str", "(", "devices", ")", ")", "raise", "ValueError", "(", "msg", "%", "args", ")", "if", "feed", "is", "not", "None", ":", "feed_dict", ".", "update", "(", "feed", ")", "_", ",", "loss_numpy", "=", "sess", ".", "run", "(", "[", "train_step", ",", "loss_value", "]", ",", "feed_dict", "=", "feed_dict", ")", "if", "np", ".", "abs", "(", "loss_numpy", ")", ">", "loss_threshold", ":", "raise", "ValueError", "(", "\"Extreme loss during training: \"", ",", "loss_numpy", ")", "if", "np", ".", "isnan", "(", "loss_numpy", ")", "or", "np", ".", "isinf", "(", "loss_numpy", ")", ":", "raise", "ValueError", "(", "\"NaN/Inf loss during training\"", ")", "assert", "(", "dataset_train", "is", "not", "None", "or", "end", "==", "len", "(", "index_shuf", ")", ")", "# Check that all examples were used", "cur", "=", "time", ".", "time", "(", ")", "_logger", ".", "info", "(", "\"Epoch \"", "+", "str", "(", "epoch", ")", "+", "\" took \"", "+", "str", "(", "cur", "-", "prev", ")", "+", "\" seconds\"", ")", "if", "evaluate", "is", "not", "None", ":", "if", "use_ema", ":", "# Before running evaluation, load the running average", "# parameters into the live slot, so we can see how well", "# the EMA parameters are performing", "sess", ".", "run", "(", "swap", ")", "evaluate", "(", ")", "if", "use_ema", ":", "# Swap the parameters back, so that we continue training", "# on the live parameters", "sess", ".", "run", "(", "swap", ")", "if", "use_ema", ":", "# When training is done, swap the running average parameters into", "# the live slot, so that we use them when we deploy the model", "sess", ".", "run", "(", "swap", ")", "return", "True" ]
Run (optionally multi-replica, synchronous) training to minimize `loss` :param sess: TF session to use when training the graph :param loss: tensor, the loss to minimize :param x_train: numpy array with training inputs or tf Dataset :param y_train: numpy array with training outputs or tf Dataset :param init_all: (boolean) If set to true, all TF variables in the session are (re)initialized, otherwise only previously uninitialized variables are initialized before training. :param evaluate: function that is run after each training iteration (typically to display the test/validation accuracy). :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Should contain `nb_epochs`, `learning_rate`, `batch_size` :param rng: Instance of numpy.random.RandomState :param var_list: Optional list of parameters to train. :param fprop_args: dict, extra arguments to pass to fprop (loss and model). :param optimizer: Optimizer to be used for training :param devices: list of device names to use for training If None, defaults to: all GPUs, if GPUs are available all devices, if no GPUs are available :param x_batch_preprocessor: callable Takes a single tensor containing an x_train batch as input Returns a single tensor containing an x_train batch as output Called to preprocess the data before passing the data to the Loss :param use_ema: bool If true, uses an exponential moving average of the model parameters :param ema_decay: float or callable The decay parameter for EMA, if EMA is used If a callable rather than a float, this is a callable that takes the epoch and batch as arguments and returns the ema_decay for the current batch. :param loss_threshold: float Raise an exception if the loss exceeds this value. This is intended to rapidly detect numerical problems. Sometimes the loss may legitimately be higher than this value. In such cases, raise the value. If needed it can be np.inf. :param dataset_train: tf Dataset instance. Used as a replacement for x_train, y_train for faster performance. :param dataset_size: integer, the size of the dataset_train. :return: True if model trained
[ "Run", "(", "optionally", "multi", "-", "replica", "synchronous", ")", "training", "to", "minimize", "loss", ":", "param", "sess", ":", "TF", "session", "to", "use", "when", "training", "the", "graph", ":", "param", "loss", ":", "tensor", "the", "loss", "to", "minimize", ":", "param", "x_train", ":", "numpy", "array", "with", "training", "inputs", "or", "tf", "Dataset", ":", "param", "y_train", ":", "numpy", "array", "with", "training", "outputs", "or", "tf", "Dataset", ":", "param", "init_all", ":", "(", "boolean", ")", "If", "set", "to", "true", "all", "TF", "variables", "in", "the", "session", "are", "(", "re", ")", "initialized", "otherwise", "only", "previously", "uninitialized", "variables", "are", "initialized", "before", "training", ".", ":", "param", "evaluate", ":", "function", "that", "is", "run", "after", "each", "training", "iteration", "(", "typically", "to", "display", "the", "test", "/", "validation", "accuracy", ")", ".", ":", "param", "feed", ":", "An", "optional", "dictionary", "that", "is", "appended", "to", "the", "feeding", "dictionary", "before", "the", "session", "runs", ".", "Can", "be", "used", "to", "feed", "the", "learning", "phase", "of", "a", "Keras", "model", "for", "instance", ".", ":", "param", "args", ":", "dict", "or", "argparse", "Namespace", "object", ".", "Should", "contain", "nb_epochs", "learning_rate", "batch_size", ":", "param", "rng", ":", "Instance", "of", "numpy", ".", "random", ".", "RandomState", ":", "param", "var_list", ":", "Optional", "list", "of", "parameters", "to", "train", ".", ":", "param", "fprop_args", ":", "dict", "extra", "arguments", "to", "pass", "to", "fprop", "(", "loss", "and", "model", ")", ".", ":", "param", "optimizer", ":", "Optimizer", "to", "be", "used", "for", "training", ":", "param", "devices", ":", "list", "of", "device", "names", "to", "use", "for", "training", "If", "None", "defaults", "to", ":", "all", "GPUs", "if", "GPUs", "are", "available", "all", "devices", "if", "no", "GPUs", "are", "available", ":", "param", "x_batch_preprocessor", ":", "callable", "Takes", "a", "single", "tensor", "containing", "an", "x_train", "batch", "as", "input", "Returns", "a", "single", "tensor", "containing", "an", "x_train", "batch", "as", "output", "Called", "to", "preprocess", "the", "data", "before", "passing", "the", "data", "to", "the", "Loss", ":", "param", "use_ema", ":", "bool", "If", "true", "uses", "an", "exponential", "moving", "average", "of", "the", "model", "parameters", ":", "param", "ema_decay", ":", "float", "or", "callable", "The", "decay", "parameter", "for", "EMA", "if", "EMA", "is", "used", "If", "a", "callable", "rather", "than", "a", "float", "this", "is", "a", "callable", "that", "takes", "the", "epoch", "and", "batch", "as", "arguments", "and", "returns", "the", "ema_decay", "for", "the", "current", "batch", ".", ":", "param", "loss_threshold", ":", "float", "Raise", "an", "exception", "if", "the", "loss", "exceeds", "this", "value", ".", "This", "is", "intended", "to", "rapidly", "detect", "numerical", "problems", ".", "Sometimes", "the", "loss", "may", "legitimately", "be", "higher", "than", "this", "value", ".", "In", "such", "cases", "raise", "the", "value", ".", "If", "needed", "it", "can", "be", "np", ".", "inf", ".", ":", "param", "dataset_train", ":", "tf", "Dataset", "instance", ".", "Used", "as", "a", "replacement", "for", "x_train", "y_train", "for", "faster", "performance", ".", ":", "param", "dataset_size", ":", "integer", "the", "size", "of", "the", "dataset_train", ".", ":", "return", ":", 
"True", "if", "model", "trained" ]
python
train
41.696203
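A condensed driver sketch for train(). The model and CrossEntropy loss below reflect typical CleverHans usage and are assumptions (this record only defines train() itself); the dummy data is random and only documented arguments are passed.

```python
# Hypothetical training driver; ModelBasicCNN / CrossEntropy are assumed imports
# based on common CleverHans tutorials, not on this record.
import numpy as np
import tensorflow as tf
from cleverhans.loss import CrossEntropy                  # assumed import
from cleverhans.model_zoo.basic_cnn import ModelBasicCNN  # assumed import

sess = tf.Session()
model = ModelBasicCNN('model1', nb_classes=10, nb_filters=64)
loss = CrossEntropy(model, smoothing=0.1)

# Random stand-in data shaped like MNIST, purely so the sketch runs end to end.
x_train = np.random.rand(256, 28, 28, 1).astype('float32')
y_train = np.eye(10)[np.random.randint(0, 10, 256)].astype('float32')

train_params = {'nb_epochs': 1, 'batch_size': 128, 'learning_rate': 1e-3}
train(sess, loss, x_train, y_train,
      args=train_params,
      rng=np.random.RandomState(42),
      evaluate=lambda: None)  # plug in a real accuracy callback here
```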
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L5080-L5103
def export(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True): """Exports the DataFrame to a file written with arrow :param DataFrameLocal df: DataFrame to export :param str path: path for file :param lis[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian (not supported for fits) :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param: bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return: """ if path.endswith('.arrow'): self.export_arrow(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) elif path.endswith('.hdf5'): self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) elif path.endswith('.fits'): self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) if path.endswith('.parquet'): self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
[ "def", "export", "(", "self", ",", "path", ",", "column_names", "=", "None", ",", "byteorder", "=", "\"=\"", ",", "shuffle", "=", "False", ",", "selection", "=", "False", ",", "progress", "=", "None", ",", "virtual", "=", "False", ",", "sort", "=", "None", ",", "ascending", "=", "True", ")", ":", "if", "path", ".", "endswith", "(", "'.arrow'", ")", ":", "self", ".", "export_arrow", "(", "path", ",", "column_names", ",", "byteorder", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")", "elif", "path", ".", "endswith", "(", "'.hdf5'", ")", ":", "self", ".", "export_hdf5", "(", "path", ",", "column_names", ",", "byteorder", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")", "elif", "path", ".", "endswith", "(", "'.fits'", ")", ":", "self", ".", "export_fits", "(", "path", ",", "column_names", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")", "if", "path", ".", "endswith", "(", "'.parquet'", ")", ":", "self", ".", "export_parquet", "(", "path", ",", "column_names", ",", "shuffle", ",", "selection", ",", "progress", "=", "progress", ",", "virtual", "=", "virtual", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")" ]
Exports the DataFrame to a file; the output format (arrow, hdf5, fits or parquet) is chosen from the file extension :param DataFrameLocal df: DataFrame to export :param str path: path for file :param list[str] column_names: list of column names to export or None for all columns :param str byteorder: = for native, < for little endian and > for big endian (not supported for fits) :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param bool virtual: When True, export virtual columns :param str sort: expression used for sorting the output :param bool ascending: sort ascending (True) or descending :return:
[ "Exports", "the", "DataFrame", "to", "a", "file", "written", "with", "arrow" ]
python
test
71.75
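A short usage sketch for export(). The sample DataFrame comes from vaex's bundled example dataset (an assumption about the environment); the arguments are those documented above, with the output format picked from each file extension.

```python
# Hypothetical use of DataFrame.export(); vaex.example() is assumed to be
# available, everything else follows the documented parameters.
import vaex

df = vaex.example()                                  # any DataFrame works here
df.export("out.hdf5", shuffle=True, progress=True)   # .hdf5 -> export_hdf5
df.export("out.arrow", sort="x", ascending=False)    # .arrow -> export_arrow
```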
cdriehuys/django-rest-email-auth
rest_email_auth/serializers.py
https://github.com/cdriehuys/django-rest-email-auth/blob/7e752c4d77ae02d2d046f214f56e743aa12ab23f/rest_email_auth/serializers.py#L241-L262
def save(self): """ Send out a password reset if the provided data is valid. If the provided email address exists and is verified, a reset email is sent to the address. Returns: The password reset token if one was created and ``None`` otherwise. """ try: email = models.EmailAddress.objects.get( email=self.validated_data["email"], is_verified=True ) except models.EmailAddress.DoesNotExist: return None token = models.PasswordResetToken.objects.create(email=email) token.send() return token
[ "def", "save", "(", "self", ")", ":", "try", ":", "email", "=", "models", ".", "EmailAddress", ".", "objects", ".", "get", "(", "email", "=", "self", ".", "validated_data", "[", "\"email\"", "]", ",", "is_verified", "=", "True", ")", "except", "models", ".", "EmailAddress", ".", "DoesNotExist", ":", "return", "None", "token", "=", "models", ".", "PasswordResetToken", ".", "objects", ".", "create", "(", "email", "=", "email", ")", "token", ".", "send", "(", ")", "return", "token" ]
Send out a password reset if the provided data is valid. If the provided email address exists and is verified, a reset email is sent to the address. Returns: The password reset token if one was created and ``None`` otherwise.
[ "Send", "out", "a", "password", "reset", "if", "the", "provided", "data", "is", "valid", "." ]
python
valid
29
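A view-level usage sketch for the save() method above. The serializer class name and import are assumptions (the record shows only the method body); the is_valid()/save() flow is standard Django REST Framework usage.

```python
# Hypothetical caller of the save() method shown above; the serializer class
# name is an assumption for illustration.
from rest_email_auth import serializers

serializer = serializers.PasswordResetRequestSerializer(  # assumed class name
    data={"email": "user@example.com"}
)
if serializer.is_valid():
    token = serializer.save()
    # token is None when the address is unknown or unverified, so the response
    # can stay identical either way and avoid leaking which emails exist.
```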
intelsdi-x/snap-plugin-lib-py
examples/processor/tag.py
https://github.com/intelsdi-x/snap-plugin-lib-py/blob/8da5d00ac5f9d2b48a7239563ac7788209891ca4/examples/processor/tag.py#L34-L55
def process(self, metrics, config): """Processes metrics. This method is called by the Snap daemon during the process phase of the execution of a Snap workflow. Examples of processing metrics include applying filtering, max, min, average functions as well as adding additional context to the metrics to name just a few. In this example we are adding an 'instance-id' tag to every metric. Args: metrics (obj:`list` of `snap_plugin.v1.Metric`): List of metrics to be processed. Returns: :obj:`list` of `snap_plugin.v1.Metric`: List of processed metrics. """ LOG.debug("Process called") for metric in metrics: metric.tags["instance-id"] = config["instance-id"] return metrics
[ "def", "process", "(", "self", ",", "metrics", ",", "config", ")", ":", "LOG", ".", "debug", "(", "\"Process called\"", ")", "for", "metric", "in", "metrics", ":", "metric", ".", "tags", "[", "\"instance-id\"", "]", "=", "config", "[", "\"instance-id\"", "]", "return", "metrics" ]
Processes metrics. This method is called by the Snap daemon during the process phase of the execution of a Snap workflow. Examples of processing metrics include applying filtering, max, min, average functions as well as adding additional context to the metrics to name just a few. In this example we are adding an 'instance-id' tag to every metric. Args: metrics (obj:`list` of `snap_plugin.v1.Metric`): List of metrics to be processed. Returns: :obj:`list` of `snap_plugin.v1.Metric`: List of processed metrics.
[ "Processes", "metrics", "." ]
python
train
37.363636
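A standalone sketch of the tagging loop performed by process(). The stub class below stands in for snap_plugin.v1.Metric and is an assumption; the per-metric behaviour mirrors the record above.

```python
# Stand-alone illustration of the per-metric tagging done in process().
# _StubMetric replaces snap_plugin.v1.Metric purely for demonstration.
class _StubMetric:
    def __init__(self, namespace):
        self.namespace = namespace
        self.tags = {}

def tag_metrics(metrics, config):
    # Mirror of the documented behaviour: copy the configured instance-id
    # onto every metric passing through the processor.
    for metric in metrics:
        metric.tags["instance-id"] = config["instance-id"]
    return metrics

metrics = [_StubMetric("/intel/mock/foo"), _StubMetric("/intel/mock/bar")]
tagged = tag_metrics(metrics, {"instance-id": "i-0123456789abcdef0"})
print([m.tags for m in tagged])  # every metric now carries the instance-id tag
```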
CI-WATER/gsshapy
gsshapy/orm/wms_dataset.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L236-L306
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'): """ Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images. """ # Prepare rasters timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters) # Make sure the raster field is valid converter = RasterConverter(sqlAlchemyEngineOrSession=session) # Configure color ramp if isinstance(colorRamp, dict): converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints']) else: converter.setDefaultColorRamp(colorRamp) if documentName is None: documentName = self.fileExtension kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName, timeStampedRasters=timeStampedRasters, rasterIdFieldName='id', rasterFieldName='raster', documentName=documentName, alpha=alpha, drawOrder=drawOrder, cellSize=cellSize, noDataValue=noDataValue, resampleMethod=resampleMethod) if path: directory = os.path.dirname(path) archiveName = (os.path.split(path)[1]).split('.')[0] kmzPath = os.path.join(directory, (archiveName + '.kmz')) with ZipFile(kmzPath, 'w') as kmz: kmz.writestr(archiveName + '.kml', kmlString) for index, binaryPngString in enumerate(binaryPngStrings): kmz.writestr('raster{0}.png'.format(index), binaryPngString) return kmlString, binaryPngStrings
[ "def", "getAsKmlPngAnimation", "(", "self", ",", "session", ",", "projectFile", "=", "None", ",", "path", "=", "None", ",", "documentName", "=", "None", ",", "colorRamp", "=", "None", ",", "alpha", "=", "1.0", ",", "noDataValue", "=", "0", ",", "drawOrder", "=", "0", ",", "cellSize", "=", "None", ",", "resampleMethod", "=", "'NearestNeighbour'", ")", ":", "# Prepare rasters", "timeStampedRasters", "=", "self", ".", "_assembleRasterParams", "(", "projectFile", ",", "self", ".", "rasters", ")", "# Make sure the raster field is valid", "converter", "=", "RasterConverter", "(", "sqlAlchemyEngineOrSession", "=", "session", ")", "# Configure color ramp", "if", "isinstance", "(", "colorRamp", ",", "dict", ")", ":", "converter", ".", "setCustomColorRamp", "(", "colorRamp", "[", "'colors'", "]", ",", "colorRamp", "[", "'interpolatedPoints'", "]", ")", "else", ":", "converter", ".", "setDefaultColorRamp", "(", "colorRamp", ")", "if", "documentName", "is", "None", ":", "documentName", "=", "self", ".", "fileExtension", "kmlString", ",", "binaryPngStrings", "=", "converter", ".", "getAsKmlPngAnimation", "(", "tableName", "=", "WMSDatasetRaster", ".", "tableName", ",", "timeStampedRasters", "=", "timeStampedRasters", ",", "rasterIdFieldName", "=", "'id'", ",", "rasterFieldName", "=", "'raster'", ",", "documentName", "=", "documentName", ",", "alpha", "=", "alpha", ",", "drawOrder", "=", "drawOrder", ",", "cellSize", "=", "cellSize", ",", "noDataValue", "=", "noDataValue", ",", "resampleMethod", "=", "resampleMethod", ")", "if", "path", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "archiveName", "=", "(", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "kmzPath", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "(", "archiveName", "+", "'.kmz'", ")", ")", "with", "ZipFile", "(", "kmzPath", ",", "'w'", ")", "as", "kmz", ":", "kmz", ".", "writestr", "(", "archiveName", "+", "'.kml'", ",", "kmlString", ")", "for", "index", ",", "binaryPngString", "in", "enumerate", "(", "binaryPngStrings", ")", ":", "kmz", ".", "writestr", "(", "'raster{0}.png'", ".", "format", "(", "index", ")", ",", "binaryPngString", ")", "return", "kmlString", ",", "binaryPngStrings" ]
Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images.
[ "Retrieve", "the", "WMS", "dataset", "as", "a", "PNG", "time", "stamped", "KMZ" ]
python
train
64.690141
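A short usage sketch for the method in this record may help; the `session`, `project_file`, and `wms_dataset` objects are assumed to come from an existing gsshapy project read, and the output path and color values are illustrative, not taken from the record.

# Hypothetical objects: 'session', 'project_file' and 'wms_dataset' are assumed to exist already.
kml_string, png_frames = wms_dataset.getAsKmlPngAnimation(
    session=session,
    projectFile=project_file,
    path='/tmp/depth_animation.kmz',                    # a .kmz archive is written next to this path
    documentName='Depth animation',
    colorRamp={'colors': [(0, 0, 255), (0, 255, 0), (255, 0, 0)],  # custom ramp, dict form from the docstring
               'interpolatedPoints': 10},
    alpha=0.8,
    cellSize=10.0,                                      # resample to 10 project units per cell
    resampleMethod='Bilinear')
print(len(png_frames), 'PNG frames generated')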
venthur/python-debianbts
debianbts/debianbts.py
https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L249-L282
def get_usertag(email, *tags): """Get buglists by usertags. Parameters ---------- email : str tags : tuple of strings If tags are given the dictionary is limited to the matching tags, if no tags are given all available tags are returned. Returns ------- mapping : dict a mapping of usertag -> buglist """ reply = _soap_client_call('get_usertag', email, *tags) map_el = reply('s-gensym3') mapping = {} # element <s-gensys3> in response can have standard type # xsi:type=apachens:Map (example, for email [email protected]) # OR no type, in this case keys are the names of child elements and # the array is contained in the child elements type_attr = map_el.attributes().get('xsi:type') if type_attr and type_attr.value == 'apachens:Map': for usertag_el in map_el.children() or []: tag = _uc(str(usertag_el('key'))) buglist_el = usertag_el('value') mapping[tag] = [int(bug) for bug in buglist_el.children() or []] else: for usertag_el in map_el.children() or []: tag = _uc(usertag_el.get_name()) mapping[tag] = [int(bug) for bug in usertag_el.children() or []] return mapping
[ "def", "get_usertag", "(", "email", ",", "*", "tags", ")", ":", "reply", "=", "_soap_client_call", "(", "'get_usertag'", ",", "email", ",", "*", "tags", ")", "map_el", "=", "reply", "(", "'s-gensym3'", ")", "mapping", "=", "{", "}", "# element <s-gensys3> in response can have standard type", "# xsi:type=apachens:Map (example, for email [email protected])", "# OR no type, in this case keys are the names of child elements and", "# the array is contained in the child elements", "type_attr", "=", "map_el", ".", "attributes", "(", ")", ".", "get", "(", "'xsi:type'", ")", "if", "type_attr", "and", "type_attr", ".", "value", "==", "'apachens:Map'", ":", "for", "usertag_el", "in", "map_el", ".", "children", "(", ")", "or", "[", "]", ":", "tag", "=", "_uc", "(", "str", "(", "usertag_el", "(", "'key'", ")", ")", ")", "buglist_el", "=", "usertag_el", "(", "'value'", ")", "mapping", "[", "tag", "]", "=", "[", "int", "(", "bug", ")", "for", "bug", "in", "buglist_el", ".", "children", "(", ")", "or", "[", "]", "]", "else", ":", "for", "usertag_el", "in", "map_el", ".", "children", "(", ")", "or", "[", "]", ":", "tag", "=", "_uc", "(", "usertag_el", ".", "get_name", "(", ")", ")", "mapping", "[", "tag", "]", "=", "[", "int", "(", "bug", ")", "for", "bug", "in", "usertag_el", ".", "children", "(", ")", "or", "[", "]", "]", "return", "mapping" ]
Get buglists by usertags. Parameters ---------- email : str tags : tuple of strings If tags are given the dictionary is limited to the matching tags, if no tags are given all available tags are returned. Returns ------- mapping : dict a mapping of usertag -> buglist
[ "Get", "buglists", "by", "usertags", "." ]
python
train
36.323529
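A minimal sketch of calling get_usertag; the import path is inferred from the file layout (debianbts/debianbts.py) and the team address is a placeholder, not a value from the record.

from debianbts import debianbts as bts   # import path assumed from the package layout

all_tags = bts.get_usertag('[email protected]')                      # placeholder address
transition_only = bts.get_usertag('[email protected]', 'transition')  # limit to one usertag
for tag, bugs in sorted(all_tags.items()):
    print(tag, len(bugs))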
jorgenschaefer/elpy
elpy/server.py
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/server.py#L217-L222
def rpc_fix_code_with_yapf(self, source, directory): """Formats Python code to conform to the PEP 8 style guide. """ source = get_source(source) return fix_code_with_yapf(source, directory)
[ "def", "rpc_fix_code_with_yapf", "(", "self", ",", "source", ",", "directory", ")", ":", "source", "=", "get_source", "(", "source", ")", "return", "fix_code_with_yapf", "(", "source", ",", "directory", ")" ]
Formats Python code to conform to the PEP 8 style guide.
[ "Formats", "Python", "code", "to", "conform", "to", "the", "PEP", "8", "style", "guide", "." ]
python
train
36.166667
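A sketch of invoking the RPC method directly, assuming ElpyRPCServer can be constructed without arguments and that yapf is installed; in normal use Emacs drives this call over the JSON-RPC channel instead.

from elpy.server import ElpyRPCServer

server = ElpyRPCServer()                      # constructor arguments omitted; assumed to default sensibly
messy = "x=1;y=2\nprint( x+y )\n"
formatted = server.rpc_fix_code_with_yapf(messy, "/tmp/project")
print(formatted)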
Crypto-toolbox/bitex
bitex/api/WSS/bitfinex.py
https://github.com/Crypto-toolbox/bitex/blob/56d46ea3db6de5219a72dad9b052fbabc921232f/bitex/api/WSS/bitfinex.py#L215-L253
def stop(self):
        """
        Stop all threads and modules of the client.
        :return:
        """
        super(BitfinexWSS, self).stop()
        log.info("BitfinexWSS.stop(): Stopping client..")
        log.info("BitfinexWSS.stop(): Joining receiver thread..")
        try:
            self.receiver_thread.join()
            if self.receiver_thread.is_alive():
                time.sleep(1)
        except AttributeError:
            log.debug("BitfinexWSS.stop(): Receiver thread was not running!")
        log.info("BitfinexWSS.stop(): Joining processing thread..")
        try:
            self.processing_thread.join()
            if self.processing_thread.is_alive():
                time.sleep(1)
        except AttributeError:
            log.debug("BitfinexWSS.stop(): Processing thread was not running!")
        log.info("BitfinexWSS.stop(): Closing websocket connection..")
        try:
            self.conn.close()
        except WebSocketConnectionClosedException:
            pass
        except AttributeError:
            # Connection is None
            pass
        self.conn = None
        self.processing_thread = None
        self.receiver_thread = None
        log.info("BitfinexWSS.stop(): Done!")
[ "def", "stop", "(", "self", ")", ":", "super", "(", "BitfinexWSS", ",", "self", ")", ".", "stop", "(", ")", "log", ".", "info", "(", "\"BitfinexWSS.stop(): Stopping client..\"", ")", "log", ".", "info", "(", "\"BitfinexWSS.stop(): Joining receiver thread..\"", ")", "try", ":", "self", ".", "receiver_thread", ".", "join", "(", ")", "if", "self", ".", "receiver_thread", ".", "is_alive", "(", ")", ":", "time", ".", "time", "(", "1", ")", "except", "AttributeError", ":", "log", ".", "debug", "(", "\"BitfinexWSS.stop(): Receiver thread was not running!\"", ")", "log", ".", "info", "(", "\"BitfinexWSS.stop(): Joining processing thread..\"", ")", "try", ":", "self", ".", "processing_thread", ".", "join", "(", ")", "if", "self", ".", "processing_thread", ".", "is_alive", "(", ")", ":", "time", ".", "time", "(", "1", ")", "except", "AttributeError", ":", "log", ".", "debug", "(", "\"BitfinexWSS.stop(): Processing thread was not running!\"", ")", "log", ".", "info", "(", "\"BitfinexWSS.stop(): Closing websocket conection..\"", ")", "try", ":", "self", ".", "conn", ".", "close", "(", ")", "except", "WebSocketConnectionClosedException", ":", "pass", "except", "AttributeError", ":", "# Connection is None", "pass", "self", ".", "conn", "=", "None", "self", ".", "processing_thread", "=", "None", "self", ".", "receiver_thread", "=", "None", "log", ".", "info", "(", "\"BitfinexWSS.stop(): Done!\"", ")" ]
Stop all threads and modules of the client. :return:
[ "Stop", "all", "threads", "and", "modules", "of", "the", "client", ".", ":", "return", ":" ]
python
train
30.74359
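A start/stop sketch for the client above, assuming the class also exposes a matching start() method; constructor options and the ten-second run are illustrative only.

import time
from bitex.api.WSS.bitfinex import BitfinexWSS

wss = BitfinexWSS()          # default constructor assumed; pairs/channels can usually be passed here
wss.start()
time.sleep(10)               # let the receiver and processing threads run for a while
wss.stop()                   # joins both threads and closes the websocket connection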
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L328-L333
def boto_fix_security_token_in_profile(self, connect_args): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + self.boto_profile if boto.config.has_option(profile, 'aws_security_token'): connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') return connect_args
[ "def", "boto_fix_security_token_in_profile", "(", "self", ",", "connect_args", ")", ":", "profile", "=", "'profile '", "+", "self", ".", "boto_profile", "if", "boto", ".", "config", ".", "has_option", "(", "profile", ",", "'aws_security_token'", ")", ":", "connect_args", "[", "'security_token'", "]", "=", "boto", ".", "config", ".", "get", "(", "profile", ",", "'aws_security_token'", ")", "return", "connect_args" ]
monkey patch for boto issue boto/boto#2100
[ "monkey", "patch", "for", "boto", "issue", "boto", "/", "boto#2100" ]
python
train
58
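The helper only augments the keyword arguments used to open the boto connection; the inventory class name and its setup below are assumptions for illustration, and the credential strings are placeholders.

inventory = Ec2Inventory()                    # hypothetical instance; real construction reads ec2.ini etc.
inventory.boto_profile = 'myprofile'
connect_args = {'aws_access_key_id': '<key-id>', 'aws_secret_access_key': '<secret>'}
connect_args = inventory.boto_fix_security_token_in_profile(connect_args)
# If ~/.boto has a [profile myprofile] section with aws_security_token set,
# connect_args now also carries that value under 'security_token'.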
bambinos/bambi
bambi/results.py
https://github.com/bambinos/bambi/blob/b4a0ced917968bb99ca20915317417d708387946/bambi/results.py#L369-L402
def to_df(self, varnames=None, ranefs=False, transformed=False,
              chains=None):
        ''' Returns the MCMC samples in a nice, neat pandas DataFrame with all
        MCMC chains concatenated.
        Args:
            varnames (list): List of variable names to include; if None
                (default), all eligible variables are included.
            ranefs (bool): Whether or not to include random effects in the
                returned DataFrame. Default is False.
            transformed (bool): Whether or not to include internally
                transformed variables in the result. Default is False.
            chains (int, list): Index, or list of indexes, of chains to
                concatenate. E.g., [1, 3] would concatenate the second and
                fourth chains, and ignore any others. If None (default),
                concatenates all available chains.
        '''
        # filter out unwanted variables
        names = self._filter_names(varnames, ranefs, transformed)
        # concatenate the (pre-sliced) chains
        if chains is None:
            chains = list(range(self.n_chains))
        chains = listify(chains)
        data = [self.data[:, i, :] for i in chains]
        data = np.concatenate(data, axis=0)
        # construct the trace DataFrame
        df = sum([self.level_dict[x] for x in names], [])
        df = pd.DataFrame({x: data[:, self.levels.index(x)] for x in df})
        return df
[ "def", "to_df", "(", "self", ",", "varnames", "=", "None", ",", "ranefs", "=", "False", ",", "transformed", "=", "False", ",", "chains", "=", "None", ")", ":", "# filter out unwanted variables", "names", "=", "self", ".", "_filter_names", "(", "varnames", ",", "ranefs", ",", "transformed", ")", "# concatenate the (pre-sliced) chains", "if", "chains", "is", "None", ":", "chains", "=", "list", "(", "range", "(", "self", ".", "n_chains", ")", ")", "chains", "=", "listify", "(", "chains", ")", "data", "=", "[", "self", ".", "data", "[", ":", ",", "i", ",", ":", "]", "for", "i", "in", "chains", "]", "data", "=", "np", ".", "concatenate", "(", "data", ",", "axis", "=", "0", ")", "# construct the trace DataFrame", "df", "=", "sum", "(", "[", "self", ".", "level_dict", "[", "x", "]", "for", "x", "in", "names", "]", ",", "[", "]", ")", "df", "=", "pd", ".", "DataFrame", "(", "{", "x", ":", "data", "[", ":", ",", "self", ".", "levels", ".", "index", "(", "x", ")", "]", "for", "x", "in", "df", "}", ")", "return", "df" ]
Returns the MCMC samples in a nice, neat pandas DataFrame with all
        MCMC chains concatenated.

        Args:
            varnames (list): List of variable names to include; if None
                (default), all eligible variables are included.
            ranefs (bool): Whether or not to include random effects in the
                returned DataFrame. Default is False.
            transformed (bool): Whether or not to include internally
                transformed variables in the result. Default is False.
            chains (int, list): Index, or list of indexes, of chains to
                concatenate. E.g., [1, 3] would concatenate the second and
                fourth chains, and ignore any others. If None (default),
                concatenates all available chains.
[ "Returns", "the", "MCMC", "samples", "in", "a", "nice", "neat", "pandas", "DataFrame", "with", "all", "MCMC", "chains", "concatenated", "." ]
python
train
42.029412
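A sketch of how to_df is reached after fitting; the Model/fit calls follow the older bambi API of this commit and are assumptions, with `data` standing in for a pandas DataFrame holding the observations.

import bambi as bmb

model = bmb.Model(data)                               # 'data' is an assumed pandas DataFrame
results = model.fit('y ~ x', samples=1000, chains=2)  # API sketch for the bambi version of this record
df = results.to_df(ranefs=False, chains=[0, 1])       # fixed effects only, both chains concatenated
print(df.head())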
Chyroc/WechatSogou
wechatsogou/request.py
https://github.com/Chyroc/WechatSogou/blob/2e0e9886f555fd8bcfc7ae9718ced6ce955cd24a/wechatsogou/request.py#L115-L158
def gen_hot_url(hot_index, page=1):
    """Build the URL of a homepage hot-article listing

    Parameters
    ----------
    hot_index : WechatSogouConst.hot_index
        Category of the homepage hot articles (a constant): WechatSogouConst.hot_index.xxx
    page : int
        Page number

    Returns
    -------
    str
        URL of the hot-article category
    """
    assert hasattr(WechatSogouConst.hot_index, hot_index)
    assert isinstance(page, int) and page > 0

    index_urls = {
        WechatSogouConst.hot_index.hot: 0,  # hot / trending
        WechatSogouConst.hot_index.gaoxiao: 1,  # funny
        WechatSogouConst.hot_index.health: 2,  # health
        WechatSogouConst.hot_index.sifanghua: 3,  # private talk
        WechatSogouConst.hot_index.gossip: 4,  # gossip
        WechatSogouConst.hot_index.technology: 5,  # technology
        WechatSogouConst.hot_index.finance: 6,  # finance
        WechatSogouConst.hot_index.car: 7,  # cars
        WechatSogouConst.hot_index.life: 8,  # life
        WechatSogouConst.hot_index.fashion: 9,  # fashion
        WechatSogouConst.hot_index.mummy: 10,  # moms / parenting
        WechatSogouConst.hot_index.travel: 11,  # travel
        WechatSogouConst.hot_index.job: 12,  # career
        WechatSogouConst.hot_index.food: 13,  # food
        WechatSogouConst.hot_index.history: 14,  # history
        WechatSogouConst.hot_index.study: 15,  # study / education
        WechatSogouConst.hot_index.constellation: 16,  # horoscope
        WechatSogouConst.hot_index.sport: 17,  # sports
        WechatSogouConst.hot_index.military: 18,  # military
        WechatSogouConst.hot_index.game: 19,  # games
        WechatSogouConst.hot_index.pet: 20,  # cute pets
    }
    return 'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'.format(index_urls[hot_index], page - 1)
[ "def", "gen_hot_url", "(", "hot_index", ",", "page", "=", "1", ")", ":", "assert", "hasattr", "(", "WechatSogouConst", ".", "hot_index", ",", "hot_index", ")", "assert", "isinstance", "(", "page", ",", "int", ")", "and", "page", ">", "0", "index_urls", "=", "{", "WechatSogouConst", ".", "hot_index", ".", "hot", ":", "0", ",", "# 热门", "WechatSogouConst", ".", "hot_index", ".", "gaoxiao", ":", "1", ",", "# 搞笑", "WechatSogouConst", ".", "hot_index", ".", "health", ":", "2", ",", "# 养生", "WechatSogouConst", ".", "hot_index", ".", "sifanghua", ":", "3", ",", "# 私房话", "WechatSogouConst", ".", "hot_index", ".", "gossip", ":", "4", ",", "# 八卦", "WechatSogouConst", ".", "hot_index", ".", "technology", ":", "5", ",", "# 科技", "WechatSogouConst", ".", "hot_index", ".", "finance", ":", "6", ",", "# 财经", "WechatSogouConst", ".", "hot_index", ".", "car", ":", "7", ",", "# 汽车", "WechatSogouConst", ".", "hot_index", ".", "life", ":", "8", ",", "# 生活", "WechatSogouConst", ".", "hot_index", ".", "fashion", ":", "9", ",", "# 时尚", "WechatSogouConst", ".", "hot_index", ".", "mummy", ":", "10", ",", "# 辣妈 / 育儿", "WechatSogouConst", ".", "hot_index", ".", "travel", ":", "11", ",", "# 旅行", "WechatSogouConst", ".", "hot_index", ".", "job", ":", "12", ",", "# 职场", "WechatSogouConst", ".", "hot_index", ".", "food", ":", "13", ",", "# 美食", "WechatSogouConst", ".", "hot_index", ".", "history", ":", "14", ",", "# 历史", "WechatSogouConst", ".", "hot_index", ".", "study", ":", "15", ",", "# 学霸 / 教育", "WechatSogouConst", ".", "hot_index", ".", "constellation", ":", "16", ",", "# 星座", "WechatSogouConst", ".", "hot_index", ".", "sport", ":", "17", ",", "# 体育", "WechatSogouConst", ".", "hot_index", ".", "military", ":", "18", ",", "# 军事", "WechatSogouConst", ".", "hot_index", ".", "game", ":", "19", ",", "# 游戏", "WechatSogouConst", ".", "hot_index", ".", "pet", ":", "20", ",", "# 萌宠", "}", "return", "'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'", ".", "format", "(", "index_urls", "[", "hot_index", "]", ",", "page", "-", "1", ")" ]
Build the URL of a homepage hot-article listing

Parameters
----------
hot_index : WechatSogouConst.hot_index
    Category of the homepage hot articles (a constant): WechatSogouConst.hot_index.xxx
page : int
    Page number

Returns
-------
str
    URL of the hot-article category
[ "Build", "the", "URL", "of", "a", "homepage", "hot", "-", "article", "listing" ]
python
train
39.386364
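A sketch of building listing URLs with the constants referenced above; it assumes gen_hot_url is exposed as a static method of WechatSogouRequest and that WechatSogouConst lives in wechatsogou.const, both inferred from the repository layout.

from wechatsogou.const import WechatSogouConst        # import path assumed
from wechatsogou.request import WechatSogouRequest

url_page_1 = WechatSogouRequest.gen_hot_url(WechatSogouConst.hot_index.technology)
url_page_3 = WechatSogouRequest.gen_hot_url(WechatSogouConst.hot_index.travel, page=3)
print(url_page_1)
print(url_page_3)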
SmokinCaterpillar/pypet
examples/example_05_custom_parameter.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/examples/example_05_custom_parameter.py#L128-L144
def diff_lorenz(value_array, sigma, beta, rho): """The Lorenz attractor differential equation :param value_array: 3d array containing the x,y, and z component values. :param sigma: Constant attractor parameter :param beta: FConstant attractor parameter :param rho: Constant attractor parameter :return: 3d array of the Lorenz system evaluated at `value_array` """ diff_array = np.zeros(3) diff_array[0] = sigma * (value_array[1]-value_array[0]) diff_array[1] = value_array[0] * (rho - value_array[2]) - value_array[1] diff_array[2] = value_array[0] * value_array[1] - beta * value_array[2] return diff_array
[ "def", "diff_lorenz", "(", "value_array", ",", "sigma", ",", "beta", ",", "rho", ")", ":", "diff_array", "=", "np", ".", "zeros", "(", "3", ")", "diff_array", "[", "0", "]", "=", "sigma", "*", "(", "value_array", "[", "1", "]", "-", "value_array", "[", "0", "]", ")", "diff_array", "[", "1", "]", "=", "value_array", "[", "0", "]", "*", "(", "rho", "-", "value_array", "[", "2", "]", ")", "-", "value_array", "[", "1", "]", "diff_array", "[", "2", "]", "=", "value_array", "[", "0", "]", "*", "value_array", "[", "1", "]", "-", "beta", "*", "value_array", "[", "2", "]", "return", "diff_array" ]
The Lorenz attractor differential equation :param value_array: 3d array containing the x,y, and z component values. :param sigma: Constant attractor parameter :param beta: FConstant attractor parameter :param rho: Constant attractor parameter :return: 3d array of the Lorenz system evaluated at `value_array`
[ "The", "Lorenz", "attractor", "differential", "equation" ]
python
test
37.823529
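The derivative function above plugs straight into a standard ODE solver; the parameter values below are the classic chaotic choice and the SciPy wiring is a sketch, not part of the example script in the record.

import numpy as np
from scipy.integrate import odeint

def lorenz_rhs(value_array, t, sigma, beta, rho):
    # odeint passes (y, t); the extra time argument is simply ignored here
    return diff_lorenz(value_array, sigma, beta, rho)

t = np.linspace(0.0, 40.0, 4000)
trajectory = odeint(lorenz_rhs, [1.0, 1.0, 1.0], t, args=(10.0, 8.0 / 3.0, 28.0))
print(trajectory.shape)   # (4000, 3)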
a1ezzz/wasp-general
wasp_general/uri.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/uri.py#L478-L487
def strict_parse(cls, query_str, *specs, extra_parameters=True): """ Parse query and return :class:`.WStrictURIQuery` object :param query_str: query component of URI to parse :param specs: list of parameters specifications :param extra_parameters: whether parameters that was not specified in "specs" are allowed :return: WStrictURIQuery """ plain_result = cls.parse(query_str) return WStrictURIQuery(plain_result, *specs, extra_parameters=extra_parameters)
[ "def", "strict_parse", "(", "cls", ",", "query_str", ",", "*", "specs", ",", "extra_parameters", "=", "True", ")", ":", "plain_result", "=", "cls", ".", "parse", "(", "query_str", ")", "return", "WStrictURIQuery", "(", "plain_result", ",", "*", "specs", ",", "extra_parameters", "=", "extra_parameters", ")" ]
Parse query and return :class:`.WStrictURIQuery` object :param query_str: query component of URI to parse :param specs: list of parameters specifications :param extra_parameters: whether parameters that was not specified in "specs" are allowed :return: WStrictURIQuery
[ "Parse", "query", "and", "return", ":", "class", ":", ".", "WStrictURIQuery", "object" ]
python
train
46.5
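A minimal sketch of the call, assuming strict_parse is exposed as a classmethod of WURIQuery in wasp_general.uri (the class owning the plain parse method it delegates to).

from wasp_general.uri import WURIQuery          # owning class assumed

query = WURIQuery.strict_parse('page=2&lang=en')   # no specs given, so every parameter is accepted
# Parameter specifications can be passed as extra positional arguments, and
# extra_parameters=False rejects query parameters that have no specification.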
wtsi-hgi/python-git-subrepo
gitsubrepo/_git.py
https://github.com/wtsi-hgi/python-git-subrepo/blob/bb2eb2bd9a7e51b862298ddb4168cc5b8633dad0/gitsubrepo/_git.py#L41-L47
def get_directory_relative_to_git_root(directory: str): """ Gets the path to the given directory relative to the git repository root in which it is a subdirectory. :param directory: the directory within a git repository :return: the path to the directory relative to the git repository root """ return os.path.relpath(os.path.realpath(directory), get_git_root_directory(directory))
[ "def", "get_directory_relative_to_git_root", "(", "directory", ":", "str", ")", ":", "return", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "realpath", "(", "directory", ")", ",", "get_git_root_directory", "(", "directory", ")", ")" ]
Gets the path to the given directory relative to the git repository root in which it is a subdirectory. :param directory: the directory within a git repository :return: the path to the directory relative to the git repository root
[ "Gets", "the", "path", "to", "the", "given", "directory", "relative", "to", "the", "git", "repository", "root", "in", "which", "it", "is", "a", "subdirectory", ".", ":", "param", "directory", ":", "the", "directory", "within", "a", "git", "repository", ":", "return", ":", "the", "path", "to", "the", "directory", "relative", "to", "the", "git", "repository", "root" ]
python
train
57
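Usage sketch; the repository path is a placeholder and the import reaches into the private _git module shown in the record.

from gitsubrepo._git import get_directory_relative_to_git_root

rel = get_directory_relative_to_git_root('/home/user/myrepo/src/module')   # placeholder path
# With /home/user/myrepo as the enclosing git root, rel == 'src/module'.
print(rel)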
cloudmesh-cmd3/cmd3
cmd3/plugins/clear.py
https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/cmd3/plugins/clear.py#L29-L60
def do_banner(self, arg, arguments):
        """
        ::
            Usage:
                banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT
            Arguments:
                TEXT The text message from which to create the banner
                CHAR The character for the frame.
                WIDTH Width of the banner
                INDENT indentation of the banner
                COLOR the color
            Options:
                -c CHAR The character for the frame. [default: #]
                -n WIDTH The width of the banner. [default: 70]
                -i INDENT The indentation of the banner. [default: 0]
                -r COLOR The color of the banner. [default: BLACK]
            Prints a banner from a one-line text message.
        """
        print arguments
        n = int(arguments['-n'])
        c = arguments['-c']
        i = int(arguments['-i'])
        color = arguments['-r'].upper()
        Console._print(color, "", i * " " + (n-i) * c)
        Console._print(color, "", i * " " + c + " " + arguments['TEXT'])
        Console._print(color, "", i * " " + (n-i) * c)
[ "def", "do_banner", "(", "self", ",", "arg", ",", "arguments", ")", ":", "print", "arguments", "n", "=", "int", "(", "arguments", "[", "'-n'", "]", ")", "c", "=", "arguments", "[", "'-c'", "]", "i", "=", "int", "(", "arguments", "[", "'-i'", "]", ")", "color", "=", "arguments", "[", "'-r'", "]", ".", "upper", "(", ")", "Console", ".", "_print", "(", "color", ",", "\"\"", ",", "i", "*", "\" \"", "+", "(", "n", "-", "i", ")", "*", "c", ")", "Console", ".", "_print", "(", "color", ",", "\"\"", ",", "i", "*", "\" \"", "+", "c", "+", "\" \"", "+", "arguments", "[", "'TEXT'", "]", ")", "Console", ".", "_print", "(", "color", ",", "\"\"", ",", "i", "*", "\" \"", "+", "(", "n", "-", "i", ")", "*", "c", ")" ]
::

    Usage:
        banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT

    Arguments:
        TEXT The text message from which to create the banner
        CHAR The character for the frame.
        WIDTH Width of the banner
        INDENT indentation of the banner
        COLOR the color

    Options:
        -c CHAR The character for the frame. [default: #]
        -n WIDTH The width of the banner. [default: 70]
        -i INDENT The indentation of the banner. [default: 0]
        -r COLOR The color of the banner. [default: BLACK]

    Prints a banner from a one-line text message.
[ "::" ]
python
train
34.9375
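A direct-call sketch of the command body; `shell` stands in for a hypothetical instance of the plugin class, and normally cmd3/docopt builds the arguments dict from a shell line such as `banner -c = -n 50 -i 4 -r RED Finished`.

# Prints 'Finished' framed by '=' characters, 50 wide, indented four spaces, in red.
shell.do_banner(None, {'-c': '=', '-n': '50', '-i': '4', '-r': 'RED', 'TEXT': 'Finished'})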
mlperf/training
rnn_translator/pytorch/seq2seq/train/fp_optimizers.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/train/fp_optimizers.py#L14-L24
def set_grads(params, params_with_grad): """ Copies gradients from param_with_grad to params :param params: dst parameters :param params_with_grad: src parameters """ for param, param_w_grad in zip(params, params_with_grad): if param.grad is None: param.grad = torch.nn.Parameter(torch.empty_like(param)) param.grad.data.copy_(param_w_grad.grad.data)
[ "def", "set_grads", "(", "params", ",", "params_with_grad", ")", ":", "for", "param", ",", "param_w_grad", "in", "zip", "(", "params", ",", "params_with_grad", ")", ":", "if", "param", ".", "grad", "is", "None", ":", "param", ".", "grad", "=", "torch", ".", "nn", ".", "Parameter", "(", "torch", ".", "empty_like", "(", "param", ")", ")", "param", ".", "grad", ".", "data", ".", "copy_", "(", "param_w_grad", ".", "grad", ".", "data", ")" ]
Copies gradients from param_with_grad to params :param params: dst parameters :param params_with_grad: src parameters
[ "Copies", "gradients", "from", "param_with_grad", "to", "params" ]
python
train
39
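A sketch of the master-copy pattern this helper supports: gradients computed on the working parameters are copied onto a detached set that an optimizer can update separately. The tiny model and data are illustrative, and the call relies on the set_grads definition above.

import torch

model = torch.nn.Linear(4, 2)
master_params = [p.clone().detach().requires_grad_() for p in model.parameters()]

loss = model(torch.randn(8, 4)).sum()
loss.backward()                                  # gradients land on model.parameters()
set_grads(master_params, model.parameters())     # copy them onto the detached master copies
print(master_params[0].grad.shape)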
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/pool.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/pool.py#L346-L358
def bind(self, database): """Associate the pool with a database. :type database: :class:`~google.cloud.spanner_v1.database.Database` :param database: database used by the pool: used to create sessions when needed. """ self._database = database for _ in xrange(self.size): session = self._new_session() session.create() self.put(session)
[ "def", "bind", "(", "self", ",", "database", ")", ":", "self", ".", "_database", "=", "database", "for", "_", "in", "xrange", "(", "self", ".", "size", ")", ":", "session", "=", "self", ".", "_new_session", "(", ")", "session", ".", "create", "(", ")", "self", ".", "put", "(", "session", ")" ]
Associate the pool with a database. :type database: :class:`~google.cloud.spanner_v1.database.Database` :param database: database used by the pool: used to create sessions when needed.
[ "Associate", "the", "pool", "with", "a", "database", "." ]
python
train
33.538462
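A sketch of how bind() is reached; the instance and database ids are placeholders, FixedSizePool is assumed to be the pool class this method belongs to, and the database is assumed to already exist so the pre-created sessions succeed.

from google.cloud import spanner
from google.cloud.spanner_v1.pool import FixedSizePool

client = spanner.Client()
instance = client.instance('my-instance')                  # placeholder id
pool = FixedSizePool(size=10)
database = instance.database('my-database', pool=pool)     # constructing the database binds the pool,
                                                           # which pre-creates the 10 sessions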
pandas-dev/pandas
pandas/core/series.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2069-L2132
def quantile(self, q=0.5, interpolation='linear'): """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. versionadded:: 0.18.0 This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile numpy.percentile Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ self._check_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name return self._constructor(result, index=Float64Index(q), name=self.name) else: # scalar return result.iloc[0]
[ "def", "quantile", "(", "self", ",", "q", "=", "0.5", ",", "interpolation", "=", "'linear'", ")", ":", "self", ".", "_check_percentile", "(", "q", ")", "# We dispatch to DataFrame so that core.internals only has to worry", "# about 2D cases.", "df", "=", "self", ".", "to_frame", "(", ")", "result", "=", "df", ".", "quantile", "(", "q", "=", "q", ",", "interpolation", "=", "interpolation", ",", "numeric_only", "=", "False", ")", "if", "result", ".", "ndim", "==", "2", ":", "result", "=", "result", ".", "iloc", "[", ":", ",", "0", "]", "if", "is_list_like", "(", "q", ")", ":", "result", ".", "name", "=", "self", ".", "name", "return", "self", ".", "_constructor", "(", "result", ",", "index", "=", "Float64Index", "(", "q", ")", ",", "name", "=", "self", ".", "name", ")", "else", ":", "# scalar", "return", "result", ".", "iloc", "[", "0", "]" ]
Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. versionadded:: 0.18.0 This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile numpy.percentile Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64
[ "Return", "value", "at", "the", "given", "quantile", "." ]
python
train
31.421875
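A quick check of the scalar and list forms described in the docstring; the values follow from the four-element example series.

import pandas as pd

s = pd.Series([1, 2, 3, 4])
print(s.quantile(0.1, interpolation='lower'))   # 1, the lower neighbouring data point
print(s.quantile([0.1, 0.9]))                   # a Series indexed by the requested quantiles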