Dataset columns (type and observed length/value range):

  repo               stringlengths   7 - 54
  path               stringlengths   4 - 192
  url                stringlengths   87 - 284
  code               stringlengths   78 - 104k
  code_tokens        list
  docstring          stringlengths   1 - 46.9k
  docstring_tokens   list
  language           stringclasses   1 value
  partition          stringclasses   3 values

Sample rows follow, one field per line: repo, path, url, code, docstring, language, partition.
serge-sans-paille/pythran
pythran/types/types.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/types.py#L106-L112
def register(self, ptype):
    """register ptype as a local typedef"""
    # Too many of them leads to memory burst
    if len(self.typedefs) < cfg.getint('typing', 'max_combiner'):
        self.typedefs.append(ptype)
        return True
    return False
[ "def", "register", "(", "self", ",", "ptype", ")", ":", "# Too many of them leads to memory burst", "if", "len", "(", "self", ".", "typedefs", ")", "<", "cfg", ".", "getint", "(", "'typing'", ",", "'max_combiner'", ")", ":", "self", ".", "typedefs", ".", "append", "(", "ptype", ")", "return", "True", "return", "False" ]
register ptype as a local typedef
[ "register", "ptype", "as", "a", "local", "typedef" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L943-L1062
def get_image_name(self):
    """
    @rtype:  str
    @return: Filename of the process main module.

        This method does its best to retrieve the filename.
        However sometimes this is not possible, so C{None} may
        be returned instead.
    """

    # Method 1: Module.fileName
    # It's cached if the filename was already found by the other methods,
    # if it came with the corresponding debug event, or it was found by the
    # toolhelp API.
    mainModule = None
    try:
        mainModule = self.get_main_module()
        name = mainModule.fileName
        if not name:
            name = None
    except (KeyError, AttributeError, WindowsError):
        ## traceback.print_exc()  # XXX DEBUG
        name = None

    # Method 2: QueryFullProcessImageName()
    # Not implemented until Windows Vista.
    if not name:
        try:
            hProcess = self.get_handle(
                win32.PROCESS_QUERY_LIMITED_INFORMATION)
            name = win32.QueryFullProcessImageName(hProcess)
        except (AttributeError, WindowsError):
            ## traceback.print_exc()  # XXX DEBUG
            name = None

    # Method 3: GetProcessImageFileName()
    #
    # Not implemented until Windows XP.
    # For more info see:
    # https://voidnish.wordpress.com/2005/06/20/getprocessimagefilenamequerydosdevice-trivia/
    if not name:
        try:
            hProcess = self.get_handle(win32.PROCESS_QUERY_INFORMATION)
            name = win32.GetProcessImageFileName(hProcess)
            if name:
                name = PathOperations.native_to_win32_pathname(name)
            else:
                name = None
        except (AttributeError, WindowsError):
            ## traceback.print_exc()  # XXX DEBUG
            if not name:
                name = None

    # Method 4: GetModuleFileNameEx()
    # Not implemented until Windows 2000.
    #
    # May be spoofed by malware, since this information resides
    # in usermode space (see http://www.ragestorm.net/blogs/?p=163).
    if not name:
        try:
            hProcess = self.get_handle(
                win32.PROCESS_VM_READ |
                win32.PROCESS_QUERY_INFORMATION)
            try:
                name = win32.GetModuleFileNameEx(hProcess)
            except WindowsError:
                ## traceback.print_exc()  # XXX DEBUG
                name = win32.GetModuleFileNameEx(
                    hProcess, self.get_image_base())
            if name:
                name = PathOperations.native_to_win32_pathname(name)
            else:
                name = None
        except (AttributeError, WindowsError):
            ## traceback.print_exc()  # XXX DEBUG
            if not name:
                name = None

    # Method 5: PEB.ProcessParameters->ImagePathName
    #
    # May fail since it's using an undocumented internal structure.
    #
    # May be spoofed by malware, since this information resides
    # in usermode space (see http://www.ragestorm.net/blogs/?p=163).
    if not name:
        try:
            peb = self.get_peb()
            pp = self.read_structure(peb.ProcessParameters,
                                     win32.RTL_USER_PROCESS_PARAMETERS)
            s = pp.ImagePathName
            name = self.peek_string(s.Buffer,
                                    dwMaxSize=s.MaximumLength,
                                    fUnicode=True)
            if name:
                name = PathOperations.native_to_win32_pathname(name)
            else:
                name = None
        except (AttributeError, WindowsError):
            ## traceback.print_exc()  # XXX DEBUG
            name = None

    # Method 6: Module.get_filename()
    # It tries to get the filename from the file handle.
    #
    # There are currently some problems due to the strange way the API
    # works - it returns the pathname without the drive letter, and I
    # couldn't figure out a way to fix it.
    if not name and mainModule is not None:
        try:
            name = mainModule.get_filename()
            if not name:
                name = None
        except (AttributeError, WindowsError):
            ## traceback.print_exc()  # XXX DEBUG
            name = None

    # Remember the filename.
    if name and mainModule is not None:
        mainModule.fileName = name

    # Return the image filename, or None on error.
    return name
[ "def", "get_image_name", "(", "self", ")", ":", "# Method 1: Module.fileName", "# It's cached if the filename was already found by the other methods,", "# if it came with the corresponding debug event, or it was found by the", "# toolhelp API.", "mainModule", "=", "None", "try", ":", "mainModule", "=", "self", ".", "get_main_module", "(", ")", "name", "=", "mainModule", ".", "fileName", "if", "not", "name", ":", "name", "=", "None", "except", "(", "KeyError", ",", "AttributeError", ",", "WindowsError", ")", ":", "## traceback.print_exc() # XXX DEBUG", "name", "=", "None", "# Method 2: QueryFullProcessImageName()", "# Not implemented until Windows Vista.", "if", "not", "name", ":", "try", ":", "hProcess", "=", "self", ".", "get_handle", "(", "win32", ".", "PROCESS_QUERY_LIMITED_INFORMATION", ")", "name", "=", "win32", ".", "QueryFullProcessImageName", "(", "hProcess", ")", "except", "(", "AttributeError", ",", "WindowsError", ")", ":", "## traceback.print_exc() # XXX DEBUG", "name", "=", "None", "# Method 3: GetProcessImageFileName()", "#", "# Not implemented until Windows XP.", "# For more info see:", "# https://voidnish.wordpress.com/2005/06/20/getprocessimagefilenamequerydosdevice-trivia/", "if", "not", "name", ":", "try", ":", "hProcess", "=", "self", ".", "get_handle", "(", "win32", ".", "PROCESS_QUERY_INFORMATION", ")", "name", "=", "win32", ".", "GetProcessImageFileName", "(", "hProcess", ")", "if", "name", ":", "name", "=", "PathOperations", ".", "native_to_win32_pathname", "(", "name", ")", "else", ":", "name", "=", "None", "except", "(", "AttributeError", ",", "WindowsError", ")", ":", "## traceback.print_exc() # XXX DEBUG", "if", "not", "name", ":", "name", "=", "None", "# Method 4: GetModuleFileNameEx()", "# Not implemented until Windows 2000.", "#", "# May be spoofed by malware, since this information resides", "# in usermode space (see http://www.ragestorm.net/blogs/?p=163).", "if", "not", "name", ":", "try", ":", "hProcess", "=", "self", ".", "get_handle", "(", "win32", ".", "PROCESS_VM_READ", "|", "win32", ".", "PROCESS_QUERY_INFORMATION", ")", "try", ":", "name", "=", "win32", ".", "GetModuleFileNameEx", "(", "hProcess", ")", "except", "WindowsError", ":", "## traceback.print_exc() # XXX DEBUG", "name", "=", "win32", ".", "GetModuleFileNameEx", "(", "hProcess", ",", "self", ".", "get_image_base", "(", ")", ")", "if", "name", ":", "name", "=", "PathOperations", ".", "native_to_win32_pathname", "(", "name", ")", "else", ":", "name", "=", "None", "except", "(", "AttributeError", ",", "WindowsError", ")", ":", "## traceback.print_exc() # XXX DEBUG", "if", "not", "name", ":", "name", "=", "None", "# Method 5: PEB.ProcessParameters->ImagePathName", "#", "# May fail since it's using an undocumented internal structure.", "#", "# May be spoofed by malware, since this information resides", "# in usermode space (see http://www.ragestorm.net/blogs/?p=163).", "if", "not", "name", ":", "try", ":", "peb", "=", "self", ".", "get_peb", "(", ")", "pp", "=", "self", ".", "read_structure", "(", "peb", ".", "ProcessParameters", ",", "win32", ".", "RTL_USER_PROCESS_PARAMETERS", ")", "s", "=", "pp", ".", "ImagePathName", "name", "=", "self", ".", "peek_string", "(", "s", ".", "Buffer", ",", "dwMaxSize", "=", "s", ".", "MaximumLength", ",", "fUnicode", "=", "True", ")", "if", "name", ":", "name", "=", "PathOperations", ".", "native_to_win32_pathname", "(", "name", ")", "else", ":", "name", "=", "None", "except", "(", "AttributeError", ",", "WindowsError", ")", ":", "## traceback.print_exc() # 
XXX DEBUG", "name", "=", "None", "# Method 6: Module.get_filename()", "# It tries to get the filename from the file handle.", "#", "# There are currently some problems due to the strange way the API", "# works - it returns the pathname without the drive letter, and I", "# couldn't figure out a way to fix it.", "if", "not", "name", "and", "mainModule", "is", "not", "None", ":", "try", ":", "name", "=", "mainModule", ".", "get_filename", "(", ")", "if", "not", "name", ":", "name", "=", "None", "except", "(", "AttributeError", ",", "WindowsError", ")", ":", "## traceback.print_exc() # XXX DEBUG", "name", "=", "None", "# Remember the filename.", "if", "name", "and", "mainModule", "is", "not", "None", ":", "mainModule", ".", "fileName", "=", "name", "# Return the image filename, or None on error.", "return", "name" ]
@rtype: str @return: Filename of the process main module. This method does its best to retrieve the filename. However sometimes this is not possible, so C{None} may be returned instead.
[ "@rtype", ":", "int", "@return", ":", "Filename", "of", "the", "process", "main", "module", "." ]
python
train
quantmind/pulsar
pulsar/apps/rpc/jsonrpc.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/rpc/jsonrpc.py#L253-L265
def get_params(self, *args, **kwargs):
    '''Create an array of positional or named parameters.

    Mixing positional and named parameters in one call
    is not possible.
    '''
    kwargs.update(self._data)
    if args and kwargs:
        raise ValueError('Cannot mix positional and named parameters')
    if args:
        return list(args)
    else:
        return kwargs
[ "def", "get_params", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "self", ".", "_data", ")", "if", "args", "and", "kwargs", ":", "raise", "ValueError", "(", "'Cannot mix positional and named parameters'", ")", "if", "args", ":", "return", "list", "(", "args", ")", "else", ":", "return", "kwargs" ]
Create an array of positional or named parameters. Mixing positional and named parameters in one call is not possible.
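A minimal usage sketch of these semantics (hypothetical `rpc` instance whose `_data` dict is empty; the name is illustrative, not from the source):

# Illustrative only: `rpc` is an assumed instance with empty _data.
rpc.get_params(1, 2, 3)         # -> [1, 2, 3]
rpc.get_params(a=1, b=2)        # -> {'a': 1, 'b': 2}
rpc.get_params(1, a=2)          # raises ValueError: cannot mix both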
[ "Create", "an", "array", "or", "positional", "or", "named", "parameters", "Mixing", "positional", "and", "named", "parameters", "in", "one", "call", "is", "not", "possible", "." ]
python
train
coleifer/huey
huey/api.py
https://github.com/coleifer/huey/blob/416e8da1ca18442c08431a91bce373de7d2d200f/huey/api.py#L913-L990
def crontab(minute='*', hour='*', day='*', month='*', day_of_week='*'):
    """
    Convert a "crontab"-style set of parameters into a test function that
    will return True when the given datetime matches the parameters set
    forth in the crontab.

    For day-of-week, 0=Sunday and 6=Saturday.

    Acceptable inputs:
    * = every distinct value
    */n = run every "n" times, i.e. hours='*/4' == 0, 4, 8, 12, 16, 20
    m-n = run every time m..n
    m,n = run on m and n
    """
    validation = (
        ('m', month, range(1, 13)),
        ('d', day, range(1, 32)),
        ('w', day_of_week, range(8)),  # 0-6, but also 7 for Sunday.
        ('H', hour, range(24)),
        ('M', minute, range(60))
    )
    cron_settings = []

    for (date_str, value, acceptable) in validation:
        settings = set([])

        if isinstance(value, int):
            value = str(value)

        for piece in value.split(','):
            if piece == '*':
                settings.update(acceptable)
                continue

            if piece.isdigit():
                piece = int(piece)
                if piece not in acceptable:
                    raise ValueError('%d is not a valid input' % piece)
                elif date_str == 'w':
                    piece %= 7
                settings.add(piece)
            else:
                dash_match = dash_re.match(piece)
                if dash_match:
                    lhs, rhs = map(int, dash_match.groups())
                    if lhs not in acceptable or rhs not in acceptable:
                        raise ValueError('%s is not a valid input' % piece)
                    elif date_str == 'w':
                        lhs %= 7
                        rhs %= 7
                    settings.update(range(lhs, rhs + 1))
                    continue

                # Handle stuff like */3, */6.
                every_match = every_re.match(piece)
                if every_match:
                    if date_str == 'w':
                        raise ValueError('Cannot perform this kind of matching'
                                         ' on day-of-week.')
                    interval = int(every_match.groups()[0])
                    settings.update(acceptable[::interval])

        cron_settings.append(sorted(list(settings)))

    def validate_date(timestamp):
        _, m, d, H, M, _, w, _, _ = timestamp.timetuple()

        # fix the weekday to be sunday=0
        w = (w + 1) % 7

        for (date_piece, selection) in zip((m, d, w, H, M), cron_settings):
            if date_piece not in selection:
                return False
        return True

    return validate_date
[ "def", "crontab", "(", "minute", "=", "'*'", ",", "hour", "=", "'*'", ",", "day", "=", "'*'", ",", "month", "=", "'*'", ",", "day_of_week", "=", "'*'", ")", ":", "validation", "=", "(", "(", "'m'", ",", "month", ",", "range", "(", "1", ",", "13", ")", ")", ",", "(", "'d'", ",", "day", ",", "range", "(", "1", ",", "32", ")", ")", ",", "(", "'w'", ",", "day_of_week", ",", "range", "(", "8", ")", ")", ",", "# 0-6, but also 7 for Sunday.", "(", "'H'", ",", "hour", ",", "range", "(", "24", ")", ")", ",", "(", "'M'", ",", "minute", ",", "range", "(", "60", ")", ")", ")", "cron_settings", "=", "[", "]", "for", "(", "date_str", ",", "value", ",", "acceptable", ")", "in", "validation", ":", "settings", "=", "set", "(", "[", "]", ")", "if", "isinstance", "(", "value", ",", "int", ")", ":", "value", "=", "str", "(", "value", ")", "for", "piece", "in", "value", ".", "split", "(", "','", ")", ":", "if", "piece", "==", "'*'", ":", "settings", ".", "update", "(", "acceptable", ")", "continue", "if", "piece", ".", "isdigit", "(", ")", ":", "piece", "=", "int", "(", "piece", ")", "if", "piece", "not", "in", "acceptable", ":", "raise", "ValueError", "(", "'%d is not a valid input'", "%", "piece", ")", "elif", "date_str", "==", "'w'", ":", "piece", "%=", "7", "settings", ".", "add", "(", "piece", ")", "else", ":", "dash_match", "=", "dash_re", ".", "match", "(", "piece", ")", "if", "dash_match", ":", "lhs", ",", "rhs", "=", "map", "(", "int", ",", "dash_match", ".", "groups", "(", ")", ")", "if", "lhs", "not", "in", "acceptable", "or", "rhs", "not", "in", "acceptable", ":", "raise", "ValueError", "(", "'%s is not a valid input'", "%", "piece", ")", "elif", "date_str", "==", "'w'", ":", "lhs", "%=", "7", "rhs", "%=", "7", "settings", ".", "update", "(", "range", "(", "lhs", ",", "rhs", "+", "1", ")", ")", "continue", "# Handle stuff like */3, */6.", "every_match", "=", "every_re", ".", "match", "(", "piece", ")", "if", "every_match", ":", "if", "date_str", "==", "'w'", ":", "raise", "ValueError", "(", "'Cannot perform this kind of matching'", "' on day-of-week.'", ")", "interval", "=", "int", "(", "every_match", ".", "groups", "(", ")", "[", "0", "]", ")", "settings", ".", "update", "(", "acceptable", "[", ":", ":", "interval", "]", ")", "cron_settings", ".", "append", "(", "sorted", "(", "list", "(", "settings", ")", ")", ")", "def", "validate_date", "(", "timestamp", ")", ":", "_", ",", "m", ",", "d", ",", "H", ",", "M", ",", "_", ",", "w", ",", "_", ",", "_", "=", "timestamp", ".", "timetuple", "(", ")", "# fix the weekday to be sunday=0", "w", "=", "(", "w", "+", "1", ")", "%", "7", "for", "(", "date_piece", ",", "selection", ")", "in", "zip", "(", "(", "m", ",", "d", ",", "w", ",", "H", ",", "M", ")", ",", "cron_settings", ")", ":", "if", "date_piece", "not", "in", "selection", ":", "return", "False", "return", "True", "return", "validate_date" ]
Convert a "crontab"-style set of parameters into a test function that will return True when the given datetime matches the parameters set forth in the crontab. For day-of-week, 0=Sunday and 6=Saturday. Acceptable inputs: * = every distinct value */n = run every "n" times, i.e. hours='*/4' == 0, 4, 8, 12, 16, 20 m-n = run every time m..n m,n = run on m and n
[ "Convert", "a", "crontab", "-", "style", "set", "of", "parameters", "into", "a", "test", "function", "that", "will", "return", "True", "when", "the", "given", "datetime", "matches", "the", "parameters", "set", "forth", "in", "the", "crontab", "." ]
python
train
silver-castle/mach9
mach9/config.py
https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/config.py#L193-L201
def load_environment_vars(self):
    """
    Looks for any MACH9_ prefixed environment variables and applies
    them to the configuration if present.
    """
    for k, v in os.environ.items():
        if k.startswith(MACH9_PREFIX):
            _, config_key = k.split(MACH9_PREFIX, 1)
            self[config_key] = v
[ "def", "load_environment_vars", "(", "self", ")", ":", "for", "k", ",", "v", "in", "os", ".", "environ", ".", "items", "(", ")", ":", "if", "k", ".", "startswith", "(", "MACH9_PREFIX", ")", ":", "_", ",", "config_key", "=", "k", ".", "split", "(", "MACH9_PREFIX", ",", "1", ")", "self", "[", "config_key", "]", "=", "v" ]
Looks for any MACH9_ prefixed environment variables and applies them to the configuration if present.
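The configuration key is whatever follows the prefix, courtesy of str.split with a maxsplit of 1. A minimal sketch of that string handling (the prefix value is an assumption; the config object itself is not needed):

MACH9_PREFIX = 'MACH9_'  # assumed value of the module constant

_, config_key = 'MACH9_REQUEST_TIMEOUT'.split(MACH9_PREFIX, 1)
print(config_key)  # -> 'REQUEST_TIMEOUT'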
[ "Looks", "for", "any", "MACH9_", "prefixed", "environment", "variables", "and", "applies", "them", "to", "the", "configuration", "if", "present", "." ]
python
train
exhuma/config_resolver
config_resolver/core.py
https://github.com/exhuma/config_resolver/blob/2614ae3d7a49e437954254846b2963ad249b418c/config_resolver/core.py#L222-L232
def get_xdg_home(self):
    # type: () -> str
    """
    Returns the value specified in the XDG_CONFIG_HOME environment
    variable or the appropriate default.
    """
    config_home = getenv('XDG_CONFIG_HOME', '')
    if config_home:
        self._log.debug('XDG_CONFIG_HOME is set to %r', config_home)
        return expanduser(join(config_home, self.group_name,
                               self.app_name))
    return expanduser('~/.config/%s/%s' % (self.group_name,
                                           self.app_name))
[ "def", "get_xdg_home", "(", "self", ")", ":", "# type: () -> str", "config_home", "=", "getenv", "(", "'XDG_CONFIG_HOME'", ",", "''", ")", "if", "config_home", ":", "self", ".", "_log", ".", "debug", "(", "'XDG_CONFIG_HOME is set to %r'", ",", "config_home", ")", "return", "expanduser", "(", "join", "(", "config_home", ",", "self", ".", "group_name", ",", "self", ".", "app_name", ")", ")", "return", "expanduser", "(", "'~/.config/%s/%s'", "%", "(", "self", ".", "group_name", ",", "self", ".", "app_name", ")", ")" ]
Returns the value specified in the XDG_CONFIG_HOME environment variable or the appropriate default.
[ "Returns", "the", "value", "specified", "in", "the", "XDG_CONFIG_HOME", "environment", "variable", "or", "the", "appropriate", "default", "." ]
python
valid
campaignmonitor/createsend-python
lib/createsend/person.py
https://github.com/campaignmonitor/createsend-python/blob/4bfe2fd5cb2fc9d8f12280b23569eea0a6c66426/lib/createsend/person.py#L24-L33
def add(self, client_id, email_address, name, access_level, password):
    """Adds a person to a client. Password is optional and if not
    supplied, an invitation will be emailed to the person"""
    body = {
        "EmailAddress": email_address,
        "Name": name,
        "AccessLevel": access_level,
        "Password": password}
    response = self._post("/clients/%s/people.json" % client_id,
                          json.dumps(body))
    return json_to_py(response)
[ "def", "add", "(", "self", ",", "client_id", ",", "email_address", ",", "name", ",", "access_level", ",", "password", ")", ":", "body", "=", "{", "\"EmailAddress\"", ":", "email_address", ",", "\"Name\"", ":", "name", ",", "\"AccessLevel\"", ":", "access_level", ",", "\"Password\"", ":", "password", "}", "response", "=", "self", ".", "_post", "(", "\"/clients/%s/people.json\"", "%", "client_id", ",", "json", ".", "dumps", "(", "body", ")", ")", "return", "json_to_py", "(", "response", ")" ]
Adds a person to a client. Password is optional and if not supplied, an invitation will be emailed to the person
[ "Adds", "a", "person", "to", "a", "client", ".", "Password", "is", "optional", "and", "if", "not", "supplied", "an", "invitation", "will", "be", "emailed", "to", "the", "person" ]
python
train
maljovec/topopy
topopy/MorseComplex.py
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseComplex.py#L263-L298
def get_partitions(self, persistence=None):
    """ Returns the partitioned data based on a specified persistence
        level.
        @ In, persistence, a floating point value specifying the
        size of the smallest feature we want to track.
        Default = None means consider all features.
        @ Out, a dictionary of lists where each key is an integer
        specifying the index of the maximum. Each entry will hold a
        list of indices specifying points that are associated to
        this maximum.
    """
    if persistence is None:
        persistence = self.persistence
    partitions = {}
    # TODO: Possibly cache at the critical persistence values,
    # previously caching was done at every query level, but that
    # does not make sense as the partitions will only change once
    # the next value in self.persistences is attained. Honestly,
    # this is probably not a necessary optimization that needs to
    # be made. Consider instead, Yarden's way of storing the points
    # such that merged arrays will be adjacent.
    for key, items in self.base_partitions.items():
        new_key = key
        while (
            self.merge_sequence[new_key][0] < persistence
            and self.merge_sequence[new_key][1] != new_key
        ):
            new_key = self.merge_sequence[new_key][1]
        if new_key not in partitions:
            partitions[new_key] = []
        partitions[new_key].extend(items)
    for key in partitions:
        partitions[key] = sorted(list(set(partitions[key])))
    return partitions
[ "def", "get_partitions", "(", "self", ",", "persistence", "=", "None", ")", ":", "if", "persistence", "is", "None", ":", "persistence", "=", "self", ".", "persistence", "partitions", "=", "{", "}", "# TODO: Possibly cache at the critical persistence values,", "# previously caching was done at every query level, but that", "# does not make sense as the partitions will only change once", "# the next value in self.persistences is attained. Honestly,", "# this is probably not a necessary optimization that needs to", "# be made. Consider instead, Yarden's way of storing the points", "# such that merged arrays will be adjacent.", "for", "key", ",", "items", "in", "self", ".", "base_partitions", ".", "items", "(", ")", ":", "new_key", "=", "key", "while", "(", "self", ".", "merge_sequence", "[", "new_key", "]", "[", "0", "]", "<", "persistence", "and", "self", ".", "merge_sequence", "[", "new_key", "]", "[", "1", "]", "!=", "new_key", ")", ":", "new_key", "=", "self", ".", "merge_sequence", "[", "new_key", "]", "[", "1", "]", "if", "new_key", "not", "in", "partitions", ":", "partitions", "[", "new_key", "]", "=", "[", "]", "partitions", "[", "new_key", "]", ".", "extend", "(", "items", ")", "for", "key", "in", "partitions", ":", "partitions", "[", "key", "]", "=", "sorted", "(", "list", "(", "set", "(", "partitions", "[", "key", "]", ")", ")", ")", "return", "partitions" ]
Returns the partitioned data based on a specified persistence level. @ In, persistence, a floating point value specifying the size of the smallest feature we want to track. Default = None means consider all features. @ Out, a dictionary of lists where each key is an integer specifying the index of the maximum. Each entry will hold a list of indices specifying points that are associated to this maximum.
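The heart of the method is the walk up merge_sequence: each maximum is followed to its merge parent until the merge persistence reaches the requested level or the key maps to itself. A self-contained sketch with a made-up merge sequence of (persistence, parent) tuples:

# Hypothetical data: maximum 0 merges into 1 at persistence 0.2,
# 1 merges into 2 at 0.5, and 2 is its own root.
merge_sequence = {0: (0.2, 1), 1: (0.5, 2), 2: (1.0, 2)}

def resolve(key, persistence):
    # Same loop as in get_partitions, applied to one key.
    while (merge_sequence[key][0] < persistence
           and merge_sequence[key][1] != key):
        key = merge_sequence[key][1]
    return key

print(resolve(0, 0.3))  # -> 1 (only the first merge is below 0.3)
print(resolve(0, 0.8))  # -> 2 (both merges are below 0.8)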
[ "Returns", "the", "partitioned", "data", "based", "on", "a", "specified", "persistence", "level", "." ]
python
train
python-gitlab/python-gitlab
gitlab/__init__.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/__init__.py#L192-L203
def auth(self):
    """Performs an authentication.

    Uses either the private token, or the email/password pair.

    The `user` attribute will hold a `gitlab.objects.CurrentUser`
    object on success.
    """
    if self.private_token or self.oauth_token:
        self._token_auth()
    else:
        self._credentials_auth()
[ "def", "auth", "(", "self", ")", ":", "if", "self", ".", "private_token", "or", "self", ".", "oauth_token", ":", "self", ".", "_token_auth", "(", ")", "else", ":", "self", ".", "_credentials_auth", "(", ")" ]
Performs an authentication. Uses either the private token, or the email/password pair. The `user` attribute will hold a `gitlab.objects.CurrentUser` object on success.
[ "Performs", "an", "authentication", "." ]
python
train
newville/wxmplot
examples/tifffile.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/examples/tifffile.py#L1447-L1452
def read_cz_lsm_info(fd, byte_order, dtype, count):
    """Read CZ_LSM_INFO tag from file and return as numpy.rec.array."""
    result = numpy.rec.fromfile(fd, CZ_LSM_INFO, 1,
                                byteorder=byte_order)[0]
    {50350412: '1.3', 67127628: '2.0'}[result.magic_number]  # validation
    return result
[ "def", "read_cz_lsm_info", "(", "fd", ",", "byte_order", ",", "dtype", ",", "count", ")", ":", "result", "=", "numpy", ".", "rec", ".", "fromfile", "(", "fd", ",", "CZ_LSM_INFO", ",", "1", ",", "byteorder", "=", "byte_order", ")", "[", "0", "]", "{", "50350412", ":", "'1.3'", ",", "67127628", ":", "'2.0'", "}", "[", "result", ".", "magic_number", "]", "# validation", "return", "result" ]
Read CZ_LSM_INFO tag from file and return as numpy.rec.array.
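The bare dict lookup marked "# validation" is an idiom: indexing with an unrecognized magic number raises KeyError, rejecting unsupported files without an explicit conditional. A minimal standalone sketch:

# Raises KeyError unless magic is one of the two known LSM versions.
magic = 50350412
version = {50350412: '1.3', 67127628: '2.0'}[magic]
print(version)  # -> '1.3'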
[ "Read", "CS_LSM_INFO", "tag", "from", "file", "and", "return", "as", "numpy", ".", "rec", ".", "array", "." ]
python
train
ONSdigital/sdc-rabbit
sdc/rabbit/consumers.py
https://github.com/ONSdigital/sdc-rabbit/blob/985adfdb09cf1b263a1f311438baeb42cbcb503a/sdc/rabbit/consumers.py#L318-L325
def stop_consuming(self):
    """Tell RabbitMQ that you would like to stop consuming by sending the
    Basic.Cancel RPC command.
    """
    if self._channel:
        logger.info('Sending a Basic.Cancel RPC command to RabbitMQ')
        self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
[ "def", "stop_consuming", "(", "self", ")", ":", "if", "self", ".", "_channel", ":", "logger", ".", "info", "(", "'Sending a Basic.Cancel RPC command to RabbitMQ'", ")", "self", ".", "_channel", ".", "basic_cancel", "(", "self", ".", "on_cancelok", ",", "self", ".", "_consumer_tag", ")" ]
Tell RabbitMQ that you would like to stop consuming by sending the Basic.Cancel RPC command.
[ "Tell", "RabbitMQ", "that", "you", "would", "like", "to", "stop", "consuming", "by", "sending", "the", "Basic", ".", "Cancel", "RPC", "command", "." ]
python
train
explosion/thinc
thinc/neural/_classes/rnn.py
https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/thinc/neural/_classes/rnn.py#L12-L14
def BiLSTM(nO, nI):
    """Create a bidirectional LSTM layer. Args: number out, number in"""
    return Bidirectional(LSTM(nO // 2, nI), LSTM(nO // 2, nI))
[ "def", "BiLSTM", "(", "nO", ",", "nI", ")", ":", "return", "Bidirectional", "(", "LSTM", "(", "nO", "//", "2", ",", "nI", ")", ",", "LSTM", "(", "nO", "//", "2", ",", "nI", ")", ")" ]
Create a bidirectional LSTM layer. Args: number out, number in
[ "Create", "a", "bidirectional", "LSTM", "layer", ".", "Args", ":", "number", "out", "number", "in" ]
python
train
digidotcom/python-wvalib
wva/cli.py
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/cli.py#L344-L348
def list(ctx):
    """List short name of all current subscriptions"""
    wva = get_wva(ctx)
    for subscription in wva.get_subscriptions():
        print(subscription.short_name)
[ "def", "list", "(", "ctx", ")", ":", "wva", "=", "get_wva", "(", "ctx", ")", "for", "subscription", "in", "wva", ".", "get_subscriptions", "(", ")", ":", "print", "(", "subscription", ".", "short_name", ")" ]
List short name of all current subscriptions
[ "List", "short", "name", "of", "all", "current", "subscriptions" ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/directory.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/directory.py#L2135-L2169
def del_ip_address(self, ip_address, sync=True):
    """
    delete ip address from this OS instance
    :param ip_address: the ip address to be deleted from this OS instance
    :param sync: If sync=True(default) synchronize with Ariane server.
    If sync=False, add the ipAddress object on list to be removed on next
    save().
    :return:
    """
    LOGGER.debug("OSInstance.del_ip_address")
    if not sync:
        self.ip_address_2_rm.append(ip_address)
    else:
        if ip_address.id is None:
            ip_address.sync()
        if self.id is not None and ip_address.id is not None:
            params = {
                'id': self.id,
                'ipAddressID': ip_address.id
            }
            args = {'http_operation': 'GET',
                    'operation_path': 'update/ipAddresses/delete',
                    'parameters': params}
            response = OSInstanceService.requester.call(args)
            if response.rc != 0:
                LOGGER.warning(
                    'OSInstance.del_ip_address - Problem while updating OS instance ' +
                    self.name + '. Reason: ' +
                    str(response.response_content) + '-' +
                    str(response.error_message) +
                    " (" + str(response.rc) + ")"
                )
            else:
                self.ip_address_ids.remove(ip_address.id)
                ip_address.ipa_os_instance_id = None
        else:
            LOGGER.warning(
                'OSInstance.del_ip_address - Problem while updating OS instance ' +
                self.name + '. Reason: IP Address ' +
                ip_address.ipAddress + ' id is None'
            )
[ "def", "del_ip_address", "(", "self", ",", "ip_address", ",", "sync", "=", "True", ")", ":", "LOGGER", ".", "debug", "(", "\"OSInstance.del_ip_address\"", ")", "if", "not", "sync", ":", "self", ".", "ip_address_2_rm", ".", "append", "(", "ip_address", ")", "else", ":", "if", "ip_address", ".", "id", "is", "None", ":", "ip_address", ".", "sync", "(", ")", "if", "self", ".", "id", "is", "not", "None", "and", "ip_address", ".", "id", "is", "not", "None", ":", "params", "=", "{", "'id'", ":", "self", ".", "id", ",", "'ipAddressID'", ":", "ip_address", ".", "id", "}", "args", "=", "{", "'http_operation'", ":", "'GET'", ",", "'operation_path'", ":", "'update/ipAddresses/delete'", ",", "'parameters'", ":", "params", "}", "response", "=", "OSInstanceService", ".", "requester", ".", "call", "(", "args", ")", "if", "response", ".", "rc", "!=", "0", ":", "LOGGER", ".", "warning", "(", "'OSInstance.del_ip_address - Problem while updating OS instance '", "+", "self", ".", "name", "+", "'. Reason: '", "+", "str", "(", "response", ".", "response_content", ")", "+", "'-'", "+", "str", "(", "response", ".", "error_message", ")", "+", "\" (\"", "+", "str", "(", "response", ".", "rc", ")", "+", "\")\"", ")", "else", ":", "self", ".", "ip_address_ids", ".", "remove", "(", "ip_address", ".", "id", ")", "ip_address", ".", "ipa_os_instance_id", "=", "None", "else", ":", "LOGGER", ".", "warning", "(", "'OSInstance.del_ip_address - Problem while updating OS instance '", "+", "self", ".", "name", "+", "'. Reason: IP Address '", "+", "ip_address", ".", "ipAddress", "+", "' id is None'", ")" ]
delete ip address from this OS instance :param ip_address: the ip address to be deleted from this OS instance :param sync: If sync=True(default) synchronize with Ariane server. If sync=False, add the ipAddress object on list to be removed on next save(). :return:
[ "delete", "ip", "address", "from", "this", "OS", "instance", ":", "param", "ip_address", ":", "the", "ip", "address", "to", "be", "deleted", "from", "this", "OS", "instance", ":", "param", "sync", ":", "If", "sync", "=", "True", "(", "default", ")", "synchronize", "with", "Ariane", "server", ".", "If", "sync", "=", "False", "add", "the", "ipAddress", "object", "on", "list", "to", "be", "removed", "on", "next", "save", "()", ".", ":", "return", ":" ]
python
train
python-odin/odinweb
odinweb/data_structures.py
https://github.com/python-odin/odinweb/blob/198424133584acc18cb41c8d18d91f803abc810f/odinweb/data_structures.py#L760-L787
def getlist(self, key, type_=None):
    # type: (Hashable, Callable) -> List[Any]
    """
    Return the list of items for a given key. If that key is not in the
    `MultiDict`, the return value will be an empty list. Just as `get`,
    `getlist` accepts a `type` parameter. All items will be converted
    with the callable defined there.

    :param key: The key to be looked up.
    :param type_: A callable that is used to cast the value in the
        :class:`MultiDict`. If a :exc:`ValueError` is raised by this
        callable the value will be removed from the list.
    :return: a :class:`list` of all the values for the key.
    """
    try:
        rv = dict.__getitem__(self, key)
    except KeyError:
        return []
    if type_ is None:
        return list(rv)
    result = []
    for item in rv:
        try:
            result.append(type_(item))
        except ValueError:
            pass
    return result
[ "def", "getlist", "(", "self", ",", "key", ",", "type_", "=", "None", ")", ":", "# type: (Hashable, Callable) -> List[Any]", "try", ":", "rv", "=", "dict", ".", "__getitem__", "(", "self", ",", "key", ")", "except", "KeyError", ":", "return", "[", "]", "if", "type_", "is", "None", ":", "return", "list", "(", "rv", ")", "result", "=", "[", "]", "for", "item", "in", "rv", ":", "try", ":", "result", ".", "append", "(", "type_", "(", "item", ")", ")", "except", "ValueError", ":", "pass", "return", "result" ]
Return the list of items for a given key. If that key is not in the `MultiDict`, the return value will be an empty list. Just as `get`, `getlist` accepts a `type` parameter. All items will be converted with the callable defined there. :param key: The key to be looked up. :param type_: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key.
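An illustrative call pattern (assuming an instance `md` of this MultiDict in which the key 'ids' holds the values ['1', '2', 'x']):

# Illustrative only: `md` is an assumed MultiDict instance.
md.getlist('ids')             # -> ['1', '2', 'x']
md.getlist('ids', type_=int)  # -> [1, 2]; 'x' fails the cast, so dropped
md.getlist('missing')         # -> []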
[ "Return", "the", "list", "of", "items", "for", "a", "given", "key", ".", "If", "that", "key", "is", "not", "in", "the", "MultiDict", "the", "return", "value", "will", "be", "an", "empty", "list", ".", "Just", "as", "get", "getlist", "accepts", "a", "type", "parameter", ".", "All", "items", "will", "be", "converted", "with", "the", "callable", "defined", "there", "." ]
python
train
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L1299-L1321
def create(self, *args, **kwargs):
    """
    Create an order

    Create a new order for a deal
    User needs to have access to the deal to create an order
    Each deal can have at most one order and error is returned when
    attempting to create more

    :calls: ``post /orders``
    :param tuple *args: (optional) Single object representing Order
        resource.
    :param dict **kwargs: (optional) Order attributes.
    :return: Dictionary that supports attribute-style access and
        represents newly created Order resource.
    :rtype: dict
    """
    if not args and not kwargs:
        raise Exception('attributes for Order are missing')

    attributes = args[0] if args else kwargs
    attributes = dict((k, v) for k, v in attributes.iteritems()
                      if k in self.OPTS_KEYS_TO_PERSIST)
    _, _, order = self.http_client.post("/orders", body=attributes)
    return order
[ "def", "create", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "args", "and", "not", "kwargs", ":", "raise", "Exception", "(", "'attributes for Order are missing'", ")", "attributes", "=", "args", "[", "0", "]", "if", "args", "else", "kwargs", "attributes", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "attributes", ".", "iteritems", "(", ")", "if", "k", "in", "self", ".", "OPTS_KEYS_TO_PERSIST", ")", "_", ",", "_", ",", "order", "=", "self", ".", "http_client", ".", "post", "(", "\"/orders\"", ",", "body", "=", "attributes", ")", "return", "order" ]
Create an order Create a new order for a deal User needs to have access to the deal to create an order Each deal can have at most one order and error is returned when attempting to create more :calls: ``post /orders`` :param tuple *args: (optional) Single object representing Order resource. :param dict **kwargs: (optional) Order attributes. :return: Dictionary that supports attribute-style access and represents newly created Order resource. :rtype: dict
[ "Create", "an", "order" ]
python
train
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L1045-L1072
def create_term(self, lemma, pos, morphofeat, tokens, id=None):
    """
    Create a new term and add it to the term layer
    @type lemma: string
    @param lemma: The lemma of the term
    @type pos: string
    @param pos: The pos tag (first letter) of the POS attribute
    @type morphofeat: string
    @param morphofeat: The morphofeat (full morphological features) of
        the term
    @type tokens: sequence of L{Cwf}
    @param tokens: the token(s) that this term describes
    @type id: string
    @param id: the id of the term, if not given an id tXXX will be
        created
    """
    if id is None:
        n = 1 if self.term_layer is None else len(self.term_layer.idx) + 1
        id = "t{n}".format(**locals())
    new_term = Cterm(type=self.type)
    new_term.set_id(id)
    new_term.set_lemma(lemma)
    new_term.set_pos(pos)
    new_term.set_morphofeat(morphofeat)
    new_span = Cspan()
    for token in tokens:
        new_span.add_target_id(token.get_id())
    new_term.set_span(new_span)
    self.add_term(new_term)
    return new_term
[ "def", "create_term", "(", "self", ",", "lemma", ",", "pos", ",", "morphofeat", ",", "tokens", ",", "id", "=", "None", ")", ":", "if", "id", "is", "None", ":", "n", "=", "1", "if", "self", ".", "term_layer", "is", "None", "else", "len", "(", "self", ".", "term_layer", ".", "idx", ")", "+", "1", "id", "=", "\"t{n}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "new_term", "=", "Cterm", "(", "type", "=", "self", ".", "type", ")", "new_term", ".", "set_id", "(", "id", ")", "new_term", ".", "set_lemma", "(", "lemma", ")", "new_term", ".", "set_pos", "(", "pos", ")", "new_term", ".", "set_morphofeat", "(", "morphofeat", ")", "new_span", "=", "Cspan", "(", ")", "for", "token", "in", "tokens", ":", "new_span", ".", "add_target_id", "(", "token", ".", "get_id", "(", ")", ")", "new_term", ".", "set_span", "(", "new_span", ")", "self", ".", "add_term", "(", "new_term", ")", "return", "new_term" ]
Create a new term and add it to the term layer @type lemma: string @param lemma: The lemma of the term @type pos: string @param pos: The pos tag (first letter) of the POS attribute @type morphofeat: string @param morphofeat: The morphofeat (full morphological features) of the term @type tokens: sequence of L{Cwf} @param tokens: the token(s) that this term describes @type id: string @param id: the id of the term, if not given an id tXXX will be created
[ "Create", "a", "new", "term", "and", "add", "it", "to", "the", "term", "layer" ]
python
train
gem/oq-engine
openquake/hmtk/seismicity/selector.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/selector.py#L92-L106
def _get_decimal_from_datetime(time):
    '''
    As the decimal time function requires inputs in the form of numpy
    arrays, we need to convert each value in the datetime object to a
    single numpy array
    '''
    # Get decimal seconds from seconds + microseconds
    temp_seconds = np.float(time.second) + (np.float(time.microsecond) /
                                            1.0E6)
    return decimal_time(np.array([time.year], dtype=int),
                        np.array([time.month], dtype=int),
                        np.array([time.day], dtype=int),
                        np.array([time.hour], dtype=int),
                        np.array([time.minute], dtype=int),
                        np.array([temp_seconds], dtype=int))
[ "def", "_get_decimal_from_datetime", "(", "time", ")", ":", "# Get decimal seconds from seconds + microseconds", "temp_seconds", "=", "np", ".", "float", "(", "time", ".", "second", ")", "+", "(", "np", ".", "float", "(", "time", ".", "microsecond", ")", "/", "1.0E6", ")", "return", "decimal_time", "(", "np", ".", "array", "(", "[", "time", ".", "year", "]", ",", "dtype", "=", "int", ")", ",", "np", ".", "array", "(", "[", "time", ".", "month", "]", ",", "dtype", "=", "int", ")", ",", "np", ".", "array", "(", "[", "time", ".", "day", "]", ",", "dtype", "=", "int", ")", ",", "np", ".", "array", "(", "[", "time", ".", "hour", "]", ",", "dtype", "=", "int", ")", ",", "np", ".", "array", "(", "[", "time", ".", "minute", "]", ",", "dtype", "=", "int", ")", ",", "np", ".", "array", "(", "[", "temp_seconds", "]", ",", "dtype", "=", "int", ")", ")" ]
As the decimal time function requires inputs in the form of numpy arrays, we need to convert each value in the datetime object to a single numpy array
[ "As", "the", "decimal", "time", "function", "requires", "inputs", "in", "the", "form", "of", "numpy", "arrays", "need", "to", "convert", "each", "value", "in", "the", "datetime", "object", "to", "a", "single", "numpy", "array" ]
python
train
saltstack/salt
salt/states/pkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L3329-L3349
def mod_watch(name, **kwargs):
    '''
    Install/reinstall a package based on a watch requisite

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being
        triggered.
    '''
    sfun = kwargs.pop('sfun', None)
    mapfun = {'purged': purged,
              'latest': latest,
              'removed': removed,
              'installed': installed}
    if sfun in mapfun:
        return mapfun[sfun](name, **kwargs)
    return {'name': name,
            'changes': {},
            'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
            'result': False}
[ "def", "mod_watch", "(", "name", ",", "*", "*", "kwargs", ")", ":", "sfun", "=", "kwargs", ".", "pop", "(", "'sfun'", ",", "None", ")", "mapfun", "=", "{", "'purged'", ":", "purged", ",", "'latest'", ":", "latest", ",", "'removed'", ":", "removed", ",", "'installed'", ":", "installed", "}", "if", "sfun", "in", "mapfun", ":", "return", "mapfun", "[", "sfun", "]", "(", "name", ",", "*", "*", "kwargs", ")", "return", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "'pkg.{0} does not work with the watch requisite'", ".", "format", "(", "sfun", ")", ",", "'result'", ":", "False", "}" ]
Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
[ "Install", "/", "reinstall", "a", "package", "based", "on", "a", "watch", "requisite" ]
python
train
dwavesystems/dwave_networkx
dwave_networkx/algorithms/coloring.py
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/algorithms/coloring.py#L339-L378
def is_cycle(G):
    """Determines whether the given graph is a cycle or circle graph.

    A cycle graph or circular graph is a graph that consists of a single
    cycle.

    https://en.wikipedia.org/wiki/Cycle_graph

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    is_cycle : bool
        True if the graph consists of a single cycle.

    """
    trailing, leading = next(iter(G.edges))
    start_node = trailing

    # travel around the graph, checking that each node has degree exactly two
    # also track how many nodes were visited
    n_visited = 1
    while leading != start_node:
        neighbors = G[leading]

        if len(neighbors) != 2:
            return False

        node1, node2 = neighbors

        if node1 == trailing:
            trailing, leading = leading, node2
        else:
            trailing, leading = leading, node1

        n_visited += 1

    # if we haven't visited all of the nodes, then it is not a connected cycle
    return n_visited == len(G)
[ "def", "is_cycle", "(", "G", ")", ":", "trailing", ",", "leading", "=", "next", "(", "iter", "(", "G", ".", "edges", ")", ")", "start_node", "=", "trailing", "# travel around the graph, checking that each node has degree exactly two", "# also track how many nodes were visited", "n_visited", "=", "1", "while", "leading", "!=", "start_node", ":", "neighbors", "=", "G", "[", "leading", "]", "if", "len", "(", "neighbors", ")", "!=", "2", ":", "return", "False", "node1", ",", "node2", "=", "neighbors", "if", "node1", "==", "trailing", ":", "trailing", ",", "leading", "=", "leading", ",", "node2", "else", ":", "trailing", ",", "leading", "=", "leading", ",", "node1", "n_visited", "+=", "1", "# if we haven't visited all of the nodes, then it is not a connected cycle", "return", "n_visited", "==", "len", "(", "G", ")" ]
Determines whether the given graph is a cycle or circle graph. A cycle graph or circular graph is a graph that consists of a single cycle. https://en.wikipedia.org/wiki/Cycle_graph Parameters ---------- G : NetworkX graph Returns ------- is_cycle : bool True if the graph consists of a single cycle.
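A quick check against standard NetworkX graphs (a sketch; is_cycle is a module-internal helper, so it is assumed to be in scope):

import networkx as nx

print(is_cycle(nx.cycle_graph(5)))  # -> True: C5 is a single cycle
print(is_cycle(nx.path_graph(5)))   # -> False: endpoints have degree 1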
[ "Determines", "whether", "the", "given", "graph", "is", "a", "cycle", "or", "circle", "graph", "." ]
python
train
ploneintranet/ploneintranet.workspace
src/ploneintranet/workspace/browser/roster.py
https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/browser/roster.py#L43-L82
def update_users(self, entries):
    """Update user properties on the roster"""
    ws = IWorkspace(self.context)
    members = ws.members

    # check user permissions against join policy
    join_policy = self.context.join_policy
    if (join_policy == "admin"
            and not checkPermission(
                "collective.workspace: Manage roster", self.context)):
        raise Unauthorized("You are not allowed to add users here")

    for entry in entries:
        id = entry.get('id')
        is_member = bool(entry.get('member'))
        is_admin = bool(entry.get('admin'))

        # Existing members
        if id in members:
            member = members[id]
            if not is_member:
                if checkPermission(
                        "ploneintranet.workspace: Manage workspace",
                        self.context):
                    ws.membership_factory(ws, member).remove_from_team()
                else:
                    raise Unauthorized(
                        "Only team managers can remove members")
            elif not is_admin:
                ws.membership_factory(ws, member).groups -= {'Admins'}
            else:
                ws.membership_factory(ws, member).groups |= {'Admins'}
        # New members
        elif id not in members and (is_member or is_admin):
            groups = set()
            if is_admin:
                groups.add('Admins')
            ws.add_to_team(user=id, groups=groups)
[ "def", "update_users", "(", "self", ",", "entries", ")", ":", "ws", "=", "IWorkspace", "(", "self", ".", "context", ")", "members", "=", "ws", ".", "members", "# check user permissions against join policy", "join_policy", "=", "self", ".", "context", ".", "join_policy", "if", "(", "join_policy", "==", "\"admin\"", "and", "not", "checkPermission", "(", "\"collective.workspace: Manage roster\"", ",", "self", ".", "context", ")", ")", ":", "raise", "Unauthorized", "(", "\"You are not allowed to add users here\"", ")", "for", "entry", "in", "entries", ":", "id", "=", "entry", ".", "get", "(", "'id'", ")", "is_member", "=", "bool", "(", "entry", ".", "get", "(", "'member'", ")", ")", "is_admin", "=", "bool", "(", "entry", ".", "get", "(", "'admin'", ")", ")", "# Existing members", "if", "id", "in", "members", ":", "member", "=", "members", "[", "id", "]", "if", "not", "is_member", ":", "if", "checkPermission", "(", "\"ploneintranet.workspace: Manage workspace\"", ",", "self", ".", "context", ")", ":", "ws", ".", "membership_factory", "(", "ws", ",", "member", ")", ".", "remove_from_team", "(", ")", "else", ":", "raise", "Unauthorized", "(", "\"Only team managers can remove members\"", ")", "elif", "not", "is_admin", ":", "ws", ".", "membership_factory", "(", "ws", ",", "member", ")", ".", "groups", "-=", "{", "'Admins'", "}", "else", ":", "ws", ".", "membership_factory", "(", "ws", ",", "member", ")", ".", "groups", "|=", "{", "'Admins'", "}", "# New members", "elif", "id", "not", "in", "members", "and", "(", "is_member", "or", "is_admin", ")", ":", "groups", "=", "set", "(", ")", "if", "is_admin", ":", "groups", ".", "add", "(", "'Admins'", ")", "ws", ".", "add_to_team", "(", "user", "=", "id", ",", "groups", "=", "groups", ")" ]
Update user properties on the roster
[ "Update", "user", "properties", "on", "the", "roster" ]
python
train
mozilla/treeherder
treeherder/webapp/api/job_log_url.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/webapp/api/job_log_url.py#L23-L28
def retrieve(self, request, project, pk=None):
    """
    Returns a job_log_url object given its ID
    """
    log = JobLog.objects.get(id=pk)
    return Response(self._log_as_dict(log))
[ "def", "retrieve", "(", "self", ",", "request", ",", "project", ",", "pk", "=", "None", ")", ":", "log", "=", "JobLog", ".", "objects", ".", "get", "(", "id", "=", "pk", ")", "return", "Response", "(", "self", ".", "_log_as_dict", "(", "log", ")", ")" ]
Returns a job_log_url object given its ID
[ "Returns", "a", "job_log_url", "object", "given", "its", "ID" ]
python
train
tensorflow/tensorboard
tensorboard/compat/tensorflow_stub/io/gfile.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/compat/tensorflow_stub/io/gfile.py#L61-L71
def get_filesystem(filename):
    """Return the registered filesystem for the given file."""
    filename = compat.as_str_any(filename)
    prefix = ""
    index = filename.find("://")
    if index >= 0:
        prefix = filename[:index]
    fs = _REGISTERED_FILESYSTEMS.get(prefix, None)
    if fs is None:
        raise ValueError("No recognized filesystem for prefix %s" % prefix)
    return fs
[ "def", "get_filesystem", "(", "filename", ")", ":", "filename", "=", "compat", ".", "as_str_any", "(", "filename", ")", "prefix", "=", "\"\"", "index", "=", "filename", ".", "find", "(", "\"://\"", ")", "if", "index", ">=", "0", ":", "prefix", "=", "filename", "[", ":", "index", "]", "fs", "=", "_REGISTERED_FILESYSTEMS", ".", "get", "(", "prefix", ",", "None", ")", "if", "fs", "is", "None", ":", "raise", "ValueError", "(", "\"No recognized filesystem for prefix %s\"", "%", "prefix", ")", "return", "fs" ]
Return the registered filesystem for the given file.
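The prefix is whatever precedes '://', and plain local paths fall through with an empty prefix (presumably how the local filesystem is registered). A sketch of just the parsing step, extracted for illustration:

def scheme_prefix(filename):
    # Mirrors the parsing in get_filesystem: text before '://', if any.
    index = filename.find("://")
    return filename[:index] if index >= 0 else ""

print(scheme_prefix("gs://bucket/run1/events"))  # -> 'gs'
print(scheme_prefix("/tmp/logdir/events"))       # -> ''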
[ "Return", "the", "registered", "filesystem", "for", "the", "given", "file", "." ]
python
train
satellogic/telluric
telluric/georaster.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L286-L329
def _fill_pixels(one, other):
    # type: (_Raster, _Raster) -> _Raster
    """Merges two single band rasters with the same band by filling the
    pixels according to depth.
    """
    assert len(one.band_names) == len(other.band_names) == 1, "Rasters are not single band"

    # We raise an error if the intersection is empty.
    # Other options include returning an "empty" raster or just None.
    # The problem with the former is that GeoRaster2 expects a 2D or 3D
    # numpy array, so there is no obvious way to signal that this raster
    # has no bands. Also, returning a (1, 1, 0) numpy array is useless
    # for future concatenation, so the expected shape should be used
    # instead. The problem with the latter is that it breaks concatenation
    # anyway and requires special attention. Suggestions welcome.
    if one.band_names != other.band_names:
        raise ValueError("rasters have no bands in common, use another merge strategy")

    new_image = one.image.copy()
    other_image = other.image

    # The values that I want to mask are the ones that:
    # * Were already masked in the other array, _or_
    # * Were already unmasked in the one array, so I don't overwrite them
    other_values_mask = (np.ma.getmaskarray(other_image)[0]
                         | (~np.ma.getmaskarray(one.image)[0]))

    # Reshape the mask to fit the future array
    other_values_mask = other_values_mask[None, ...]

    # Overwrite the values that I don't want to mask
    new_image[~other_values_mask] = other_image[~other_values_mask]

    # In other words, the values that I wanted to write are the ones that:
    # * Were already masked in the one array, _and_
    # * Were not masked in the other array
    # The reason for using the inverted form is to retain the semantics
    # of "masked=True" that apply for masked arrays. The same logic
    # could be written, using the De Morgan's laws, as
    # other_values_mask = (one.image.mask[0] & (~other_image.mask[0])
    # other_values_mask = other_values_mask[None, ...]
    # new_image[other_values_mask] = other_image[other_values_mask]
    # but here the word "mask" does not mean the same as in masked arrays.
    return _Raster(image=new_image, band_names=one.band_names)
[ "def", "_fill_pixels", "(", "one", ",", "other", ")", ":", "# type: (_Raster, _Raster) -> _Raster", "assert", "len", "(", "one", ".", "band_names", ")", "==", "len", "(", "other", ".", "band_names", ")", "==", "1", ",", "\"Rasters are not single band\"", "# We raise an error in the intersection is empty.", "# Other options include returning an \"empty\" raster or just None.", "# The problem with the former is that GeoRaster2 expects a 2D or 3D", "# numpy array, so there is no obvious way to signal that this raster", "# has no bands. Also, returning a (1, 1, 0) numpy array is useless", "# for future concatenation, so the expected shape should be used", "# instead. The problem with the latter is that it breaks concatenation", "# anyway and requires special attention. Suggestions welcome.", "if", "one", ".", "band_names", "!=", "other", ".", "band_names", ":", "raise", "ValueError", "(", "\"rasters have no bands in common, use another merge strategy\"", ")", "new_image", "=", "one", ".", "image", ".", "copy", "(", ")", "other_image", "=", "other", ".", "image", "# The values that I want to mask are the ones that:", "# * Were already masked in the other array, _or_", "# * Were already unmasked in the one array, so I don't overwrite them", "other_values_mask", "=", "(", "np", ".", "ma", ".", "getmaskarray", "(", "other_image", ")", "[", "0", "]", "|", "(", "~", "np", ".", "ma", ".", "getmaskarray", "(", "one", ".", "image", ")", "[", "0", "]", ")", ")", "# Reshape the mask to fit the future array", "other_values_mask", "=", "other_values_mask", "[", "None", ",", "...", "]", "# Overwrite the values that I don't want to mask", "new_image", "[", "~", "other_values_mask", "]", "=", "other_image", "[", "~", "other_values_mask", "]", "# In other words, the values that I wanted to write are the ones that:", "# * Were already masked in the one array, _and_", "# * Were not masked in the other array", "# The reason for using the inverted form is to retain the semantics", "# of \"masked=True\" that apply for masked arrays. The same logic", "# could be written, using the De Morgan's laws, as", "# other_values_mask = (one.image.mask[0] & (~other_image.mask[0])", "# other_values_mask = other_values_mask[None, ...]", "# new_image[other_values_mask] = other_image[other_values_mask]", "# but here the word \"mask\" does not mean the same as in masked arrays.", "return", "_Raster", "(", "image", "=", "new_image", ",", "band_names", "=", "one", ".", "band_names", ")" ]
Merges two single band rasters with the same band by filling the pixels according to depth.
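The mask algebra can be exercised in isolation: a pixel is copied from other only where one is masked and other is not. A self-contained numpy sketch of that rule (2D arrays, band axis omitted for brevity):

import numpy as np

one = np.ma.masked_array([[1, 2, 3]], mask=[[False, True, True]])
other = np.ma.masked_array([[9, 9, 9]], mask=[[False, False, True]])

# Keep 'one' where it is valid, or where 'other' has nothing to offer.
keep = np.ma.getmaskarray(other) | ~np.ma.getmaskarray(one)
merged = one.copy()
merged[~keep] = other[~keep]
print(merged)  # -> [[1 9 --]]: the gap is filled, the last pixel stays masked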
[ "Merges", "two", "single", "band", "rasters", "with", "the", "same", "band", "by", "filling", "the", "pixels", "according", "to", "depth", "." ]
python
train
dwavesystems/dimod
dimod/binary_quadratic_model.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L826-L890
def scale(self, scalar, ignored_variables=None, ignored_interactions=None,
          ignore_offset=False):
    """Multiply by the specified scalar all the biases and offset of a
    binary quadratic model.

    Args:
        scalar (number):
            Value by which to scale the energy range of the binary
            quadratic model.

        ignored_variables (iterable, optional):
            Biases associated with these variables are not scaled.

        ignored_interactions (iterable[tuple], optional):
            As an iterable of 2-tuples. Biases associated with these
            interactions are not scaled.

        ignore_offset (bool, default=False):
            If True, the offset is not scaled.

    Examples:
        This example creates a binary quadratic model and then scales
        it to half the original energy range.

        >>> import dimod
        ...
        >>> bqm = dimod.BinaryQuadraticModel({'a': -2.0, 'b': 2.0},
        ...                                  {('a', 'b'): -1.0}, 1.0,
        ...                                  dimod.SPIN)
        >>> bqm.scale(0.5)
        >>> bqm.linear['a']
        -1.0
        >>> bqm.quadratic[('a', 'b')]
        -0.5
        >>> bqm.offset
        0.5

    """
    if ignored_variables is None:
        ignored_variables = set()
    elif not isinstance(ignored_variables, abc.Container):
        ignored_variables = set(ignored_variables)

    if ignored_interactions is None:
        ignored_interactions = set()
    elif not isinstance(ignored_interactions, abc.Container):
        ignored_interactions = set(ignored_interactions)

    linear = self.linear
    for v in linear:
        if v in ignored_variables:
            continue
        linear[v] *= scalar

    quadratic = self.quadratic
    for u, v in quadratic:
        if (u, v) in ignored_interactions or (v, u) in ignored_interactions:
            continue
        quadratic[(u, v)] *= scalar

    if not ignore_offset:
        self.offset *= scalar

    try:
        self._counterpart.scale(scalar,
                                ignored_variables=ignored_variables,
                                ignored_interactions=ignored_interactions)
    except AttributeError:
        pass
[ "def", "scale", "(", "self", ",", "scalar", ",", "ignored_variables", "=", "None", ",", "ignored_interactions", "=", "None", ",", "ignore_offset", "=", "False", ")", ":", "if", "ignored_variables", "is", "None", ":", "ignored_variables", "=", "set", "(", ")", "elif", "not", "isinstance", "(", "ignored_variables", ",", "abc", ".", "Container", ")", ":", "ignored_variables", "=", "set", "(", "ignored_variables", ")", "if", "ignored_interactions", "is", "None", ":", "ignored_interactions", "=", "set", "(", ")", "elif", "not", "isinstance", "(", "ignored_interactions", ",", "abc", ".", "Container", ")", ":", "ignored_interactions", "=", "set", "(", "ignored_interactions", ")", "linear", "=", "self", ".", "linear", "for", "v", "in", "linear", ":", "if", "v", "in", "ignored_variables", ":", "continue", "linear", "[", "v", "]", "*=", "scalar", "quadratic", "=", "self", ".", "quadratic", "for", "u", ",", "v", "in", "quadratic", ":", "if", "(", "u", ",", "v", ")", "in", "ignored_interactions", "or", "(", "v", ",", "u", ")", "in", "ignored_interactions", ":", "continue", "quadratic", "[", "(", "u", ",", "v", ")", "]", "*=", "scalar", "if", "not", "ignore_offset", ":", "self", ".", "offset", "*=", "scalar", "try", ":", "self", ".", "_counterpart", ".", "scale", "(", "scalar", ",", "ignored_variables", "=", "ignored_variables", ",", "ignored_interactions", "=", "ignored_interactions", ")", "except", "AttributeError", ":", "pass" ]
Multiply by the specified scalar all the biases and offset of a binary quadratic model. Args: scalar (number): Value by which to scale the energy range of the binary quadratic model. ignored_variables (iterable, optional): Biases associated with these variables are not scaled. ignored_interactions (iterable[tuple], optional): As an iterable of 2-tuples. Biases associated with these interactions are not scaled. ignore_offset (bool, default=False): If True, the offset is not scaled. Examples: This example creates a binary quadratic model and then scales it to half the original energy range. >>> import dimod ... >>> bqm = dimod.BinaryQuadraticModel({'a': -2.0, 'b': 2.0}, {('a', 'b'): -1.0}, 1.0, dimod.SPIN) >>> bqm.scale(0.5) >>> bqm.linear['a'] -1.0 >>> bqm.quadratic[('a', 'b')] -0.5 >>> bqm.offset 0.5
[ "Multiply", "by", "the", "specified", "scalar", "all", "the", "biases", "and", "offset", "of", "a", "binary", "quadratic", "model", "." ]
python
train
pyrapt/rapt
rapt/treebrd/attributes.py
https://github.com/pyrapt/rapt/blob/0193a07aafff83a887fdc9e5e0f25eafa5b1b205/rapt/treebrd/attributes.py#L156-L181
def rename(self, names, prefix): """ Rename the Attributes' names, prefixes, or both. If names or prefix evaluates to None, the old version is used. Resulting names must be unambiguous. :param names: A list of new names for each attribute or an empty list. :param prefix: A new prefix for the name or None """ if names: if len(names) != len(self._contents): raise InputError('Attribute count mismatch.') if self.has_duplicates(names): raise InputError('Attributes are ambiguous.') else: # If the attributes are not renamed, but the relation / prefix is, # there is a risk of creating two or more attributes with the # same name and prefix. if prefix and self.has_duplicates(self.names): raise AttributeReferenceError('Attributes are ambiguous.') replacement = [] for old, name in itertools.zip_longest(self._contents, names): new_name = name or old.name new_prefix = prefix or old.prefix replacement.append(Attribute(new_name, new_prefix)) self._contents = replacement
[ "def", "rename", "(", "self", ",", "names", ",", "prefix", ")", ":", "if", "names", ":", "if", "len", "(", "names", ")", "!=", "len", "(", "self", ".", "_contents", ")", ":", "raise", "InputError", "(", "'Attribute count mismatch.'", ")", "if", "self", ".", "has_duplicates", "(", "names", ")", ":", "raise", "InputError", "(", "'Attributes are ambiguous.'", ")", "else", ":", "# If the attributes are not renamed, but the relation / prefix is,", "# there is a risk of creating two or more attributes with the", "# same name and prefix.", "if", "prefix", "and", "self", ".", "has_duplicates", "(", "self", ".", "names", ")", ":", "raise", "AttributeReferenceError", "(", "'Attributes are ambiguous.'", ")", "replacement", "=", "[", "]", "for", "old", ",", "name", "in", "itertools", ".", "zip_longest", "(", "self", ".", "_contents", ",", "names", ")", ":", "new_name", "=", "name", "or", "old", ".", "name", "new_prefix", "=", "prefix", "or", "old", ".", "prefix", "replacement", ".", "append", "(", "Attribute", "(", "new_name", ",", "new_prefix", ")", ")", "self", ".", "_contents", "=", "replacement" ]
Rename the Attributes' names, prefixes, or both. If names or prefix evaluates to None, the old version is used. Resulting names must be unambiguous. :param names: A list of new names for each attribute or an empty list. :param prefix: A new prefix for the name or None
[ "Rename", "the", "Attributes", "names", "prefixes", "or", "both", ".", "If", "names", "or", "prefix", "evaluates", "to", "None", "the", "old", "version", "is", "used", ".", "Resulting", "names", "must", "be", "unambiguous", ".", ":", "param", "names", ":", "A", "list", "of", "new", "names", "for", "each", "attribute", "or", "an", "empty", "list", ".", ":", "param", "prefix", ":", "A", "new", "prefix", "for", "the", "name", "or", "None" ]
python
train
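The fallback logic in the loop relies on itertools.zip_longest padding a short names list with None. A standalone trace of that pattern, with a namedtuple standing in for the library's Attribute class:

import itertools
from collections import namedtuple

Attribute = namedtuple('Attribute', ['name', 'prefix'])
old = [Attribute('id', 'q'), Attribute('name', 'q')]
names = ['key']   # shorter than `old`; missing entries become None
prefix = 'r'

replacement = [Attribute(name or o.name, prefix or o.prefix)
               for o, name in itertools.zip_longest(old, names)]
print(replacement)  # [Attribute(name='key', prefix='r'), Attribute(name='name', prefix='r')]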
hyperledger/indy-node
indy_node/server/upgrader.py
https://github.com/hyperledger/indy-node/blob/8fabd364eaf7d940a56df2911d9215b1e512a2de/indy_node/server/upgrader.py#L363-L399
def _cancelScheduledUpgrade(self, justification=None) -> None: """ Cancels scheduled upgrade :param justification: optional reason why the upgrade is being cancelled """ if self.scheduledAction: why_prefix = ": " why = justification if justification is None: why_prefix = ", " why = "cancellation reason not specified" ev_data = self.scheduledAction logger.info("Cancelling upgrade {}" " of node {}" " of package {}" " to version {}" " scheduled on {}" "{}{}" .format(ev_data.upgrade_id, self.nodeName, ev_data.pkg_name, ev_data.version, ev_data.when, why_prefix, why)) self._unscheduleAction() self._actionLog.append_cancelled(ev_data) self._notifier.sendMessageUponPoolUpgradeCancel( "Upgrade of package {} on node '{}' to version {} " "has been cancelled due to {}" .format(ev_data.pkg_name, self.nodeName, ev_data.version, why))
[ "def", "_cancelScheduledUpgrade", "(", "self", ",", "justification", "=", "None", ")", "->", "None", ":", "if", "self", ".", "scheduledAction", ":", "why_prefix", "=", "\": \"", "why", "=", "justification", "if", "justification", "is", "None", ":", "why_prefix", "=", "\", \"", "why", "=", "\"cancellation reason not specified\"", "ev_data", "=", "self", ".", "scheduledAction", "logger", ".", "info", "(", "\"Cancelling upgrade {}\"", "\" of node {}\"", "\" of package {}\"", "\" to version {}\"", "\" scheduled on {}\"", "\"{}{}\"", ".", "format", "(", "ev_data", ".", "upgrade_id", ",", "self", ".", "nodeName", ",", "ev_data", ".", "pkg_name", ",", "ev_data", ".", "version", ",", "ev_data", ".", "when", ",", "why_prefix", ",", "why", ")", ")", "self", ".", "_unscheduleAction", "(", ")", "self", ".", "_actionLog", ".", "append_cancelled", "(", "ev_data", ")", "self", ".", "_notifier", ".", "sendMessageUponPoolUpgradeCancel", "(", "\"Upgrade of package {} on node '{}' to version {} \"", "\"has been cancelled due to {}\"", ".", "format", "(", "ev_data", ".", "pkg_name", ",", "self", ".", "nodeName", ",", "ev_data", ".", "version", ",", "why", ")", ")" ]
Cancels scheduled upgrade :param justification: optional reason why the upgrade is being cancelled
[ "Cancels", "scheduled", "upgrade" ]
python
train
WojciechMula/canvas2svg
canvasvg.py
https://github.com/WojciechMula/canvas2svg/blob/c05d73d88499e5c565386a1765f79d9417a14dac/canvasvg.py#L318-L331
def SVGdocument(): "Create default SVG document" import xml.dom.minidom implementation = xml.dom.minidom.getDOMImplementation() doctype = implementation.createDocumentType( "svg", "-//W3C//DTD SVG 1.1//EN", "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" ) document = implementation.createDocument(None, "svg", doctype) document.documentElement.setAttribute( 'xmlns', 'http://www.w3.org/2000/svg' ) return document
[ "def", "SVGdocument", "(", ")", ":", "import", "xml", ".", "dom", ".", "minidom", "implementation", "=", "xml", ".", "dom", ".", "minidom", ".", "getDOMImplementation", "(", ")", "doctype", "=", "implementation", ".", "createDocumentType", "(", "\"svg\"", ",", "\"-//W3C//DTD SVG 1.1//EN\"", ",", "\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\"", ")", "document", "=", "implementation", ".", "createDocument", "(", "None", ",", "\"svg\"", ",", "doctype", ")", "document", ".", "documentElement", ".", "setAttribute", "(", "'xmlns'", ",", "'http://www.w3.org/2000/svg'", ")", "return", "document" ]
Create default SVG document
[ "Create", "default", "SVG", "document" ]
python
train
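SVGdocument is fully self-contained, so a quick usage check is easy (assuming the canvasvg module is importable):

from canvasvg import SVGdocument

document = SVGdocument()
print(document.toxml())  # the DOCTYPE plus an empty <svg xmlns=.../> root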
sjkingo/virtualenv-api
virtualenvapi/manage.py
https://github.com/sjkingo/virtualenv-api/blob/146a181e540ae2ae89c2542497dea0cedbc78839/virtualenvapi/manage.py#L166-L170
def _write_to_error(self, s, truncate=False): """Writes the given output to the error file, appending unless `truncate` is True.""" # if truncate is True, set write mode to truncate with open(self._errorfile, 'w' if truncate else 'a') as fp: fp.writelines((to_text(s)), )
[ "def", "_write_to_error", "(", "self", ",", "s", ",", "truncate", "=", "False", ")", ":", "# if truncate is True, set write mode to truncate", "with", "open", "(", "self", ".", "_errorfile", ",", "'w'", "if", "truncate", "else", "'a'", ")", "as", "fp", ":", "fp", ".", "writelines", "(", "(", "to_text", "(", "s", ")", ")", ",", ")" ]
Writes the given output to the error file, appending unless `truncate` is True.
[ "Writes", "the", "given", "output", "to", "the", "error", "file", "appending", "unless", "truncate", "is", "True", "." ]
python
train
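The mode-selection idiom is the useful part: a single open() call covers both append and truncate. A minimal standalone version (using write, which is the more idiomatic call than writelines for a single string):

def write_to_file(path, text, truncate=False):
    # 'w' empties the file first; 'a' appends to the existing contents
    with open(path, 'w' if truncate else 'a') as fp:
        fp.write(text)

write_to_file('/tmp/demo.log', 'first line\n')
write_to_file('/tmp/demo.log', 'start over\n', truncate=True)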
vintasoftware/django-role-permissions
rolepermissions/permissions.py
https://github.com/vintasoftware/django-role-permissions/blob/28924361e689e994e0c3575e18104a1a5abd8de6/rolepermissions/permissions.py#L73-L91
def grant_permission(user, permission_name): """ Grant a user a specified permission. Permissions are only granted if they are in the scope any of the user's roles. If the permission is out of scope, a RolePermissionScopeException is raised. """ roles = get_user_roles(user) for role in roles: if permission_name in role.permission_names_list(): permission = get_permission(permission_name) user.user_permissions.add(permission) return raise RolePermissionScopeException( "This permission isn't in the scope of " "any of this user's roles.")
[ "def", "grant_permission", "(", "user", ",", "permission_name", ")", ":", "roles", "=", "get_user_roles", "(", "user", ")", "for", "role", "in", "roles", ":", "if", "permission_name", "in", "role", ".", "permission_names_list", "(", ")", ":", "permission", "=", "get_permission", "(", "permission_name", ")", "user", ".", "user_permissions", ".", "add", "(", "permission", ")", "return", "raise", "RolePermissionScopeException", "(", "\"This permission isn't in the scope of \"", "\"any of this user's roles.\"", ")" ]
Grant a user a specified permission. Permissions are only granted if they are in the scope any of the user's roles. If the permission is out of scope, a RolePermissionScopeException is raised.
[ "Grant", "a", "user", "a", "specified", "permission", "." ]
python
train
fred49/argtoolbox
argtoolbox/argtoolbox.py
https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L228-L242
def get_parser(self, **kwargs): """This method will create and return a new parser with prog_name, description, and a config file argument. """ self.parser = argparse.ArgumentParser(prog=self.prog_name, description=self._desc, add_help=False, **kwargs) # help is removed because parser.parse_known_args() shows help, # often partial help. The help action will be added during # the reloading step for parser.parse_args() if self.use_config_file: self.parser.add_argument('--config-file', action="store", help="Other configuration file.") return self.parser
[ "def", "get_parser", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "self", ".", "prog_name", ",", "description", "=", "self", ".", "_desc", ",", "add_help", "=", "False", ",", "*", "*", "kwargs", ")", "# help is removed because parser.parse_known_args() show help,", "# often partial help. help action will be added during", "# reloading step for parser.parse_args()", "if", "self", ".", "use_config_file", ":", "self", ".", "parser", ".", "add_argument", "(", "'--config-file'", ",", "action", "=", "\"store\"", ",", "help", "=", "\"Other configuration file.\"", ")", "return", "self", ".", "parser" ]
This method will create and return a new parser with prog_name, description, and a config file argument.
[ "This", "method", "will", "create", "and", "return", "a", "new", "parser", "with", "prog_name", "description", "and", "a", "config", "file", "argument", "." ]
python
train
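add_help=False matters because this parser is built for a first pass with parse_known_args(), which must not print partial help before the config file has been read. A sketch of that two-phase pattern in plain argparse (option names here are illustrative, not argtoolbox's API):

import argparse

pre = argparse.ArgumentParser(add_help=False)
pre.add_argument('--config-file', action='store')
known, remaining = pre.parse_known_args(['--config-file', 'app.cfg', '-v'])
# ... load defaults from known.config_file before the real parse ...

full = argparse.ArgumentParser(parents=[pre])  # help enabled this time
full.add_argument('-v', '--verbose', action='store_true')
args = full.parse_args(['--config-file', 'app.cfg', '-v'])
print(args.config_file, args.verbose)  # app.cfg True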
telminov/sw-django-utils
djutils/date_utils.py
https://github.com/telminov/sw-django-utils/blob/43b8491c87a5dd8fce145834c00198f4de14ceb9/djutils/date_utils.py#L48-L57
def random_date(dt_from, dt_to): """ This function will return a random datetime between two datetime objects. :param dt_from: :param dt_to: """ delta = dt_to - dt_from int_delta = (delta.days * 24 * 60 * 60) + delta.seconds random_second = randrange(int_delta) return dt_from + datetime.timedelta(seconds=random_second)
[ "def", "random_date", "(", "dt_from", ",", "dt_to", ")", ":", "delta", "=", "dt_to", "-", "dt_from", "int_delta", "=", "(", "delta", ".", "days", "*", "24", "*", "60", "*", "60", ")", "+", "delta", ".", "seconds", "random_second", "=", "randrange", "(", "int_delta", ")", "return", "dt_from", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "random_second", ")" ]
This function will return a random datetime between two datetime objects. :param dt_from: :param dt_to:
[ "This", "function", "will", "return", "a", "random", "datetime", "between", "two", "datetime", "objects", ".", ":", "param", "start", ":", ":", "param", "end", ":" ]
python
train
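The same logic as a self-contained run, adding the imports the module pulls in at top level (note that randrange raises ValueError if the two datetimes are equal):

import datetime
from random import randrange

dt_from = datetime.datetime(2024, 1, 1)
dt_to = datetime.datetime(2024, 1, 31)
delta = dt_to - dt_from
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
print(dt_from + datetime.timedelta(seconds=randrange(int_delta)))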
tensorforce/tensorforce
tensorforce/execution/base_runner.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/execution/base_runner.py#L53-L67
def reset(self, history=None): """ Resets the Runner's internal stats counters. If history is empty, use default values in history.get(). Args: history (dict): A dictionary containing an already run experiment's results. Keys should be: episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times) """ if not history: history = dict() self.episode_rewards = history.get("episode_rewards", list()) self.episode_timesteps = history.get("episode_timesteps", list()) self.episode_times = history.get("episode_times", list())
[ "def", "reset", "(", "self", ",", "history", "=", "None", ")", ":", "if", "not", "history", ":", "history", "=", "dict", "(", ")", "self", ".", "episode_rewards", "=", "history", ".", "get", "(", "\"episode_rewards\"", ",", "list", "(", ")", ")", "self", ".", "episode_timesteps", "=", "history", ".", "get", "(", "\"episode_timesteps\"", ",", "list", "(", ")", ")", "self", ".", "episode_times", "=", "history", ".", "get", "(", "\"episode_times\"", ",", "list", "(", ")", ")" ]
Resets the Runner's internal stats counters. If history is empty, use default values in history.get(). Args: history (dict): A dictionary containing an already run experiment's results. Keys should be: episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)
[ "Resets", "the", "Runner", "s", "internal", "stats", "counters", ".", "If", "history", "is", "empty", "use", "default", "values", "in", "history", ".", "get", "()", "." ]
python
valid
michael-lazar/rtv
rtv/page.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/page.py#L773-L846
def _draw_content(self): """ Loop through submissions and fill up the content page. """ n_rows, n_cols = self.term.stdscr.getmaxyx() window = self.term.stdscr.derwin(n_rows - self._row - 1, n_cols, self._row, 0) window.erase() win_n_rows, win_n_cols = window.getmaxyx() self._subwindows = [] page_index, cursor_index, inverted = self.nav.position step = self.nav.step # If not inverted, align the first submission with the top and draw # downwards. If inverted, align the first submission with the bottom # and draw upwards. cancel_inverted = True current_row = (win_n_rows - 1) if inverted else 0 available_rows = win_n_rows top_item_height = None if inverted else self.nav.top_item_height for data in self.content.iterate(page_index, step, win_n_cols - 2): subwin_n_rows = min(available_rows, data['n_rows']) subwin_inverted = inverted if top_item_height is not None: # Special case: draw the page as non-inverted, except for the # top element. This element will be drawn as inverted with a # restricted height subwin_n_rows = min(subwin_n_rows, top_item_height) subwin_inverted = True top_item_height = None subwin_n_cols = win_n_cols - data['h_offset'] start = current_row - subwin_n_rows + 1 if inverted else current_row subwindow = window.derwin(subwin_n_rows, subwin_n_cols, start, data['h_offset']) self._subwindows.append((subwindow, data, subwin_inverted)) available_rows -= (subwin_n_rows + 1) # Add one for the blank line current_row += step * (subwin_n_rows + 1) if available_rows <= 0: # Indicate the page is full and we can keep the inverted screen. cancel_inverted = False break if len(self._subwindows) == 1: # Never draw inverted if only one subwindow. The top of the # subwindow should always be aligned with the top of the screen. cancel_inverted = True if cancel_inverted and self.nav.inverted: # In some cases we need to make sure that the screen is NOT # inverted. Unfortunately, this currently means drawing the whole # page over again. Could not think of a better way to pre-determine # if the content will fill up the page, given that it is dependent # on the size of the terminal. self.nav.flip((len(self._subwindows) - 1)) self._draw_content() return if self.nav.cursor_index >= len(self._subwindows): # Don't allow the cursor to go over the number of subwindows # This could happen if the window is resized and the cursor index is # pushed out of bounds self.nav.cursor_index = len(self._subwindows) - 1 # Now that the windows are setup, we can take a second pass through # to draw the text onto each subwindow for index, (win, data, inverted) in enumerate(self._subwindows): if self.nav.absolute_index >= 0 and index == self.nav.cursor_index: win.bkgd(str(' '), self.term.attr('Selected')) with self.term.theme.turn_on_selected(): self._draw_item(win, data, inverted) else: win.bkgd(str(' '), self.term.attr('Normal')) self._draw_item(win, data, inverted) self._row += win_n_rows
[ "def", "_draw_content", "(", "self", ")", ":", "n_rows", ",", "n_cols", "=", "self", ".", "term", ".", "stdscr", ".", "getmaxyx", "(", ")", "window", "=", "self", ".", "term", ".", "stdscr", ".", "derwin", "(", "n_rows", "-", "self", ".", "_row", "-", "1", ",", "n_cols", ",", "self", ".", "_row", ",", "0", ")", "window", ".", "erase", "(", ")", "win_n_rows", ",", "win_n_cols", "=", "window", ".", "getmaxyx", "(", ")", "self", ".", "_subwindows", "=", "[", "]", "page_index", ",", "cursor_index", ",", "inverted", "=", "self", ".", "nav", ".", "position", "step", "=", "self", ".", "nav", ".", "step", "# If not inverted, align the first submission with the top and draw", "# downwards. If inverted, align the first submission with the bottom", "# and draw upwards.", "cancel_inverted", "=", "True", "current_row", "=", "(", "win_n_rows", "-", "1", ")", "if", "inverted", "else", "0", "available_rows", "=", "win_n_rows", "top_item_height", "=", "None", "if", "inverted", "else", "self", ".", "nav", ".", "top_item_height", "for", "data", "in", "self", ".", "content", ".", "iterate", "(", "page_index", ",", "step", ",", "win_n_cols", "-", "2", ")", ":", "subwin_n_rows", "=", "min", "(", "available_rows", ",", "data", "[", "'n_rows'", "]", ")", "subwin_inverted", "=", "inverted", "if", "top_item_height", "is", "not", "None", ":", "# Special case: draw the page as non-inverted, except for the", "# top element. This element will be drawn as inverted with a", "# restricted height", "subwin_n_rows", "=", "min", "(", "subwin_n_rows", ",", "top_item_height", ")", "subwin_inverted", "=", "True", "top_item_height", "=", "None", "subwin_n_cols", "=", "win_n_cols", "-", "data", "[", "'h_offset'", "]", "start", "=", "current_row", "-", "subwin_n_rows", "+", "1", "if", "inverted", "else", "current_row", "subwindow", "=", "window", ".", "derwin", "(", "subwin_n_rows", ",", "subwin_n_cols", ",", "start", ",", "data", "[", "'h_offset'", "]", ")", "self", ".", "_subwindows", ".", "append", "(", "(", "subwindow", ",", "data", ",", "subwin_inverted", ")", ")", "available_rows", "-=", "(", "subwin_n_rows", "+", "1", ")", "# Add one for the blank line", "current_row", "+=", "step", "*", "(", "subwin_n_rows", "+", "1", ")", "if", "available_rows", "<=", "0", ":", "# Indicate the page is full and we can keep the inverted screen.", "cancel_inverted", "=", "False", "break", "if", "len", "(", "self", ".", "_subwindows", ")", "==", "1", ":", "# Never draw inverted if only one subwindow. The top of the", "# subwindow should always be aligned with the top of the screen.", "cancel_inverted", "=", "True", "if", "cancel_inverted", "and", "self", ".", "nav", ".", "inverted", ":", "# In some cases we need to make sure that the screen is NOT", "# inverted. Unfortunately, this currently means drawing the whole", "# page over again. 
Could not think of a better way to pre-determine", "# if the content will fill up the page, given that it is dependent", "# on the size of the terminal.", "self", ".", "nav", ".", "flip", "(", "(", "len", "(", "self", ".", "_subwindows", ")", "-", "1", ")", ")", "self", ".", "_draw_content", "(", ")", "return", "if", "self", ".", "nav", ".", "cursor_index", ">=", "len", "(", "self", ".", "_subwindows", ")", ":", "# Don't allow the cursor to go over the number of subwindows", "# This could happen if the window is resized and the cursor index is", "# pushed out of bounds", "self", ".", "nav", ".", "cursor_index", "=", "len", "(", "self", ".", "_subwindows", ")", "-", "1", "# Now that the windows are setup, we can take a second pass through", "# to draw the text onto each subwindow", "for", "index", ",", "(", "win", ",", "data", ",", "inverted", ")", "in", "enumerate", "(", "self", ".", "_subwindows", ")", ":", "if", "self", ".", "nav", ".", "absolute_index", ">=", "0", "and", "index", "==", "self", ".", "nav", ".", "cursor_index", ":", "win", ".", "bkgd", "(", "str", "(", "' '", ")", ",", "self", ".", "term", ".", "attr", "(", "'Selected'", ")", ")", "with", "self", ".", "term", ".", "theme", ".", "turn_on_selected", "(", ")", ":", "self", ".", "_draw_item", "(", "win", ",", "data", ",", "inverted", ")", "else", ":", "win", ".", "bkgd", "(", "str", "(", "' '", ")", ",", "self", ".", "term", ".", "attr", "(", "'Normal'", ")", ")", "self", ".", "_draw_item", "(", "win", ",", "data", ",", "inverted", ")", "self", ".", "_row", "+=", "win_n_rows" ]
Loop through submissions and fill up the content page.
[ "Loop", "through", "submissions", "and", "fill", "up", "the", "content", "page", "." ]
python
train
DataBiosphere/toil
src/toil/provisioners/aws/__init__.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/aws/__init__.py#L71-L125
def choose_spot_zone(zones, bid, spot_history): """ Returns the zone to put the spot request based on, in order of priority: 1) zones with prices currently under the bid 2) zones with the most stable price :param list[boto.ec2.zone.Zone] zones: :param float bid: :param list[boto.ec2.spotpricehistory.SpotPriceHistory] spot_history: :rtype: str :return: the name of the selected zone >>> from collections import namedtuple >>> FauxHistory = namedtuple('FauxHistory', ['price', 'availability_zone']) >>> ZoneTuple = namedtuple('ZoneTuple', ['name']) >>> zones = [ZoneTuple('us-west-2a'), ZoneTuple('us-west-2b')] >>> spot_history = [FauxHistory(0.1, 'us-west-2a'), \ FauxHistory(0.2, 'us-west-2a'), \ FauxHistory(0.3, 'us-west-2b'), \ FauxHistory(0.6, 'us-west-2b')] >>> choose_spot_zone(zones, 0.15, spot_history) 'us-west-2a' >>> spot_history=[FauxHistory(0.3, 'us-west-2a'), \ FauxHistory(0.2, 'us-west-2a'), \ FauxHistory(0.1, 'us-west-2b'), \ FauxHistory(0.6, 'us-west-2b')] >>> choose_spot_zone(zones, 0.15, spot_history) 'us-west-2b' >>> spot_history=[FauxHistory(0.1, 'us-west-2a'), \ FauxHistory(0.7, 'us-west-2a'), \ FauxHistory(0.1, 'us-west-2b'), \ FauxHistory(0.6, 'us-west-2b')] >>> choose_spot_zone(zones, 0.15, spot_history) 'us-west-2b' """ # Create two lists of tuples of form: [(zone.name, std_deviation), ...] one for zones # over the bid price and one for zones under bid price. Each are sorted by increasing # standard deviation values. markets_under_bid, markets_over_bid = [], [] for zone in zones: zone_histories = [zone_history for zone_history in spot_history if zone_history.availability_zone == zone.name] if zone_histories: price_deviation = std_dev([history.price for history in zone_histories]) recent_price = zone_histories[0].price else: price_deviation, recent_price = 0.0, bid zone_tuple = ZoneTuple(name=zone.name, price_deviation=price_deviation) (markets_over_bid, markets_under_bid)[recent_price < bid].append(zone_tuple) return min(markets_under_bid or markets_over_bid, key=attrgetter('price_deviation')).name
[ "def", "choose_spot_zone", "(", "zones", ",", "bid", ",", "spot_history", ")", ":", "# Create two lists of tuples of form: [(zone.name, std_deviation), ...] one for zones", "# over the bid price and one for zones under bid price. Each are sorted by increasing", "# standard deviation values.", "markets_under_bid", ",", "markets_over_bid", "=", "[", "]", ",", "[", "]", "for", "zone", "in", "zones", ":", "zone_histories", "=", "[", "zone_history", "for", "zone_history", "in", "spot_history", "if", "zone_history", ".", "availability_zone", "==", "zone", ".", "name", "]", "if", "zone_histories", ":", "price_deviation", "=", "std_dev", "(", "[", "history", ".", "price", "for", "history", "in", "zone_histories", "]", ")", "recent_price", "=", "zone_histories", "[", "0", "]", ".", "price", "else", ":", "price_deviation", ",", "recent_price", "=", "0.0", ",", "bid", "zone_tuple", "=", "ZoneTuple", "(", "name", "=", "zone", ".", "name", ",", "price_deviation", "=", "price_deviation", ")", "(", "markets_over_bid", ",", "markets_under_bid", ")", "[", "recent_price", "<", "bid", "]", ".", "append", "(", "zone_tuple", ")", "return", "min", "(", "markets_under_bid", "or", "markets_over_bid", ",", "key", "=", "attrgetter", "(", "'price_deviation'", ")", ")", ".", "name" ]
Returns the zone to put the spot request based on, in order of priority: 1) zones with prices currently under the bid 2) zones with the most stable price :param list[boto.ec2.zone.Zone] zones: :param float bid: :param list[boto.ec2.spotpricehistory.SpotPriceHistory] spot_history: :rtype: str :return: the name of the selected zone >>> from collections import namedtuple >>> FauxHistory = namedtuple('FauxHistory', ['price', 'availability_zone']) >>> ZoneTuple = namedtuple('ZoneTuple', ['name']) >>> zones = [ZoneTuple('us-west-2a'), ZoneTuple('us-west-2b')] >>> spot_history = [FauxHistory(0.1, 'us-west-2a'), \ FauxHistory(0.2, 'us-west-2a'), \ FauxHistory(0.3, 'us-west-2b'), \ FauxHistory(0.6, 'us-west-2b')] >>> choose_spot_zone(zones, 0.15, spot_history) 'us-west-2a' >>> spot_history=[FauxHistory(0.3, 'us-west-2a'), \ FauxHistory(0.2, 'us-west-2a'), \ FauxHistory(0.1, 'us-west-2b'), \ FauxHistory(0.6, 'us-west-2b')] >>> choose_spot_zone(zones, 0.15, spot_history) 'us-west-2b' >>> spot_history=[FauxHistory(0.1, 'us-west-2a'), \ FauxHistory(0.7, 'us-west-2a'), \ FauxHistory(0.1, 'us-west-2b'), \ FauxHistory(0.6, 'us-west-2b')] >>> choose_spot_zone(zones, 0.15, spot_history) 'us-west-2b'
[ "Returns", "the", "zone", "to", "put", "the", "spot", "request", "based", "on", "in", "order", "of", "priority", ":" ]
python
train
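One expression deserves a gloss: (markets_over_bid, markets_under_bid)[recent_price < bid] indexes a 2-tuple with a bool (False is 0, True is 1), routing each zone into the right bucket without an if/else. In isolation:

over, under = [], []
for price in (0.30, 0.10, 0.25):
    (over, under)[price < 0.15].append(price)
print(under, over)  # [0.1] [0.3, 0.25]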
scanny/python-pptx
lab/cust-elm-classes/main.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/lab/cust-elm-classes/main.py#L40-L46
def _child_list(element, child_tagname): """ Return list containing the direct children of *element* having *child_tagname*. """ xpath = './%s' % child_tagname return element.xpath(xpath, namespaces=nsmap)
[ "def", "_child_list", "(", "element", ",", "child_tagname", ")", ":", "xpath", "=", "'./%s'", "%", "child_tagname", "return", "element", ".", "xpath", "(", "xpath", ",", "namespaces", "=", "nsmap", ")" ]
Return list containing the direct children of *element* having *child_tagname*.
[ "Return", "list", "containing", "the", "direct", "children", "of", "*", "element", "*", "having", "*", "child_tagname", "*", "." ]
python
train
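The helper leans on a module-level nsmap; a self-contained lxml equivalent, using the DrawingML namespace that python-pptx works with:

from lxml import etree

nsmap = {'a': 'http://schemas.openxmlformats.org/drawingml/2006/main'}
xml = ('<root xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">'
       '<a:p/><a:p/><other/></root>')
element = etree.fromstring(xml)
print(element.xpath('./a:p', namespaces=nsmap))  # the two direct a:p children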
securestate/termineter
lib/termineter/core.py
https://github.com/securestate/termineter/blob/d657d25d97c7739e650b951c396404e857e56625/lib/termineter/core.py#L376-L405
def serial_login(self): """ Attempt to log into the meter over the C12.18 protocol. Returns True on success, False on a failure. This can be called by modules in order to login with a username and password configured within the framework instance. """ if not self._serial_connected: raise termineter.errors.FrameworkRuntimeError('the serial interface is disconnected') username = self.options['USERNAME'] user_id = self.options['USER_ID'] password = self.options['PASSWORD'] if self.options['PASSWORD_HEX']: hex_regex = re.compile('^([0-9a-fA-F]{2})+$') if hex_regex.match(password) is None: self.print_error('Invalid characters in password') raise termineter.errors.FrameworkConfigurationError('invalid characters in password') password = binascii.a2b_hex(password) if len(username) > 10: self.print_error('Username cannot be longer than 10 characters') raise termineter.errors.FrameworkConfigurationError('username cannot be longer than 10 characters') if not (0 <= user_id <= 0xffff): self.print_error('User id must be between 0 and 0xffff') raise termineter.errors.FrameworkConfigurationError('user id must be between 0 and 0xffff') if len(password) > 20: self.print_error('Password cannot be longer than 20 characters') raise termineter.errors.FrameworkConfigurationError('password cannot be longer than 20 characters') if not self.serial_connection.login(username, user_id, password): return False return True
[ "def", "serial_login", "(", "self", ")", ":", "if", "not", "self", ".", "_serial_connected", ":", "raise", "termineter", ".", "errors", ".", "FrameworkRuntimeError", "(", "'the serial interface is disconnected'", ")", "username", "=", "self", ".", "options", "[", "'USERNAME'", "]", "user_id", "=", "self", ".", "options", "[", "'USER_ID'", "]", "password", "=", "self", ".", "options", "[", "'PASSWORD'", "]", "if", "self", ".", "options", "[", "'PASSWORD_HEX'", "]", ":", "hex_regex", "=", "re", ".", "compile", "(", "'^([0-9a-fA-F]{2})+$'", ")", "if", "hex_regex", ".", "match", "(", "password", ")", "is", "None", ":", "self", ".", "print_error", "(", "'Invalid characters in password'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'invalid characters in password'", ")", "password", "=", "binascii", ".", "a2b_hex", "(", "password", ")", "if", "len", "(", "username", ")", ">", "10", ":", "self", ".", "print_error", "(", "'Username cannot be longer than 10 characters'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'username cannot be longer than 10 characters'", ")", "if", "not", "(", "0", "<=", "user_id", "<=", "0xffff", ")", ":", "self", ".", "print_error", "(", "'User id must be between 0 and 0xffff'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'user id must be between 0 and 0xffff'", ")", "if", "len", "(", "password", ")", ">", "20", ":", "self", ".", "print_error", "(", "'Password cannot be longer than 20 characters'", ")", "raise", "termineter", ".", "errors", ".", "FrameworkConfigurationError", "(", "'password cannot be longer than 20 characters'", ")", "if", "not", "self", ".", "serial_connection", ".", "login", "(", "username", ",", "user_id", ",", "password", ")", ":", "return", "False", "return", "True" ]
Attempt to log into the meter over the C12.18 protocol. Returns True on success, False on a failure. This can be called by modules in order to login with a username and password configured within the framework instance.
[ "Attempt", "to", "log", "into", "the", "meter", "over", "the", "C12", ".", "18", "protocol", ".", "Returns", "True", "on", "success", "False", "on", "a", "failure", ".", "This", "can", "be", "called", "by", "modules", "in", "order", "to", "login", "with", "a", "username", "and", "password", "configured", "within", "the", "framework", "instance", "." ]
python
train
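The PASSWORD_HEX branch is the one non-obvious validation: the regex requires whole bytes (pairs of hex digits) before binascii decodes the string. Standalone:

import re
import binascii

hex_regex = re.compile('^([0-9a-fA-F]{2})+$')
for candidate in ('deadbeef', 'abc'):
    if hex_regex.match(candidate) is None:
        print(candidate, 'has invalid characters')   # 'abc': dangling nibble
    else:
        print(candidate, '->', binascii.a2b_hex(candidate))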
inasafe/inasafe
safe/utilities/memory_checker.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/memory_checker.py#L162-L180
def memory_error(): """Display an error when there is not enough memory.""" warning_heading = m.Heading( tr('Memory issue'), **WARNING_STYLE) warning_message = tr( 'There is not enough free memory to run this analysis.') suggestion_heading = m.Heading( tr('Suggestion'), **SUGGESTION_STYLE) suggestion = tr( 'Try zooming in to a smaller area or using a raster layer with a ' 'coarser resolution to speed up execution and reduce memory ' 'requirements. You could also try adding more RAM to your computer.') message = m.Message() message.add(warning_heading) message.add(warning_message) message.add(suggestion_heading) message.add(suggestion) send_static_message(dispatcher.Anonymous, message)
[ "def", "memory_error", "(", ")", ":", "warning_heading", "=", "m", ".", "Heading", "(", "tr", "(", "'Memory issue'", ")", ",", "*", "*", "WARNING_STYLE", ")", "warning_message", "=", "tr", "(", "'There is not enough free memory to run this analysis.'", ")", "suggestion_heading", "=", "m", ".", "Heading", "(", "tr", "(", "'Suggestion'", ")", ",", "*", "*", "SUGGESTION_STYLE", ")", "suggestion", "=", "tr", "(", "'Try zooming in to a smaller area or using a raster layer with a '", "'coarser resolution to speed up execution and reduce memory '", "'requirements. You could also try adding more RAM to your computer.'", ")", "message", "=", "m", ".", "Message", "(", ")", "message", ".", "add", "(", "warning_heading", ")", "message", ".", "add", "(", "warning_message", ")", "message", ".", "add", "(", "suggestion_heading", ")", "message", ".", "add", "(", "suggestion", ")", "send_static_message", "(", "dispatcher", ".", "Anonymous", ",", "message", ")" ]
Display an error when there is not enough memory.
[ "Display", "an", "error", "when", "there", "is", "not", "enough", "memory", "." ]
python
train
paramiko/paramiko
paramiko/sftp_file.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/sftp_file.py#L343-L356
def truncate(self, size): """ Change the size of this file. This usually extends or shrinks the size of the file, just like the ``truncate()`` method on Python file objects. :param size: the new size of the file """ self.sftp._log( DEBUG, "truncate({}, {!r})".format(hexlify(self.handle), size) ) attr = SFTPAttributes() attr.st_size = size self.sftp._request(CMD_FSETSTAT, self.handle, attr)
[ "def", "truncate", "(", "self", ",", "size", ")", ":", "self", ".", "sftp", ".", "_log", "(", "DEBUG", ",", "\"truncate({}, {!r})\"", ".", "format", "(", "hexlify", "(", "self", ".", "handle", ")", ",", "size", ")", ")", "attr", "=", "SFTPAttributes", "(", ")", "attr", ".", "st_size", "=", "size", "self", ".", "sftp", ".", "_request", "(", "CMD_FSETSTAT", ",", "self", ".", "handle", ",", "attr", ")" ]
Change the size of this file. This usually extends or shrinks the size of the file, just like the ``truncate()`` method on Python file objects. :param size: the new size of the file
[ "Change", "the", "size", "of", "this", "file", ".", "This", "usually", "extends", "or", "shrinks", "the", "size", "of", "the", "file", "just", "like", "the", "truncate", "()", "method", "on", "Python", "file", "objects", "." ]
python
train
iron-io/iron_core_python
iron_core.py
https://github.com/iron-io/iron_core_python/blob/f09a160a854912efcb75a810702686bc25b74fa8/iron_core.py#L287-L303
def post(self, url, body="", headers={}, retry=True): """Execute an HTTP POST request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. body -- A string or file object to send as the body of the request. Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True. """ headers["Content-Length"] = str(len(body)) return self.request(url=url, method="POST", body=body, headers=headers, retry=retry)
[ "def", "post", "(", "self", ",", "url", ",", "body", "=", "\"\"", ",", "headers", "=", "{", "}", ",", "retry", "=", "True", ")", ":", "headers", "[", "\"Content-Length\"", "]", "=", "str", "(", "len", "(", "body", ")", ")", "return", "self", ".", "request", "(", "url", "=", "url", ",", "method", "=", "\"POST\"", ",", "body", "=", "body", ",", "headers", "=", "headers", ",", "retry", "=", "retry", ")" ]
Execute an HTTP POST request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. body -- A string or file object to send as the body of the request. Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True.
[ "Execute", "an", "HTTP", "POST", "request", "and", "return", "a", "dict", "containing", "the", "response", "and", "the", "response", "status", "code", "." ]
python
train
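One caveat the header computation invites: len(body) counts characters on a text string, while Content-Length is defined in bytes, so the two diverge for non-ASCII payloads. Encoding first keeps them in step:

body = '{"note": "café"}'
print(len(body))                  # 16 characters
print(len(body.encode('utf-8')))  # 17 bytes, which the header should carry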
mdgoldberg/sportsref
sportsref/nba/seasons.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nba/seasons.py#L90-L95
def team_names_to_ids(self): """Mapping from full team names to 3-letter team IDs. :returns: Dictionary with team names as keys and team IDs as values. """ d = self.team_ids_to_names() return {v: k for k, v in d.items()}
[ "def", "team_names_to_ids", "(", "self", ")", ":", "d", "=", "self", ".", "team_ids_to_names", "(", ")", "return", "{", "v", ":", "k", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "}" ]
Mapping from full team names to 3-letter team IDs. :returns: Dictionary with team names as keys and team IDs as values.
[ "Mapping", "from", "full", "team", "names", "to", "3", "-", "letter", "team", "IDs", ".", ":", "returns", ":", "Dictionary", "with", "tean", "names", "as", "keys", "and", "team", "IDs", "as", "values", "." ]
python
test
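The body is a one-line dict inversion, which quietly assumes the original values are unique (duplicate values would collapse into a single key). Illustrated with made-up entries:

ids_to_names = {'BOS': 'Boston Celtics', 'LAL': 'Los Angeles Lakers'}
names_to_ids = {v: k for k, v in ids_to_names.items()}
print(names_to_ids['Boston Celtics'])  # BOS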
materialsproject/pymatgen
pymatgen/electronic_structure/plotter.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/plotter.py#L3963-L3999
def fold_point(p, lattice, coords_are_cartesian=False): """ Folds a point with coordinates p inside the first Brillouin zone of the lattice. Args: p: coordinates of one point lattice: Lattice object used to convert from reciprocal to cartesian coordinates coords_are_cartesian: Set to True if you are providing coordinates in cartesian coordinates. Defaults to False. Returns: The cartesian coordinates folded inside the first Brillouin zone """ if coords_are_cartesian: p = lattice.get_fractional_coords(p) else: p = np.array(p) p = np.mod(p + 0.5 - 1e-10, 1) - 0.5 + 1e-10 p = lattice.get_cartesian_coords(p) closest_lattice_point = None smallest_distance = 10000 for i in (-1, 0, 1): for j in (-1, 0, 1): for k in (-1, 0, 1): lattice_point = np.dot((i, j, k), lattice.matrix) dist = np.linalg.norm(p - lattice_point) if closest_lattice_point is None or dist < smallest_distance: closest_lattice_point = lattice_point smallest_distance = dist if not np.allclose(closest_lattice_point, (0, 0, 0)): p = p - closest_lattice_point return p
[ "def", "fold_point", "(", "p", ",", "lattice", ",", "coords_are_cartesian", "=", "False", ")", ":", "if", "coords_are_cartesian", ":", "p", "=", "lattice", ".", "get_fractional_coords", "(", "p", ")", "else", ":", "p", "=", "np", ".", "array", "(", "p", ")", "p", "=", "np", ".", "mod", "(", "p", "+", "0.5", "-", "1e-10", ",", "1", ")", "-", "0.5", "+", "1e-10", "p", "=", "lattice", ".", "get_cartesian_coords", "(", "p", ")", "closest_lattice_point", "=", "None", "smallest_distance", "=", "10000", "for", "i", "in", "(", "-", "1", ",", "0", ",", "1", ")", ":", "for", "j", "in", "(", "-", "1", ",", "0", ",", "1", ")", ":", "for", "k", "in", "(", "-", "1", ",", "0", ",", "1", ")", ":", "lattice_point", "=", "np", ".", "dot", "(", "(", "i", ",", "j", ",", "k", ")", ",", "lattice", ".", "matrix", ")", "dist", "=", "np", ".", "linalg", ".", "norm", "(", "p", "-", "lattice_point", ")", "if", "closest_lattice_point", "is", "None", "or", "dist", "<", "smallest_distance", ":", "closest_lattice_point", "=", "lattice_point", "smallest_distance", "=", "dist", "if", "not", "np", ".", "allclose", "(", "closest_lattice_point", ",", "(", "0", ",", "0", ",", "0", ")", ")", ":", "p", "=", "p", "-", "closest_lattice_point", "return", "p" ]
Folds a point with coordinates p inside the first Brillouin zone of the lattice. Args: p: coordinates of one point lattice: Lattice object used to convert from reciprocal to cartesian coordinates coords_are_cartesian: Set to True if you are providing coordinates in cartesian coordinates. Defaults to False. Returns: The cartesian coordinates folded inside the first Brillouin zone
[ "Folds", "a", "point", "with", "coordinates", "p", "inside", "the", "first", "Brillouin", "zone", "of", "the", "lattice", "." ]
python
train
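The first step is the core trick: np.mod with a half-cell shift maps any fractional coordinate into (-0.5, 0.5], and the 1e-10 nudges keep points sitting exactly on the +0.5 boundary from wrapping to -0.5. In isolation:

import numpy as np

p = np.array([1.3, -0.7, 0.5])
print(np.mod(p + 0.5 - 1e-10, 1) - 0.5 + 1e-10)  # [0.3 0.3 0.5]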
theiviaxx/python-perforce
perforce/models.py
https://github.com/theiviaxx/python-perforce/blob/01a3b01fe5949126fa0097d9a8ad386887823b5a/perforce/models.py#L377-L394
def canAdd(self, filename): """Determines if a filename can be added to the depot under the current client :param filename: File path to add :type filename: str """ try: result = self.run(['add', '-n', '-t', 'text', filename])[0] except errors.CommandError as err: LOGGER.debug(err) return False if result.get('code') not in ('error', 'info'): return True LOGGER.warn('Unable to add {}: {}'.format(filename, result['data'])) return False
[ "def", "canAdd", "(", "self", ",", "filename", ")", ":", "try", ":", "result", "=", "self", ".", "run", "(", "[", "'add'", ",", "'-n'", ",", "'-t'", ",", "'text'", ",", "filename", "]", ")", "[", "0", "]", "except", "errors", ".", "CommandError", "as", "err", ":", "LOGGER", ".", "debug", "(", "err", ")", "return", "False", "if", "result", ".", "get", "(", "'code'", ")", "not", "in", "(", "'error'", ",", "'info'", ")", ":", "return", "True", "LOGGER", ".", "warn", "(", "'Unable to add {}: {}'", ".", "format", "(", "filename", ",", "result", "[", "'data'", "]", ")", ")", "return", "False" ]
Determines if a filename can be added to the depot under the current client :param filename: File path to add :type filename: str
[ "Determines", "if", "a", "filename", "can", "be", "added", "to", "the", "depot", "under", "the", "current", "client" ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/base_handler.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/base_handler.py#L189-L206
def base_path(self): """Base path for all mapreduce-related urls. JSON handlers are mapped to /base_path/command/command_name thus they require special treatment. Raises: BadRequestPathError: if the path does not end with "/command". Returns: The base path. """ path = self.request.path base_path = path[:path.rfind("/")] if not base_path.endswith("/command"): raise BadRequestPathError( "Json handlers should have /command path prefix") return base_path[:base_path.rfind("/")]
[ "def", "base_path", "(", "self", ")", ":", "path", "=", "self", ".", "request", ".", "path", "base_path", "=", "path", "[", ":", "path", ".", "rfind", "(", "\"/\"", ")", "]", "if", "not", "base_path", ".", "endswith", "(", "\"/command\"", ")", ":", "raise", "BadRequestPathError", "(", "\"Json handlers should have /command path prefix\"", ")", "return", "base_path", "[", ":", "base_path", ".", "rfind", "(", "\"/\"", ")", "]" ]
Base path for all mapreduce-related urls. JSON handlers are mapped to /base_path/command/command_name thus they require special treatment. Raises: BadRequestPathError: if the path does not end with "/command". Returns: The base path.
[ "Base", "path", "for", "all", "mapreduce", "-", "related", "urls", "." ]
python
train
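The two rfind("/") slices peel one path segment at a time, so a JSON handler path loses first its command name and then the /command prefix. Traced standalone:

path = '/mapreduce/command/start'
base_path = path[:path.rfind('/')]       # '/mapreduce/command'
assert base_path.endswith('/command')
print(base_path[:base_path.rfind('/')])  # '/mapreduce'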
pazz/alot
alot/settings/manager.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/settings/manager.py#L397-L429
def get_keybindings(self, mode): """look up keybindings from `MODE-maps` sections :param mode: mode identifier :type mode: str :returns: dictionaries of key-cmd for global and specific mode :rtype: 2-tuple of dicts """ globalmaps, modemaps = {}, {} bindings = self._bindings # get bindings for mode `mode` # retain empty assignations to silence corresponding global mappings if mode in bindings.sections: for key in bindings[mode].scalars: value = bindings[mode][key] if isinstance(value, list): value = ','.join(value) modemaps[key] = value # get global bindings # ignore the ones already mapped in mode bindings for key in bindings.scalars: if key not in modemaps: value = bindings[key] if isinstance(value, list): value = ','.join(value) if value and value != '': globalmaps[key] = value # get rid of empty commands left in mode bindings for k, v in list(modemaps.items()): if not v: del modemaps[k] return globalmaps, modemaps
[ "def", "get_keybindings", "(", "self", ",", "mode", ")", ":", "globalmaps", ",", "modemaps", "=", "{", "}", ",", "{", "}", "bindings", "=", "self", ".", "_bindings", "# get bindings for mode `mode`", "# retain empty assignations to silence corresponding global mappings", "if", "mode", "in", "bindings", ".", "sections", ":", "for", "key", "in", "bindings", "[", "mode", "]", ".", "scalars", ":", "value", "=", "bindings", "[", "mode", "]", "[", "key", "]", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "','", ".", "join", "(", "value", ")", "modemaps", "[", "key", "]", "=", "value", "# get global bindings", "# ignore the ones already mapped in mode bindings", "for", "key", "in", "bindings", ".", "scalars", ":", "if", "key", "not", "in", "modemaps", ":", "value", "=", "bindings", "[", "key", "]", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "','", ".", "join", "(", "value", ")", "if", "value", "and", "value", "!=", "''", ":", "globalmaps", "[", "key", "]", "=", "value", "# get rid of empty commands left in mode bindings", "for", "k", ",", "v", "in", "list", "(", "modemaps", ".", "items", "(", ")", ")", ":", "if", "not", "v", ":", "del", "modemaps", "[", "k", "]", "return", "globalmaps", ",", "modemaps" ]
look up keybindings from `MODE-maps` sections :param mode: mode identifier :type mode: str :returns: dictionaries of key-cmd for global and specific mode :rtype: 2-tuple of dicts
[ "look", "up", "keybindings", "from", "MODE", "-", "maps", "sections" ]
python
train
grigi/talkey
talkey/base.py
https://github.com/grigi/talkey/blob/5d2d4a1f7001744c4fd9a79a883a3f2001522329/talkey/base.py#L234-L264
def play(self, filename, translate=False): # pragma: no cover ''' Plays the sounds. :filename: The input file name :translate: If True, it runs it through audioread which will translate from common compression formats to raw WAV. ''' # FIXME: Use platform-independent and async audio-output here # PyAudio looks most promising, too bad about: # --allow-external PyAudio --allow-unverified PyAudio if translate: with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f: fname = f.name with audioread.audio_open(filename) as f: with contextlib.closing(wave.open(fname, 'w')) as of: of.setnchannels(f.channels) of.setframerate(f.samplerate) of.setsampwidth(2) for buf in f: of.writeframes(buf) filename = fname if winsound: winsound.PlaySound(str(filename), winsound.SND_FILENAME) else: cmd = ['aplay', str(filename)] self._logger.debug('Executing %s', ' '.join([pipes.quote(arg) for arg in cmd])) subprocess.call(cmd) if translate: os.remove(fname)
[ "def", "play", "(", "self", ",", "filename", ",", "translate", "=", "False", ")", ":", "# pragma: no cover", "# FIXME: Use platform-independent and async audio-output here", "# PyAudio looks most promising, too bad about:", "# --allow-external PyAudio --allow-unverified PyAudio", "if", "translate", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.wav'", ",", "delete", "=", "False", ")", "as", "f", ":", "fname", "=", "f", ".", "name", "with", "audioread", ".", "audio_open", "(", "filename", ")", "as", "f", ":", "with", "contextlib", ".", "closing", "(", "wave", ".", "open", "(", "fname", ",", "'w'", ")", ")", "as", "of", ":", "of", ".", "setnchannels", "(", "f", ".", "channels", ")", "of", ".", "setframerate", "(", "f", ".", "samplerate", ")", "of", ".", "setsampwidth", "(", "2", ")", "for", "buf", "in", "f", ":", "of", ".", "writeframes", "(", "buf", ")", "filename", "=", "fname", "if", "winsound", ":", "winsound", ".", "PlaySound", "(", "str", "(", "filename", ")", ",", "winsound", ".", "SND_FILENAME", ")", "else", ":", "cmd", "=", "[", "'aplay'", ",", "str", "(", "filename", ")", "]", "self", ".", "_logger", ".", "debug", "(", "'Executing %s'", ",", "' '", ".", "join", "(", "[", "pipes", ".", "quote", "(", "arg", ")", "for", "arg", "in", "cmd", "]", ")", ")", "subprocess", ".", "call", "(", "cmd", ")", "if", "translate", ":", "os", ".", "remove", "(", "fname", ")" ]
Plays the sounds. :filename: The input file name :translate: If True, it runs it through audioread which will translate from common compression formats to raw WAV.
[ "Plays", "the", "sounds", "." ]
python
train
horazont/aioxmpp
aioxmpp/muc/self_ping.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/muc/self_ping.py#L156-L189
def _interpret_result(self, task): """ Interpret the result of a ping. :param task: The pinger task. The result or exception of the `task` is interpreted as follows: * :data:`None` result: *positive* * :class:`aioxmpp.errors.XMPPError`, ``service-unavailable``: *positive* * :class:`aioxmpp.errors.XMPPError`, ``feature-not-implemented``: *positive* * :class:`aioxmpp.errors.XMPPError`, ``item-not-found``: *inconclusive* * :class:`aioxmpp.errors.XMPPError`: *negative* * :class:`asyncio.TimeoutError`: *inconclusive* * Any other exception: *inconclusive* """ if task.exception() is None: self._on_fresh() return exc = task.exception() if isinstance(exc, aioxmpp.errors.XMPPError): if exc.condition in [ aioxmpp.errors.ErrorCondition.SERVICE_UNAVAILABLE, aioxmpp.errors.ErrorCondition.FEATURE_NOT_IMPLEMENTED]: self._on_fresh() return if exc.condition == aioxmpp.errors.ErrorCondition.ITEM_NOT_FOUND: return self._on_exited()
[ "def", "_interpret_result", "(", "self", ",", "task", ")", ":", "if", "task", ".", "exception", "(", ")", "is", "None", ":", "self", ".", "_on_fresh", "(", ")", "return", "exc", "=", "task", ".", "exception", "(", ")", "if", "isinstance", "(", "exc", ",", "aioxmpp", ".", "errors", ".", "XMPPError", ")", ":", "if", "exc", ".", "condition", "in", "[", "aioxmpp", ".", "errors", ".", "ErrorCondition", ".", "SERVICE_UNAVAILABLE", ",", "aioxmpp", ".", "errors", ".", "ErrorCondition", ".", "FEATURE_NOT_IMPLEMENTED", "]", ":", "self", ".", "_on_fresh", "(", ")", "return", "if", "exc", ".", "condition", "==", "aioxmpp", ".", "errors", ".", "ErrorCondition", ".", "ITEM_NOT_FOUND", ":", "return", "self", ".", "_on_exited", "(", ")" ]
Interpret the result of a ping. :param task: The pinger task. The result or exception of the `task` is interpreted as follows: * :data:`None` result: *positive* * :class:`aioxmpp.errors.XMPPError`, ``service-unavailable``: *positive* * :class:`aioxmpp.errors.XMPPError`, ``feature-not-implemented``: *positive* * :class:`aioxmpp.errors.XMPPError`, ``item-not-found``: *inconclusive* * :class:`aioxmpp.errors.XMPPError`: *negative* * :class:`asyncio.TimeoutError`: *inconclusive* * Any other exception: *inconclusive*
[ "Interpret", "the", "result", "of", "a", "ping", "." ]
python
train
txamqp/txamqp
src/txamqp/endpoint.py
https://github.com/txamqp/txamqp/blob/10caf998dd8c05a7321cd10c24a83832bf58bd0c/src/txamqp/endpoint.py#L103-L123
def connect(self, protocol_factory): """ Connect the C{protocol_factory} to the AMQP broker specified by the URI of this endpoint. @param protocol_factory: An L{AMQFactory} building L{AMQClient} objects. @return: A L{Deferred} that results in an L{AMQClient} upon successful connection otherwise a L{Failure} wrapping L{ConnectError} or L{NoProtocol <twisted.internet.error.NoProtocol>}. """ # XXX Since AMQClient requires these parameters at __init__ time, we # need to override them in the provided factory. protocol_factory.set_vhost(self._vhost) protocol_factory.set_heartbeat(self._heartbeat) description = "tcp:{}:{}:timeout={}".format( self._host, self._port, self._timeout) endpoint = clientFromString(self._reactor, description) deferred = endpoint.connect(protocol_factory) return deferred.addCallback(self._authenticate)
[ "def", "connect", "(", "self", ",", "protocol_factory", ")", ":", "# XXX Since AMQClient requires these parameters at __init__ time, we", "# need to override them in the provided factory.", "protocol_factory", ".", "set_vhost", "(", "self", ".", "_vhost", ")", "protocol_factory", ".", "set_heartbeat", "(", "self", ".", "_heartbeat", ")", "description", "=", "\"tcp:{}:{}:timeout={}\"", ".", "format", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_timeout", ")", "endpoint", "=", "clientFromString", "(", "self", ".", "_reactor", ",", "description", ")", "deferred", "=", "endpoint", ".", "connect", "(", "protocol_factory", ")", "return", "deferred", ".", "addCallback", "(", "self", ".", "_authenticate", ")" ]
Connect the C{protocol_factory} to the AMQP broker specified by the URI of this endpoint. @param protocol_factory: An L{AMQFactory} building L{AMQClient} objects. @return: A L{Deferred} that results in an L{AMQClient} upon successful connection otherwise a L{Failure} wrapping L{ConnectError} or L{NoProtocol <twisted.internet.error.NoProtocol>}.
[ "Connect", "to", "the", "C", "{", "protocolFactory", "}", "to", "the", "AMQP", "broker", "specified", "by", "the", "URI", "of", "this", "endpoint", "." ]
python
train
bokeh/bokeh
bokeh/application/handlers/code_runner.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/application/handlers/code_runner.py#L129-L145
def new_module(self): ''' Make a fresh module to run in. Returns: Module ''' self.reset_run_errors() if self._code is None: return None module_name = 'bk_script_' + make_id().replace('-', '') module = ModuleType(str(module_name)) # str needed for py2.7 module.__dict__['__file__'] = os.path.abspath(self._path) return module
[ "def", "new_module", "(", "self", ")", ":", "self", ".", "reset_run_errors", "(", ")", "if", "self", ".", "_code", "is", "None", ":", "return", "None", "module_name", "=", "'bk_script_'", "+", "make_id", "(", ")", ".", "replace", "(", "'-'", ",", "''", ")", "module", "=", "ModuleType", "(", "str", "(", "module_name", ")", ")", "# str needed for py2.7", "module", ".", "__dict__", "[", "'__file__'", "]", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "_path", ")", "return", "module" ]
Make a fresh module to run in. Returns: Module
[ "Make", "a", "fresh", "module", "to", "run", "in", "." ]
python
train
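The fresh-module trick stands on its own: types.ModuleType builds an empty module without registering it in sys.modules, and code can then be executed into its __dict__. A minimal sketch with an invented script body and path:

from types import ModuleType

module = ModuleType('bk_script_demo')
module.__dict__['__file__'] = '/tmp/app.py'  # hypothetical path
exec(compile('x = 41 + 1', module.__file__, 'exec'), module.__dict__)
print(module.x)  # 42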
SmokinCaterpillar/pypet
pypet/brian2/network.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/brian2/network.py#L541-L563
def add_parameters(self, traj): """Adds parameters for a network simulation. Calls :func:`~pypet.brian2.network.NetworkComponent.add_parameters` for all components, analysers, and the network runner (in this order). :param traj: Trajectory container """ self._logger.info('Adding Parameters of Components') for component in self.components: component.add_parameters(traj) if self.analysers: self._logger.info('Adding Parameters of Analysers') for analyser in self.analysers: analyser.add_parameters(traj) self._logger.info('Adding Parameters of Runner') self.network_runner.add_parameters(traj)
[ "def", "add_parameters", "(", "self", ",", "traj", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Adding Parameters of Components'", ")", "for", "component", "in", "self", ".", "components", ":", "component", ".", "add_parameters", "(", "traj", ")", "if", "self", ".", "analysers", ":", "self", ".", "_logger", ".", "info", "(", "'Adding Parameters of Analysers'", ")", "for", "analyser", "in", "self", ".", "analysers", ":", "analyser", ".", "add_parameters", "(", "traj", ")", "self", ".", "_logger", ".", "info", "(", "'Adding Parameters of Runner'", ")", "self", ".", "network_runner", ".", "add_parameters", "(", "traj", ")" ]
Adds parameters for a network simulation. Calls :func:`~pypet.brian2.network.NetworkComponent.add_parameters` for all components, analysers, and the network runner (in this order). :param traj: Trajectory container
[ "Adds", "parameters", "for", "a", "network", "simulation", "." ]
python
test
apache/incubator-mxnet
python/mxnet/metric.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/metric.py#L194-L207
def get_name_value(self): """Returns zipped name and value pairs. Returns ------- list of tuples A (name, value) tuple list. """ name, value = self.get() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value))
[ "def", "get_name_value", "(", "self", ")", ":", "name", ",", "value", "=", "self", ".", "get", "(", ")", "if", "not", "isinstance", "(", "name", ",", "list", ")", ":", "name", "=", "[", "name", "]", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "[", "value", "]", "return", "list", "(", "zip", "(", "name", ",", "value", ")", ")" ]
Returns zipped name and value pairs. Returns ------- list of tuples A (name, value) tuple list.
[ "Returns", "zipped", "name", "and", "value", "pairs", "." ]
python
train
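Usage sketch: get_name_value() zips whatever get() returns, whether the metric yields a single name/value pair or parallel lists of them. With the built-in Accuracy metric:

import mxnet as mx

metric = mx.metric.Accuracy()
labels = [mx.nd.array([0, 1, 1])]
preds = [mx.nd.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])]  # last prediction is wrong
metric.update(labels, preds)
print(metric.get_name_value())  # -> [('accuracy', 0.6666...)]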
watson-developer-cloud/python-sdk
ibm_watson/compare_comply_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L4975-L4997
def _from_dict(cls, _dict): """Initialize a UnalignedElement object from a json dictionary.""" args = {} if 'document_label' in _dict: args['document_label'] = _dict.get('document_label') if 'location' in _dict: args['location'] = Location._from_dict(_dict.get('location')) if 'text' in _dict: args['text'] = _dict.get('text') if 'types' in _dict: args['types'] = [ TypeLabelComparison._from_dict(x) for x in (_dict.get('types')) ] if 'categories' in _dict: args['categories'] = [ CategoryComparison._from_dict(x) for x in (_dict.get('categories')) ] if 'attributes' in _dict: args['attributes'] = [ Attribute._from_dict(x) for x in (_dict.get('attributes')) ] return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'document_label'", "in", "_dict", ":", "args", "[", "'document_label'", "]", "=", "_dict", ".", "get", "(", "'document_label'", ")", "if", "'location'", "in", "_dict", ":", "args", "[", "'location'", "]", "=", "Location", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'location'", ")", ")", "if", "'text'", "in", "_dict", ":", "args", "[", "'text'", "]", "=", "_dict", ".", "get", "(", "'text'", ")", "if", "'types'", "in", "_dict", ":", "args", "[", "'types'", "]", "=", "[", "TypeLabelComparison", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'types'", ")", ")", "]", "if", "'categories'", "in", "_dict", ":", "args", "[", "'categories'", "]", "=", "[", "CategoryComparison", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'categories'", ")", ")", "]", "if", "'attributes'", "in", "_dict", ":", "args", "[", "'attributes'", "]", "=", "[", "Attribute", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'attributes'", ")", ")", "]", "return", "cls", "(", "*", "*", "args", ")" ]
Initialize a UnalignedElement object from a json dictionary.
[ "Initialize", "a", "UnalignedElement", "object", "from", "a", "json", "dictionary", "." ]
python
train
sanger-pathogens/circlator
circlator/merge.py
https://github.com/sanger-pathogens/circlator/blob/a4befb8c9dbbcd4b3ad1899a95aa3e689d58b638/circlator/merge.py#L732-L737
def _contigs_dict_to_file(self, contigs, fname): '''Writes dictionary of contigs to file''' f = pyfastaq.utils.open_file_write(fname) for contig in sorted(contigs, key=lambda x:len(contigs[x]), reverse=True): print(contigs[contig], file=f) pyfastaq.utils.close(f)
[ "def", "_contigs_dict_to_file", "(", "self", ",", "contigs", ",", "fname", ")", ":", "f", "=", "pyfastaq", ".", "utils", ".", "open_file_write", "(", "fname", ")", "for", "contig", "in", "sorted", "(", "contigs", ",", "key", "=", "lambda", "x", ":", "len", "(", "contigs", "[", "x", "]", ")", ",", "reverse", "=", "True", ")", ":", "print", "(", "contigs", "[", "contig", "]", ",", "file", "=", "f", ")", "pyfastaq", ".", "utils", ".", "close", "(", "f", ")" ]
Writes dictionary of contigs to file
[ "Writes", "dictionary", "of", "contigs", "to", "file" ]
python
train
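The ordering used above, in isolation: contigs are written longest first by sorting the dict keys on sequence length in reverse. A standalone sketch with made-up sequences:

contigs = {'a': 'ACGT' * 5, 'b': 'ACGT' * 20, 'c': 'ACGT'}
for name in sorted(contigs, key=lambda x: len(contigs[x]), reverse=True):
    print(name, len(contigs[name]))  # b 80, then a 20, then c 4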
alpha-xone/xbbg
xbbg/blp.py
https://github.com/alpha-xone/xbbg/blob/70226eb19a72a08144b5d8cea9db4913200f7bc5/xbbg/blp.py#L607-L634
def check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame: """ Check exchange hours vs local hours Args: tickers: list of tickers tz_exch: exchange timezone tz_loc: local timezone Returns: Local and exchange hours """ cols = ['Trading_Day_Start_Time_EOD', 'Trading_Day_End_Time_EOD'] con, _ = create_connection() hours = con.ref(tickers=tickers, flds=cols) cur_dt = pd.Timestamp('today').strftime('%Y-%m-%d ') hours.loc[:, 'local'] = hours.value.astype(str).str[:-3] hours.loc[:, 'exch'] = pd.DatetimeIndex( cur_dt + hours.value.astype(str) ).tz_localize(tz_loc).tz_convert(tz_exch).strftime('%H:%M') hours = pd.concat([ hours.set_index(['ticker', 'field']).exch.unstack().loc[:, cols], hours.set_index(['ticker', 'field']).local.unstack().loc[:, cols], ], axis=1) hours.columns = ['Exch_Start', 'Exch_End', 'Local_Start', 'Local_End'] return hours
[ "def", "check_hours", "(", "tickers", ",", "tz_exch", ",", "tz_loc", "=", "DEFAULT_TZ", ")", "->", "pd", ".", "DataFrame", ":", "cols", "=", "[", "'Trading_Day_Start_Time_EOD'", ",", "'Trading_Day_End_Time_EOD'", "]", "con", ",", "_", "=", "create_connection", "(", ")", "hours", "=", "con", ".", "ref", "(", "tickers", "=", "tickers", ",", "flds", "=", "cols", ")", "cur_dt", "=", "pd", ".", "Timestamp", "(", "'today'", ")", ".", "strftime", "(", "'%Y-%m-%d '", ")", "hours", ".", "loc", "[", ":", ",", "'local'", "]", "=", "hours", ".", "value", ".", "astype", "(", "str", ")", ".", "str", "[", ":", "-", "3", "]", "hours", ".", "loc", "[", ":", ",", "'exch'", "]", "=", "pd", ".", "DatetimeIndex", "(", "cur_dt", "+", "hours", ".", "value", ".", "astype", "(", "str", ")", ")", ".", "tz_localize", "(", "tz_loc", ")", ".", "tz_convert", "(", "tz_exch", ")", ".", "strftime", "(", "'%H:%M'", ")", "hours", "=", "pd", ".", "concat", "(", "[", "hours", ".", "set_index", "(", "[", "'ticker'", ",", "'field'", "]", ")", ".", "exch", ".", "unstack", "(", ")", ".", "loc", "[", ":", ",", "cols", "]", ",", "hours", ".", "set_index", "(", "[", "'ticker'", ",", "'field'", "]", ")", ".", "local", ".", "unstack", "(", ")", ".", "loc", "[", ":", ",", "cols", "]", ",", "]", ",", "axis", "=", "1", ")", "hours", ".", "columns", "=", "[", "'Exch_Start'", ",", "'Exch_End'", ",", "'Local_Start'", ",", "'Local_End'", "]", "return", "hours" ]
Check exchange hours vs local hours Args: tickers: list of tickers tz_exch: exchange timezone tz_loc: local timezone Returns: Local and exchange hours
[ "Check", "exchange", "hours", "vs", "local", "hours" ]
python
valid
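A hedged usage sketch for check_hours(); it needs a live Bloomberg session, so the ticker and the returned values are purely illustrative:

from xbbg import blp

# Compare Tokyo exchange hours with the local clock for a hypothetical ticker.
hours = blp.check_hours(tickers=['7974 JT Equity'], tz_exch='Asia/Tokyo')
print(hours)  # columns: Exch_Start, Exch_End, Local_Start, Local_End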
saltstack/salt
salt/modules/consul.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L1629-L1656
def catalog_datacenters(consul_url=None, token=None): ''' Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'catalog/datacenters' ret = _query(consul_url=consul_url, function=function, token=token) return ret
[ "def", "catalog_datacenters", "(", "consul_url", "=", "None", ",", "token", "=", "None", ")", ":", "ret", "=", "{", "}", "if", "not", "consul_url", ":", "consul_url", "=", "_get_config", "(", ")", "if", "not", "consul_url", ":", "log", ".", "error", "(", "'No Consul URL found.'", ")", "ret", "[", "'message'", "]", "=", "'No Consul URL found.'", "ret", "[", "'res'", "]", "=", "False", "return", "ret", "function", "=", "'catalog/datacenters'", "ret", "=", "_query", "(", "consul_url", "=", "consul_url", ",", "function", "=", "function", ",", "token", "=", "token", ")", "return", "ret" ]
Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters
[ "Return", "list", "of", "available", "datacenters", "from", "catalog", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L750-L791
def LoadState( self, config_parser ): """Set our window state from the given config_parser instance""" if not config_parser: return if ( not config_parser.has_section( 'window' ) or ( config_parser.has_option( 'window','maximized' ) and config_parser.getboolean( 'window', 'maximized' ) ) ): self.Maximize(True) try: width,height,x,y = [ config_parser.getint( 'window',key ) for key in ['width','height','x','y'] ] self.SetPosition( (x,y)) self.SetSize( (width,height)) except ConfigParser.NoSectionError, err: # the file isn't written yet, so don't even warn... pass except Exception, err: # this is just convenience, if it breaks in *any* way, ignore it... log.error( "Unable to load window preferences, ignoring: %s", traceback.format_exc() ) try: font_size = config_parser.getint('window', 'font_size') except Exception: pass # use the default, by default else: font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT) font.SetPointSize(font_size) for ctrl in self.ProfileListControls: ctrl.SetFont(font) for control in self.ProfileListControls: control.LoadState( config_parser ) self.config = config_parser wx.EVT_CLOSE( self, self.OnCloseWindow )
[ "def", "LoadState", "(", "self", ",", "config_parser", ")", ":", "if", "not", "config_parser", ":", "return", "if", "(", "not", "config_parser", ".", "has_section", "(", "'window'", ")", "or", "(", "config_parser", ".", "has_option", "(", "'window'", ",", "'maximized'", ")", "and", "config_parser", ".", "getboolean", "(", "'window'", ",", "'maximized'", ")", ")", ")", ":", "self", ".", "Maximize", "(", "True", ")", "try", ":", "width", ",", "height", ",", "x", ",", "y", "=", "[", "config_parser", ".", "getint", "(", "'window'", ",", "key", ")", "for", "key", "in", "[", "'width'", ",", "'height'", ",", "'x'", ",", "'y'", "]", "]", "self", ".", "SetPosition", "(", "(", "x", ",", "y", ")", ")", "self", ".", "SetSize", "(", "(", "width", ",", "height", ")", ")", "except", "ConfigParser", ".", "NoSectionError", ",", "err", ":", "# the file isn't written yet, so don't even warn...", "pass", "except", "Exception", ",", "err", ":", "# this is just convenience, if it breaks in *any* way, ignore it...", "log", ".", "error", "(", "\"Unable to load window preferences, ignoring: %s\"", ",", "traceback", ".", "format_exc", "(", ")", ")", "try", ":", "font_size", "=", "config_parser", ".", "getint", "(", "'window'", ",", "'font_size'", ")", "except", "Exception", ":", "pass", "# use the default, by default", "else", ":", "font", "=", "wx", ".", "SystemSettings_GetFont", "(", "wx", ".", "SYS_DEFAULT_GUI_FONT", ")", "font", ".", "SetPointSize", "(", "font_size", ")", "for", "ctrl", "in", "self", ".", "ProfileListControls", ":", "ctrl", ".", "SetFont", "(", "font", ")", "for", "control", "in", "self", ".", "ProfileListControls", ":", "control", ".", "LoadState", "(", "config_parser", ")", "self", ".", "config", "=", "config_parser", "wx", ".", "EVT_CLOSE", "(", "self", ",", "self", ".", "OnCloseWindow", ")" ]
Set our window state from the given config_parser instance
[ "Set", "our", "window", "state", "from", "the", "given", "config_parser", "instance" ]
python
train
hearsaycorp/normalize
normalize/record/json.py
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/record/json.py#L291-L296
def json_to_initkwargs(self, json_data, kwargs): """Subclassing hook to specialize how JSON data is converted to keyword arguments""" if isinstance(json_data, basestring): json_data = json.loads(json_data) return json_to_initkwargs(self, json_data, kwargs)
[ "def", "json_to_initkwargs", "(", "self", ",", "json_data", ",", "kwargs", ")", ":", "if", "isinstance", "(", "json_data", ",", "basestring", ")", ":", "json_data", "=", "json", ".", "loads", "(", "json_data", ")", "return", "json_to_initkwargs", "(", "self", ",", "json_data", ",", "kwargs", ")" ]
Subclassing hook to specialize how JSON data is converted to keyword arguments
[ "Subclassing", "hook", "to", "specialize", "how", "JSON", "data", "is", "converted", "to", "keyword", "arguments" ]
python
train
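The string-or-mapping dispatch above in miniature: a JSON string is decoded first, a dict passes straight through, and both reach the same kwargs builder. A sketch using Python 3's str in place of py2's basestring:

import json

def coerce_json(json_data):
    # Accept either a JSON document or an already-parsed mapping.
    if isinstance(json_data, str):
        json_data = json.loads(json_data)
    return json_data

assert coerce_json('{"a": 1}') == coerce_json({'a': 1}) == {'a': 1}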
trolldbois/ctypeslib
ctypeslib/codegen/typehandler.py
https://github.com/trolldbois/ctypeslib/blob/2aeb1942a5a32a5cc798c287cd0d9e684a0181a8/ctypeslib/codegen/typehandler.py#L154-L187
def _array_handler(self, _cursor_type):
        """
        Handles all array types.
        Resolves its element type and makes an Array typedesc.
        """
        # The element type has been previously declared
        # we need to get the canonical typedef, in some cases
        _type = _cursor_type.get_canonical()
        size = _type.get_array_size()
        if size == -1 and _type.kind == TypeKind.INCOMPLETEARRAY:
            size = 0
            # FIXME: Incomplete Array handling at end of record.
            # https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
        # FIXME VARIABLEARRAY DEPENDENTSIZEDARRAY
        _array_type = _type.get_array_element_type()  # .get_canonical()
        if self.is_fundamental_type(_array_type):
            _subtype = self.parse_cursor_type(_array_type)
        elif self.is_pointer_type(_array_type):
            # code.interact(local=locals())
            # pointers to POD have no declaration ??
            # FIXME test_struct_with_pointer x_n_t g[1]
            _subtype = self.parse_cursor_type(_array_type)
        elif self.is_array_type(_array_type):
            _subtype = self.parse_cursor_type(_array_type)
        else:
            _subtype_decl = _array_type.get_declaration()
            _subtype = self.parse_cursor(_subtype_decl)
            # if _subtype_decl.kind == CursorKind.NO_DECL_FOUND:
            #     pass
            #_subtype_name = self.get_unique_name(_subtype_decl)
            #_subtype = self.get_registered(_subtype_name)
        obj = typedesc.ArrayType(_subtype, size)
        obj.location = _subtype.location
        return obj
[ "def", "_array_handler", "(", "self", ",", "_cursor_type", ")", ":", "# The element type has been previously declared", "# we need to get the canonical typedef, in some cases", "_type", "=", "_cursor_type", ".", "get_canonical", "(", ")", "size", "=", "_type", ".", "get_array_size", "(", ")", "if", "size", "==", "-", "1", "and", "_type", ".", "kind", "==", "TypeKind", ".", "INCOMPLETEARRAY", ":", "size", "=", "0", "# FIXME: Incomplete Array handling at end of record.", "# https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html", "# FIXME VARIABLEARRAY DEPENDENTSIZEDARRAY", "_array_type", "=", "_type", ".", "get_array_element_type", "(", ")", "# .get_canonical()", "if", "self", ".", "is_fundamental_type", "(", "_array_type", ")", ":", "_subtype", "=", "self", ".", "parse_cursor_type", "(", "_array_type", ")", "elif", "self", ".", "is_pointer_type", "(", "_array_type", ")", ":", "# code.interact(local=locals())", "# pointers to POD have no declaration ??", "# FIXME test_struct_with_pointer x_n_t g[1]", "_subtype", "=", "self", ".", "parse_cursor_type", "(", "_array_type", ")", "elif", "self", ".", "is_array_type", "(", "_array_type", ")", ":", "_subtype", "=", "self", ".", "parse_cursor_type", "(", "_array_type", ")", "else", ":", "_subtype_decl", "=", "_array_type", ".", "get_declaration", "(", ")", "_subtype", "=", "self", ".", "parse_cursor", "(", "_subtype_decl", ")", "# if _subtype_decl.kind == CursorKind.NO_DECL_FOUND:", "# pass", "#_subtype_name = self.get_unique_name(_subtype_decl)", "#_subtype = self.get_registered(_subtype_name)", "obj", "=", "typedesc", ".", "ArrayType", "(", "_subtype", ",", "size", ")", "obj", ".", "location", "=", "_subtype", ".", "location", "return", "obj" ]
Handles all array types.
Resolves its element type and makes an Array typedesc.
[ "Handles", "all", "array", "types", ".", "Resolves", "it", "s", "element", "type", "and", "makes", "a", "Array", "typedesc", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L2432-L2459
def obj_box_coord_rescale(coord=None, shape=None):
    """Scale down one coordinate from pixel units to the ratio of image size, i.e. in the range of [0, 1].
    It is the reverse process of ``obj_box_coord_scale_to_pixelunit``.

    Parameters
    ------------
    coord : list of 4 int or None
        One coordinate of one image, e.g. [x, y, w, h].
    shape : list of 2 int or None
        For [height, width].

    Returns
    -------
    list of 4 numbers
        New bounding box.

    Examples
    ---------
    >>> coord = tl.prepro.obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100])
      [0.3, 0.4, 0.5, 0.5]

    """
    if coord is None:
        coord = []
    if shape is None:
        shape = [100, 200]

    return obj_box_coords_rescale(coords=[coord], shape=shape)[0]
[ "def", "obj_box_coord_rescale", "(", "coord", "=", "None", ",", "shape", "=", "None", ")", ":", "if", "coord", "is", "None", ":", "coord", "=", "[", "]", "if", "shape", "is", "None", ":", "shape", "=", "[", "100", ",", "200", "]", "return", "obj_box_coords_rescale", "(", "coords", "=", "[", "coord", "]", ",", "shape", "=", "shape", ")", "[", "0", "]" ]
Scale down one coordinate from pixel units to the ratio of image size, i.e. in the range of [0, 1].
It is the reverse process of ``obj_box_coord_scale_to_pixelunit``.

Parameters
------------
coord : list of 4 int or None
    One coordinate of one image, e.g. [x, y, w, h].
shape : list of 2 int or None
    For [height, width].

Returns
-------
list of 4 numbers
    New bounding box.

Examples
---------
>>> coord = tl.prepro.obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100])
  [0.3, 0.4, 0.5, 0.5]
[ "Scale", "down", "one", "coordinates", "from", "pixel", "unit", "to", "the", "ratio", "of", "image", "size", "i", ".", "e", ".", "in", "the", "range", "of", "[", "0", "1", "]", ".", "It", "is", "the", "reverse", "process", "of", "obj_box_coord_scale_to_pixelunit", "." ]
python
valid
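The arithmetic behind the docstring example, without TensorLayer: assuming x and w divide by the image width while y and h divide by the height, which reproduces the documented result for a square image:

def rescale(coord, shape):
    height, width = shape
    x, y, w, h = coord
    return [x / width, y / height, w / width, h / height]

print(rescale([30, 40, 50, 50], [100, 100]))  # -> [0.3, 0.4, 0.5, 0.5]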
mozilla/configman
configman/def_sources/for_argparse.py
https://github.com/mozilla/configman/blob/83159fed61cc4cbbe5a4a6a00d3acad8a0c39c96/configman/def_sources/for_argparse.py#L39-L52
def find_action_name_by_value(registry, target_action_instance): """the association of a name of an action class with a human readable string is exposed externally only at the time of argument definitions. This routine, when given a reference to argparse's internal action registry and an action, will find that action and return the name under which it was registered. """ target_type = type(target_action_instance) for key, value in six.iteritems(registry['action']): if value is target_type: if key is None: return 'store' return key return None
[ "def", "find_action_name_by_value", "(", "registry", ",", "target_action_instance", ")", ":", "target_type", "=", "type", "(", "target_action_instance", ")", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "registry", "[", "'action'", "]", ")", ":", "if", "value", "is", "target_type", ":", "if", "key", "is", "None", ":", "return", "'store'", "return", "key", "return", "None" ]
the association of a name of an action class with a human readable string is exposed externally only at the time of argument definitions. This routine, when given a reference to argparse's internal action registry and an action, will find that action and return the name under which it was registered.
[ "the", "association", "of", "a", "name", "of", "an", "action", "class", "with", "a", "human", "readable", "string", "is", "exposed", "externally", "only", "at", "the", "time", "of", "argument", "definitions", ".", "This", "routine", "when", "given", "a", "reference", "to", "argparse", "s", "internal", "action", "registry", "and", "an", "action", "will", "find", "that", "action", "and", "return", "the", "name", "under", "which", "it", "was", "registered", "." ]
python
train
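Usage sketch: the registry argument is argparse's internal action registry, reachable through the private parser._registries attribute, so this leans on CPython internals and may break across versions:

import argparse
from configman.def_sources.for_argparse import find_action_name_by_value

parser = argparse.ArgumentParser()
action = parser.add_argument('--flag', action='store_true')
print(find_action_name_by_value(parser._registries, action))  # -> 'store_true'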
tamasgal/km3pipe
km3pipe/db.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L553-L557
def streams(self): """A list of available streams""" if self._streams is None: self._streams = list(self._stream_df["STREAM"].values) return self._streams
[ "def", "streams", "(", "self", ")", ":", "if", "self", ".", "_streams", "is", "None", ":", "self", ".", "_streams", "=", "list", "(", "self", ".", "_stream_df", "[", "\"STREAM\"", "]", ".", "values", ")", "return", "self", ".", "_streams" ]
A list of available streams
[ "A", "list", "of", "available", "streams" ]
python
train
ramrod-project/database-brain
schema/brain/binary/data.py
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/binary/data.py#L44-L55
def put_buffer(filename, content, conn=None): """ helper function for put :param filename: <str> :param content: <bytes> :param conn: <rethinkdb.DefaultConnection> :return: <dict> """ obj_dict = {PRIMARY_FIELD: filename, CONTENT_FIELD: content, TIMESTAMP_FIELD: time()} return put(obj_dict, conn=conn)
[ "def", "put_buffer", "(", "filename", ",", "content", ",", "conn", "=", "None", ")", ":", "obj_dict", "=", "{", "PRIMARY_FIELD", ":", "filename", ",", "CONTENT_FIELD", ":", "content", ",", "TIMESTAMP_FIELD", ":", "time", "(", ")", "}", "return", "put", "(", "obj_dict", ",", "conn", "=", "conn", ")" ]
helper function for put :param filename: <str> :param content: <bytes> :param conn: <rethinkdb.DefaultConnection> :return: <dict>
[ "helper", "function", "for", "put", ":", "param", "filename", ":", "<str", ">", ":", "param", "content", ":", "<bytes", ">", ":", "param", "conn", ":", "<rethinkdb", ".", "DefaultConnection", ">", ":", "return", ":", "<dict", ">" ]
python
train
gwww/elkm1
elkm1_lib/elements.py
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elements.py#L90-L100
def get_descriptions(self, description_type): """ Gets the descriptions for specified type. When complete the callback is called with a list of descriptions """ (desc_type, max_units) = description_type results = [None] * max_units self.elk._descriptions_in_progress[desc_type] = (max_units, results, self._got_desc) self.elk.send(sd_encode(desc_type=desc_type, unit=0))
[ "def", "get_descriptions", "(", "self", ",", "description_type", ")", ":", "(", "desc_type", ",", "max_units", ")", "=", "description_type", "results", "=", "[", "None", "]", "*", "max_units", "self", ".", "elk", ".", "_descriptions_in_progress", "[", "desc_type", "]", "=", "(", "max_units", ",", "results", ",", "self", ".", "_got_desc", ")", "self", ".", "elk", ".", "send", "(", "sd_encode", "(", "desc_type", "=", "desc_type", ",", "unit", "=", "0", ")", ")" ]
Gets the descriptions for specified type. When complete the callback is called with a list of descriptions
[ "Gets", "the", "descriptions", "for", "specified", "type", ".", "When", "complete", "the", "callback", "is", "called", "with", "a", "list", "of", "descriptions" ]
python
train
numenta/htmresearch
htmresearch/algorithms/lateral_pooler.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/lateral_pooler.py#L116-L128
def _updateLateralConnections(self, epsilon, avgActivityPairs): """ Sets the weights of the lateral connections based on average pairwise activity of the SP's columns. Intuitively: The more two columns fire together on average the stronger the inhibitory connection gets. """ oldL = self.lateralConnections newL = avgActivityPairs.copy() np.fill_diagonal(newL, 0.0) newL = newL/np.sum(newL, axis=1, keepdims=True) self.lateralConnections[:,:] = (1 - epsilon)*oldL + epsilon*newL
[ "def", "_updateLateralConnections", "(", "self", ",", "epsilon", ",", "avgActivityPairs", ")", ":", "oldL", "=", "self", ".", "lateralConnections", "newL", "=", "avgActivityPairs", ".", "copy", "(", ")", "np", ".", "fill_diagonal", "(", "newL", ",", "0.0", ")", "newL", "=", "newL", "/", "np", ".", "sum", "(", "newL", ",", "axis", "=", "1", ",", "keepdims", "=", "True", ")", "self", ".", "lateralConnections", "[", ":", ",", ":", "]", "=", "(", "1", "-", "epsilon", ")", "*", "oldL", "+", "epsilon", "*", "newL" ]
Sets the weights of the lateral connections based on average pairwise activity of the SP's columns. Intuitively: The more two columns fire together on average the stronger the inhibitory connection gets.
[ "Sets", "the", "weights", "of", "the", "lateral", "connections", "based", "on", "average", "pairwise", "activity", "of", "the", "SP", "s", "columns", ".", "Intuitively", ":", "The", "more", "two", "columns", "fire", "together", "on", "average", "the", "stronger", "the", "inhibitory", "connection", "gets", "." ]
python
train
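A numeric sketch of the update rule: copy the pairwise-activity matrix, zero its diagonal, normalize each row to sum to 1, then blend the result into the old weights with step size epsilon. The values are made up:

import numpy as np

avgActivityPairs = np.array([[0.5, 0.2, 0.1],
                             [0.2, 0.5, 0.3],
                             [0.1, 0.3, 0.5]])
oldL = np.full((3, 3), 1.0 / 3.0)   # hypothetical previous weights
epsilon = 0.1

newL = avgActivityPairs.copy()
np.fill_diagonal(newL, 0.0)          # no self-inhibition
newL = newL / np.sum(newL, axis=1, keepdims=True)
updated = (1 - epsilon) * oldL + epsilon * newL
print(updated.round(3))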
ianclegg/winrmlib
winrmlib/client.py
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/client.py#L53-L64
def create_session():
    """
    Open a CommandShell against a WinRM endpoint, run ``ipconfig /all``,
    print the output, and close the shell.
    """
    shell = CommandShell('http://192.168.137.238:5985/wsman', 'Administrator', 'Pa55w0rd')
    shell.open()
    command_id = shell.run('ipconfig', ['/all'])
    (stdout, stderr, exit_code) = shell.receive(command_id)
    sys.stdout.write(stdout.strip() + '\r\n')
    shell.close()
    return None
[ "def", "create_session", "(", ")", ":", "shell", "=", "CommandShell", "(", "'http://192.168.137.238:5985/wsman'", ",", "'Administrator'", ",", "'Pa55w0rd'", ")", "shell", ".", "open", "(", ")", "command_id", "=", "shell", ".", "run", "(", "'ipconfig'", ",", "[", "'/all'", "]", ")", "(", "stdout", ",", "stderr", ",", "exit_code", ")", "=", "shell", ".", "receive", "(", "command_id", ")", "sys", ".", "stdout", ".", "write", "(", "stdout", ".", "strip", "(", ")", "+", "'\\r\\n'", ")", "shell", ".", "close", "(", ")", "return", "None" ]
Open a CommandShell against a WinRM endpoint, run ``ipconfig /all``,
print the output, and close the shell.
[ "shell", "=", "CommandShell", "(", "http", ":", "//", "192", ".", "168", ".", "145", ".", "132", ":", "5985", "/", "wsman", "Administrator", "Pa55w0rd", ")" ]
python
train
ibis-project/ibis
ibis/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/client.py#L73-L90
def table(self, name, database=None): """ Create a table expression that references a particular table in the database Parameters ---------- name : string database : string, optional Returns ------- table : TableExpr """ qualified_name = self._fully_qualified_name(name, database) schema = self._get_table_schema(qualified_name) node = self.table_class(qualified_name, schema, self) return self.table_expr_class(node)
[ "def", "table", "(", "self", ",", "name", ",", "database", "=", "None", ")", ":", "qualified_name", "=", "self", ".", "_fully_qualified_name", "(", "name", ",", "database", ")", "schema", "=", "self", ".", "_get_table_schema", "(", "qualified_name", ")", "node", "=", "self", ".", "table_class", "(", "qualified_name", ",", "schema", ",", "self", ")", "return", "self", ".", "table_expr_class", "(", "node", ")" ]
Create a table expression that references a particular table in the database Parameters ---------- name : string database : string, optional Returns ------- table : TableExpr
[ "Create", "a", "table", "expression", "that", "references", "a", "particular", "table", "in", "the", "database" ]
python
train
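A hedged usage sketch against the SQLite backend; the database file and table name are hypothetical:

import ibis

con = ibis.sqlite.connect('example.db')  # hypothetical database
events = con.table('events')             # TableExpr over the qualified name
print(events.schema())                   # schema fetched via _get_table_schema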
tensorflow/tensor2tensor
tensor2tensor/utils/learning_rate.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/learning_rate.py#L186-L202
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None): """Learning rate warmup multiplier.""" if not warmup_steps: return tf.constant(1.) tf.logging.info("Applying %s learning rate warmup for %d steps", warmup_schedule, warmup_steps) warmup_steps = tf.to_float(warmup_steps) global_step = _global_step(hparams) if warmup_schedule == "exp": return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step) else: assert warmup_schedule == "linear" start = tf.constant(0.35) return ((tf.constant(1.) - start) / warmup_steps) * global_step + start
[ "def", "_learning_rate_warmup", "(", "warmup_steps", ",", "warmup_schedule", "=", "\"exp\"", ",", "hparams", "=", "None", ")", ":", "if", "not", "warmup_steps", ":", "return", "tf", ".", "constant", "(", "1.", ")", "tf", ".", "logging", ".", "info", "(", "\"Applying %s learning rate warmup for %d steps\"", ",", "warmup_schedule", ",", "warmup_steps", ")", "warmup_steps", "=", "tf", ".", "to_float", "(", "warmup_steps", ")", "global_step", "=", "_global_step", "(", "hparams", ")", "if", "warmup_schedule", "==", "\"exp\"", ":", "return", "tf", ".", "exp", "(", "tf", ".", "log", "(", "0.01", ")", "/", "warmup_steps", ")", "**", "(", "warmup_steps", "-", "global_step", ")", "else", ":", "assert", "warmup_schedule", "==", "\"linear\"", "start", "=", "tf", ".", "constant", "(", "0.35", ")", "return", "(", "(", "tf", ".", "constant", "(", "1.", ")", "-", "start", ")", "/", "warmup_steps", ")", "*", "global_step", "+", "start" ]
Learning rate warmup multiplier.
[ "Learning", "rate", "warmup", "multiplier", "." ]
python
train
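The "exp" branch in plain NumPy: exp(log(0.01)/W) ** (W - step) equals 0.01 ** ((W - step) / W), so the multiplier climbs from 0.01 at step 0 to 1.0 once the step reaches the warmup horizon W:

import numpy as np

W = 1000.0
for step in (0, 500, 1000):
    mult = np.exp(np.log(0.01) / W) ** (W - step)
    print(step, round(float(mult), 4))  # 0 -> 0.01, 500 -> 0.1, 1000 -> 1.0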
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L113-L117
def p_pragma(self, p): 'pragma : LPAREN TIMES ID TIMES RPAREN' p[0] = Pragma(PragmaEntry(p[3], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_pragma", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Pragma", "(", "PragmaEntry", "(", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
pragma : LPAREN TIMES ID TIMES RPAREN
[ "pragma", ":", "LPAREN", "TIMES", "ID", "TIMES", "RPAREN" ]
python
train
pjamesjoyce/lcopt
lcopt/model.py
https://github.com/pjamesjoyce/lcopt/blob/3f1caca31fece4a3068a384900707e6d21d04597/lcopt/model.py#L338-L352
def create_product(self, name, location='GLO', unit='kg', **kwargs):
        """
        Create a new product in the model database
        """

        new_product = item_factory(name=name, location=location, unit=unit, type='product', **kwargs)

        if not self.exists_in_database(new_product['code']):
            self.add_to_database(new_product)
            #print ('{} added to database'.format(name))
            return self.get_exchange(name)
        else:
            #print('{} already exists in this database'.format(name))
            return False
[ "def", "create_product", "(", "self", ",", "name", ",", "location", "=", "'GLO'", ",", "unit", "=", "'kg'", ",", "*", "*", "kwargs", ")", ":", "new_product", "=", "item_factory", "(", "name", "=", "name", ",", "location", "=", "location", ",", "unit", "=", "unit", ",", "type", "=", "'product'", ",", "*", "*", "kwargs", ")", "if", "not", "self", ".", "exists_in_database", "(", "new_product", "[", "'code'", "]", ")", ":", "self", ".", "add_to_database", "(", "new_product", ")", "#print ('{} added to database'.format(name))", "return", "self", ".", "get_exchange", "(", "name", ")", "else", ":", "#print('{} already exists in this database'.format(name))", "return", "False" ]
Create a new product in the model database
[ "Create", "a", "new", "product", "in", "the", "model", "database" ]
python
train
mesbahamin/chronophore
chronophore/tkview.py
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/tkview.py#L136-L155
def _show_feedback_label(self, message, seconds=None): """Display a message in lbl_feedback, which then times out after some number of seconds. Use after() to schedule a callback to hide the feedback message. This works better than using threads, which can cause problems in Tk. """ if seconds is None: seconds = CONFIG['MESSAGE_DURATION'] # cancel any existing callback to clear the feedback # label. this prevents flickering and inconsistent # timing during rapid input. with contextlib.suppress(AttributeError): self.root.after_cancel(self.clear_feedback) logger.debug('Label feedback: "{}"'.format(message)) self.feedback.set(message) self.clear_feedback = self.root.after( 1000 * seconds, lambda: self.feedback.set("") )
[ "def", "_show_feedback_label", "(", "self", ",", "message", ",", "seconds", "=", "None", ")", ":", "if", "seconds", "is", "None", ":", "seconds", "=", "CONFIG", "[", "'MESSAGE_DURATION'", "]", "# cancel any existing callback to clear the feedback", "# label. this prevents flickering and inconsistent", "# timing during rapid input.", "with", "contextlib", ".", "suppress", "(", "AttributeError", ")", ":", "self", ".", "root", ".", "after_cancel", "(", "self", ".", "clear_feedback", ")", "logger", ".", "debug", "(", "'Label feedback: \"{}\"'", ".", "format", "(", "message", ")", ")", "self", ".", "feedback", ".", "set", "(", "message", ")", "self", ".", "clear_feedback", "=", "self", ".", "root", ".", "after", "(", "1000", "*", "seconds", ",", "lambda", ":", "self", ".", "feedback", ".", "set", "(", "\"\"", ")", ")" ]
Display a message in lbl_feedback, which then times out after some number of seconds. Use after() to schedule a callback to hide the feedback message. This works better than using threads, which can cause problems in Tk.
[ "Display", "a", "message", "in", "lbl_feedback", "which", "then", "times", "out", "after", "some", "number", "of", "seconds", ".", "Use", "after", "()", "to", "schedule", "a", "callback", "to", "hide", "the", "feedback", "message", ".", "This", "works", "better", "than", "using", "threads", "which", "can", "cause", "problems", "in", "Tk", "." ]
python
train
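The after()/after_cancel() debounce on its own: every new message cancels the pending clear callback before scheduling a fresh one, which is what prevents flicker under rapid input. A minimal Tk sketch with an invented message:

import tkinter as tk

root = tk.Tk()
feedback = tk.StringVar()
tk.Label(root, textvariable=feedback).pack()
pending = None

def show(message, seconds=3):
    global pending
    if pending is not None:
        root.after_cancel(pending)   # drop the stale clear callback
    feedback.set(message)
    pending = root.after(1000 * seconds, lambda: feedback.set(''))

show('Signed in: 000000000')  # hypothetical feedback message
root.mainloop()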
dmlc/gluon-nlp
scripts/word_embeddings/evaluation.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/evaluation.py#L145-L197
def evaluate_similarity(args, token_embedding, ctx, logfile=None, global_step=0): """Evaluate on specified similarity datasets.""" results = [] for similarity_function in args.similarity_functions: evaluator = nlp.embedding.evaluation.WordEmbeddingSimilarity( idx_to_vec=token_embedding.idx_to_vec, similarity_function=similarity_function) evaluator.initialize(ctx=ctx) if not args.no_hybridize: evaluator.hybridize() # Evaluate all datasets for (dataset_name, dataset_kwargs, dataset) in iterate_similarity_datasets(args): initial_length = len(dataset) dataset_coded = [[ token_embedding.token_to_idx[d[0]], token_embedding.token_to_idx[d[1]], d[2] ] for d in dataset if d[0] in token_embedding.token_to_idx and d[1] in token_embedding.token_to_idx] num_dropped = initial_length - len(dataset_coded) # All words are unknown if not len(dataset_coded): correlation = 0 else: words1, words2, scores = zip(*dataset_coded) pred_similarity = evaluator( mx.nd.array(words1, ctx=ctx), mx.nd.array(words2, ctx=ctx)) sr = stats.spearmanr(pred_similarity.asnumpy(), np.array(scores)) correlation = sr.correlation logging.info( 'Spearman rank correlation on %s (%s pairs) %s with %s:\t%s', dataset.__class__.__name__, len(dataset_coded), str(dataset_kwargs), similarity_function, correlation) result = dict( task='similarity', dataset_name=dataset_name, dataset_kwargs=dataset_kwargs, similarity_function=similarity_function, spearmanr=correlation, num_dropped=num_dropped, global_step=global_step, ) log_similarity_result(logfile, result) results.append(result) return results
[ "def", "evaluate_similarity", "(", "args", ",", "token_embedding", ",", "ctx", ",", "logfile", "=", "None", ",", "global_step", "=", "0", ")", ":", "results", "=", "[", "]", "for", "similarity_function", "in", "args", ".", "similarity_functions", ":", "evaluator", "=", "nlp", ".", "embedding", ".", "evaluation", ".", "WordEmbeddingSimilarity", "(", "idx_to_vec", "=", "token_embedding", ".", "idx_to_vec", ",", "similarity_function", "=", "similarity_function", ")", "evaluator", ".", "initialize", "(", "ctx", "=", "ctx", ")", "if", "not", "args", ".", "no_hybridize", ":", "evaluator", ".", "hybridize", "(", ")", "# Evaluate all datasets", "for", "(", "dataset_name", ",", "dataset_kwargs", ",", "dataset", ")", "in", "iterate_similarity_datasets", "(", "args", ")", ":", "initial_length", "=", "len", "(", "dataset", ")", "dataset_coded", "=", "[", "[", "token_embedding", ".", "token_to_idx", "[", "d", "[", "0", "]", "]", ",", "token_embedding", ".", "token_to_idx", "[", "d", "[", "1", "]", "]", ",", "d", "[", "2", "]", "]", "for", "d", "in", "dataset", "if", "d", "[", "0", "]", "in", "token_embedding", ".", "token_to_idx", "and", "d", "[", "1", "]", "in", "token_embedding", ".", "token_to_idx", "]", "num_dropped", "=", "initial_length", "-", "len", "(", "dataset_coded", ")", "# All words are unknown", "if", "not", "len", "(", "dataset_coded", ")", ":", "correlation", "=", "0", "else", ":", "words1", ",", "words2", ",", "scores", "=", "zip", "(", "*", "dataset_coded", ")", "pred_similarity", "=", "evaluator", "(", "mx", ".", "nd", ".", "array", "(", "words1", ",", "ctx", "=", "ctx", ")", ",", "mx", ".", "nd", ".", "array", "(", "words2", ",", "ctx", "=", "ctx", ")", ")", "sr", "=", "stats", ".", "spearmanr", "(", "pred_similarity", ".", "asnumpy", "(", ")", ",", "np", ".", "array", "(", "scores", ")", ")", "correlation", "=", "sr", ".", "correlation", "logging", ".", "info", "(", "'Spearman rank correlation on %s (%s pairs) %s with %s:\\t%s'", ",", "dataset", ".", "__class__", ".", "__name__", ",", "len", "(", "dataset_coded", ")", ",", "str", "(", "dataset_kwargs", ")", ",", "similarity_function", ",", "correlation", ")", "result", "=", "dict", "(", "task", "=", "'similarity'", ",", "dataset_name", "=", "dataset_name", ",", "dataset_kwargs", "=", "dataset_kwargs", ",", "similarity_function", "=", "similarity_function", ",", "spearmanr", "=", "correlation", ",", "num_dropped", "=", "num_dropped", ",", "global_step", "=", "global_step", ",", ")", "log_similarity_result", "(", "logfile", ",", "result", ")", "results", ".", "append", "(", "result", ")", "return", "results" ]
Evaluate on specified similarity datasets.
[ "Evaluate", "on", "specified", "similarity", "datasets", "." ]
python
train
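The correlation step in isolation: scipy's spearmanr on predicted versus gold similarity scores is exactly what fills the spearmanr field of each result dict. The scores below are invented:

import numpy as np
from scipy import stats

pred = np.array([0.9, 0.1, 0.5, 0.7])
gold = np.array([9.0, 1.5, 4.0, 8.0])
print(stats.spearmanr(pred, gold).correlation)  # 1.0 -- identical rankings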
dahlia/wikidata
wikidata/client.py
https://github.com/dahlia/wikidata/blob/b07c9f8fffc59b088ec9dd428d0ec4d989c82db4/wikidata/client.py#L142-L165
def guess_entity_type(self, entity_id: EntityId) -> Optional[EntityType]:
        r"""Guess :class:`~.entity.EntityType` from the given
        :class:`~.entity.EntityId`.  It could return :const:`None` when it
        fails to guess.

        .. note::

           It always fails to guess when :attr:`entity_type_guess`
           is configured to :const:`False`.

        :return: The guessed :class:`~.entity.EntityType`, or :const:`None`
                 if it fails to guess.
        :rtype: :class:`~typing.Optional`\ [:class:`~.entity.EntityType`]

        .. versionadded:: 0.2.0

        """
        if not self.entity_type_guess:
            return None
        if entity_id[0] == 'Q':
            return EntityType.item
        elif entity_id[0] == 'P':
            return EntityType.property
        return None
[ "def", "guess_entity_type", "(", "self", ",", "entity_id", ":", "EntityId", ")", "->", "Optional", "[", "EntityType", "]", ":", "if", "not", "self", ".", "entity_type_guess", ":", "return", "None", "if", "entity_id", "[", "0", "]", "==", "'Q'", ":", "return", "EntityType", ".", "item", "elif", "entity_id", "[", "0", "]", "==", "'P'", ":", "return", "EntityType", ".", "property", "return", "None" ]
r"""Guess :class:`~.entity.EntityType` from the given :class:`~.entity.EntityId`. It could return :const:`None` when it fails to guess. .. note:: It always fails to guess when :attr:`entity_type_guess` is configued to :const:`False`. :return: The guessed :class:`~.entity.EntityId`, or :const:`None` if it fails to guess. :rtype: :class:`~typing.Optional`\ [:class:`~.entity.EntityType`] .. versionadded:: 0.2.0
[ "r", "Guess", ":", "class", ":", "~", ".", "entity", ".", "EntityType", "from", "the", "given", ":", "class", ":", "~", ".", "entity", ".", "EntityId", ".", "It", "could", "return", ":", "const", ":", "None", "when", "it", "fails", "to", "guess", "." ]
python
train
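Prefix-based guessing in action, assuming the Client default of entity_type_guess=True:

from wikidata.client import Client

client = Client()
print(client.guess_entity_type('Q42'))  # EntityType.item
print(client.guess_entity_type('P31'))  # EntityType.property
print(client.guess_entity_type('L1'))   # None -- unrecognized prefix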
lemieuxl/pyGenClean
pyGenClean/SexCheck/sex_check.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/SexCheck/sex_check.py#L488-L504
def createPedChr24UsingPlink(options):
    """Run plink to create a ped format.

    :param options: the options.

    :type options: argparse.Namespace

    Uses Plink to create a ``ped`` file of markers on chromosome ``24``. It
    uses the ``recodeA`` option to use additive coding. It also subsets the
    data to keep only samples with sex problems.

    """
    plinkCommand = ["plink", "--noweb", "--bfile", options.bfile, "--chr",
                    "24", "--recodeA", "--keep",
                    options.out + ".list_problem_sex_ids", "--out",
                    options.out + ".chr24_recodeA"]
    runCommand(plinkCommand)
[ "def", "createPedChr24UsingPlink", "(", "options", ")", ":", "plinkCommand", "=", "[", "\"plink\"", ",", "\"--noweb\"", ",", "\"--bfile\"", ",", "options", ".", "bfile", ",", "\"--chr\"", ",", "\"24\"", ",", "\"--recodeA\"", ",", "\"--keep\"", ",", "options", ".", "out", "+", "\".list_problem_sex_ids\"", ",", "\"--out\"", ",", "options", ".", "out", "+", "\".chr24_recodeA\"", "]", "runCommand", "(", "plinkCommand", ")" ]
Run plink to create a ped format.

:param options: the options.

:type options: argparse.Namespace

Uses Plink to create a ``ped`` file of markers on chromosome ``24``. It
uses the ``recodeA`` option to use additive coding. It also subsets the
data to keep only samples with sex problems.
[ "Run", "plink", "to", "create", "a", "ped", "format", "." ]
python
train
trevisanj/a99
a99/parts.py
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/parts.py#L101-L106
def one_liner_str(self):
        """Returns a string that is (supposed to be) shorter than str() and does not contain newlines"""
        assert self.less_attrs is not None, "Forgot to set attrs class variable"
        s_format = "{}={}"
        s = "; ".join([s_format.format(x, self.__getattribute__(x)) for x in self.less_attrs])
        return s
[ "def", "one_liner_str", "(", "self", ")", ":", "assert", "self", ".", "less_attrs", "is", "not", "None", ",", "\"Forgot to set attrs class variable\"", "s_format", "=", "\"{}={}\"", "s", "=", "\"; \"", ".", "join", "(", "[", "s_format", ".", "format", "(", "x", ",", "self", ".", "__getattribute__", "(", "x", ")", ")", "for", "x", "in", "self", ".", "less_attrs", "]", ")", "return", "s" ]
Returns a string that is (supposed to be) shorter than str() and does not contain newlines
[ "Returns", "string", "(", "supposed", "to", "be", ")", "shorter", "than", "str", "()", "and", "not", "contain", "newline" ]
python
train
horazont/aioxmpp
aioxmpp/callbacks.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/callbacks.py#L472-L503
def connect(self, f, mode=None): """ Connect an object `f` to the signal. The type the object needs to have depends on `mode`, but usually it needs to be a callable. :meth:`connect` returns an opaque token which can be used with :meth:`disconnect` to disconnect the object from the signal. The default value for `mode` is :attr:`STRONG`. Any decorator can be used as argument for `mode` and it is applied to `f`. The result is stored internally and is what will be called when the signal is being emitted. If the result of `mode` returns a false value during emission, the connection is removed. .. note:: The return values required by the callable returned by `mode` and the one required by a callable passed to `f` using the predefined modes are complementary! A callable `f` needs to return true to be removed from the connections, while a callable returned by the `mode` decorator needs to return false. Existing modes are listed below. """ mode = mode or self.STRONG self.logger.debug("connecting %r with mode %r", f, mode) return self._connect(mode(f))
[ "def", "connect", "(", "self", ",", "f", ",", "mode", "=", "None", ")", ":", "mode", "=", "mode", "or", "self", ".", "STRONG", "self", ".", "logger", ".", "debug", "(", "\"connecting %r with mode %r\"", ",", "f", ",", "mode", ")", "return", "self", ".", "_connect", "(", "mode", "(", "f", ")", ")" ]
Connect an object `f` to the signal. The type the object needs to have depends on `mode`, but usually it needs to be a callable. :meth:`connect` returns an opaque token which can be used with :meth:`disconnect` to disconnect the object from the signal. The default value for `mode` is :attr:`STRONG`. Any decorator can be used as argument for `mode` and it is applied to `f`. The result is stored internally and is what will be called when the signal is being emitted. If the result of `mode` returns a false value during emission, the connection is removed. .. note:: The return values required by the callable returned by `mode` and the one required by a callable passed to `f` using the predefined modes are complementary! A callable `f` needs to return true to be removed from the connections, while a callable returned by the `mode` decorator needs to return false. Existing modes are listed below.
[ "Connect", "an", "object", "f", "to", "the", "signal", ".", "The", "type", "the", "object", "needs", "to", "have", "depends", "on", "mode", "but", "usually", "it", "needs", "to", "be", "a", "callable", "." ]
python
train
tanghaibao/goatools
goatools/wr_tbl_class.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl_class.py#L155-L174
def _init_fmtname2wbfmtobj(self, workbook, **kws):
        """Initialize fmtname2wbfmtobj."""
        wbfmtdict = [
            kws.get('format_txt0', self.dflt_wbfmtdict[0]),
            kws.get('format_txt1', self.dflt_wbfmtdict[1]),
            kws.get('format_txt2', self.dflt_wbfmtdict[2]),
            kws.get('format_txt3', self.dflt_wbfmtdict[3])]
        fmtname2wbfmtobj = {
            'plain': workbook.add_format(wbfmtdict[0]),
            'plain bold': workbook.add_format(wbfmtdict[3]),
            'very light grey': workbook.add_format(wbfmtdict[1]),
            'light grey': workbook.add_format(wbfmtdict[2])}
        # Use a xlsx namedtuple field value to set row color
        ntval2wbfmtdict = kws.get('ntval2wbfmtdict', None)
        if ntval2wbfmtdict is not None:
            for ntval, wbfmtdict in ntval2wbfmtdict.items():
                fmtname2wbfmtobj[ntval] = workbook.add_format(wbfmtdict)
            if 'ntfld_wbfmt' not in kws:
                sys.stdout.write("**WARNING: 'ntfld_wbfmt' NOT PRESENT\n")
        return fmtname2wbfmtobj
[ "def", "_init_fmtname2wbfmtobj", "(", "self", ",", "workbook", ",", "*", "*", "kws", ")", ":", "wbfmtdict", "=", "[", "kws", ".", "get", "(", "'format_txt0'", ",", "self", ".", "dflt_wbfmtdict", "[", "0", "]", ")", ",", "kws", ".", "get", "(", "'format_txt1'", ",", "self", ".", "dflt_wbfmtdict", "[", "1", "]", ")", ",", "kws", ".", "get", "(", "'format_txt2'", ",", "self", ".", "dflt_wbfmtdict", "[", "2", "]", ")", ",", "kws", ".", "get", "(", "'format_txt3'", ",", "self", ".", "dflt_wbfmtdict", "[", "3", "]", ")", "]", "fmtname2wbfmtobj", "=", "{", "'plain'", ":", "workbook", ".", "add_format", "(", "wbfmtdict", "[", "0", "]", ")", ",", "'plain bold'", ":", "workbook", ".", "add_format", "(", "wbfmtdict", "[", "3", "]", ")", ",", "'very light grey'", ":", "workbook", ".", "add_format", "(", "wbfmtdict", "[", "1", "]", ")", ",", "'light grey'", ":", "workbook", ".", "add_format", "(", "wbfmtdict", "[", "2", "]", ")", "}", "# Use a xlsx namedtuple field value to set row color", "ntval2wbfmtdict", "=", "kws", ".", "get", "(", "'ntval2wbfmtdict'", ",", "None", ")", "if", "ntval2wbfmtdict", "is", "not", "None", ":", "for", "ntval", ",", "wbfmtdict", "in", "ntval2wbfmtdict", ".", "items", "(", ")", ":", "fmtname2wbfmtobj", "[", "ntval", "]", "=", "workbook", ".", "add_format", "(", "wbfmtdict", ")", "if", "'ntfld_wbfmt'", "not", "in", "kws", ":", "sys", ".", "stdout", ".", "write", "(", "\"**WARNING: 'ntfld_wbfmt' NOT PRESENT\\n\"", ")", "return", "fmtname2wbfmtobj" ]
Initialize fmtname2wbfmtobj.
[ "Initialize", "fmtname2wbfmtobj", "." ]
python
train
mozilla-iot/webthing-python
webthing/thing.py
https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/thing.py#L298-L305
def add_event(self, event): """ Add a new event and notify subscribers. event -- the event that occurred """ self.events.append(event) self.event_notify(event)
[ "def", "add_event", "(", "self", ",", "event", ")", ":", "self", ".", "events", ".", "append", "(", "event", ")", "self", ".", "event_notify", "(", "event", ")" ]
Add a new event and notify subscribers. event -- the event that occurred
[ "Add", "a", "new", "event", "and", "notify", "subscribers", "." ]
python
test
Titan-C/slaveparticles
slaveparticles/spins.py
https://github.com/Titan-C/slaveparticles/blob/e4c2f5afb1a7b195517ef2f1b5cc758965036aab/slaveparticles/spins.py#L154-L166
def update_H(self, mean_field, l): """Updates the spin hamiltonian and recalculates its eigenbasis""" self.H_s = self.spin_hamiltonian(mean_field, l) try: self.eig_energies, self.eig_states = diagonalize(self.H_s) except np.linalg.linalg.LinAlgError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) raise except ValueError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) print(mean_field, l) raise
[ "def", "update_H", "(", "self", ",", "mean_field", ",", "l", ")", ":", "self", ".", "H_s", "=", "self", ".", "spin_hamiltonian", "(", "mean_field", ",", "l", ")", "try", ":", "self", ".", "eig_energies", ",", "self", ".", "eig_states", "=", "diagonalize", "(", "self", ".", "H_s", ")", "except", "np", ".", "linalg", ".", "linalg", ".", "LinAlgError", ":", "np", ".", "savez", "(", "'errorhamil'", ",", "H", "=", "self", ".", "H_s", ",", "fiel", "=", "mean_field", ",", "lamb", "=", "l", ")", "raise", "except", "ValueError", ":", "np", ".", "savez", "(", "'errorhamil'", ",", "H", "=", "self", ".", "H_s", ",", "fiel", "=", "mean_field", ",", "lamb", "=", "l", ")", "print", "(", "mean_field", ",", "l", ")", "raise" ]
Updates the spin hamiltonian and recalculates its eigenbasis
[ "Updates", "the", "spin", "hamiltonian", "and", "recalculates", "its", "eigenbasis" ]
python
train
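diagonalize() is the module's own helper; for a Hermitian spin Hamiltonian, NumPy's eigh is a reasonable stand-in and shows the eigenenergy/eigenbasis split used above. The matrix here is made up:

import numpy as np

H_s = np.array([[1.0, 0.5],
                [0.5, -1.0]])
eig_energies, eig_states = np.linalg.eigh(H_s)
print(eig_energies)      # ascending eigenenergies
print(eig_states[:, 0])  # eigenvector of the lowest energy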
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L179-L185
def users_set_avatar(self, avatar_url, **kwargs): """Set a user’s avatar""" if avatar_url.startswith('http://') or avatar_url.startswith('https://'): return self.__call_api_post('users.setAvatar', avatarUrl=avatar_url, kwargs=kwargs) else: avatar_file = {"image": open(avatar_url, "rb")} return self.__call_api_post('users.setAvatar', files=avatar_file, kwargs=kwargs)
[ "def", "users_set_avatar", "(", "self", ",", "avatar_url", ",", "*", "*", "kwargs", ")", ":", "if", "avatar_url", ".", "startswith", "(", "'http://'", ")", "or", "avatar_url", ".", "startswith", "(", "'https://'", ")", ":", "return", "self", ".", "__call_api_post", "(", "'users.setAvatar'", ",", "avatarUrl", "=", "avatar_url", ",", "kwargs", "=", "kwargs", ")", "else", ":", "avatar_file", "=", "{", "\"image\"", ":", "open", "(", "avatar_url", ",", "\"rb\"", ")", "}", "return", "self", ".", "__call_api_post", "(", "'users.setAvatar'", ",", "files", "=", "avatar_file", ",", "kwargs", "=", "kwargs", ")" ]
Set a user’s avatar
[ "Set", "a", "user’s", "avatar" ]
python
train
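A hedged usage sketch; the server URL, credentials, and file path are placeholders. A URL argument is POSTed as avatarUrl, anything else is opened and uploaded as an image file:

from rocketchat_API.rocketchat import RocketChat

rocket = RocketChat('user', 'pass', server_url='https://demo.rocket.chat')
rocket.users_set_avatar('https://example.com/avatar.png')  # by URL
rocket.users_set_avatar('./avatar.png')                    # by local file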
fishtown-analytics/dbt
core/dbt/graph/selector.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/graph/selector.py#L93-L114
def _node_is_match(qualified_name, package_names, fqn):
    """Determine if a qualified name matches an fqn, given the set of package
    names in the graph.

    :param List[str] qualified_name: The components of the selector or node
        name, split on '.'.
    :param Set[str] package_names: The set of package names in the graph.
    :param List[str] fqn: The node's fully qualified name in the graph.
    """
    if len(qualified_name) == 1 and fqn[-1] == qualified_name[0]:
        return True

    if qualified_name[0] in package_names:
        if is_selected_node(fqn, qualified_name):
            return True

    for package_name in package_names:
        local_qualified_node_name = [package_name] + qualified_name
        if is_selected_node(fqn, local_qualified_node_name):
            return True

    return False
[ "def", "_node_is_match", "(", "qualified_name", ",", "package_names", ",", "fqn", ")", ":", "if", "len", "(", "qualified_name", ")", "==", "1", "and", "fqn", "[", "-", "1", "]", "==", "qualified_name", "[", "0", "]", ":", "return", "True", "if", "qualified_name", "[", "0", "]", "in", "package_names", ":", "if", "is_selected_node", "(", "fqn", ",", "qualified_name", ")", ":", "return", "True", "for", "package_name", "in", "package_names", ":", "local_qualified_node_name", "=", "[", "package_name", "]", "+", "qualified_name", "if", "is_selected_node", "(", "fqn", ",", "local_qualified_node_name", ")", ":", "return", "True", "return", "False" ]
Determine if a qualified name matches an fqn, given the set of package
names in the graph.

:param List[str] qualified_name: The components of the selector or node
    name, split on '.'.
:param Set[str] package_names: The set of package names in the graph.
:param List[str] fqn: The node's fully qualified name in the graph.
[ "Determine", "if", "a", "qualfied", "name", "matches", "an", "fqn", "given", "the", "set", "of", "package", "names", "in", "the", "graph", "." ]
python
train
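A worked sketch of the three matching rules, with is_selected_node reduced to a plain prefix check for illustration (the real helper also understands '*' globs):

def is_selected_node(fqn, qualified_name):
    # Simplified: exact prefix match only.
    return fqn[:len(qualified_name)] == qualified_name

def node_is_match(qualified_name, package_names, fqn):
    if len(qualified_name) == 1 and fqn[-1] == qualified_name[0]:
        return True
    if qualified_name[0] in package_names:
        if is_selected_node(fqn, qualified_name):
            return True
    for package_name in package_names:
        if is_selected_node(fqn, [package_name] + qualified_name):
            return True
    return False

fqn = ['my_pkg', 'staging', 'users']
print(node_is_match(['users'], {'my_pkg'}, fqn))             # True: bare node name
print(node_is_match(['staging', 'users'], {'my_pkg'}, fqn))  # True: package-prefixed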
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1219-L1277
def _numToTwoByteString(value, numberOfDecimals=0, LsbFirst=False, signed=False):
    """Convert a numerical value to a two-byte string, possibly scaling it.

    Args:
        * value (float or int): The numerical value to be converted.
        * numberOfDecimals (int): Number of decimals, 0 or more, for scaling.
        * LsbFirst (bool): Whether the least significant byte should be first in the resulting string.
        * signed (bool): Whether negative values should be accepted.

    Returns:
        A two-byte string.

    Raises:
        TypeError, ValueError. Gives DeprecationWarning instead of ValueError
        for some values in Python 2.6.

    Use ``numberOfDecimals=1`` to multiply ``value`` by 10 before sending it to the slave register.
    Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.

    Use the parameter ``signed=True`` if making a bytestring that can hold
    negative values. Then negative input will be automatically converted into
    upper range data (two's complement).

    The byte order is controlled by the ``LsbFirst`` parameter, as seen here:

    ====================== ============= ====================================
    ``LsbFirst`` parameter Endianness    Description
    ====================== ============= ====================================
    False (default)        Big-endian    Most significant byte is sent first
    True                   Little-endian Least significant byte is sent first
    ====================== ============= ====================================

    For example:
        To store for example value=77.0, use ``numberOfDecimals = 1`` if the register will hold it as 770 internally.
        The value 770 (dec) is 0302 (hex), where the most significant byte is 03 (hex) and the
        least significant byte is 02 (hex). With ``LsbFirst = False``, the most significant byte is given
        first, which is why the resulting string is ``\\x03\\x02``, which has the length 2.

    """
    _checkNumerical(value, description='inputvalue')
    _checkInt(numberOfDecimals, minvalue=0, description='number of decimals')
    _checkBool(LsbFirst, description='LsbFirst')
    _checkBool(signed, description='signed parameter')

    multiplier = 10 ** numberOfDecimals
    integer = int(float(value) * multiplier)

    if LsbFirst:
        formatcode = '<'  # Little-endian
    else:
        formatcode = '>'  # Big-endian
    if signed:
        formatcode += 'h'  # (Signed) short (2 bytes)
    else:
        formatcode += 'H'  # Unsigned short (2 bytes)

    outstring = _pack(formatcode, integer)
    assert len(outstring) == 2
    return outstring
[ "def", "_numToTwoByteString", "(", "value", ",", "numberOfDecimals", "=", "0", ",", "LsbFirst", "=", "False", ",", "signed", "=", "False", ")", ":", "_checkNumerical", "(", "value", ",", "description", "=", "'inputvalue'", ")", "_checkInt", "(", "numberOfDecimals", ",", "minvalue", "=", "0", ",", "description", "=", "'number of decimals'", ")", "_checkBool", "(", "LsbFirst", ",", "description", "=", "'LsbFirst'", ")", "_checkBool", "(", "signed", ",", "description", "=", "'signed parameter'", ")", "multiplier", "=", "10", "**", "numberOfDecimals", "integer", "=", "int", "(", "float", "(", "value", ")", "*", "multiplier", ")", "if", "LsbFirst", ":", "formatcode", "=", "'<'", "# Little-endian", "else", ":", "formatcode", "=", "'>'", "# Big-endian", "if", "signed", ":", "formatcode", "+=", "'h'", "# (Signed) short (2 bytes)", "else", ":", "formatcode", "+=", "'H'", "# Unsigned short (2 bytes)", "outstring", "=", "_pack", "(", "formatcode", ",", "integer", ")", "assert", "len", "(", "outstring", ")", "==", "2", "return", "outstring" ]
Convert a numerical value to a two-byte string, possibly scaling it.

Args:
    * value (float or int): The numerical value to be converted.
    * numberOfDecimals (int): Number of decimals, 0 or more, for scaling.
    * LsbFirst (bool): Whether the least significant byte should be first in the resulting string.
    * signed (bool): Whether negative values should be accepted.

Returns:
    A two-byte string.

Raises:
    TypeError, ValueError. Gives DeprecationWarning instead of ValueError
    for some values in Python 2.6.

Use ``numberOfDecimals=1`` to multiply ``value`` by 10 before sending it to the slave register.
Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.

Use the parameter ``signed=True`` if making a bytestring that can hold
negative values. Then negative input will be automatically converted into
upper range data (two's complement).

The byte order is controlled by the ``LsbFirst`` parameter, as seen here:

====================== ============= ====================================
``LsbFirst`` parameter Endianness    Description
====================== ============= ====================================
False (default)        Big-endian    Most significant byte is sent first
True                   Little-endian Least significant byte is sent first
====================== ============= ====================================

For example:
    To store for example value=77.0, use ``numberOfDecimals = 1`` if the register will hold it as 770 internally.
    The value 770 (dec) is 0302 (hex), where the most significant byte is 03 (hex) and the
    least significant byte is 02 (hex). With ``LsbFirst = False``, the most significant byte is given
    first, which is why the resulting string is ``\\x03\\x02``, which has the length 2.
[ "Convert", "a", "numerical", "value", "to", "a", "two", "-", "byte", "string", "possibly", "scaling", "it", "." ]
python
train
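A minimal standalone sketch of the packing behaviour documented above, assuming the record's ``_pack`` helper wraps the standard-library ``struct.pack``; the function name and the asserted byte strings below are illustrative, not taken from the record.

import struct

def num_to_two_byte_string(value, number_of_decimals=0, lsb_first=False, signed=False):
    # Scale first: e.g. 77.0 with one decimal becomes the integer 770.
    integer = int(float(value) * 10 ** number_of_decimals)
    # '<' packs little-endian (LSB first), '>' big-endian (MSB first);
    # 'h' is a signed 16-bit short, 'H' an unsigned one.
    formatcode = ('<' if lsb_first else '>') + ('h' if signed else 'H')
    return struct.pack(formatcode, integer)

assert num_to_two_byte_string(77.0, number_of_decimals=1) == b'\x03\x02'
assert num_to_two_byte_string(77.0, number_of_decimals=1, lsb_first=True) == b'\x02\x03'
assert num_to_two_byte_string(-1, signed=True) == b'\xff\xff'  # two's complement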
juju/python-libjuju
juju/client/connection.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/connection.py#L280-L348
async def rpc(self, msg, encoder=None):
        '''Make an RPC to the API. The message is encoded as JSON
        using the given encoder if any.

        :param msg: Parameters for the call (will be encoded as JSON).
        :param encoder: Encoder to be used when encoding the message.
        :return: The result of the call.
        :raises JujuAPIError: When there's an error returned.
        :raises JujuError:
        '''
        self.__request_id__ += 1
        msg['request-id'] = self.__request_id__
        if 'params' not in msg:
            msg['params'] = {}
        if "version" not in msg:
            msg['version'] = self.facades[msg['type']]
        outgoing = json.dumps(msg, indent=2, cls=encoder)
        log.debug('connection {} -> {}'.format(id(self), outgoing))
        for attempt in range(3):
            if self.monitor.status == Monitor.DISCONNECTED:
                # closed cleanly; shouldn't try to reconnect
                raise websockets.exceptions.ConnectionClosed(
                    0, 'websocket closed')
            try:
                await self.ws.send(outgoing)
                break
            except websockets.ConnectionClosed:
                if attempt == 2:
                    raise
                log.warning('RPC: Connection closed, reconnecting')
                # the reconnect has to be done in a separate task because,
                # if it is triggered by the pinger, then this RPC call will
                # be cancelled when the pinger is cancelled by the reconnect,
                # and we don't want the reconnect to be aborted halfway through
                await asyncio.wait([self.reconnect()], loop=self.loop)
                if self.monitor.status != Monitor.CONNECTED:
                    # reconnect failed; abort and shutdown
                    log.error('RPC: Automatic reconnect failed')
                    raise
        result = await self._recv(msg['request-id'])
        log.debug('connection {} <- {}'.format(id(self), result))

        if not result:
            return result

        if 'error' in result:
            # API Error Response
            raise errors.JujuAPIError(result)

        if 'response' not in result:
            # This may never happen
            return result

        if 'results' in result['response']:
            # Check for errors in a result list.
            # TODO This loses the results that might have succeeded.
            # Perhaps JujuError should return all the results including
            # errors, or perhaps a keyword parameter to the rpc method
            # could be added to trigger this behaviour.
            err_results = []
            for res in result['response']['results']:
                if res.get('error', {}).get('message'):
                    err_results.append(res['error']['message'])
            if err_results:
                raise errors.JujuError(err_results)

        elif result['response'].get('error', {}).get('message'):
            raise errors.JujuError(result['response']['error']['message'])

        return result
[ "async", "def", "rpc", "(", "self", ",", "msg", ",", "encoder", "=", "None", ")", ":", "self", ".", "__request_id__", "+=", "1", "msg", "[", "'request-id'", "]", "=", "self", ".", "__request_id__", "if", "'params'", "not", "in", "msg", ":", "msg", "[", "'params'", "]", "=", "{", "}", "if", "\"version\"", "not", "in", "msg", ":", "msg", "[", "'version'", "]", "=", "self", ".", "facades", "[", "msg", "[", "'type'", "]", "]", "outgoing", "=", "json", ".", "dumps", "(", "msg", ",", "indent", "=", "2", ",", "cls", "=", "encoder", ")", "log", ".", "debug", "(", "'connection {} -> {}'", ".", "format", "(", "id", "(", "self", ")", ",", "outgoing", ")", ")", "for", "attempt", "in", "range", "(", "3", ")", ":", "if", "self", ".", "monitor", ".", "status", "==", "Monitor", ".", "DISCONNECTED", ":", "# closed cleanly; shouldn't try to reconnect", "raise", "websockets", ".", "exceptions", ".", "ConnectionClosed", "(", "0", ",", "'websocket closed'", ")", "try", ":", "await", "self", ".", "ws", ".", "send", "(", "outgoing", ")", "break", "except", "websockets", ".", "ConnectionClosed", ":", "if", "attempt", "==", "2", ":", "raise", "log", ".", "warning", "(", "'RPC: Connection closed, reconnecting'", ")", "# the reconnect has to be done in a separate task because,", "# if it is triggered by the pinger, then this RPC call will", "# be cancelled when the pinger is cancelled by the reconnect,", "# and we don't want the reconnect to be aborted halfway through", "await", "asyncio", ".", "wait", "(", "[", "self", ".", "reconnect", "(", ")", "]", ",", "loop", "=", "self", ".", "loop", ")", "if", "self", ".", "monitor", ".", "status", "!=", "Monitor", ".", "CONNECTED", ":", "# reconnect failed; abort and shutdown", "log", ".", "error", "(", "'RPC: Automatic reconnect failed'", ")", "raise", "result", "=", "await", "self", ".", "_recv", "(", "msg", "[", "'request-id'", "]", ")", "log", ".", "debug", "(", "'connection {} <- {}'", ".", "format", "(", "id", "(", "self", ")", ",", "result", ")", ")", "if", "not", "result", ":", "return", "result", "if", "'error'", "in", "result", ":", "# API Error Response", "raise", "errors", ".", "JujuAPIError", "(", "result", ")", "if", "'response'", "not", "in", "result", ":", "# This may never happen", "return", "result", "if", "'results'", "in", "result", "[", "'response'", "]", ":", "# Check for errors in a result list.", "# TODO This loses the results that might have succeeded.", "# Perhaps JujuError should return all the results including", "# errors, or perhaps a keyword parameter to the rpc method", "# could be added to trigger this behaviour.", "err_results", "=", "[", "]", "for", "res", "in", "result", "[", "'response'", "]", "[", "'results'", "]", ":", "if", "res", ".", "get", "(", "'error'", ",", "{", "}", ")", ".", "get", "(", "'message'", ")", ":", "err_results", ".", "append", "(", "res", "[", "'error'", "]", "[", "'message'", "]", ")", "if", "err_results", ":", "raise", "errors", ".", "JujuError", "(", "err_results", ")", "elif", "result", "[", "'response'", "]", ".", "get", "(", "'error'", ",", "{", "}", ")", ".", "get", "(", "'message'", ")", ":", "raise", "errors", ".", "JujuError", "(", "result", "[", "'response'", "]", "[", "'error'", "]", "[", "'message'", "]", ")", "return", "result" ]
Make an RPC to the API. The message is encoded as JSON using the given encoder if any. :param msg: Parameters for the call (will be encoded as JSON). :param encoder: Encoder to be used when encoding the message. :return: The result of the call. :raises JujuAPIError: When there's an error returned. :raises JujuError:
[ "Make", "an", "RPC", "to", "the", "API", ".", "The", "message", "is", "encoded", "as", "JSON", "using", "the", "given", "encoder", "if", "any", ".", ":", "param", "msg", ":", "Parameters", "for", "the", "call", "(", "will", "be", "encoded", "as", "JSON", ")", ".", ":", "param", "encoder", ":", "Encoder", "to", "be", "used", "when", "encoding", "the", "message", ".", ":", "return", ":", "The", "result", "of", "the", "call", ".", ":", "raises", "JujuAPIError", ":", "When", "there", "s", "an", "error", "returned", ".", ":", "raises", "JujuError", ":" ]
python
train
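A hedged sketch of the message envelope this method expects. The facade and request names below are illustrative placeholders, not values from the record; the filled-in keys are taken from the method body itself.

# rpc() fills in 'request-id' (a monotonic counter), 'params' ({} if absent)
# and 'version' (looked up in self.facades by 'type') before sending.
msg = {
    'type': 'Client',             # facade name, keys the version lookup
    'request': 'FullStatus',      # operation being invoked
    'params': {'patterns': []},   # optional; defaults to {}
}
# result = await connection.rpc(msg)   # raises JujuAPIError on an API error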
mozilla/mozilla-django-oidc
mozilla_django_oidc/auth.py
https://github.com/mozilla/mozilla-django-oidc/blob/e780130deacccbafc85a92f48d1407e042f5f955/mozilla_django_oidc/auth.py#L301-L330
def get_or_create_user(self, access_token, id_token, payload): """Returns a User instance if 1 user is found. Creates a user if not found and configured to do so. Returns nothing if multiple users are matched.""" user_info = self.get_userinfo(access_token, id_token, payload) email = user_info.get('email') claims_verified = self.verify_claims(user_info) if not claims_verified: msg = 'Claims verification failed' raise SuspiciousOperation(msg) # email based filtering users = self.filter_users_by_claims(user_info) if len(users) == 1: return self.update_user(users[0], user_info) elif len(users) > 1: # In the rare case that two user accounts have the same email address, # bail. Randomly selecting one seems really wrong. msg = 'Multiple users returned' raise SuspiciousOperation(msg) elif self.get_settings('OIDC_CREATE_USER', True): user = self.create_user(user_info) return user else: LOGGER.debug('Login failed: No user with email %s found, and ' 'OIDC_CREATE_USER is False', email) return None
[ "def", "get_or_create_user", "(", "self", ",", "access_token", ",", "id_token", ",", "payload", ")", ":", "user_info", "=", "self", ".", "get_userinfo", "(", "access_token", ",", "id_token", ",", "payload", ")", "email", "=", "user_info", ".", "get", "(", "'email'", ")", "claims_verified", "=", "self", ".", "verify_claims", "(", "user_info", ")", "if", "not", "claims_verified", ":", "msg", "=", "'Claims verification failed'", "raise", "SuspiciousOperation", "(", "msg", ")", "# email based filtering", "users", "=", "self", ".", "filter_users_by_claims", "(", "user_info", ")", "if", "len", "(", "users", ")", "==", "1", ":", "return", "self", ".", "update_user", "(", "users", "[", "0", "]", ",", "user_info", ")", "elif", "len", "(", "users", ")", ">", "1", ":", "# In the rare case that two user accounts have the same email address,", "# bail. Randomly selecting one seems really wrong.", "msg", "=", "'Multiple users returned'", "raise", "SuspiciousOperation", "(", "msg", ")", "elif", "self", ".", "get_settings", "(", "'OIDC_CREATE_USER'", ",", "True", ")", ":", "user", "=", "self", ".", "create_user", "(", "user_info", ")", "return", "user", "else", ":", "LOGGER", ".", "debug", "(", "'Login failed: No user with email %s found, and '", "'OIDC_CREATE_USER is False'", ",", "email", ")", "return", "None" ]
Returns a User instance if 1 user is found. Creates a user if not found and configured to do so. Returns nothing if multiple users are matched.
[ "Returns", "a", "User", "instance", "if", "1", "user", "is", "found", ".", "Creates", "a", "user", "if", "not", "found", "and", "configured", "to", "do", "so", ".", "Returns", "nothing", "if", "multiple", "users", "are", "matched", "." ]
python
train
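A sketch of how the hooks called above are commonly customised in a subclass. The hook names (``verify_claims``, ``filter_users_by_claims``) come straight from the method's own calls; ``self.UserModel`` and the ``email_verified`` claim are assumptions for illustration.

from mozilla_django_oidc.auth import OIDCAuthenticationBackend

class MyOIDCBackend(OIDCAuthenticationBackend):
    def verify_claims(self, claims):
        # Tighten the default check: also require a verified email claim.
        return super().verify_claims(claims) and claims.get('email_verified', False)

    def filter_users_by_claims(self, claims):
        # Match users by the email claim, case-insensitively.
        email = claims.get('email')
        if not email:
            return self.UserModel.objects.none()
        return self.UserModel.objects.filter(email__iexact=email)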
SiLab-Bonn/online_monitor
online_monitor/converter/transceiver.py
https://github.com/SiLab-Bonn/online_monitor/blob/113c7d33e9ea4bc18520cefa329462faa406cc08/online_monitor/converter/transceiver.py#L192-L202
def send_data(self, data):
        ''' This function can be overwritten in a derived class

        The standard behaviour is to broadcast all receiver data to all backends
        '''
        for frontend_data in data:
            serialized_data = self.serialize_data(frontend_data)
            if sys.version_info >= (3, 0):
                serialized_data = serialized_data.encode('utf-8')
            for actual_backend in self.backends:
                actual_backend[1].send(serialized_data)
[ "def", "send_data", "(", "self", ",", "data", ")", ":", "for", "frontend_data", "in", "data", ":", "serialized_data", "=", "self", ".", "serialize_data", "(", "frontend_data", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ":", "serialized_data", "=", "serialized_data", ".", "encode", "(", "'utf-8'", ")", "for", "actual_backend", "in", "self", ".", "backends", ":", "actual_backend", "[", "1", "]", ".", "send", "(", "serialized_data", ")" ]
This function can be overwritten in a derived class

        The standard behaviour is to broadcast all receiver data to all backends
[ "This", "function", "can", "be", "overwritten", "in", "derived", "class" ]
python
train
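A self-contained sketch of the override pattern the docstring invites. The base class, backend, and payload here are stubs invented so the code runs on its own; the serialize/encode/broadcast steps mirror the method above.

import json
import sys

class StubBackend(object):
    def send(self, payload):
        print('sent %d bytes' % len(payload))

class FilteredTransceiver(object):
    # Stand-in for a Transceiver subclass: serialize_data and backends are
    # stubbed so the overridden send_data can run on its own.
    def __init__(self):
        self.backends = [('tcp://127.0.0.1:5678', StubBackend())]

    def serialize_data(self, data):
        return json.dumps(data)

    def send_data(self, data):
        # Same serialize/encode/broadcast pattern as the base method,
        # but empty payloads are dropped.
        for frontend_data in data:
            if not frontend_data:
                continue
            serialized = self.serialize_data(frontend_data)
            if sys.version_info >= (3, 0):
                serialized = serialized.encode('utf-8')
            for backend in self.backends:
                backend[1].send(serialized)

FilteredTransceiver().send_data([{'hits': 3}, {}])   # prints one send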
galactics/beyond
beyond/orbits/orbit.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/orbits/orbit.py#L440-L443
def va(self): """Velocity at apocenter """ return np.sqrt(self.mu * (2 / (self.ra) - 1 / self.kep.a))
[ "def", "va", "(", "self", ")", ":", "return", "np", ".", "sqrt", "(", "self", ".", "mu", "*", "(", "2", "/", "(", "self", ".", "ra", ")", "-", "1", "/", "self", ".", "kep", ".", "a", ")", ")" ]
Velocity at apocenter
[ "Velocity", "at", "apocenter" ]
python
train
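The property evaluates the vis-viva relation v = sqrt(mu * (2/r - 1/a)) at the apoapsis radius. A standalone numeric check follows; the orbit figures are made up for illustration, not taken from the record.

import numpy as np

mu = 398600.4418e9          # Earth's gravitational parameter, m^3 / s^2
ra, rp = 7.378e6, 6.678e6   # illustrative apoapsis/periapsis radii, m
a = (ra + rp) / 2           # semi-major axis of the ellipse

va = np.sqrt(mu * (2 / ra - 1 / a))   # vis-viva evaluated at r = ra
print(va)                             # ~7.17e3 m/s for these numbers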
pytroll/satpy
satpy/readers/eps_l1b.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/eps_l1b.py#L180-L186
def keys(self): """List of reader's keys. """ keys = [] for val in self.form.scales.values(): keys += val.dtype.fields.keys() return keys
[ "def", "keys", "(", "self", ")", ":", "keys", "=", "[", "]", "for", "val", "in", "self", ".", "form", ".", "scales", ".", "values", "(", ")", ":", "keys", "+=", "val", ".", "dtype", ".", "fields", ".", "keys", "(", ")", "return", "keys" ]
List of reader's keys.
[ "List", "of", "reader", "s", "keys", "." ]
python
train
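The flattening above, reproduced with made-up structured arrays so the ``dtype.fields`` access is visible in isolation; the field names are illustrative.

import numpy as np

scales = {
    'scale_a': np.zeros(1, dtype=[('lat', 'f8'), ('lon', 'f8')]),
    'scale_b': np.zeros(1, dtype=[('radiance', 'f4')]),
}
keys = []
for val in scales.values():
    keys += val.dtype.fields.keys()   # field names of each structured dtype
print(keys)                           # ['lat', 'lon', 'radiance']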
pysal/giddy
giddy/markov.py
https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L832-L855
def _maybe_classify(self, y, k, cutoffs): '''Helper method for classifying continuous data. ''' rows, cols = y.shape if cutoffs is None: if self.fixed: mcyb = mc.Quantiles(y.flatten(), k=k) yb = mcyb.yb.reshape(y.shape) cutoffs = mcyb.bins k = len(cutoffs) return yb, cutoffs[:-1], k else: yb = np.array([mc.Quantiles(y[:, i], k=k).yb for i in np.arange(cols)]).transpose() return yb, None, k else: cutoffs = list(cutoffs) + [np.inf] cutoffs = np.array(cutoffs) yb = mc.User_Defined(y.flatten(), np.array(cutoffs)).yb.reshape( y.shape) k = len(cutoffs) return yb, cutoffs[:-1], k
[ "def", "_maybe_classify", "(", "self", ",", "y", ",", "k", ",", "cutoffs", ")", ":", "rows", ",", "cols", "=", "y", ".", "shape", "if", "cutoffs", "is", "None", ":", "if", "self", ".", "fixed", ":", "mcyb", "=", "mc", ".", "Quantiles", "(", "y", ".", "flatten", "(", ")", ",", "k", "=", "k", ")", "yb", "=", "mcyb", ".", "yb", ".", "reshape", "(", "y", ".", "shape", ")", "cutoffs", "=", "mcyb", ".", "bins", "k", "=", "len", "(", "cutoffs", ")", "return", "yb", ",", "cutoffs", "[", ":", "-", "1", "]", ",", "k", "else", ":", "yb", "=", "np", ".", "array", "(", "[", "mc", ".", "Quantiles", "(", "y", "[", ":", ",", "i", "]", ",", "k", "=", "k", ")", ".", "yb", "for", "i", "in", "np", ".", "arange", "(", "cols", ")", "]", ")", ".", "transpose", "(", ")", "return", "yb", ",", "None", ",", "k", "else", ":", "cutoffs", "=", "list", "(", "cutoffs", ")", "+", "[", "np", ".", "inf", "]", "cutoffs", "=", "np", ".", "array", "(", "cutoffs", ")", "yb", "=", "mc", ".", "User_Defined", "(", "y", ".", "flatten", "(", ")", ",", "np", ".", "array", "(", "cutoffs", ")", ")", ".", "yb", ".", "reshape", "(", "y", ".", "shape", ")", "k", "=", "len", "(", "cutoffs", ")", "return", "yb", ",", "cutoffs", "[", ":", "-", "1", "]", ",", "k" ]
Helper method for classifying continuous data.
[ "Helper", "method", "for", "classifying", "continuous", "data", "." ]
python
train
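The pooled ('fixed') branch, sketched with plain numpy in place of mapclassify; np.quantile's interpolation differs in detail from mc.Quantiles, so the bin edges below are only indicative of the idea, not of giddy's exact output.

import numpy as np

y = np.array([[1.0, 5.0], [2.0, 6.0], [3.0, 7.0], [4.0, 8.0]])
k = 4

# Pool all observations, derive k quantile cutoffs, then bin every value.
cutoffs = np.quantile(y.flatten(), [i / k for i in range(1, k + 1)])
yb = np.digitize(y, cutoffs[:-1], right=True)   # class labels 0..k-1
print(cutoffs)   # [2.75 4.5  6.25 8.  ]
print(yb)        # [[0 2] [0 2] [1 3] [1 3]]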
mwgielen/jackal
jackal/core.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L55-L76
def search(self, number=None, *args, **kwargs): """ Searches the elasticsearch instance to retrieve the requested documents. """ search = self.create_search(*args, **kwargs) try: if number: response = search[0:number] else: args, _ = self.core_parser.parse_known_args() if args.number: response = search[0:args.number] else: response = search.scan() return [hit for hit in response] except NotFoundError: print_error("The index was not found, have you initialized the index?") return [] except (ConnectionError, TransportError): print_error("Cannot connect to elasticsearch") return []
[ "def", "search", "(", "self", ",", "number", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "search", "=", "self", ".", "create_search", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "if", "number", ":", "response", "=", "search", "[", "0", ":", "number", "]", "else", ":", "args", ",", "_", "=", "self", ".", "core_parser", ".", "parse_known_args", "(", ")", "if", "args", ".", "number", ":", "response", "=", "search", "[", "0", ":", "args", ".", "number", "]", "else", ":", "response", "=", "search", ".", "scan", "(", ")", "return", "[", "hit", "for", "hit", "in", "response", "]", "except", "NotFoundError", ":", "print_error", "(", "\"The index was not found, have you initialized the index?\"", ")", "return", "[", "]", "except", "(", "ConnectionError", ",", "TransportError", ")", ":", "print_error", "(", "\"Cannot connect to elasticsearch\"", ")", "return", "[", "]" ]
Searches the elasticsearch instance to retrieve the requested documents.
[ "Searches", "the", "elasticsearch", "instance", "to", "retrieve", "the", "requested", "documents", "." ]
python
valid
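The two retrieval modes the method switches between, shown directly with elasticsearch-dsl. The host and index name are placeholders, and running this assumes a reachable Elasticsearch instance.

from elasticsearch_dsl import Search, connections

connections.create_connection(hosts=['localhost'])   # placeholder host
s = Search(index='jackal-hosts')                     # placeholder index
page = s[0:10].execute()     # bounded: the search[0:number] path above
all_hits = list(s.scan())    # unbounded: scroll through every matching doc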
pytroll/trollimage
trollimage/image.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/image.py#L603-L625
def _rgb2l(self, mode): """Convert from RGB to monochrome L. """ self._check_modes(("RGB", "RGBA")) kb_ = 0.114 kr_ = 0.299 r__ = self.channels[0] g__ = self.channels[1] b__ = self.channels[2] y__ = kr_ * r__ + (1 - kr_ - kb_) * g__ + kb_ * b__ if self.fill_value is not None: self.fill_value = ([rgb2ycbcr(self.fill_value[0], self.fill_value[1], self.fill_value[2])[0]] + self.fill_value[3:]) self.channels = [y__] + self.channels[3:] self.mode = mode
[ "def", "_rgb2l", "(", "self", ",", "mode", ")", ":", "self", ".", "_check_modes", "(", "(", "\"RGB\"", ",", "\"RGBA\"", ")", ")", "kb_", "=", "0.114", "kr_", "=", "0.299", "r__", "=", "self", ".", "channels", "[", "0", "]", "g__", "=", "self", ".", "channels", "[", "1", "]", "b__", "=", "self", ".", "channels", "[", "2", "]", "y__", "=", "kr_", "*", "r__", "+", "(", "1", "-", "kr_", "-", "kb_", ")", "*", "g__", "+", "kb_", "*", "b__", "if", "self", ".", "fill_value", "is", "not", "None", ":", "self", ".", "fill_value", "=", "(", "[", "rgb2ycbcr", "(", "self", ".", "fill_value", "[", "0", "]", ",", "self", ".", "fill_value", "[", "1", "]", ",", "self", ".", "fill_value", "[", "2", "]", ")", "[", "0", "]", "]", "+", "self", ".", "fill_value", "[", "3", ":", "]", ")", "self", ".", "channels", "=", "[", "y__", "]", "+", "self", ".", "channels", "[", "3", ":", "]", "self", ".", "mode", "=", "mode" ]
Convert from RGB to monochrome L.
[ "Convert", "from", "RGB", "to", "monochrome", "L", "." ]
python
train
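The weights above (kr = 0.299, kb = 0.114, and implicitly kg = 1 - kr - kb = 0.587) are the ITU-R BT.601 luma coefficients. A quick standalone check on a few sample colours:

kr, kb = 0.299, 0.114

def luma(r, g, b):
    return kr * r + (1 - kr - kb) * g + kb * b

print(luma(1.0, 1.0, 1.0))   # ~1.0   -- white keeps full luminance
print(luma(0.0, 1.0, 0.0))   # ~0.587 -- green dominates perceived brightness
print(luma(0.0, 0.0, 1.0))   # ~0.114 -- blue contributes least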
wuher/devil
devil/fields/factory.py
https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/fields/factory.py#L171-L184
def _create_value(self, data, name, spec): """ Create the value for a field. :param data: the whole data for the entity (all fields). :param name: name of the initialized field. :param spec: spec for the whole entity. """ field = getattr(self, 'create_' + name, None) if field: # this factory has a special creator function for this field return field(data, name, spec) value = data.get(name) return spec.fields[name].clean(value)
[ "def", "_create_value", "(", "self", ",", "data", ",", "name", ",", "spec", ")", ":", "field", "=", "getattr", "(", "self", ",", "'create_'", "+", "name", ",", "None", ")", "if", "field", ":", "# this factory has a special creator function for this field", "return", "field", "(", "data", ",", "name", ",", "spec", ")", "value", "=", "data", ".", "get", "(", "name", ")", "return", "spec", ".", "fields", "[", "name", "]", ".", "clean", "(", "value", ")" ]
Create the value for a field. :param data: the whole data for the entity (all fields). :param name: name of the initialized field. :param spec: spec for the whole entity.
[ "Create", "the", "value", "for", "a", "field", "." ]
python
train
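The ``getattr`` dispatch in isolation: a toy factory that prefers a per-field ``create_<name>`` hook and otherwise falls back to a plain lookup. The field names are invented, and the spec-based ``clean()`` step from the record is omitted here.

class ToyFactory(object):
    def create_age(self, data, name, spec):
        return int(data.get(name, 0))          # special-cased field

    def _create_value(self, data, name, spec):
        creator = getattr(self, 'create_' + name, None)
        if creator:
            return creator(data, name, spec)   # hook wins when present
        return data.get(name)                  # fallback: raw value

factory = ToyFactory()
print(factory._create_value({'age': '42'}, 'age', None))      # 42
print(factory._create_value({'city': 'Oslo'}, 'city', None))  # 'Oslo'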
junaruga/rpm-py-installer
install.py
https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L1707-L1758
def sh_e(cls, cmd, **kwargs): """Run the command. It behaves like "sh -e". It raises InstallError if the command failed. """ Log.debug('CMD: {0}'.format(cmd)) cmd_kwargs = { 'shell': True, } cmd_kwargs.update(kwargs) env = os.environ.copy() # Better to parse English output env['LC_ALL'] = 'en_US.utf-8' if 'env' in kwargs: env.update(kwargs['env']) cmd_kwargs['env'] = env # Capture stderr to show it on error message. cmd_kwargs['stderr'] = subprocess.PIPE proc = None try: proc = subprocess.Popen(cmd, **cmd_kwargs) stdout, stderr = proc.communicate() returncode = proc.returncode message_format = ( 'CMD Return Code: [{0}], Stdout: [{1}], Stderr: [{2}]' ) Log.debug(message_format.format(returncode, stdout, stderr)) if stdout is not None: stdout = stdout.decode('utf-8') if stderr is not None: stderr = stderr.decode('utf-8') if returncode != 0: message = 'CMD: [{0}], Return Code: [{1}] at [{2}]'.format( cmd, returncode, os.getcwd()) if stderr is not None: message += ' Stderr: [{0}]'.format(stderr) ie = CmdError(message) ie.stdout = stdout ie.stderr = stderr raise ie return (stdout, stderr) except Exception as e: try: proc.kill() except Exception: pass raise e
[ "def", "sh_e", "(", "cls", ",", "cmd", ",", "*", "*", "kwargs", ")", ":", "Log", ".", "debug", "(", "'CMD: {0}'", ".", "format", "(", "cmd", ")", ")", "cmd_kwargs", "=", "{", "'shell'", ":", "True", ",", "}", "cmd_kwargs", ".", "update", "(", "kwargs", ")", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "# Better to parse English output", "env", "[", "'LC_ALL'", "]", "=", "'en_US.utf-8'", "if", "'env'", "in", "kwargs", ":", "env", ".", "update", "(", "kwargs", "[", "'env'", "]", ")", "cmd_kwargs", "[", "'env'", "]", "=", "env", "# Capture stderr to show it on error message.", "cmd_kwargs", "[", "'stderr'", "]", "=", "subprocess", ".", "PIPE", "proc", "=", "None", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "*", "*", "cmd_kwargs", ")", "stdout", ",", "stderr", "=", "proc", ".", "communicate", "(", ")", "returncode", "=", "proc", ".", "returncode", "message_format", "=", "(", "'CMD Return Code: [{0}], Stdout: [{1}], Stderr: [{2}]'", ")", "Log", ".", "debug", "(", "message_format", ".", "format", "(", "returncode", ",", "stdout", ",", "stderr", ")", ")", "if", "stdout", "is", "not", "None", ":", "stdout", "=", "stdout", ".", "decode", "(", "'utf-8'", ")", "if", "stderr", "is", "not", "None", ":", "stderr", "=", "stderr", ".", "decode", "(", "'utf-8'", ")", "if", "returncode", "!=", "0", ":", "message", "=", "'CMD: [{0}], Return Code: [{1}] at [{2}]'", ".", "format", "(", "cmd", ",", "returncode", ",", "os", ".", "getcwd", "(", ")", ")", "if", "stderr", "is", "not", "None", ":", "message", "+=", "' Stderr: [{0}]'", ".", "format", "(", "stderr", ")", "ie", "=", "CmdError", "(", "message", ")", "ie", ".", "stdout", "=", "stdout", "ie", ".", "stderr", "=", "stderr", "raise", "ie", "return", "(", "stdout", ",", "stderr", ")", "except", "Exception", "as", "e", ":", "try", ":", "proc", ".", "kill", "(", ")", "except", "Exception", ":", "pass", "raise", "e" ]
Run the command. It behaves like "sh -e". It raises InstallError if the command failed.
[ "Run", "the", "command", ".", "It", "behaves", "like", "sh", "-", "e", "." ]
python
train
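The core of the wrapper above, reduced to a runnable standalone: run a shell command, capture both streams, and fold stderr into the raised error. The function name mirrors the record, but the error type and the demo command are illustrative.

import subprocess

def sh_e(cmd):
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError('CMD: [{0}], Return Code: [{1}], Stderr: [{2}]'
                           .format(cmd, proc.returncode, stderr.decode('utf-8')))
    return stdout.decode('utf-8'), stderr.decode('utf-8')

out, _ = sh_e('echo hello')
print(out)   # 'hello\n'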
LonamiWebs/Telethon
telethon_generator/docswriter.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_generator/docswriter.py#L42-L68
def write_head(self, title, css_path, default_css): """Writes the head part for the generated document, with the given title and CSS """ self.title = title self.write( '''<!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>{title}</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <link id="style" href="{rel_css}/docs.dark.css" rel="stylesheet"> <script> document.getElementById("style").href = "{rel_css}/docs." + (localStorage.getItem("theme") || "{def_css}") + ".css"; </script> <link href="https://fonts.googleapis.com/css?family=Nunito|Source+Code+Pro" rel="stylesheet"> </head> <body> <div id="main_div">''', title=title, rel_css=self._rel(css_path), def_css=default_css )
[ "def", "write_head", "(", "self", ",", "title", ",", "css_path", ",", "default_css", ")", ":", "self", ".", "title", "=", "title", "self", ".", "write", "(", "'''<!DOCTYPE html>\n<html>\n<head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\">\n <title>{title}</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <link id=\"style\" href=\"{rel_css}/docs.dark.css\" rel=\"stylesheet\">\n <script>\n document.getElementById(\"style\").href = \"{rel_css}/docs.\"\n + (localStorage.getItem(\"theme\") || \"{def_css}\")\n + \".css\";\n </script>\n <link href=\"https://fonts.googleapis.com/css?family=Nunito|Source+Code+Pro\"\n rel=\"stylesheet\">\n</head>\n<body>\n<div id=\"main_div\">'''", ",", "title", "=", "title", ",", "rel_css", "=", "self", ".", "_rel", "(", "css_path", ")", ",", "def_css", "=", "default_css", ")" ]
Writes the head part for the generated document, with the given title and CSS
[ "Writes", "the", "head", "part", "for", "the", "generated", "document", "with", "the", "given", "title", "and", "CSS" ]
python
train
bitesofcode/projex
projex/plugin.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/plugin.py#L303-L331
def addPluginPath(cls, pluginpath): """ Adds the plugin path for this class to the given path. The inputted pluginpath value can either be a list of strings, or a string containing paths separated by the OS specific path separator (':' on Mac & Linux, ';' on Windows) :param pluginpath | [<str>, ..] || <str> """ prop_key = '_%s__pluginpath' % cls.__name__ curr_path = getattr(cls, prop_key, None) if not curr_path: curr_path = [] setattr(cls, prop_key, curr_path) if isinstance(pluginpath, basestring): pluginpath = pluginpath.split(os.path.pathsep) for path in pluginpath: if not path: continue path = os.path.expanduser(os.path.expandvars(path)) paths = path.split(os.path.pathsep) if len(paths) > 1: cls.addPluginPath(paths) else: curr_path.append(path)
[ "def", "addPluginPath", "(", "cls", ",", "pluginpath", ")", ":", "prop_key", "=", "'_%s__pluginpath'", "%", "cls", ".", "__name__", "curr_path", "=", "getattr", "(", "cls", ",", "prop_key", ",", "None", ")", "if", "not", "curr_path", ":", "curr_path", "=", "[", "]", "setattr", "(", "cls", ",", "prop_key", ",", "curr_path", ")", "if", "isinstance", "(", "pluginpath", ",", "basestring", ")", ":", "pluginpath", "=", "pluginpath", ".", "split", "(", "os", ".", "path", ".", "pathsep", ")", "for", "path", "in", "pluginpath", ":", "if", "not", "path", ":", "continue", "path", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "expandvars", "(", "path", ")", ")", "paths", "=", "path", ".", "split", "(", "os", ".", "path", ".", "pathsep", ")", "if", "len", "(", "paths", ")", ">", "1", ":", "cls", ".", "addPluginPath", "(", "paths", ")", "else", ":", "curr_path", ".", "append", "(", "path", ")" ]
Adds the plugin path for this class to the given path. The inputted pluginpath value can either be a list of strings, or a string containing paths separated by the OS specific path separator (':' on Mac & Linux, ';' on Windows) :param pluginpath | [<str>, ..] || <str>
[ "Adds", "the", "plugin", "path", "for", "this", "class", "to", "the", "given", "path", ".", "The", "inputted", "pluginpath", "value", "can", "either", "be", "a", "list", "of", "strings", "or", "a", "string", "containing", "paths", "separated", "by", "the", "OS", "specific", "path", "separator", "(", ":", "on", "Mac", "&", "Linux", ";", "on", "Windows", ")", ":", "param", "pluginpath", "|", "[", "<str", ">", "..", "]", "||", "<str", ">" ]
python
train
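The separator and expansion handling in isolation: ``os.path.pathsep`` is ':' on POSIX and ';' on Windows, and both '~' and environment variables are expanded before paths are stored. The raw string below is made up.

import os

raw = '~/plugins' + os.path.pathsep + '$HOME/more_plugins' + os.path.pathsep
paths = []
for path in raw.split(os.path.pathsep):
    if not path:                      # skip the empty trailing entry
        continue
    paths.append(os.path.expanduser(os.path.expandvars(path)))
print(paths)                          # both entries with HOME expanded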
gwpy/gwpy
gwpy/timeseries/io/nds2.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/nds2.py#L204-L213
def _create_series(ndschan, value, start, end, series_class=TimeSeries): """Create a timeseries to cover the specified [start, end) limits To cover a gap in data returned from NDS """ channel = Channel.from_nds2(ndschan) nsamp = int((end - start) * channel.sample_rate.value) return series_class(numpy_ones(nsamp) * value, t0=start, sample_rate=channel.sample_rate, unit=channel.unit, channel=channel)
[ "def", "_create_series", "(", "ndschan", ",", "value", ",", "start", ",", "end", ",", "series_class", "=", "TimeSeries", ")", ":", "channel", "=", "Channel", ".", "from_nds2", "(", "ndschan", ")", "nsamp", "=", "int", "(", "(", "end", "-", "start", ")", "*", "channel", ".", "sample_rate", ".", "value", ")", "return", "series_class", "(", "numpy_ones", "(", "nsamp", ")", "*", "value", ",", "t0", "=", "start", ",", "sample_rate", "=", "channel", ".", "sample_rate", ",", "unit", "=", "channel", ".", "unit", ",", "channel", "=", "channel", ")" ]
Create a timeseries to cover the specified [start, end) limits To cover a gap in data returned from NDS
[ "Create", "a", "timeseries", "to", "cover", "the", "specified", "[", "start", "end", ")", "limits" ]
python
train
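The padding arithmetic in isolation: the sample count follows from the gap duration times the sample rate, and the payload is just ones scaled by the fill value. All numbers below are made up.

import numpy as np

start, end = 0.0, 2.0        # gap limits, seconds
sample_rate = 16.0           # Hz
value = -1.0                 # fill value for the gap

nsamp = int((end - start) * sample_rate)
gap_fill = np.ones(nsamp) * value
print(nsamp, gap_fill[:3])   # 32 [-1. -1. -1.]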
StackStorm/pybind
pybind/nos/v6_0_2f/interface/management/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/management/__init__.py#L414-L437
def _set_shutdown_management_oper(self, v, load=False): """ Setter method for shutdown_management_oper, mapped from YANG variable /interface/management/shutdown_management_oper (string) If this variable is read-only (config: false) in the source YANG file, then _set_shutdown_management_oper is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_shutdown_management_oper() directly. YANG Description: Show the status of this management interface. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="shutdown_management_oper", rest_name="oper-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Show the status of this management interface.', u'alt-name': u'oper-status'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """shutdown_management_oper must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="shutdown_management_oper", rest_name="oper-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Show the status of this management interface.', u'alt-name': u'oper-status'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=False)""", }) self.__shutdown_management_oper = t if hasattr(self, '_set'): self._set()
[ "def", "_set_shutdown_management_oper", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "unicode", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"shutdown_management_oper\"", ",", "rest_name", "=", "\"oper-status\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Show the status of this management interface.'", ",", "u'alt-name'", ":", "u'oper-status'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'string'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"shutdown_management_oper must be of a type compatible with string\"\"\"", ",", "'defined-type'", ":", "\"string\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"shutdown_management_oper\", rest_name=\"oper-status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Show the status of this management interface.', u'alt-name': u'oper-status'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__shutdown_management_oper", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for shutdown_management_oper, mapped from YANG variable /interface/management/shutdown_management_oper (string) If this variable is read-only (config: false) in the source YANG file, then _set_shutdown_management_oper is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_shutdown_management_oper() directly. YANG Description: Show the status of this management interface.
[ "Setter", "method", "for", "shutdown_management_oper", "mapped", "from", "YANG", "variable", "/", "interface", "/", "management", "/", "shutdown_management_oper", "(", "string", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_shutdown_management_oper", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_shutdown_management_oper", "()", "directly", "." ]
python
train
veltzer/pydmt
pydmt/core/pydmt.py
https://github.com/veltzer/pydmt/blob/11d3db7ea079756c1e4137d3dd8a2cabbcc98bf7/pydmt/core/pydmt.py#L119-L130
def build_all(self) -> BuildProcessStats: """ Build all the targets, very high level method :return: """ stats = BuildProcessStats() for builder in self.builders: self.build_by_builder( builder=builder, stats=stats, ) return stats
[ "def", "build_all", "(", "self", ")", "->", "BuildProcessStats", ":", "stats", "=", "BuildProcessStats", "(", ")", "for", "builder", "in", "self", ".", "builders", ":", "self", ".", "build_by_builder", "(", "builder", "=", "builder", ",", "stats", "=", "stats", ",", ")", "return", "stats" ]
Build all the targets, very high level method :return:
[ "Build", "all", "the", "targets", "very", "high", "level", "method", ":", "return", ":" ]
python
train
googleapis/google-cloud-python
datastore/google/cloud/datastore/helpers.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/helpers.py#L270-L299
def key_from_protobuf(pb): """Factory method for creating a key based on a protobuf. The protobuf should be one returned from the Cloud Datastore Protobuf API. :type pb: :class:`.entity_pb2.Key` :param pb: The Protobuf representing the key. :rtype: :class:`google.cloud.datastore.key.Key` :returns: a new `Key` instance """ path_args = [] for element in pb.path: path_args.append(element.kind) if element.id: # Simple field (int64) path_args.append(element.id) # This is safe: we expect proto objects returned will only have # one of `name` or `id` set. if element.name: # Simple field (string) path_args.append(element.name) project = None if pb.partition_id.project_id: # Simple field (string) project = pb.partition_id.project_id namespace = None if pb.partition_id.namespace_id: # Simple field (string) namespace = pb.partition_id.namespace_id return Key(*path_args, namespace=namespace, project=project)
[ "def", "key_from_protobuf", "(", "pb", ")", ":", "path_args", "=", "[", "]", "for", "element", "in", "pb", ".", "path", ":", "path_args", ".", "append", "(", "element", ".", "kind", ")", "if", "element", ".", "id", ":", "# Simple field (int64)", "path_args", ".", "append", "(", "element", ".", "id", ")", "# This is safe: we expect proto objects returned will only have", "# one of `name` or `id` set.", "if", "element", ".", "name", ":", "# Simple field (string)", "path_args", ".", "append", "(", "element", ".", "name", ")", "project", "=", "None", "if", "pb", ".", "partition_id", ".", "project_id", ":", "# Simple field (string)", "project", "=", "pb", ".", "partition_id", ".", "project_id", "namespace", "=", "None", "if", "pb", ".", "partition_id", ".", "namespace_id", ":", "# Simple field (string)", "namespace", "=", "pb", ".", "partition_id", ".", "namespace_id", "return", "Key", "(", "*", "path_args", ",", "namespace", "=", "namespace", ",", "project", "=", "project", ")" ]
Factory method for creating a key based on a protobuf. The protobuf should be one returned from the Cloud Datastore Protobuf API. :type pb: :class:`.entity_pb2.Key` :param pb: The Protobuf representing the key. :rtype: :class:`google.cloud.datastore.key.Key` :returns: a new `Key` instance
[ "Factory", "method", "for", "creating", "a", "key", "based", "on", "a", "protobuf", "." ]
python
train
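The path-flattening logic above, demonstrated with plain objects standing in for protobuf path elements; proto3 scalar defaults (0 for an unset ``id``, '' for an unset ``name``) are what make the truthiness checks safe. The kinds and identifiers are made up.

from types import SimpleNamespace

path = [
    SimpleNamespace(kind='Parent', id=1234, name=''),
    SimpleNamespace(kind='Child', id=0, name='abc'),
]
path_args = []
for element in path:
    path_args.append(element.kind)
    if element.id:                 # int64 id set
        path_args.append(element.id)
    if element.name:               # string name set
        path_args.append(element.name)
print(path_args)                   # ['Parent', 1234, 'Child', 'abc']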
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1617-L1623
def help_center_articles_labels_list(self, locale=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/labels#list-all-labels" api_path = "/api/v2/help_center/articles/labels.json" if locale: api_opt_path = "/api/v2/help_center/{locale}/articles/labels.json" api_path = api_opt_path.format(locale=locale) return self.call(api_path, **kwargs)
[ "def", "help_center_articles_labels_list", "(", "self", ",", "locale", "=", "None", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/help_center/articles/labels.json\"", "if", "locale", ":", "api_opt_path", "=", "\"/api/v2/help_center/{locale}/articles/labels.json\"", "api_path", "=", "api_opt_path", ".", "format", "(", "locale", "=", "locale", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/help_center/labels#list-all-labels
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "help_center", "/", "labels#list", "-", "all", "-", "labels" ]
python
train
numenta/htmresearch
htmresearch/frameworks/layers/physical_objects.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L316-L337
def plot(self, numPoints=100): """ Specific plotting method for cylinders. """ fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # generate cylinder x = np.linspace(- self.radius, self.radius, numPoints) z = np.linspace(- self.height / 2., self.height / 2., numPoints) Xc, Zc = np.meshgrid(x, z) Yc = np.sqrt(self.radius ** 2 - Xc ** 2) # plot ax.plot_surface(Xc, Yc, Zc, alpha=0.2, rstride=20, cstride=10) ax.plot_surface(Xc, -Yc, Zc, alpha=0.2, rstride=20, cstride=10) ax.set_xlabel("X") ax.set_ylabel("Y") ax.set_zlabel("Z") plt.title("{}".format(self)) return fig, ax
[ "def", "plot", "(", "self", ",", "numPoints", "=", "100", ")", ":", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ",", "projection", "=", "'3d'", ")", "# generate cylinder", "x", "=", "np", ".", "linspace", "(", "-", "self", ".", "radius", ",", "self", ".", "radius", ",", "numPoints", ")", "z", "=", "np", ".", "linspace", "(", "-", "self", ".", "height", "/", "2.", ",", "self", ".", "height", "/", "2.", ",", "numPoints", ")", "Xc", ",", "Zc", "=", "np", ".", "meshgrid", "(", "x", ",", "z", ")", "Yc", "=", "np", ".", "sqrt", "(", "self", ".", "radius", "**", "2", "-", "Xc", "**", "2", ")", "# plot", "ax", ".", "plot_surface", "(", "Xc", ",", "Yc", ",", "Zc", ",", "alpha", "=", "0.2", ",", "rstride", "=", "20", ",", "cstride", "=", "10", ")", "ax", ".", "plot_surface", "(", "Xc", ",", "-", "Yc", ",", "Zc", ",", "alpha", "=", "0.2", ",", "rstride", "=", "20", ",", "cstride", "=", "10", ")", "ax", ".", "set_xlabel", "(", "\"X\"", ")", "ax", ".", "set_ylabel", "(", "\"Y\"", ")", "ax", ".", "set_zlabel", "(", "\"Z\"", ")", "plt", ".", "title", "(", "\"{}\"", ".", "format", "(", "self", ")", ")", "return", "fig", ",", "ax" ]
Specific plotting method for cylinders.
[ "Specific", "plotting", "method", "for", "cylinders", "." ]
python
train
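The surface construction in isolation: for each x the positive branch of y = sqrt(r^2 - x^2) traces half of the circular cross-section, which is why the method plots both +Yc and -Yc. Radius, height, and grid size below are made up.

import numpy as np

radius, height, n = 1.0, 2.0, 5
x = np.linspace(-radius, radius, n)
z = np.linspace(-height / 2., height / 2., n)
Xc, Zc = np.meshgrid(x, z)
Yc = np.sqrt(radius ** 2 - Xc ** 2)   # upper half of the cross-section
print(np.round(Yc[0], 3))             # [0.    0.866 1.    0.866 0.   ]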
Azure/azure-kusto-python
azure-kusto-data/azure/kusto/data/request.py
https://github.com/Azure/azure-kusto-python/blob/92466a2ae175d6353d1dee3496a02517b2a71a86/azure-kusto-data/azure/kusto/data/request.py#L133-L149
def with_aad_user_password_authentication(cls, connection_string, user_id, password, authority_id="common"):
        """Creates a KustoConnection string builder that will authenticate with AAD user name and password.
        :param str connection_string: Kusto connection string should be of the format: https://<clusterName>.kusto.windows.net
        :param str user_id: AAD user ID.
        :param str password: Corresponding password of the AAD user.
        :param str authority_id: optional param. defaults to "common"
        """
        _assert_value_is_valid(user_id)
        _assert_value_is_valid(password)
        kcsb = cls(connection_string)
        kcsb[kcsb.ValidKeywords.aad_federated_security] = True
        kcsb[kcsb.ValidKeywords.aad_user_id] = user_id
        kcsb[kcsb.ValidKeywords.password] = password
        kcsb[kcsb.ValidKeywords.authority_id] = authority_id

        return kcsb
[ "def", "with_aad_user_password_authentication", "(", "cls", ",", "connection_string", ",", "user_id", ",", "password", ",", "authority_id", "=", "\"common\"", ")", ":", "_assert_value_is_valid", "(", "user_id", ")", "_assert_value_is_valid", "(", "password", ")", "kcsb", "=", "cls", "(", "connection_string", ")", "kcsb", "[", "kcsb", ".", "ValidKeywords", ".", "aad_federated_security", "]", "=", "True", "kcsb", "[", "kcsb", ".", "ValidKeywords", ".", "aad_user_id", "]", "=", "user_id", "kcsb", "[", "kcsb", ".", "ValidKeywords", ".", "password", "]", "=", "password", "kcsb", "[", "kcsb", ".", "ValidKeywords", ".", "authority_id", "]", "=", "authority_id", "return", "kcsb" ]
Creates a KustoConnection string builder that will authenticate with AAD user name and password.
        :param str connection_string: Kusto connection string should be of the format: https://<clusterName>.kusto.windows.net
        :param str user_id: AAD user ID.
        :param str password: Corresponding password of the AAD user.
        :param str authority_id: optional param. defaults to "common"
[ "Creates", "a", "KustoConnection", "string", "builder", "that", "will", "authenticate", "with", "AAD", "user", "name", "and", "password", ".", ":", "param", "str", "connection_string", ":", "Kusto", "connection", "string", "should", "by", "of", "the", "format", ":", "https", ":", "//", "<clusterName", ">", ".", "kusto", ".", "windows", ".", "net", ":", "param", "str", "user_id", ":", "AAD", "user", "ID", ".", ":", "param", "str", "password", ":", "Corresponding", "password", "of", "the", "AAD", "user", ".", ":", "param", "str", "authority_id", ":", "optional", "param", ".", "defaults", "to", "common" ]
python
train
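A hedged usage sketch of the classmethod above. The cluster URL and credentials are placeholders; the import path follows the azure-kusto-data layout the record's own path suggests, so treat it as an assumption.

from azure.kusto.data.request import KustoConnectionStringBuilder

kcsb = KustoConnectionStringBuilder.with_aad_user_password_authentication(
    'https://mycluster.kusto.windows.net',   # placeholder cluster
    user_id='user@contoso.com',              # placeholder AAD user
    password='...',                          # placeholder secret
)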