Dataset schema (column name, type, observed min/max):

repo               stringlengths    7 .. 55
path               stringlengths    4 .. 223
url                stringlengths    87 .. 315
code               stringlengths    75 .. 104k
code_tokens        list
docstring          stringlengths    1 .. 46.9k
docstring_tokens   list
language           stringclasses    1 value
partition          stringclasses    3 values
avg_line_len       float64          7.91 .. 980
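The avg_line_len column is not documented in the schema; judging by the stored values, a plausible definition is the mean number of characters per line of the code field. A minimal sketch under that assumption:

def avg_line_len(code: str) -> float:
    # Mean characters per line of a code snippet -- an assumption about
    # how the column was computed, not a documented definition.
    lines = code.splitlines()
    return sum(len(line) for line in lines) / len(lines)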
tanghaibao/goatools
goatools/parsers/david_chart.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/parsers/david_chart.py#L70-L72
def wr_xlsx(self, fout_xlsx, nts):
    """Write specified namedtuples into an Excel spreadsheet."""
    wr_xlsx(fout_xlsx, nts, prt_flds=self.prt_flds, fld2col_widths=self.fld2col_widths)
[ "def", "wr_xlsx", "(", "self", ",", "fout_xlsx", ",", "nts", ")", ":", "wr_xlsx", "(", "fout_xlsx", ",", "nts", ",", "prt_flds", "=", "self", ".", "prt_flds", ",", "fld2col_widths", "=", "self", ".", "fld2col_widths", ")" ]
Write specified namedtuples into an Excel spreadsheet.
[ "Write", "specified", "namedtuples", "into", "an", "Excel", "spreadsheet", "." ]
python
train
64.333333
twisted/twistedchecker
twistedchecker/core/runner.py
https://github.com/twisted/twistedchecker/blob/80060e1c07cf5d67d747dbec8ec0e5ee913e8929/twistedchecker/core/runner.py#L368-L385
def generateDiff(self, oldWarnings, newWarnings):
    """
    Generate diff between given two lists of warnings.

    @param oldWarnings: parsed old warnings
    @param newWarnings: parsed new warnings
    @return: a dict object of diff
    """
    diffWarnings = {}

    for modulename in newWarnings:
        diffInModule = (
            newWarnings[modulename] -
            oldWarnings.get(modulename, set()))
        if diffInModule:
            diffWarnings[modulename] = diffInModule

    return diffWarnings
[ "def", "generateDiff", "(", "self", ",", "oldWarnings", ",", "newWarnings", ")", ":", "diffWarnings", "=", "{", "}", "for", "modulename", "in", "newWarnings", ":", "diffInModule", "=", "(", "newWarnings", "[", "modulename", "]", "-", "oldWarnings", ".", "get", "(", "modulename", ",", "set", "(", ")", ")", ")", "if", "diffInModule", ":", "diffWarnings", "[", "modulename", "]", "=", "diffInModule", "return", "diffWarnings" ]
Generate diff between given two lists of warnings. @param oldWarnings: parsed old warnings @param newWarnings: parsed new warnings @return: a dict object of diff
[ "Generate", "diff", "between", "given", "two", "lists", "of", "warnings", "." ]
python
train
30.777778
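For illustration, the set arithmetic generateDiff performs, run on plain dicts shaped like its parsed-warnings inputs:

old = {'mod_a': {'W1', 'W2'}}
new = {'mod_a': {'W1', 'W2', 'W3'}, 'mod_b': {'W9'}}
# Keep only warnings present in `new` but absent from `old`:
diff = {m: new[m] - old.get(m, set())
        for m in new
        if new[m] - old.get(m, set())}
print(diff)  # {'mod_a': {'W3'}, 'mod_b': {'W9'}}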
jlmadurga/permabots
permabots/views/api/bot.py
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/bot.py#L245-L256
def put(self, request, bot_id, id, format=None):
    """
    Update existing KikBot
    ---
    serializer: KikBotUpdateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    return super(KikBotDetail, self).put(request, bot_id, id, format)
[ "def", "put", "(", "self", ",", "request", ",", "bot_id", ",", "id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "KikBotDetail", ",", "self", ")", ".", "put", "(", "request", ",", "bot_id", ",", "id", ",", "format", ")" ]
Update existing KikBot --- serializer: KikBotUpdateSerializer responseMessages: - code: 401 message: Not authenticated - code: 400 message: Not valid request
[ "Update", "existing", "KikBot", "---", "serializer", ":", "KikBotUpdateSerializer", "responseMessages", ":", "-", "code", ":", "401", "message", ":", "Not", "authenticated", "-", "code", ":", "400", "message", ":", "Not", "valid", "request" ]
python
train
31.916667
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/set_contains_all_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/set_contains_all_codec.py#L10-L17
def calculate_size(name, items):
    """ Calculates the request payload size"""
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += INT_SIZE_IN_BYTES
    for items_item in items:
        data_size += calculate_size_data(items_item)
    return data_size
[ "def", "calculate_size", "(", "name", ",", "items", ")", ":", "data_size", "=", "0", "data_size", "+=", "calculate_size_str", "(", "name", ")", "data_size", "+=", "INT_SIZE_IN_BYTES", "for", "items_item", "in", "items", ":", "data_size", "+=", "calculate_size_data", "(", "items_item", ")", "return", "data_size" ]
Calculates the request payload size
[ "Calculates", "the", "request", "payload", "size" ]
python
train
33.75
MatterMiners/cobald
cobald/daemon/core/config.py
https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/daemon/core/config.py#L10-L23
def load(config_path: str):
    """
    Load a configuration and keep it alive for the given context

    :param config_path: path to a configuration file
    """
    # we bind the config to _ to keep it alive
    if os.path.splitext(config_path)[1] in ('.yaml', '.yml'):
        _ = load_yaml_configuration(config_path, translator=PipelineTranslator())
    elif os.path.splitext(config_path)[1] == '.py':
        _ = load_python_configuration(config_path)
    else:
        raise ValueError('Unknown configuration extension: %r' % os.path.splitext(config_path)[1])
    yield
[ "def", "load", "(", "config_path", ":", "str", ")", ":", "# we bind the config to _ to keep it alive", "if", "os", ".", "path", ".", "splitext", "(", "config_path", ")", "[", "1", "]", "in", "(", "'.yaml'", ",", "'.yml'", ")", ":", "_", "=", "load_yaml_configuration", "(", "config_path", ",", "translator", "=", "PipelineTranslator", "(", ")", ")", "elif", "os", ".", "path", ".", "splitext", "(", "config_path", ")", "[", "1", "]", "==", "'.py'", ":", "_", "=", "load_python_configuration", "(", "config_path", ")", "else", ":", "raise", "ValueError", "(", "'Unknown configuration extension: %r'", "%", "os", ".", "path", ".", "splitext", "(", "config_path", ")", "[", "1", "]", ")", "yield" ]
Load a configuration and keep it alive for the given context :param config_path: path to a configuration file
[ "Load", "a", "configuration", "and", "keep", "it", "alive", "for", "the", "given", "context" ]
python
train
40.142857
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/oinspect.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/oinspect.py#L461-L479
def _format_fields(self, fields, title_width=12):
    """Formats a list of fields for display.

    Parameters
    ----------
    fields : list
      A list of 2-tuples: (field_title, field_content)
    title_width : int
      How many characters to pad titles to. Default 12.
    """
    out = []
    header = self.__head
    for title, content in fields:
        if len(content.splitlines()) > 1:
            title = header(title + ":") + "\n"
        else:
            title = header((title + ":").ljust(title_width))
        out.append(title + content)
    return "\n".join(out)
[ "def", "_format_fields", "(", "self", ",", "fields", ",", "title_width", "=", "12", ")", ":", "out", "=", "[", "]", "header", "=", "self", ".", "__head", "for", "title", ",", "content", "in", "fields", ":", "if", "len", "(", "content", ".", "splitlines", "(", ")", ")", ">", "1", ":", "title", "=", "header", "(", "title", "+", "\":\"", ")", "+", "\"\\n\"", "else", ":", "title", "=", "header", "(", "(", "title", "+", "\":\"", ")", ".", "ljust", "(", "title_width", ")", ")", "out", ".", "append", "(", "title", "+", "content", ")", "return", "\"\\n\"", ".", "join", "(", "out", ")" ]
Formats a list of fields for display. Parameters ---------- fields : list A list of 2-tuples: (field_title, field_content) title_width : int How many characters to pad titles to. Default 12.
[ "Formats", "a", "list", "of", "fields", "for", "display", "." ]
python
test
33.157895
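A standalone sketch of the same alignment rule (hypothetical names, outside IPython, without the header-styling callable): one-line content is padded out to title_width, multi-line content starts under its title:

def format_fields(fields, title_width=12):
    # Simplified stand-in for IPython's private helper.
    out = []
    for title, content in fields:
        if len(content.splitlines()) > 1:
            out.append(title + ":\n" + content)
        else:
            out.append((title + ":").ljust(title_width) + content)
    return "\n".join(out)

print(format_fields([("Type", "function"),
                     ("Docstring", "line one\nline two")]))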
angr/angr
angr/simos/javavm.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/simos/javavm.py#L194-L258
def state_call(self, addr, *args, **kwargs):
    """
    Create a native or a Java call state.

    :param addr: Soot or native addr of the invoke target.
    :param args: List of SootArgument values.
    """
    state = kwargs.pop('base_state', None)
    # check if we need to setup a native or a java callsite
    if isinstance(addr, SootAddressDescriptor):
        # JAVA CALLSITE
        # ret addr precedence: ret_addr kwarg > base_state.addr > terminator
        ret_addr = kwargs.pop('ret_addr', state.addr if state else SootAddressTerminator())
        cc = kwargs.pop('cc', SimCCSoot(self.arch))
        if state is None:
            state = self.state_blank(addr=addr, **kwargs)
        else:
            state = state.copy()
            state.regs.ip = addr
        cc.setup_callsite(state, ret_addr, args)
        return state

    else:
        # NATIVE CALLSITE
        # setup native argument values
        native_arg_values = []
        for arg in args:
            if arg.type in ArchSoot.primitive_types or \
               arg.type == "JNIEnv":
                # the value of primitive types and the JNIEnv pointer
                # are just getting copied into the native memory
                native_arg_value = arg.value
                if self.arch.bits == 32 and arg.type == "long":
                    # On 32 bit architecture, long values (w/ 64 bit) are copied
                    # as two 32 bit integer
                    # TODO is this correct?
                    upper = native_arg_value.get_bytes(0, 4)
                    lower = native_arg_value.get_bytes(4, 4)
                    idx = args.index(arg)
                    args = args[:idx] \
                           + (SootArgument(upper, 'int'), SootArgument(lower, 'int')) \
                           + args[idx+1:]
                    native_arg_values += [upper, lower]
                    continue
            else:
                # argument has a relative type
                # => map Java reference to an opaque reference, which the native code
                # can use to access the Java object through the JNI interface
                native_arg_value = state.jni_references.create_new_reference(obj=arg.value)
            native_arg_values += [native_arg_value]
        # setup native return type
        ret_type = kwargs.pop('ret_type')
        native_ret_type = self.get_native_type(ret_type)
        # setup function prototype, so the SimCC know how to init the callsite
        arg_types = [self.get_native_type(arg.type) for arg in args]
        prototype = SimTypeFunction(args=arg_types, returnty=native_ret_type)
        native_cc = self.get_native_cc(func_ty=prototype)
        # setup native invoke state
        return self.native_simos.state_call(addr, *native_arg_values,
                                            base_state=state,
                                            ret_addr=self.native_return_hook_addr,
                                            cc=native_cc, **kwargs)
[ "def", "state_call", "(", "self", ",", "addr", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "state", "=", "kwargs", ".", "pop", "(", "'base_state'", ",", "None", ")", "# check if we need to setup a native or a java callsite", "if", "isinstance", "(", "addr", ",", "SootAddressDescriptor", ")", ":", "# JAVA CALLSITE", "# ret addr precedence: ret_addr kwarg > base_state.addr > terminator", "ret_addr", "=", "kwargs", ".", "pop", "(", "'ret_addr'", ",", "state", ".", "addr", "if", "state", "else", "SootAddressTerminator", "(", ")", ")", "cc", "=", "kwargs", ".", "pop", "(", "'cc'", ",", "SimCCSoot", "(", "self", ".", "arch", ")", ")", "if", "state", "is", "None", ":", "state", "=", "self", ".", "state_blank", "(", "addr", "=", "addr", ",", "*", "*", "kwargs", ")", "else", ":", "state", "=", "state", ".", "copy", "(", ")", "state", ".", "regs", ".", "ip", "=", "addr", "cc", ".", "setup_callsite", "(", "state", ",", "ret_addr", ",", "args", ")", "return", "state", "else", ":", "# NATIVE CALLSITE", "# setup native argument values", "native_arg_values", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "arg", ".", "type", "in", "ArchSoot", ".", "primitive_types", "or", "arg", ".", "type", "==", "\"JNIEnv\"", ":", "# the value of primitive types and the JNIEnv pointer", "# are just getting copied into the native memory", "native_arg_value", "=", "arg", ".", "value", "if", "self", ".", "arch", ".", "bits", "==", "32", "and", "arg", ".", "type", "==", "\"long\"", ":", "# On 32 bit architecture, long values (w/ 64 bit) are copied", "# as two 32 bit integer", "# TODO is this correct?", "upper", "=", "native_arg_value", ".", "get_bytes", "(", "0", ",", "4", ")", "lower", "=", "native_arg_value", ".", "get_bytes", "(", "4", ",", "4", ")", "idx", "=", "args", ".", "index", "(", "arg", ")", "args", "=", "args", "[", ":", "idx", "]", "+", "(", "SootArgument", "(", "upper", ",", "'int'", ")", ",", "SootArgument", "(", "lower", ",", "'int'", ")", ")", "+", "args", "[", "idx", "+", "1", ":", "]", "native_arg_values", "+=", "[", "upper", ",", "lower", "]", "continue", "else", ":", "# argument has a relative type", "# => map Java reference to an opaque reference, which the native code", "# can use to access the Java object through the JNI interface", "native_arg_value", "=", "state", ".", "jni_references", ".", "create_new_reference", "(", "obj", "=", "arg", ".", "value", ")", "native_arg_values", "+=", "[", "native_arg_value", "]", "# setup native return type", "ret_type", "=", "kwargs", ".", "pop", "(", "'ret_type'", ")", "native_ret_type", "=", "self", ".", "get_native_type", "(", "ret_type", ")", "# setup function prototype, so the SimCC know how to init the callsite", "arg_types", "=", "[", "self", ".", "get_native_type", "(", "arg", ".", "type", ")", "for", "arg", "in", "args", "]", "prototype", "=", "SimTypeFunction", "(", "args", "=", "arg_types", ",", "returnty", "=", "native_ret_type", ")", "native_cc", "=", "self", ".", "get_native_cc", "(", "func_ty", "=", "prototype", ")", "# setup native invoke state", "return", "self", ".", "native_simos", ".", "state_call", "(", "addr", ",", "*", "native_arg_values", ",", "base_state", "=", "state", ",", "ret_addr", "=", "self", ".", "native_return_hook_addr", ",", "cc", "=", "native_cc", ",", "*", "*", "kwargs", ")" ]
Create a native or a Java call state. :param addr: Soot or native addr of the invoke target. :param args: List of SootArgument values.
[ "Create", "a", "native", "or", "a", "Java", "call", "state", "." ]
python
train
49.061538
Ezhil-Language-Foundation/open-tamil
tamil/txt2unicode/encode2unicode.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/txt2unicode/encode2unicode.py#L292-L330
def auto2unicode(text):
    """
    This function tries to identify encode in available encodings.
    If it finds, then it will convert text into unicode string.

    Author : Arulalan.T

    04.08.2014

    """
    _all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
    # get unique word which falls under any one of available encodes from
    # user passed text lines
    unique_chars = _get_unique_ch(text, _all_common_encodes_)
    # count common encode chars
    clen = len(_all_common_encodes_)
    msg = "Sorry, couldn't find encode :-(\n"
    msg += 'Need more words to find unique encode out side of %d ' % clen
    msg += 'common compound characters'
    if not unique_chars:
        print(msg)
        return ''
    # end of if not unique_chars:

    for encode_name, encode_keys in _all_unique_encodes_:
        if not len(encode_keys):
            continue
        for ch in encode_keys:
            # check either encode char is presnent in word
            if ch in unique_chars:
                # found encode
                print(("Found encode : ", encode_name))
                encode = _all_encodes_[encode_name]
                return encode2unicode(text, encode)
            # end of if ch in unique_chars:
        # end of ifor ch in encode_keys:
    else:
        print(msg)
        return ''
[ "def", "auto2unicode", "(", "text", ")", ":", "_all_unique_encodes_", ",", "_all_common_encodes_", "=", "_get_unique_common_encodes", "(", ")", "# get unique word which falls under any one of available encodes from\r", "# user passed text lines\r", "unique_chars", "=", "_get_unique_ch", "(", "text", ",", "_all_common_encodes_", ")", "# count common encode chars\r", "clen", "=", "len", "(", "_all_common_encodes_", ")", "msg", "=", "\"Sorry, couldn't find encode :-(\\n\"", "msg", "+=", "'Need more words to find unique encode out side of %d '", "%", "clen", "msg", "+=", "'common compound characters'", "if", "not", "unique_chars", ":", "print", "(", "msg", ")", "return", "''", "# end of if not unique_chars:\r", "for", "encode_name", ",", "encode_keys", "in", "_all_unique_encodes_", ":", "if", "not", "len", "(", "encode_keys", ")", ":", "continue", "for", "ch", "in", "encode_keys", ":", "# check either encode char is presnent in word\r", "if", "ch", "in", "unique_chars", ":", "# found encode\r", "print", "(", "(", "\"Found encode : \"", ",", "encode_name", ")", ")", "encode", "=", "_all_encodes_", "[", "encode_name", "]", "return", "encode2unicode", "(", "text", ",", "encode", ")", "# end of if ch in unique_chars:\r", "# end of ifor ch in encode_keys:\r", "else", ":", "print", "(", "msg", ")", "return", "''" ]
This function tries to identify encode in available encodings. If it finds, then it will convert text into unicode string. Author : Arulalan.T 04.08.2014
[ "This", "function", "tries", "to", "identify", "encode", "in", "available", "encodings", ".", "If", "it", "finds", "then", "it", "will", "convert", "text", "into", "unicode", "string", ".", "Author", ":", "Arulalan", ".", "T", "04", ".", "08", ".", "2014" ]
python
train
34.102564
PolyJIT/benchbuild
benchbuild/utils/wrapping.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L86-L136
def wrap(name, project, sprefix=None, python=sys.executable):
    """ Wrap the binary :name: with the runtime extension of the project.

    This module generates a python tool that replaces :name:
    The function in runner only accepts the replaced binaries name as
    argument. We use the cloudpickle package to perform the serialization,
    make sure :runner: can be serialized with it and you're fine.

    Args:
        name: Binary we want to wrap
        project: The project that contains the runtime_extension we want
            to run instead of the binary.

    Returns: A plumbum command, ready to launch.
    """
    env = __create_jinja_env()
    template = env.get_template('run_static.py.inc')

    name_absolute = os.path.abspath(name)
    real_f = name_absolute + PROJECT_BIN_F_EXT
    if sprefix:
        run(uchroot()["/bin/mv",
                      strip_path_prefix(name_absolute, sprefix),
                      strip_path_prefix(real_f, sprefix)])
    else:
        run(mv[name_absolute, real_f])

    project_file = persist(project, suffix=".project")

    env = CFG['env'].value
    bin_path = list_to_path(env.get('PATH', []))
    bin_path = list_to_path([bin_path, os.environ["PATH"]])

    bin_lib_path = list_to_path(env.get('LD_LIBRARY_PATH', []))
    bin_lib_path = list_to_path([bin_lib_path, os.environ["LD_LIBRARY_PATH"]])

    with open(name_absolute, 'w') as wrapper:
        wrapper.write(
            template.render(
                runf=strip_path_prefix(real_f, sprefix),
                project_file=strip_path_prefix(project_file, sprefix),
                path=str(bin_path),
                ld_library_path=str(bin_lib_path),
                python=python,
            ))

    run(chmod["+x", name_absolute])
    return local[name_absolute]
[ "def", "wrap", "(", "name", ",", "project", ",", "sprefix", "=", "None", ",", "python", "=", "sys", ".", "executable", ")", ":", "env", "=", "__create_jinja_env", "(", ")", "template", "=", "env", ".", "get_template", "(", "'run_static.py.inc'", ")", "name_absolute", "=", "os", ".", "path", ".", "abspath", "(", "name", ")", "real_f", "=", "name_absolute", "+", "PROJECT_BIN_F_EXT", "if", "sprefix", ":", "run", "(", "uchroot", "(", ")", "[", "\"/bin/mv\"", ",", "strip_path_prefix", "(", "name_absolute", ",", "sprefix", ")", ",", "strip_path_prefix", "(", "real_f", ",", "sprefix", ")", "]", ")", "else", ":", "run", "(", "mv", "[", "name_absolute", ",", "real_f", "]", ")", "project_file", "=", "persist", "(", "project", ",", "suffix", "=", "\".project\"", ")", "env", "=", "CFG", "[", "'env'", "]", ".", "value", "bin_path", "=", "list_to_path", "(", "env", ".", "get", "(", "'PATH'", ",", "[", "]", ")", ")", "bin_path", "=", "list_to_path", "(", "[", "bin_path", ",", "os", ".", "environ", "[", "\"PATH\"", "]", "]", ")", "bin_lib_path", "=", "list_to_path", "(", "env", ".", "get", "(", "'LD_LIBRARY_PATH'", ",", "[", "]", ")", ")", "bin_lib_path", "=", "list_to_path", "(", "[", "bin_lib_path", ",", "os", ".", "environ", "[", "\"LD_LIBRARY_PATH\"", "]", "]", ")", "with", "open", "(", "name_absolute", ",", "'w'", ")", "as", "wrapper", ":", "wrapper", ".", "write", "(", "template", ".", "render", "(", "runf", "=", "strip_path_prefix", "(", "real_f", ",", "sprefix", ")", ",", "project_file", "=", "strip_path_prefix", "(", "project_file", ",", "sprefix", ")", ",", "path", "=", "str", "(", "bin_path", ")", ",", "ld_library_path", "=", "str", "(", "bin_lib_path", ")", ",", "python", "=", "python", ",", ")", ")", "run", "(", "chmod", "[", "\"+x\"", ",", "name_absolute", "]", ")", "return", "local", "[", "name_absolute", "]" ]
Wrap the binary :name: with the runtime extension of the project. This module generates a python tool that replaces :name: The function in runner only accepts the replaced binaries name as argument. We use the cloudpickle package to perform the serialization, make sure :runner: can be serialized with it and you're fine. Args: name: Binary we want to wrap project: The project that contains the runtime_extension we want to run instead of the binary. Returns: A plumbum command, ready to launch.
[ "Wrap", "the", "binary", ":", "name", ":", "with", "the", "runtime", "extension", "of", "the", "project", "." ]
python
train
34.490196
spacetelescope/acstools
acstools/calacs.py
https://github.com/spacetelescope/acstools/blob/bbf8dd080cefcbf88529ec87c420f9e1b8002554/acstools/calacs.py#L28-L104
def calacs(input_file, exec_path=None, time_stamps=False, temp_files=False,
           verbose=False, debug=False, quiet=False, single_core=False,
           exe_args=None):
    """
    Run the calacs.e executable as from the shell.

    By default this will run the calacs given by 'calacs.e'.

    Parameters
    ----------
    input_file : str
        Name of input file.

    exec_path : str, optional
        The complete path to a calacs executable.

    time_stamps : bool, optional
        Set to True to turn on the printing of time stamps.

    temp_files : bool, optional
        Set to True to have CALACS save temporary files.

    verbose : bool, optional
        Set to True for verbose output.

    debug : bool, optional
        Set to True to turn on debugging output.

    quiet : bool, optional
        Set to True for quiet output.

    single_core : bool, optional
        CTE correction in CALACS will by default try to use all available
        CPUs on your computer. Set this to True to force the use of just
        one CPU.

    exe_args : list, optional
        Arbitrary arguments passed to underlying executable call.
        Note: Implementation uses subprocess.call and whitespace is not
        permitted. E.g. use exe_args=['--nThreads', '1']

    """
    if exec_path:
        if not os.path.exists(exec_path):
            raise OSError('Executable not found: ' + exec_path)
        call_list = [exec_path]
    else:
        call_list = ['calacs.e']

    if time_stamps:
        call_list.append('-t')

    if temp_files:
        call_list.append('-s')

    if verbose:
        call_list.append('-v')

    if debug:
        call_list.append('-d')

    if quiet:
        call_list.append('-q')

    if single_core:
        call_list.append('-1')

    if not os.path.exists(input_file):
        raise IOError('Input file not found: ' + input_file)

    call_list.append(input_file)

    if exe_args:
        call_list.extend(exe_args)

    subprocess.check_call(call_list)
[ "def", "calacs", "(", "input_file", ",", "exec_path", "=", "None", ",", "time_stamps", "=", "False", ",", "temp_files", "=", "False", ",", "verbose", "=", "False", ",", "debug", "=", "False", ",", "quiet", "=", "False", ",", "single_core", "=", "False", ",", "exe_args", "=", "None", ")", ":", "if", "exec_path", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "exec_path", ")", ":", "raise", "OSError", "(", "'Executable not found: '", "+", "exec_path", ")", "call_list", "=", "[", "exec_path", "]", "else", ":", "call_list", "=", "[", "'calacs.e'", "]", "if", "time_stamps", ":", "call_list", ".", "append", "(", "'-t'", ")", "if", "temp_files", ":", "call_list", ".", "append", "(", "'-s'", ")", "if", "verbose", ":", "call_list", ".", "append", "(", "'-v'", ")", "if", "debug", ":", "call_list", ".", "append", "(", "'-d'", ")", "if", "quiet", ":", "call_list", ".", "append", "(", "'-q'", ")", "if", "single_core", ":", "call_list", ".", "append", "(", "'-1'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "input_file", ")", ":", "raise", "IOError", "(", "'Input file not found: '", "+", "input_file", ")", "call_list", ".", "append", "(", "input_file", ")", "if", "exe_args", ":", "call_list", ".", "extend", "(", "exe_args", ")", "subprocess", ".", "check_call", "(", "call_list", ")" ]
Run the calacs.e executable as from the shell. By default this will run the calacs given by 'calacs.e'. Parameters ---------- input_file : str Name of input file. exec_path : str, optional The complete path to a calacs executable. time_stamps : bool, optional Set to True to turn on the printing of time stamps. temp_files : bool, optional Set to True to have CALACS save temporary files. verbose : bool, optional Set to True for verbose output. debug : bool, optional Set to True to turn on debugging output. quiet : bool, optional Set to True for quiet output. single_core : bool, optional CTE correction in CALACS will by default try to use all available CPUs on your computer. Set this to True to force the use of just one CPU. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1']
[ "Run", "the", "calacs", ".", "e", "executable", "as", "from", "the", "shell", "." ]
python
train
25.064935
totalgood/pugnlp
src/pugnlp/stats.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/stats.py#L788-L798
def json_from_cov_df(df, threshold=.5, gain=2., n=None, indent=1):
    """Produce a json string describing the graph (list of edges) from a square auto-correlation/covariance matrix

    { "nodes": [{"group": 1, "name": "the"},
                {"group": 1, "name": "and"},
                {"group": 1, "name": "our"},
                {"group": 2, "name": "that"},...
      "links": [{"source": 0, "target": 0, "value": 2.637520131294177},
                {"source": 0, "target": 1, "value": 1.343999676850537},...
    """
    nodes, edges = graph_from_cov_df(df=df, threshold=threshold, gain=gain, n=n)
    return json.dumps({'nodes': nodes, 'links': edges}, indent=indent)
[ "def", "json_from_cov_df", "(", "df", ",", "threshold", "=", ".5", ",", "gain", "=", "2.", ",", "n", "=", "None", ",", "indent", "=", "1", ")", ":", "nodes", ",", "edges", "=", "graph_from_cov_df", "(", "df", "=", "df", ",", "threshold", "=", "threshold", ",", "gain", "=", "gain", ",", "n", "=", "n", ")", "return", "json", ".", "dumps", "(", "{", "'nodes'", ":", "nodes", ",", "'links'", ":", "edges", "}", ",", "indent", "=", "indent", ")" ]
Produce a json string describing the graph (list of edges) from a square auto-correlation/covariance matrix { "nodes": [{"group": 1, "name": "the"}, {"group": 1, "name": "and"}, {"group": 1, "name": "our"}, {"group": 2, "name": "that"},... "links": [{"source": 0, "target": 0, "value": 2.637520131294177}, {"source": 0, "target": 1, "value": 1.343999676850537}, ...
[ "Produce", "a", "json", "string", "describing", "the", "graph", "(", "list", "of", "edges", ")", "from", "a", "square", "auto", "-", "correlation", "/", "covariance", "matrix", "{", "nodes", ":", "[", "{", "group", ":", "1", "name", ":", "the", "}", "{", "group", ":", "1", "name", ":", "and", "}", "{", "group", ":", "1", "name", ":", "our", "}", "{", "group", ":", "2", "name", ":", "that", "}", "...", "links", ":", "[", "{", "source", ":", "0", "target", ":", "0", "value", ":", "2", ".", "637520131294177", "}", "{", "source", ":", "0", "target", ":", "1", "value", ":", "1", ".", "343999676850537", "}", "..." ]
python
train
61.090909
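To make the output shape concrete, the same {'nodes': ..., 'links': ...} structure can be serialized by hand with only the standard library (graph_from_cov_df itself is a pugnlp helper and is not reproduced here):

import json

nodes = [{"group": 1, "name": "the"}, {"group": 1, "name": "and"}]
links = [{"source": 0, "target": 1, "value": 1.343999676850537}]
print(json.dumps({'nodes': nodes, 'links': links}, indent=1))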
saltstack/salt
salt/states/dracr.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/dracr.py#L41-L96
def property_present(properties, admin_username='root', admin_password='calvin', host=None, **kwargs):
    '''
    properties = {}
    '''
    ret = {'name': host,
           'context': {'Host': host},
           'result': True,
           'changes': {},
           'comment': ''}

    if host is None:
        output = __salt__['cmd.run_all']('ipmitool lan print')
        stdout = output['stdout']
        reg = re.compile(r'\s*IP Address\s*:\s*(\d+.\d+.\d+.\d+)\s*')
        for line in stdout:
            result = reg.match(line)
            if result is not None:
                # we want group(1) as this is match in parentheses
                host = result.group(1)
                break

    if not host:
        ret['result'] = False
        ret['comment'] = 'Unknown host!'
        return ret

    properties_get = {}
    for key, value in properties.items():
        response = __salt__['dracr.get_property'](host, admin_username, admin_password, key)
        if response is False or response['retcode'] != 0:
            ret['result'] = False
            ret['comment'] = 'Failed to get property from idrac'
            return ret
        properties_get[key] = response['stdout'].split('\n')[-1].split('=')[-1]

    if __opts__['test']:
        for key, value in properties.items():
            if properties_get[key] == value:
                ret['changes'][key] = 'Won\'t be changed'
            else:
                ret['changes'][key] = 'Will be changed to {0}'.format(properties_get[key])
        return ret

    for key, value in properties.items():
        if properties_get[key] != value:
            response = __salt__['dracr.set_property'](host, admin_username, admin_password, key, value)
            if response is False or response['retcode'] != 0:
                ret['result'] = False
                ret['comment'] = 'Failed to set property from idrac'
                return ret
            ret['changes'][key] = 'will be changed - old value {0} , new value {1}'.format(properties_get[key], value)

    return ret
[ "def", "property_present", "(", "properties", ",", "admin_username", "=", "'root'", ",", "admin_password", "=", "'calvin'", ",", "host", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "host", ",", "'context'", ":", "{", "'Host'", ":", "host", "}", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "if", "host", "is", "None", ":", "output", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'ipmitool lan print'", ")", "stdout", "=", "output", "[", "'stdout'", "]", "reg", "=", "re", ".", "compile", "(", "r'\\s*IP Address\\s*:\\s*(\\d+.\\d+.\\d+.\\d+)\\s*'", ")", "for", "line", "in", "stdout", ":", "result", "=", "reg", ".", "match", "(", "line", ")", "if", "result", "is", "not", "None", ":", "# we want group(1) as this is match in parentheses", "host", "=", "result", ".", "group", "(", "1", ")", "break", "if", "not", "host", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Unknown host!'", "return", "ret", "properties_get", "=", "{", "}", "for", "key", ",", "value", "in", "properties", ".", "items", "(", ")", ":", "response", "=", "__salt__", "[", "'dracr.get_property'", "]", "(", "host", ",", "admin_username", ",", "admin_password", ",", "key", ")", "if", "response", "is", "False", "or", "response", "[", "'retcode'", "]", "!=", "0", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to get property from idrac'", "return", "ret", "properties_get", "[", "key", "]", "=", "response", "[", "'stdout'", "]", ".", "split", "(", "'\\n'", ")", "[", "-", "1", "]", ".", "split", "(", "'='", ")", "[", "-", "1", "]", "if", "__opts__", "[", "'test'", "]", ":", "for", "key", ",", "value", "in", "properties", ".", "items", "(", ")", ":", "if", "properties_get", "[", "key", "]", "==", "value", ":", "ret", "[", "'changes'", "]", "[", "key", "]", "=", "'Won\\'t be changed'", "else", ":", "ret", "[", "'changes'", "]", "[", "key", "]", "=", "'Will be changed to {0}'", ".", "format", "(", "properties_get", "[", "key", "]", ")", "return", "ret", "for", "key", ",", "value", "in", "properties", ".", "items", "(", ")", ":", "if", "properties_get", "[", "key", "]", "!=", "value", ":", "response", "=", "__salt__", "[", "'dracr.set_property'", "]", "(", "host", ",", "admin_username", ",", "admin_password", ",", "key", ",", "value", ")", "if", "response", "is", "False", "or", "response", "[", "'retcode'", "]", "!=", "0", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to set property from idrac'", "return", "ret", "ret", "[", "'changes'", "]", "[", "key", "]", "=", "'will be changed - old value {0} , new value {1}'", ".", "format", "(", "properties_get", "[", "key", "]", ",", "value", ")", "return", "ret" ]
properties = {}
[ "properties", "=", "{}" ]
python
train
35.625
tcalmant/ipopo
pelix/shell/report.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/report.py#L547-L559
def ipopo_factories(self):
    """
    List of iPOPO factories
    """
    try:
        with use_ipopo(self.__context) as ipopo:
            return {
                name: ipopo.get_factory_details(name)
                for name in ipopo.get_factories()
            }
    except BundleException:
        # iPOPO is not available:
        return None
[ "def", "ipopo_factories", "(", "self", ")", ":", "try", ":", "with", "use_ipopo", "(", "self", ".", "__context", ")", "as", "ipopo", ":", "return", "{", "name", ":", "ipopo", ".", "get_factory_details", "(", "name", ")", "for", "name", "in", "ipopo", ".", "get_factories", "(", ")", "}", "except", "BundleException", ":", "# iPOPO is not available:", "return", "None" ]
List of iPOPO factories
[ "List", "of", "iPOPO", "factories" ]
python
train
29.615385
OpenHydrology/floodestimation
floodestimation/fehdata.py
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L176-L184
def amax_files():
    """
    Return all annual maximum flow (`*.am`) files in cache folder and sub folders.

    :return: List of file paths
    :rtype: list
    """
    return [os.path.join(dp, f)
            for dp, dn, filenames in os.walk(CACHE_FOLDER)
            for f in filenames
            if os.path.splitext(f)[1].lower() == '.am']
[ "def", "amax_files", "(", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "dp", ",", "f", ")", "for", "dp", ",", "dn", ",", "filenames", "in", "os", ".", "walk", "(", "CACHE_FOLDER", ")", "for", "f", "in", "filenames", "if", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "1", "]", ".", "lower", "(", ")", "==", "'.am'", "]" ]
Return all annual maximum flow (`*.am`) files in cache folder and sub folders. :return: List of file paths :rtype: list
[ "Return", "all", "annual", "maximum", "flow", "(", "*", ".", "am", ")", "files", "in", "cache", "folder", "and", "sub", "folders", "." ]
python
train
34.666667
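The walk-and-filter comprehension above generalizes; a self-contained variant with the root directory passed in rather than read from the module-level CACHE_FOLDER constant:

import os

def files_with_ext(root, ext='.am'):
    # Collect every file under `root` whose extension matches, case-insensitively.
    return [os.path.join(dp, f)
            for dp, _dn, filenames in os.walk(root)
            for f in filenames
            if os.path.splitext(f)[1].lower() == ext]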
dtmilano/AndroidViewClient
src/com/dtmilano/android/viewclient.py
https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L1061-L1107
def writeImageToFile(self, filename, _format="PNG"):
    '''
    Write the View image to the specified filename in the specified format.

    @type filename: str
    @param filename: Absolute path and optional filename receiving the image. If this points
                     to a directory, then the filename is determined by this View unique ID
                     and format extension.
    @type _format: str
    @param _format: Image format (default format is PNG)
    '''

    filename = self.device.substituteDeviceTemplate(filename)
    if not os.path.isabs(filename):
        raise ValueError("writeImageToFile expects an absolute path (fielname='%s')" % filename)
    if os.path.isdir(filename):
        filename = os.path.join(filename, self.variableNameFromId() + '.' + _format.lower())
    if DEBUG:
        print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s format" % (filename, _format)
    #self.device.takeSnapshot().getSubImage(self.getPositionAndSize()).writeToFile(filename, _format)
    # crop:
    # im.crop(box) ⇒ image
    # Returns a copy of a rectangular region from the current image.
    # The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate.
    ((l, t), (r, b)) = self.getCoords()
    box = (l, t, r, b)
    if DEBUG:
        print >> sys.stderr, "writeImageToFile: cropping", box, " reconnect=", self.device.reconnect
    if self.uiAutomatorHelper:
        if DEBUG_UI_AUTOMATOR_HELPER:
            print >> sys.stderr, "Taking screenshot using UiAutomatorHelper"
        received = self.uiAutomatorHelper.takeScreenshot()
        stream = StringIO.StringIO(received)
        try:
            from PIL import Image
            image = Image.open(stream)
        except ImportError as ex:
            # FIXME: this method should be global
            self.pilNotInstalledWarning()
            sys.exit(1)
        except IOError, ex:
            print >> sys.stderr, ex
            print repr(stream)
            sys.exit(1)
    else:
        image = self.device.takeSnapshot(reconnect=self.device.reconnect)
    image.crop(box).save(filename, _format)
[ "def", "writeImageToFile", "(", "self", ",", "filename", ",", "_format", "=", "\"PNG\"", ")", ":", "filename", "=", "self", ".", "device", ".", "substituteDeviceTemplate", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "filename", ")", ":", "raise", "ValueError", "(", "\"writeImageToFile expects an absolute path (fielname='%s')\"", "%", "filename", ")", "if", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "filename", ",", "self", ".", "variableNameFromId", "(", ")", "+", "'.'", "+", "_format", ".", "lower", "(", ")", ")", "if", "DEBUG", ":", "print", ">>", "sys", ".", "stderr", ",", "\"writeImageToFile: saving image to '%s' in %s format\"", "%", "(", "filename", ",", "_format", ")", "#self.device.takeSnapshot().getSubImage(self.getPositionAndSize()).writeToFile(filename, _format)", "# crop:", "# im.crop(box) ⇒ image", "# Returns a copy of a rectangular region from the current image.", "# The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate.", "(", "(", "l", ",", "t", ")", ",", "(", "r", ",", "b", ")", ")", "=", "self", ".", "getCoords", "(", ")", "box", "=", "(", "l", ",", "t", ",", "r", ",", "b", ")", "if", "DEBUG", ":", "print", ">>", "sys", ".", "stderr", ",", "\"writeImageToFile: cropping\"", ",", "box", ",", "\" reconnect=\"", ",", "self", ".", "device", ".", "reconnect", "if", "self", ".", "uiAutomatorHelper", ":", "if", "DEBUG_UI_AUTOMATOR_HELPER", ":", "print", ">>", "sys", ".", "stderr", ",", "\"Taking screenshot using UiAutomatorHelper\"", "received", "=", "self", ".", "uiAutomatorHelper", ".", "takeScreenshot", "(", ")", "stream", "=", "StringIO", ".", "StringIO", "(", "received", ")", "try", ":", "from", "PIL", "import", "Image", "image", "=", "Image", ".", "open", "(", "stream", ")", "except", "ImportError", "as", "ex", ":", "# FIXME: this method should be global", "self", ".", "pilNotInstalledWarning", "(", ")", "sys", ".", "exit", "(", "1", ")", "except", "IOError", ",", "ex", ":", "print", ">>", "sys", ".", "stderr", ",", "ex", "print", "repr", "(", "stream", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "image", "=", "self", ".", "device", ".", "takeSnapshot", "(", "reconnect", "=", "self", ".", "device", ".", "reconnect", ")", "image", ".", "crop", "(", "box", ")", ".", "save", "(", "filename", ",", "_format", ")" ]
Write the View image to the specified filename in the specified format. @type filename: str @param filename: Absolute path and optional filename receiving the image. If this points to a directory, then the filename is determined by this View unique ID and format extension. @type _format: str @param _format: Image format (default format is PNG)
[ "Write", "the", "View", "image", "to", "the", "specified", "filename", "in", "the", "specified", "format", "." ]
python
train
48.06383
minhhoit/yacms
yacms/blog/management/base.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/management/base.py#L106-L121
def add_comment(self, post=None, name=None, email=None, pub_date=None,
                website=None, body=None):
    """
    Adds a comment to the post provided.
    """
    if post is None:
        if not self.posts:
            raise CommandError("Cannot add comments without posts")
        post = self.posts[-1]
    post["comments"].append({
        "user_name": name,
        "user_email": email,
        "submit_date": pub_date,
        "user_url": website,
        "comment": body,
    })
[ "def", "add_comment", "(", "self", ",", "post", "=", "None", ",", "name", "=", "None", ",", "email", "=", "None", ",", "pub_date", "=", "None", ",", "website", "=", "None", ",", "body", "=", "None", ")", ":", "if", "post", "is", "None", ":", "if", "not", "self", ".", "posts", ":", "raise", "CommandError", "(", "\"Cannot add comments without posts\"", ")", "post", "=", "self", ".", "posts", "[", "-", "1", "]", "post", "[", "\"comments\"", "]", ".", "append", "(", "{", "\"user_name\"", ":", "name", ",", "\"user_email\"", ":", "email", ",", "\"submit_date\"", ":", "pub_date", ",", "\"user_url\"", ":", "website", ",", "\"comment\"", ":", "body", ",", "}", ")" ]
Adds a comment to the post provided.
[ "Adds", "a", "comment", "to", "the", "post", "provided", "." ]
python
train
33.75
edx/edx-enterprise
enterprise/management/commands/assign_enterprise_user_roles.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/management/commands/assign_enterprise_user_roles.py#L92-L97
def _get_enterprise_admin_users_batch(self, start, end):
    """
    Returns a batched queryset of User objects.
    """
    LOGGER.info('Fetching new batch of enterprise admin users from indexes: %s to %s', start, end)
    return User.objects.filter(groups__name=ENTERPRISE_DATA_API_ACCESS_GROUP, is_staff=False)[start:end]
[ "def", "_get_enterprise_admin_users_batch", "(", "self", ",", "start", ",", "end", ")", ":", "LOGGER", ".", "info", "(", "'Fetching new batch of enterprise admin users from indexes: %s to %s'", ",", "start", ",", "end", ")", "return", "User", ".", "objects", ".", "filter", "(", "groups__name", "=", "ENTERPRISE_DATA_API_ACCESS_GROUP", ",", "is_staff", "=", "False", ")", "[", "start", ":", "end", "]" ]
Returns a batched queryset of User objects.
[ "Returns", "a", "batched", "queryset", "of", "User", "objects", "." ]
python
valid
56.5
blockcypher/blockcypher-python
blockcypher/utils.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/utils.py#L433-L443
def _long_to_bytes(n, length, byteorder):
    """Convert a long to a bytestring

    For use in python version prior to 3.2
    Source: http://bugs.python.org/issue16580#msg177208
    """
    if byteorder == 'little':
        indexes = range(length)
    else:
        indexes = reversed(range(length))
    return bytearray((n >> i * 8) & 0xff for i in indexes)
[ "def", "_long_to_bytes", "(", "n", ",", "length", ",", "byteorder", ")", ":", "if", "byteorder", "==", "'little'", ":", "indexes", "=", "range", "(", "length", ")", "else", ":", "indexes", "=", "reversed", "(", "range", "(", "length", ")", ")", "return", "bytearray", "(", "(", "n", ">>", "i", "*", "8", ")", "&", "0xff", "for", "i", "in", "indexes", ")" ]
Convert a long to a bytestring For use in python version prior to 3.2 Source: http://bugs.python.org/issue16580#msg177208
[ "Convert", "a", "long", "to", "a", "bytestring", "For", "use", "in", "python", "version", "prior", "to", "3", ".", "2", "Source", ":", "http", ":", "//", "bugs", ".", "python", ".", "org", "/", "issue16580#msg177208" ]
python
train
32.090909
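A usage sketch; on Python >= 3.2 the stdlib int.to_bytes produces the same bytes, which is why the helper is only needed on older interpreters:

n = 0x12345678
print(_long_to_bytes(n, 4, 'big'))     # bytearray(b'\x124Vx')
print(n.to_bytes(4, byteorder='big'))  # b'\x124Vx'
print(_long_to_bytes(n, 4, 'little'))  # bytearray(b'xV4\x12')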
macbre/phantomas-python
phantomas/utils.py
https://github.com/macbre/phantomas-python/blob/63b1b1bd3fc97feb460beb6ae509bfb5cccf04f5/phantomas/utils.py#L11-L34
def format_args(options):
    """
    Convert hash/key options into arguments list
    """
    args = list()

    for key, value in options.items():
        # convert foo_bar key into --foo-bar option
        key = key.replace('_', '-')

        if value is True:
            # key: True
            # --key
            args.append('--{key}'.format(key=key))
        elif is_sequence(value):
            # key: ['foo', 'bar']
            # --key=foo,bar
            values = [str(val) for val in value]
            args.append('--{key}={values}'.format(
                key=key, values=','.join(values)))
        else:
            # key: 'foo'
            # --key=foo
            args.append('--{key}={value}'.format(key=key, value=value))

    return args
[ "def", "format_args", "(", "options", ")", ":", "args", "=", "list", "(", ")", "for", "key", ",", "value", "in", "options", ".", "items", "(", ")", ":", "# convert foo_bar key into --foo-bar option", "key", "=", "key", ".", "replace", "(", "'_'", ",", "'-'", ")", "if", "value", "is", "True", ":", "# key: True", "# --key", "args", ".", "append", "(", "'--{key}'", ".", "format", "(", "key", "=", "key", ")", ")", "elif", "is_sequence", "(", "value", ")", ":", "# key: ['foo', 'bar']", "# --key=foo,bar", "values", "=", "[", "str", "(", "val", ")", "for", "val", "in", "value", "]", "args", ".", "append", "(", "'--{key}={values}'", ".", "format", "(", "key", "=", "key", ",", "values", "=", "','", ".", "join", "(", "values", ")", ")", ")", "else", ":", "# key: 'foo'", "# --key=foo", "args", ".", "append", "(", "'--{key}={value}'", ".", "format", "(", "key", "=", "key", ",", "value", "=", "value", ")", ")", "return", "args" ]
Convert hash/key options into arguments list
[ "Convert", "hash", "/", "key", "options", "into", "arguments", "list" ]
python
train
30.208333
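A usage sketch (is_sequence is an internal phantomas helper; assume it accepts lists, and note that on Python 3.7+ the dict's insertion order drives the argument order):

print(format_args({'timeout': 30, 'no_externals': True, 'cookie': ['a=1', 'b=2']}))
# ['--timeout=30', '--no-externals', '--cookie=a=1,b=2']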
osrg/ryu
ryu/lib/ovs/bridge.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/bridge.py#L318-L328
def db_get_map(self, table, record, column):
    """
    Gets dict type value of 'column' in 'record' in 'table'.

    This method is corresponding to the following ovs-vsctl command::

        $ ovs-vsctl get TBL REC COL
    """
    val = self.db_get_val(table, record, column)
    assert isinstance(val, dict)
    return val
[ "def", "db_get_map", "(", "self", ",", "table", ",", "record", ",", "column", ")", ":", "val", "=", "self", ".", "db_get_val", "(", "table", ",", "record", ",", "column", ")", "assert", "isinstance", "(", "val", ",", "dict", ")", "return", "val" ]
Gets dict type value of 'column' in 'record' in 'table'. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl get TBL REC COL
[ "Gets", "dict", "type", "value", "of", "column", "in", "record", "in", "table", "." ]
python
train
31.636364
proteanhq/protean
src/protean/utils/query.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/utils/query.py#L68-L75
def _unregister_lookup(cls, lookup, lookup_name=None):
    """
    Remove given lookup from cls lookups. For use in tests only as it's
    not thread-safe.
    """
    if lookup_name is None:
        lookup_name = lookup.lookup_name
    del cls.class_lookups[lookup_name]
[ "def", "_unregister_lookup", "(", "cls", ",", "lookup", ",", "lookup_name", "=", "None", ")", ":", "if", "lookup_name", "is", "None", ":", "lookup_name", "=", "lookup", ".", "lookup_name", "del", "cls", ".", "class_lookups", "[", "lookup_name", "]" ]
Remove given lookup from cls lookups. For use in tests only as it's not thread-safe.
[ "Remove", "given", "lookup", "from", "cls", "lookups", ".", "For", "use", "in", "tests", "only", "as", "it", "s", "not", "thread", "-", "safe", "." ]
python
train
36.5
bethgelab/foolbox
foolbox/adversarial.py
https://github.com/bethgelab/foolbox/blob/8ab54248c70e45d8580a7d9ee44c9c0fb5755c4a/foolbox/adversarial.py#L290-L314
def predictions(self, image, strict=True, return_details=False):
    """Interface to model.predictions for attacks.

    Parameters
    ----------
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).
    strict : bool
        Controls if the bounds for the pixel values should be checked.

    """
    in_bounds = self.in_bounds(image)
    assert not strict or in_bounds

    self._total_prediction_calls += 1
    predictions = self.__model.predictions(image)
    is_adversarial, is_best, distance = self.__is_adversarial(
        image, predictions, in_bounds)

    assert predictions.ndim == 1
    if return_details:
        return predictions, is_adversarial, is_best, distance
    else:
        return predictions, is_adversarial
[ "def", "predictions", "(", "self", ",", "image", ",", "strict", "=", "True", ",", "return_details", "=", "False", ")", ":", "in_bounds", "=", "self", ".", "in_bounds", "(", "image", ")", "assert", "not", "strict", "or", "in_bounds", "self", ".", "_total_prediction_calls", "+=", "1", "predictions", "=", "self", ".", "__model", ".", "predictions", "(", "image", ")", "is_adversarial", ",", "is_best", ",", "distance", "=", "self", ".", "__is_adversarial", "(", "image", ",", "predictions", ",", "in_bounds", ")", "assert", "predictions", ".", "ndim", "==", "1", "if", "return_details", ":", "return", "predictions", ",", "is_adversarial", ",", "is_best", ",", "distance", "else", ":", "return", "predictions", ",", "is_adversarial" ]
Interface to model.predictions for attacks. Parameters ---------- image : `numpy.ndarray` Single input with shape as expected by the model (without the batch dimension). strict : bool Controls if the bounds for the pixel values should be checked.
[ "Interface", "to", "model", ".", "predictions", "for", "attacks", "." ]
python
valid
34.4
petl-developers/petl
petl/util/random.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/util/random.py#L15-L45
def randomtable(numflds=5, numrows=100, wait=0, seed=None):
    """
    Construct a table with random numerical data. Use `numflds` and `numrows` to
    specify the number of fields and rows respectively. Set `wait` to a float
    greater than zero to simulate a delay on each row generation (number of
    seconds per row). E.g.::

        >>> import petl as etl
        >>> table = etl.randomtable(3, 100, seed=42)
        >>> table
        +----------------------+----------------------+---------------------+
        | f0                   | f1                   | f2                  |
        +======================+======================+=====================+
        | 0.6394267984578837   | 0.025010755222666936 | 0.27502931836911926 |
        +----------------------+----------------------+---------------------+
        | 0.22321073814882275  | 0.7364712141640124   | 0.6766994874229113  |
        +----------------------+----------------------+---------------------+
        | 0.8921795677048454   | 0.08693883262941615  | 0.4219218196852704  |
        +----------------------+----------------------+---------------------+
        | 0.029797219438070344 | 0.21863797480360336  | 0.5053552881033624  |
        +----------------------+----------------------+---------------------+
        | 0.026535969683863625 | 0.1988376506866485   | 0.6498844377795232  |
        +----------------------+----------------------+---------------------+
        ...

    Note that the data are generated on the fly and are not stored in memory,
    so this function can be used to simulate very large tables.

    """
    return RandomTable(numflds, numrows, wait=wait, seed=seed)
[ "def", "randomtable", "(", "numflds", "=", "5", ",", "numrows", "=", "100", ",", "wait", "=", "0", ",", "seed", "=", "None", ")", ":", "return", "RandomTable", "(", "numflds", ",", "numrows", ",", "wait", "=", "wait", ",", "seed", "=", "seed", ")" ]
Construct a table with random numerical data. Use `numflds` and `numrows` to specify the number of fields and rows respectively. Set `wait` to a float greater than zero to simulate a delay on each row generation (number of seconds per row). E.g.:: >>> import petl as etl >>> table = etl.randomtable(3, 100, seed=42) >>> table +----------------------+----------------------+---------------------+ | f0 | f1 | f2 | +======================+======================+=====================+ | 0.6394267984578837 | 0.025010755222666936 | 0.27502931836911926 | +----------------------+----------------------+---------------------+ | 0.22321073814882275 | 0.7364712141640124 | 0.6766994874229113 | +----------------------+----------------------+---------------------+ | 0.8921795677048454 | 0.08693883262941615 | 0.4219218196852704 | +----------------------+----------------------+---------------------+ | 0.029797219438070344 | 0.21863797480360336 | 0.5053552881033624 | +----------------------+----------------------+---------------------+ | 0.026535969683863625 | 0.1988376506866485 | 0.6498844377795232 | +----------------------+----------------------+---------------------+ ... Note that the data are generated on the fly and are not stored in memory, so this function can be used to simulate very large tables.
[ "Construct", "a", "table", "with", "random", "numerical", "data", ".", "Use", "numflds", "and", "numrows", "to", "specify", "the", "number", "of", "fields", "and", "rows", "respectively", ".", "Set", "wait", "to", "a", "float", "greater", "than", "zero", "to", "simulate", "a", "delay", "on", "each", "row", "generation", "(", "number", "of", "seconds", "per", "row", ")", ".", "E", ".", "g", ".", "::" ]
python
train
53.096774
LuqueDaniel/pybooru
pybooru/api_moebooru.py
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_moebooru.py#L126-L135
def post_revert_tags(self, post_id, history_id):
    """Function to reverts a post to a previous set of tags
    (Requires login) (UNTESTED).

    Parameters:
        post_id (int): The post id number to update.
        history_id (int): The id number of the tag history.
    """
    params = {'id': post_id, 'history_id': history_id}
    return self._get('post/revert_tags', params, 'PUT')
[ "def", "post_revert_tags", "(", "self", ",", "post_id", ",", "history_id", ")", ":", "params", "=", "{", "'id'", ":", "post_id", ",", "'history_id'", ":", "history_id", "}", "return", "self", ".", "_get", "(", "'post/revert_tags'", ",", "params", ",", "'PUT'", ")" ]
Function to reverts a post to a previous set of tags (Requires login) (UNTESTED). Parameters: post_id (int): The post id number to update. history_id (int): The id number of the tag history.
[ "Function", "to", "reverts", "a", "post", "to", "a", "previous", "set", "of", "tags", "(", "Requires", "login", ")", "(", "UNTESTED", ")", "." ]
python
train
41.3
chrisspen/burlap
burlap/apache.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/apache.py#L558-L570
def configure_modrpaf(self):
    """
    Installs the mod-rpaf Apache module.

    https://github.com/gnif/mod_rpaf
    """
    r = self.local_renderer
    if r.env.modrpaf_enabled:
        self.install_packages()
        self.enable_mod('rpaf')
    else:
        if self.last_manifest.modrpaf_enabled:
            self.disable_mod('mod_rpaf')
[ "def", "configure_modrpaf", "(", "self", ")", ":", "r", "=", "self", ".", "local_renderer", "if", "r", ".", "env", ".", "modrpaf_enabled", ":", "self", ".", "install_packages", "(", ")", "self", ".", "enable_mod", "(", "'rpaf'", ")", "else", ":", "if", "self", ".", "last_manifest", ".", "modrpaf_enabled", ":", "self", ".", "disable_mod", "(", "'mod_rpaf'", ")" ]
Installs the mod-rpaf Apache module. https://github.com/gnif/mod_rpaf
[ "Installs", "the", "mod", "-", "rpaf", "Apache", "module", "." ]
python
valid
28.846154
vlasovskikh/funcparserlib
funcparserlib/util.py
https://github.com/vlasovskikh/funcparserlib/blob/0b689920babcf6079a4b3e8721cc10bbc089d81c/funcparserlib/util.py#L25-L49
def pretty_tree(x, kids, show):
    """(a, (a -> list(a)), (a -> str)) -> str

    Returns a pseudographic tree representation of x similar to the tree command
    in Unix.
    """
    (MID, END, CONT, LAST, ROOT) = (u'|-- ', u'`-- ', u'|   ', u'    ', u'')

    def rec(x, indent, sym):
        line = indent + sym + show(x)
        xs = kids(x)
        if len(xs) == 0:
            return line
        else:
            if sym == MID:
                next_indent = indent + CONT
            elif sym == ROOT:
                next_indent = indent + ROOT
            else:
                next_indent = indent + LAST
            syms = [MID] * (len(xs) - 1) + [END]
            lines = [rec(x, next_indent, sym) for x, sym in zip(xs, syms)]
            return u'\n'.join([line] + lines)

    return rec(x, u'', ROOT)
[ "def", "pretty_tree", "(", "x", ",", "kids", ",", "show", ")", ":", "(", "MID", ",", "END", ",", "CONT", ",", "LAST", ",", "ROOT", ")", "=", "(", "u'|-- '", ",", "u'`-- '", ",", "u'| '", ",", "u' '", ",", "u''", ")", "def", "rec", "(", "x", ",", "indent", ",", "sym", ")", ":", "line", "=", "indent", "+", "sym", "+", "show", "(", "x", ")", "xs", "=", "kids", "(", "x", ")", "if", "len", "(", "xs", ")", "==", "0", ":", "return", "line", "else", ":", "if", "sym", "==", "MID", ":", "next_indent", "=", "indent", "+", "CONT", "elif", "sym", "==", "ROOT", ":", "next_indent", "=", "indent", "+", "ROOT", "else", ":", "next_indent", "=", "indent", "+", "LAST", "syms", "=", "[", "MID", "]", "*", "(", "len", "(", "xs", ")", "-", "1", ")", "+", "[", "END", "]", "lines", "=", "[", "rec", "(", "x", ",", "next_indent", ",", "sym", ")", "for", "x", ",", "sym", "in", "zip", "(", "xs", ",", "syms", ")", "]", "return", "u'\\n'", ".", "join", "(", "[", "line", "]", "+", "lines", ")", "return", "rec", "(", "x", ",", "u''", ",", "ROOT", ")" ]
(a, (a -> list(a)), (a -> str)) -> str Returns a pseudographic tree representation of x similar to the tree command in Unix.
[ "(", "a", "(", "a", "-", ">", "list", "(", "a", "))", "(", "a", "-", ">", "str", "))", "-", ">", "str" ]
python
train
31.68
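A usage sketch with a plain (label, children) tuple tree:

tree = ('a', [('b', [('d', [])]),
              ('c', [])])
print(pretty_tree(tree, kids=lambda n: n[1], show=lambda n: n[0]))
# a
# |-- b
# |   `-- d
# `-- c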
iblancasa/GitHubCity
src/githubcity/ghcity.py
https://github.com/iblancasa/GitHubCity/blob/c5299c6859dbefbd869e2ac6ff2faff2a39cf32f/src/githubcity/ghcity.py#L178-L198
def getConfig(self):
    """Return the configuration of the city.

    :return: configuration of the city.
    :rtype: dict.
    """
    config = {}
    config["name"] = self.city
    config["intervals"] = self.__intervals
    config["last_date"] = self.__lastDay
    config["excludedUsers"] = []
    config["excludedLocations"] = []

    for e in self.__excludedUsers:
        config["excludedUsers"].append(e)

    for e in self.__excludedLocations:
        config["excludedLocations"].append(e)

    config["locations"] = self.__locations
    return config
[ "def", "getConfig", "(", "self", ")", ":", "config", "=", "{", "}", "config", "[", "\"name\"", "]", "=", "self", ".", "city", "config", "[", "\"intervals\"", "]", "=", "self", ".", "__intervals", "config", "[", "\"last_date\"", "]", "=", "self", ".", "__lastDay", "config", "[", "\"excludedUsers\"", "]", "=", "[", "]", "config", "[", "\"excludedLocations\"", "]", "=", "[", "]", "for", "e", "in", "self", ".", "__excludedUsers", ":", "config", "[", "\"excludedUsers\"", "]", ".", "append", "(", "e", ")", "for", "e", "in", "self", ".", "__excludedLocations", ":", "config", "[", "\"excludedLocations\"", "]", ".", "append", "(", "e", ")", "config", "[", "\"locations\"", "]", "=", "self", ".", "__locations", "return", "config" ]
Return the configuration of the city. :return: configuration of the city. :rtype: dict.
[ "Return", "the", "configuration", "of", "the", "city", "." ]
python
train
28.714286
mattloper/chumpy
chumpy/ch.py
https://github.com/mattloper/chumpy/blob/a3cfdb1be3c8265c369c507b22f6f3f89414c772/chumpy/ch.py#L185-L189
def sid(self): """Semantic id.""" pnames = list(self.terms)+list(self.dterms) pnames.sort() return (self.__class__, tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__]))
[ "def", "sid", "(", "self", ")", ":", "pnames", "=", "list", "(", "self", ".", "terms", ")", "+", "list", "(", "self", ".", "dterms", ")", "pnames", ".", "sort", "(", ")", "return", "(", "self", ".", "__class__", ",", "tuple", "(", "[", "(", "k", ",", "id", "(", "self", ".", "__dict__", "[", "k", "]", ")", ")", "for", "k", "in", "pnames", "if", "k", "in", "self", ".", "__dict__", "]", ")", ")" ]
Semantic id.
[ "Semantic", "id", "." ]
python
train
43.4
CybOXProject/mixbox
mixbox/signals.py
https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/signals.py#L75-L89
def __live_receivers(signal): """Return all signal handlers that are currently still alive for the input `signal`. Args: signal: A signal name. Returns: A list of callable receivers for the input signal. """ with __lock: __purge() receivers = [funcref() for funcref in __receivers[signal]] return receivers
[ "def", "__live_receivers", "(", "signal", ")", ":", "with", "__lock", ":", "__purge", "(", ")", "receivers", "=", "[", "funcref", "(", ")", "for", "funcref", "in", "__receivers", "[", "signal", "]", "]", "return", "receivers" ]
Return all signal handlers that are currently still alive for the input `signal`. Args: signal: A signal name. Returns: A list of callable receivers for the input signal.
[ "Return", "all", "signal", "handlers", "that", "are", "currently", "still", "alive", "for", "the", "input", "signal", "." ]
python
train
23.666667
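The record above purges dead weak references under a lock before dereferencing the survivors; the same pattern in a self-contained sketch (the registry, signal name, and handler are invented, not mixbox's actual module state):

import threading
import weakref

_lock = threading.Lock()
_receivers = {'saved': []}  # hypothetical registry: signal name -> list of weakrefs

def connect(signal, func):
    with _lock:
        _receivers[signal].append(weakref.ref(func))

def live_receivers(signal):
    with _lock:
        alive = [ref for ref in _receivers[signal] if ref() is not None]  # purge dead refs
        _receivers[signal] = alive
        return [ref() for ref in alive]

def on_saved():
    print('saved!')

connect('saved', on_saved)
for handler in live_receivers('saved'):
    handler()  # prints: saved!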
mlenzen/collections-extended
collections_extended/bags.py
https://github.com/mlenzen/collections-extended/blob/ee9e86f6bbef442dbebcb3a5970642c5c969e2cf/collections_extended/bags.py#L238-L252
def is_subset(self, other): """Check that every element in self has a count <= in other. Args: other (Set) """ if isinstance(other, _basebag): for elem, count in self.counts(): if not count <= other.count(elem): return False else: for elem in self: if self.count(elem) > 1 or elem not in other: return False return True
[ "def", "is_subset", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "_basebag", ")", ":", "for", "elem", ",", "count", "in", "self", ".", "counts", "(", ")", ":", "if", "not", "count", "<=", "other", ".", "count", "(", "elem", ")", ":", "return", "False", "else", ":", "for", "elem", "in", "self", ":", "if", "self", ".", "count", "(", "elem", ")", ">", "1", "or", "elem", "not", "in", "other", ":", "return", "False", "return", "True" ]
Check that every element in self has a count <= in other. Args: other (Set)
[ "Check", "that", "every", "element", "in", "self", "has", "a", "count", "<", "=", "in", "other", "." ]
python
train
23
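The count-comparison at the heart of is_subset can be checked standalone with collections.Counter; this is a sketch of the idea, not the library's own bag type:

from collections import Counter

def counter_is_subset(small, big):
    """True when every element occurs in `big` at least as often as in `small`."""
    big_counts = Counter(big)
    return all(n <= big_counts[elem] for elem, n in Counter(small).items())

print(counter_is_subset('aab', 'aaabbc'))  # True: 2 a's <= 3, 1 b <= 2
print(counter_is_subset('aaaa', 'aaab'))   # False: 4 a's > 3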
rlabbe/filterpy
filterpy/examples/radar_sim.py
https://github.com/rlabbe/filterpy/blob/8123214de798ffb63db968bb0b9492ee74e77950/filterpy/examples/radar_sim.py#L39-L53
def get_range(self, process_err_pct=0.05): """ Returns slant range to the object. Call once for each new measurement at dt time from last call. """ vel = self.vel + 5 * randn() alt = self.alt + 10 * randn() self.pos += vel*self.dt err = (self.pos * process_err_pct) * randn() slant_range = (self.pos**2 + alt**2)**.5 + err return slant_range
[ "def", "get_range", "(", "self", ",", "process_err_pct", "=", "0.05", ")", ":", "vel", "=", "self", ".", "vel", "+", "5", "*", "randn", "(", ")", "alt", "=", "self", ".", "alt", "+", "10", "*", "randn", "(", ")", "self", ".", "pos", "+=", "vel", "*", "self", ".", "dt", "err", "=", "(", "self", ".", "pos", "*", "process_err_pct", ")", "*", "randn", "(", ")", "slant_range", "=", "(", "self", ".", "pos", "**", "2", "+", "alt", "**", "2", ")", "**", ".5", "+", "err", "return", "slant_range" ]
Returns slant range to the object. Call once for each new measurement at dt time from last call.
[ "Returns", "slant", "range", "to", "the", "object", ".", "Call", "once", "for", "each", "new", "measurement", "at", "dt", "time", "from", "last", "call", "." ]
python
train
27.4
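Noise terms aside, the slant range above is just the hypotenuse of ground distance and altitude; a quick noise-free check with invented values:

# Hypothetical 3-4-5 geometry: 3 km downrange, 4 km up, same units throughout.
pos, alt = 3000.0, 4000.0
slant_range = (pos**2 + alt**2) ** 0.5
print(slant_range)  # 5000.0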
pyblish/pyblish-lite
pyblish_lite/control.py
https://github.com/pyblish/pyblish-lite/blob/9172b81c7ae19a36e99c89dd16e102201992dc20/pyblish_lite/control.py#L255-L276
def cleanup(self): """Forcefully delete objects from memory In an ideal world, this shouldn't be necessary. Garbage collection guarantees that anything without reference is automatically removed. However, because this application is designed to be run multiple times from the same interpreter process, extra care must be taken to ensure there are no memory leaks. Explicitly deleting objects shines a light on where objects may still be referenced in the form of an error. No errors means this was unnecessary, but that's ok. """ for instance in self.context: del(instance) for plugin in self.plugins: del(plugin)
[ "def", "cleanup", "(", "self", ")", ":", "for", "instance", "in", "self", ".", "context", ":", "del", "(", "instance", ")", "for", "plugin", "in", "self", ".", "plugins", ":", "del", "(", "plugin", ")" ]
Forcefully delete objects from memory In an ideal world, this shouldn't be necessary. Garbage collection guarantees that anything without reference is automatically removed. However, because this application is designed to be run multiple times from the same interpreter process, extra care must be taken to ensure there are no memory leaks. Explicitly deleting objects shines a light on where objects may still be referenced in the form of an error. No errors means this was unnecessary, but that's ok.
[ "Forcefully", "delete", "objects", "from", "memory" ]
python
train
32.954545
ambitioninc/rabbitmq-admin
rabbitmq_admin/base.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L125-L133
def _delete(self, *args, **kwargs): """ A wrapper for deleting things :returns: The response of your delete :rtype: dict """ response = requests.delete(*args, **kwargs) response.raise_for_status()
[ "def", "_delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "requests", ".", "delete", "(", "*", "args", ",", "*", "*", "kwargs", ")", "response", ".", "raise_for_status", "(", ")" ]
A wrapper for deleting things :returns: The response of your delete :rtype: dict
[ "A", "wrapper", "for", "deleting", "things" ]
python
train
27.222222
aiidateam/aiida-nwchem
aiida_nwchem/parsers/basic.py
https://github.com/aiidateam/aiida-nwchem/blob/21034e7f8ea8249948065c28030f4b572a6ecf05/aiida_nwchem/parsers/basic.py#L32-L78
def _get_output_nodes(self, output_path, error_path): """ Extracts output nodes from the standard output and standard error files. """ from aiida.orm.data.array.trajectory import TrajectoryData import re state = None step = None scale = None with open(output_path) as f: lines = [x.strip('\n') for x in f.readlines()] result_dict = dict() trajectory = None for line in lines: if state is None and re.match('^\s*NWChem SCF Module\s*$',line): state = 'nwchem-scf-module' continue if state is None and re.match('^\s*NWChem Geometry Optimization\s*$',line): state = 'nwchem-geometry-optimisation' trajectory = TrajectoryData() continue if state == 'nwchem-scf-module' and re.match('^\s*Final RHF \s*results\s*$',line): state = 'final-rhf-results' continue if re.match('^\s*\-*\s*$',line): continue if state == 'final-rhf-results': result = re.match('^\s*([^=]+?)\s*=\s*([\-\d\.]+)$',line) if result: key = re.sub('[^a-zA-Z0-9]+', '_', result.group(1).lower()) result_dict[key] = result.group(2) else: state = 'nwchem-scf-module' if state == 'nwchem-geometry-optimisation' and re.match('^\s*Step\s+\d+\s*$',line): result = re.match('^\s*Step\s+(\d+)\s*$',line) step = result.group(1) continue if state == 'nwchem-geometry-optimisation' and \ re.match('^\s*Output coordinates in a.u.',line): state = 'nwchem-geometry-optimisation-coordinates' result = re.match('scale by \s*([\-\d\.]+)',line) scale = result.group(1) continue return [('parameters', ParameterData(dict=result_dict))]
[ "def", "_get_output_nodes", "(", "self", ",", "output_path", ",", "error_path", ")", ":", "from", "aiida", ".", "orm", ".", "data", ".", "array", ".", "trajectory", "import", "TrajectoryData", "import", "re", "state", "=", "None", "step", "=", "None", "scale", "=", "None", "with", "open", "(", "output_path", ")", "as", "f", ":", "lines", "=", "[", "x", ".", "strip", "(", "'\\n'", ")", "for", "x", "in", "f", ".", "readlines", "(", ")", "]", "result_dict", "=", "dict", "(", ")", "trajectory", "=", "None", "for", "line", "in", "lines", ":", "if", "state", "is", "None", "and", "re", ".", "match", "(", "'^\\s*NWChem SCF Module\\s*$'", ",", "line", ")", ":", "state", "=", "'nwchem-scf-module'", "continue", "if", "state", "is", "None", "and", "re", ".", "match", "(", "'^\\s*NWChem Geometry Optimization\\s*$'", ",", "line", ")", ":", "state", "=", "'nwchem-geometry-optimisation'", "trajectory", "=", "TrajectoryData", "(", ")", "continue", "if", "state", "==", "'nwchem-scf-module'", "and", "re", ".", "match", "(", "'^\\s*Final RHF \\s*results\\s*$'", ",", "line", ")", ":", "state", "=", "'final-rhf-results'", "continue", "if", "re", ".", "match", "(", "'^\\s*\\-*\\s*$'", ",", "line", ")", ":", "continue", "if", "state", "==", "'final-rhf-results'", ":", "result", "=", "re", ".", "match", "(", "'^\\s*([^=]+?)\\s*=\\s*([\\-\\d\\.]+)$'", ",", "line", ")", "if", "result", ":", "key", "=", "re", ".", "sub", "(", "'[^a-zA-Z0-9]+'", ",", "'_'", ",", "result", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ")", "result_dict", "[", "key", "]", "=", "result", ".", "group", "(", "2", ")", "else", ":", "state", "=", "'nwchem-scf-module'", "if", "state", "==", "'nwchem-geometry-optimisation'", "and", "re", ".", "match", "(", "'^\\s*Step\\s+\\d+\\s*$'", ",", "line", ")", ":", "result", "=", "re", ".", "match", "(", "'^\\s*Step\\s+(\\d+)\\s*$'", ",", "line", ")", "step", "=", "result", ".", "group", "(", "1", ")", "continue", "if", "state", "==", "'nwchem-geometry-optimisation'", "and", "re", ".", "match", "(", "'^\\s*Output coordinates in a.u.'", ",", "line", ")", ":", "state", "=", "'nwchem-geometry-optimisation-coordinates'", "result", "=", "re", ".", "match", "(", "'scale by \\s(*[\\-\\d\\.]+)'", ",", "line", ")", "scale", "=", "result", ".", "group", "(", "1", ")", "continue", "return", "[", "(", "'parameters'", ",", "ParameterData", "(", "dict", "=", "result_dict", ")", ")", "]" ]
Extracts output nodes from the standard output and standard error files.
[ "Extracts", "output", "nodes", "from", "the", "standard", "output", "and", "standard", "error", "files", "." ]
python
train
42.638298
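The record above is a line-oriented state machine over NWChem output; the same pattern in miniature, with an invented header, key names, and input lines:

import re

def parse_final_results(lines):
    """Collect 'key = value' pairs that appear after a 'Final results' header."""
    state, results = None, {}
    for line in lines:
        if state is None and re.match(r'^\s*Final results\s*$', line):
            state = 'results'
            continue
        if state == 'results':
            m = re.match(r'^\s*([^=]+?)\s*=\s*([-\d.]+)\s*$', line)
            if m:
                results[re.sub(r'[^a-zA-Z0-9]+', '_', m.group(1).lower())] = m.group(2)
            else:
                state = None  # leave the block on the first non-matching line
    return results

print(parse_final_results(['Final results', 'Total energy = -75.9854', 'done']))
# {'total_energy': '-75.9854'}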
redhat-cip/python-dciclient
dciclient/v1/shell_commands/feeder.py
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/shell_commands/feeder.py#L148-L160
def reset_api_secret(context, id, etag): """reset_api_secret(context, id, etag) Reset a Feeder api_secret. >>> dcictl feeder-reset-api-secret [OPTIONS] :param string id: ID of the feeder [required] :param string etag: Entity tag of the feeder resource [required] """ result = feeder.reset_api_secret(context, id=id, etag=etag) utils.format_output(result, context.format, headers=['id', 'api_secret', 'etag'])
[ "def", "reset_api_secret", "(", "context", ",", "id", ",", "etag", ")", ":", "result", "=", "feeder", ".", "reset_api_secret", "(", "context", ",", "id", "=", "id", ",", "etag", "=", "etag", ")", "utils", ".", "format_output", "(", "result", ",", "context", ".", "format", ",", "headers", "=", "[", "'id'", ",", "'api_secret'", ",", "'etag'", "]", ")" ]
reset_api_secret(context, id, etag) Reset a Feeder api_secret. >>> dcictl feeder-reset-api-secret [OPTIONS] :param string id: ID of the feeder [required] :param string etag: Entity tag of the feeder resource [required]
[ "reset_api_secret", "(", "context", "id", "etag", ")" ]
python
train
35
NeuroML/pyNeuroML
pyneuroml/tune/NeuroMLController.py
https://github.com/NeuroML/pyNeuroML/blob/aeba2e3040b360bb26556f643cccbfb3dac3b8fb/pyneuroml/tune/NeuroMLController.py#L154-L323
def run_individual(sim_var, reference, neuroml_file, nml_doc, still_included, generate_dir, target, sim_time, dt, simulator, cleanup = True, show=False): """ Run an individual simulation. The candidate data has been flattened into the sim_var dict. The sim_var dict contains parameter:value key value pairs, which are applied to the model before it is simulated. """ for var_name in sim_var.keys(): individual_var_names = var_name.split('+') for individual_var_name in individual_var_names: words = individual_var_name.split('/') type, id1 = words[0].split(':') if ':' in words[1]: variable, id2 = words[1].split(':') else: variable = words[1] id2 = None units = words[2] value = sim_var[var_name] pyneuroml.pynml.print_comment_v(' Changing value of %s (%s) in %s (%s) to: %s %s'%(variable, id2, type, id1, value, units)) if type == 'channel': channel = nml_doc.get_by_id(id1) if channel: print("Setting channel %s"%(channel)) if variable == 'vShift': channel.v_shift = '%s %s'%(value, units) else: pyneuroml.pynml.print_comment_v('Could not find channel with id %s from expression: %s'%(id1, individual_var_name)) exit() elif type == 'cell': cell = None for c in nml_doc.cells: if c.id == id1: cell = c if variable == 'channelDensity': chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.id == id2: chanDens = cd chanDens.cond_density = '%s %s'%(value, units) elif variable == 'vShift_channelDensity': chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.id == id2: chanDens = cd chanDens.v_shift = '%s %s'%(value, units) elif variable == 'channelDensityNernst': chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_density_nernsts: if cd.id == id2: chanDens = cd chanDens.cond_density = '%s %s'%(value, units) elif variable == 'erev_id': # change all values of erev in channelDensity elements with only this id chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.id == id2: chanDens = cd chanDens.erev = '%s %s'%(value, units) elif variable == 'erev_ion': # change all values of erev in channelDensity elements with this ion chanDens = None for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts: if cd.ion == id2: chanDens = cd chanDens.erev = '%s %s'%(value, units) elif variable == 'specificCapacitance': specCap = None for sc in cell.biophysical_properties.membrane_properties.specific_capacitances: if (sc.segment_groups == None and id2 == 'all') or sc.segment_groups == id2 : specCap = sc specCap.value = '%s %s'%(value, units) elif variable == 'resistivity': resistivity = None for rs in cell.biophysical_properties.intracellular_properties.resistivities: if (rs.segment_groups == None and id2 == 'all') or rs.segment_groups == id2 : resistivity = rs resistivity.value = '%s %s'%(value, units) else: pyneuroml.pynml.print_comment_v('Unknown variable (%s) in variable expression: %s'%(variable, individual_var_name)) exit() elif type == 'izhikevich2007Cell': izhcell = None for c in nml_doc.izhikevich2007_cells: if c.id == id1: izhcell = c izhcell.__setattr__(variable, '%s %s'%(value, units)) else: pyneuroml.pynml.print_comment_v('Unknown type (%s) in variable expression: %s'%(type, individual_var_name)) 
new_neuroml_file = '%s/%s'%(generate_dir,os.path.basename(neuroml_file)) if new_neuroml_file == neuroml_file: pyneuroml.pynml.print_comment_v('Cannot use a directory for generating into (%s) which is the same location of the NeuroML file (%s)!'% \ (neuroml_file, generate_dir)) pyneuroml.pynml.write_neuroml2_file(nml_doc, new_neuroml_file) for include in still_included: inc_loc = '%s/%s'%(os.path.dirname(os.path.abspath(neuroml_file)),include) pyneuroml.pynml.print_comment_v("Copying non included file %s to %s (%s) beside %s"%(inc_loc, generate_dir,os.path.abspath(generate_dir), new_neuroml_file)) shutil.copy(inc_loc, generate_dir) from pyneuroml.tune.NeuroMLSimulation import NeuroMLSimulation sim = NeuroMLSimulation(reference, neuroml_file = new_neuroml_file, target = target, sim_time = sim_time, dt = dt, simulator = simulator, generate_dir = generate_dir, cleanup = cleanup, nml_doc = nml_doc) sim.go() if show: sim.show() return sim.t, sim.volts
[ "def", "run_individual", "(", "sim_var", ",", "reference", ",", "neuroml_file", ",", "nml_doc", ",", "still_included", ",", "generate_dir", ",", "target", ",", "sim_time", ",", "dt", ",", "simulator", ",", "cleanup", "=", "True", ",", "show", "=", "False", ")", ":", "for", "var_name", "in", "sim_var", ".", "keys", "(", ")", ":", "individual_var_names", "=", "var_name", ".", "split", "(", "'+'", ")", "for", "individual_var_name", "in", "individual_var_names", ":", "words", "=", "individual_var_name", ".", "split", "(", "'/'", ")", "type", ",", "id1", "=", "words", "[", "0", "]", ".", "split", "(", "':'", ")", "if", "':'", "in", "words", "[", "1", "]", ":", "variable", ",", "id2", "=", "words", "[", "1", "]", ".", "split", "(", "':'", ")", "else", ":", "variable", "=", "words", "[", "1", "]", "id2", "=", "None", "units", "=", "words", "[", "2", "]", "value", "=", "sim_var", "[", "var_name", "]", "pyneuroml", ".", "pynml", ".", "print_comment_v", "(", "' Changing value of %s (%s) in %s (%s) to: %s %s'", "%", "(", "variable", ",", "id2", ",", "type", ",", "id1", ",", "value", ",", "units", ")", ")", "if", "type", "==", "'channel'", ":", "channel", "=", "nml_doc", ".", "get_by_id", "(", "id1", ")", "if", "channel", ":", "print", "(", "\"Setting channel %s\"", "%", "(", "channel", ")", ")", "if", "variable", "==", "'vShift'", ":", "channel", ".", "v_shift", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "else", ":", "pyneuroml", ".", "pynml", ".", "print_comment_v", "(", "'Could not find channel with id %s from expression: %s'", "%", "(", "id1", ",", "individual_var_name", ")", ")", "exit", "(", ")", "elif", "type", "==", "'cell'", ":", "cell", "=", "None", "for", "c", "in", "nml_doc", ".", "cells", ":", "if", "c", ".", "id", "==", "id1", ":", "cell", "=", "c", "if", "variable", "==", "'channelDensity'", ":", "chanDens", "=", "None", "for", "cd", "in", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_densities", "+", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_density_v_shifts", ":", "if", "cd", ".", "id", "==", "id2", ":", "chanDens", "=", "cd", "chanDens", ".", "cond_density", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "elif", "variable", "==", "'vShift_channelDensity'", ":", "chanDens", "=", "None", "for", "cd", "in", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_density_v_shifts", ":", "if", "cd", ".", "id", "==", "id2", ":", "chanDens", "=", "cd", "chanDens", ".", "v_shift", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "elif", "variable", "==", "'channelDensityNernst'", ":", "chanDens", "=", "None", "for", "cd", "in", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_density_nernsts", ":", "if", "cd", ".", "id", "==", "id2", ":", "chanDens", "=", "cd", "chanDens", ".", "cond_density", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "elif", "variable", "==", "'erev_id'", ":", "# change all values of erev in channelDensity elements with only this id", "chanDens", "=", "None", "for", "cd", "in", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_densities", "+", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_density_v_shifts", ":", "if", "cd", ".", "id", "==", "id2", ":", "chanDens", "=", "cd", "chanDens", ".", "erev", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "elif", "variable", "==", "'erev_ion'", ":", "# change all values of erev in channelDensity 
elements with this ion", "chanDens", "=", "None", "for", "cd", "in", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_densities", "+", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "channel_density_v_shifts", ":", "if", "cd", ".", "ion", "==", "id2", ":", "chanDens", "=", "cd", "chanDens", ".", "erev", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "elif", "variable", "==", "'specificCapacitance'", ":", "specCap", "=", "None", "for", "sc", "in", "cell", ".", "biophysical_properties", ".", "membrane_properties", ".", "specific_capacitances", ":", "if", "(", "sc", ".", "segment_groups", "==", "None", "and", "id2", "==", "'all'", ")", "or", "sc", ".", "segment_groups", "==", "id2", ":", "specCap", "=", "sc", "specCap", ".", "value", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "elif", "variable", "==", "'resistivity'", ":", "resistivity", "=", "None", "for", "rs", "in", "cell", ".", "biophysical_properties", ".", "intracellular_properties", ".", "resistivities", ":", "if", "(", "rs", ".", "segment_groups", "==", "None", "and", "id2", "==", "'all'", ")", "or", "rs", ".", "segment_groups", "==", "id2", ":", "resistivity", "=", "rs", "resistivity", ".", "value", "=", "'%s %s'", "%", "(", "value", ",", "units", ")", "else", ":", "pyneuroml", ".", "pynml", ".", "print_comment_v", "(", "'Unknown variable (%s) in variable expression: %s'", "%", "(", "variable", ",", "individual_var_name", ")", ")", "exit", "(", ")", "elif", "type", "==", "'izhikevich2007Cell'", ":", "izhcell", "=", "None", "for", "c", "in", "nml_doc", ".", "izhikevich2007_cells", ":", "if", "c", ".", "id", "==", "id1", ":", "izhcell", "=", "c", "izhcell", ".", "__setattr__", "(", "variable", ",", "'%s %s'", "%", "(", "value", ",", "units", ")", ")", "else", ":", "pyneuroml", ".", "pynml", ".", "print_comment_v", "(", "'Unknown type (%s) in variable expression: %s'", "%", "(", "type", ",", "individual_var_name", ")", ")", "new_neuroml_file", "=", "'%s/%s'", "%", "(", "generate_dir", ",", "os", ".", "path", ".", "basename", "(", "neuroml_file", ")", ")", "if", "new_neuroml_file", "==", "neuroml_file", ":", "pyneuroml", ".", "pynml", ".", "print_comment_v", "(", "'Cannot use a directory for generating into (%s) which is the same location of the NeuroML file (%s)!'", "%", "(", "neuroml_file", ",", "generate_dir", ")", ")", "pyneuroml", ".", "pynml", ".", "write_neuroml2_file", "(", "nml_doc", ",", "new_neuroml_file", ")", "for", "include", "in", "still_included", ":", "inc_loc", "=", "'%s/%s'", "%", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "neuroml_file", ")", ")", ",", "include", ")", "pyneuroml", ".", "pynml", ".", "print_comment_v", "(", "\"Copying non included file %s to %s (%s) beside %s\"", "%", "(", "inc_loc", ",", "generate_dir", ",", "os", ".", "path", ".", "abspath", "(", "generate_dir", ")", ",", "new_neuroml_file", ")", ")", "shutil", ".", "copy", "(", "inc_loc", ",", "generate_dir", ")", "from", "pyneuroml", ".", "tune", ".", "NeuroMLSimulation", "import", "NeuroMLSimulation", "sim", "=", "NeuroMLSimulation", "(", "reference", ",", "neuroml_file", "=", "new_neuroml_file", ",", "target", "=", "target", ",", "sim_time", "=", "sim_time", ",", "dt", "=", "dt", ",", "simulator", "=", "simulator", ",", "generate_dir", "=", "generate_dir", ",", "cleanup", "=", "cleanup", ",", "nml_doc", "=", "nml_doc", ")", "sim", ".", "go", "(", ")", "if", "show", ":", "sim", ".", "show", "(", ")", "return", "sim", ".", "t", ",", 
"sim", ".", "volts" ]
Run an individual simulation. The candidate data has been flattened into the sim_var dict. The sim_var dict contains parameter:value key value pairs, which are applied to the model before it is simulated.
[ "Run", "an", "individual", "simulation", "." ]
python
train
38.311765
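The sim_var keys in the record encode their target as 'type:id1/variable[:id2]/units'; a standalone parser for just that convention (the example expression is invented):

def parse_var_path(expr):
    """Split 'type:id1/variable[:id2]/units'; id2 is optional."""
    type_part, var_part, units = expr.split('/')
    type_name, id1 = type_part.split(':')
    if ':' in var_part:
        variable, id2 = var_part.split(':')
    else:
        variable, id2 = var_part, None
    return type_name, id1, variable, id2, units

print(parse_var_path('cell:pyr/channelDensity:na_all/mS_per_cm2'))
# ('cell', 'pyr', 'channelDensity', 'na_all', 'mS_per_cm2')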
oasiswork/zimsoap
zimsoap/client.py
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L2016-L2040
def apply_filter_rule(self, _filter, query='in:inbox', way='in'): """ :param: _filter _filter a zobjects.FilterRule or the filter name :param: query on what will the filter be applied :param: way string describing if filter is for 'in' or 'out' messages :returns: list of impacted message's ids """ if isinstance(_filter, zobjects.FilterRule): _filter = _filter.name content = { 'filterRules': { 'filterRule': {'name': _filter} }, 'query': {'_content': query} } if way == 'in': ids = self.request('ApplyFilterRules', content) elif way == 'out': ids = self.request('ApplyOutgoingFilterRules', content) if ids: return [int(m) for m in ids['m']['ids'].split(',')] else: return []
[ "def", "apply_filter_rule", "(", "self", ",", "_filter", ",", "query", "=", "'in:inbox'", ",", "way", "=", "'in'", ")", ":", "if", "isinstance", "(", "_filter", ",", "zobjects", ".", "FilterRule", ")", ":", "_filter", "=", "_filter", ".", "name", "content", "=", "{", "'filterRules'", ":", "{", "'filterRule'", ":", "{", "'name'", ":", "_filter", "}", "}", ",", "'query'", ":", "{", "'_content'", ":", "query", "}", "}", "if", "way", "==", "'in'", ":", "ids", "=", "self", ".", "request", "(", "'ApplyFilterRules'", ",", "content", ")", "elif", "way", "==", "'out'", ":", "ids", "=", "self", ".", "request", "(", "'ApplyOutgoingFilterRules'", ",", "content", ")", "if", "ids", ":", "return", "[", "int", "(", "m", ")", "for", "m", "in", "ids", "[", "'m'", "]", "[", "'ids'", "]", ".", "split", "(", "','", ")", "]", "else", ":", "return", "[", "]" ]
:param: _filter _filter a zobjects.FilterRule or the filter name :param: query on what will the filter be applied :param: way string describing if filter is for 'in' or 'out' messages :returns: list of impacted message's ids
[ ":", "param", ":", "_filter", "_filter", "a", "zobjects", ".", "FilterRule", "or", "the", "filter", "name", ":", "param", ":", "query", "on", "what", "will", "the", "filter", "be", "applied", ":", "param", ":", "way", "string", "discribing", "if", "filter", "is", "for", "in", "or", "out", "messages", ":", "returns", ":", "list", "of", "impacted", "message", "s", "ids" ]
python
train
34.96
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L158-L164
def unset_default_org(self): """ unset the default orgs for tasks """ for org in self.list_orgs(): org_config = self.get_org(org) if org_config.default: del org_config.config["default"] self.set_org(org_config)
[ "def", "unset_default_org", "(", "self", ")", ":", "for", "org", "in", "self", ".", "list_orgs", "(", ")", ":", "org_config", "=", "self", ".", "get_org", "(", "org", ")", "if", "org_config", ".", "default", ":", "del", "org_config", ".", "config", "[", "\"default\"", "]", "self", ".", "set_org", "(", "org_config", ")" ]
unset the default orgs for tasks
[ "unset", "the", "default", "orgs", "for", "tasks" ]
python
train
39.428571
datastax/python-driver
cassandra/io/twistedreactor.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/io/twistedreactor.py#L283-L300
def close(self): """ Disconnect and error-out all requests. """ with self.lock: if self.is_closed: return self.is_closed = True log.debug("Closing connection (%s) to %s", id(self), self.endpoint) reactor.callFromThread(self.connector.disconnect) log.debug("Closed socket to %s", self.endpoint) if not self.is_defunct: self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.endpoint)) # don't leave in-progress operations hanging self.connected_event.set()
[ "def", "close", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "if", "self", ".", "is_closed", ":", "return", "self", ".", "is_closed", "=", "True", "log", ".", "debug", "(", "\"Closing connection (%s) to %s\"", ",", "id", "(", "self", ")", ",", "self", ".", "endpoint", ")", "reactor", ".", "callFromThread", "(", "self", ".", "connector", ".", "disconnect", ")", "log", ".", "debug", "(", "\"Closed socket to %s\"", ",", "self", ".", "endpoint", ")", "if", "not", "self", ".", "is_defunct", ":", "self", ".", "error_all_requests", "(", "ConnectionShutdown", "(", "\"Connection to %s was closed\"", "%", "self", ".", "endpoint", ")", ")", "# don't leave in-progress operations hanging", "self", ".", "connected_event", ".", "set", "(", ")" ]
Disconnect and error-out all requests.
[ "Disconnect", "and", "error", "-", "out", "all", "requests", "." ]
python
train
34.555556
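The close() above follows a common idempotent-shutdown pattern: a guard flag flipped under a lock, then waiters signalled. A generic sketch of that pattern, not Twisted- or Cassandra-specific (class and names invented):

import threading

class Closable(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.is_closed = False
        self.connected_event = threading.Event()

    def close(self):
        with self.lock:             # only one caller performs the shutdown
            if self.is_closed:
                return
            self.is_closed = True
        # ... release resources / error out pending requests here ...
        self.connected_event.set()  # wake anyone blocked waiting on the connection

c = Closable()
c.close()
c.close()  # second call is a no-op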
gwastro/pycbc-glue
pycbc_glue/datafind.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/datafind.py#L324-L392
def find_frame_urls(self, site, frametype, gpsstart, gpsend, match=None, urltype=None, on_gaps="warn"): """Find the framefiles for the given type in the [start, end) interval frame @param site: single-character name of site to match @param frametype: name of frametype to match @param gpsstart: integer GPS start time of query @param gpsend: integer GPS end time of query @param match: regular expression to match against @param urltype: file scheme to search for (e.g. 'file') @param on_gaps: what to do when the requested frame isn't found, one of: - C{'warn'} (default): print a warning, - C{'error'}: raise an L{RuntimeError}, or - C{'ignore'}: do nothing @type site: L{str} @type frametype: L{str} @type gpsstart: L{int} @type gpsend: L{int} @type match: L{str} @type urltype: L{str} @type on_gaps: L{str} @returns: L{Cache<pycbc_glue.lal.Cache>} @raises RuntimeError: if gaps are found and C{on_gaps='error'} """ if on_gaps not in ("warn", "error", "ignore"): raise ValueError("on_gaps must be 'warn', 'error', or 'ignore'.") url = ("%s/gwf/%s/%s/%s,%s" % (_url_prefix, site, frametype, gpsstart, gpsend)) # if a URL type is specified append it to the path if urltype: url += "/%s" % urltype # request JSON output url += ".json" # append a regex if input if match: url += "?match=%s" % match # make query response = self._requestresponse("GET", url) urllist = decode(response.read()) out = lal.Cache([lal.CacheEntry.from_T050017(x, coltype=self.LIGOTimeGPSType) for x in urllist]) if on_gaps == "ignore": return out else: span = segments.segment(gpsstart, gpsend) seglist = segments.segmentlist(e.segment for e in out).coalesce() missing = (segments.segmentlist([span]) - seglist).coalesce() if span in seglist: return out else: msg = "Missing segments: \n%s" % "\n".join(map(str, missing)) if on_gaps=="warn": sys.stderr.write("%s\n" % msg) return out else: raise RuntimeError(msg)
[ "def", "find_frame_urls", "(", "self", ",", "site", ",", "frametype", ",", "gpsstart", ",", "gpsend", ",", "match", "=", "None", ",", "urltype", "=", "None", ",", "on_gaps", "=", "\"warn\"", ")", ":", "if", "on_gaps", "not", "in", "(", "\"warn\"", ",", "\"error\"", ",", "\"ignore\"", ")", ":", "raise", "ValueError", "(", "\"on_gaps must be 'warn', 'error', or 'ignore'.\"", ")", "url", "=", "(", "\"%s/gwf/%s/%s/%s,%s\"", "%", "(", "_url_prefix", ",", "site", ",", "frametype", ",", "gpsstart", ",", "gpsend", ")", ")", "# if a URL type is specified append it to the path", "if", "urltype", ":", "url", "+=", "\"/%s\"", "%", "urltype", "# request JSON output", "url", "+=", "\".json\"", "# append a regex if input", "if", "match", ":", "url", "+=", "\"?match=%s\"", "%", "match", "# make query", "response", "=", "self", ".", "_requestresponse", "(", "\"GET\"", ",", "url", ")", "urllist", "=", "decode", "(", "response", ".", "read", "(", ")", ")", "out", "=", "lal", ".", "Cache", "(", "[", "lal", ".", "CacheEntry", ".", "from_T050017", "(", "x", ",", "coltype", "=", "self", ".", "LIGOTimeGPSType", ")", "for", "x", "in", "urllist", "]", ")", "if", "on_gaps", "==", "\"ignore\"", ":", "return", "out", "else", ":", "span", "=", "segments", ".", "segment", "(", "gpsstart", ",", "gpsend", ")", "seglist", "=", "segments", ".", "segmentlist", "(", "e", ".", "segment", "for", "e", "in", "out", ")", ".", "coalesce", "(", ")", "missing", "=", "(", "segments", ".", "segmentlist", "(", "[", "span", "]", ")", "-", "seglist", ")", ".", "coalesce", "(", ")", "if", "span", "in", "seglist", ":", "return", "out", "else", ":", "msg", "=", "\"Missing segments: \\n%s\"", "%", "\"\\n\"", ".", "join", "(", "map", "(", "str", ",", "missing", ")", ")", "if", "on_gaps", "==", "\"warn\"", ":", "sys", ".", "stderr", ".", "write", "(", "\"%s\\n\"", "%", "msg", ")", "return", "out", "else", ":", "raise", "RuntimeError", "(", "msg", ")" ]
Find the framefiles for the given type in the [start, end) interval frame @param site: single-character name of site to match @param frametype: name of frametype to match @param gpsstart: integer GPS start time of query @param gpsend: integer GPS end time of query @param match: regular expression to match against @param urltype: file scheme to search for (e.g. 'file') @param on_gaps: what to do when the requested frame isn't found, one of: - C{'warn'} (default): print a warning, - C{'error'}: raise an L{RuntimeError}, or - C{'ignore'}: do nothing @type site: L{str} @type frametype: L{str} @type gpsstart: L{int} @type gpsend: L{int} @type match: L{str} @type urltype: L{str} @type on_gaps: L{str} @returns: L{Cache<pycbc_glue.lal.Cache>} @raises RuntimeError: if gaps are found and C{on_gaps='error'}
[ "Find", "the", "framefiles", "for", "the", "given", "type", "in", "the", "[", "start", "end", ")", "interval", "frame" ]
python
train
36.898551
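The on_gaps handling above reduces to interval arithmetic: the requested span minus the union of returned segments. The same check with bare tuples, no segments library (values invented):

def find_gaps(span, intervals):
    """Return sub-spans of `span` not covered by merged (start, end) intervals."""
    gaps, cursor = [], span[0]
    for start, end in sorted(intervals):
        if start > cursor:
            gaps.append((cursor, min(start, span[1])))
        cursor = max(cursor, end)
    if cursor < span[1]:
        gaps.append((cursor, span[1]))
    return gaps

print(find_gaps((0, 100), [(0, 40), (60, 100)]))  # [(40, 60)]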
Kortemme-Lab/klab
klab/bio/pdb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L1719-L1741
def get_atom_sequence_to_rosetta_map(self): '''Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta. We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue. e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue. Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None. ''' if not self.rosetta_to_atom_sequence_maps and self.rosetta_sequences: raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.') atom_sequence_to_rosetta_mapping = {} for chain_id, mapping in self.rosetta_to_atom_sequence_maps.iteritems(): chain_mapping = {} for k in mapping: chain_mapping[k[1]] = k[0] atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap.from_dict(chain_mapping) # Add empty maps for missing chains for chain_id, sequence in self.atom_sequences.iteritems(): if not atom_sequence_to_rosetta_mapping.get(chain_id): atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap() return atom_sequence_to_rosetta_mapping
[ "def", "get_atom_sequence_to_rosetta_map", "(", "self", ")", ":", "if", "not", "self", ".", "rosetta_to_atom_sequence_maps", "and", "self", ".", "rosetta_sequences", ":", "raise", "Exception", "(", "'The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.'", ")", "atom_sequence_to_rosetta_mapping", "=", "{", "}", "for", "chain_id", ",", "mapping", "in", "self", ".", "rosetta_to_atom_sequence_maps", ".", "iteritems", "(", ")", ":", "chain_mapping", "=", "{", "}", "for", "k", "in", "mapping", ":", "chain_mapping", "[", "k", "[", "1", "]", "]", "=", "k", "[", "0", "]", "atom_sequence_to_rosetta_mapping", "[", "chain_id", "]", "=", "SequenceMap", ".", "from_dict", "(", "chain_mapping", ")", "# Add empty maps for missing chains", "for", "chain_id", ",", "sequence", "in", "self", ".", "atom_sequences", ".", "iteritems", "(", ")", ":", "if", "not", "atom_sequence_to_rosetta_mapping", ".", "get", "(", "chain_id", ")", ":", "atom_sequence_to_rosetta_mapping", "[", "chain_id", "]", "=", "SequenceMap", "(", ")", "return", "atom_sequence_to_rosetta_mapping" ]
Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta. We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue. e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue. Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None.
[ "Uses", "the", "Rosetta", "-", ">", "ATOM", "injective", "map", "to", "construct", "an", "injective", "mapping", "from", "ATOM", "-", ">", "Rosetta", ".", "We", "do", "not", "extend", "the", "injection", "to", "include", "ATOM", "residues", "which", "have", "no", "corresponding", "Rosetta", "residue", ".", "e", ".", "g", ".", "atom_sequence_to_rosetta_mapping", "[", "c", "]", ".", "map", ".", "get", "(", "A", "45", ")", "will", "return", "None", "if", "there", "is", "no", "corresponding", "Rosetta", "residue", "those", "residues", "to", "None", ".", "Likewise", "if", "a", "PDB", "chain", "c", "is", "not", "present", "in", "the", "Rosetta", "model", "then", "atom_sequence_to_rosetta_mapping", "[", "c", "]", ".", "map", ".", "get", "(", "s", ")", "returns", "None", "." ]
python
train
60.956522
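Stripped of the PDB machinery, the method above inverts an injective per-chain mapping; the core move with plain dicts (the residue keys are invented):

rosetta_to_atom = {1: 'A  45 ', 2: 'A  46 '}  # hypothetical Rosetta index -> ATOM residue id

atom_to_rosetta = {atom: ros for ros, atom in rosetta_to_atom.items()}

print(atom_to_rosetta.get('A  45 '))  # 1
print(atom_to_rosetta.get('A  99 '))  # None -> no corresponding Rosetta residue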
pypa/pipenv
pipenv/vendor/distlib/index.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/index.py#L466-L507
def encode_request(self, fields, files): """ Encode fields and files for posting to an HTTP server. :param fields: The fields to send as a list of (fieldname, value) tuples. :param files: The files to send as a list of (fieldname, filename, file_bytes) tuple. """ # Adapted from packaging, which in turn was adapted from # http://code.activestate.com/recipes/146306 parts = [] boundary = self.boundary for k, values in fields: if not isinstance(values, (list, tuple)): values = [values] for v in values: parts.extend(( b'--' + boundary, ('Content-Disposition: form-data; name="%s"' % k).encode('utf-8'), b'', v.encode('utf-8'))) for key, filename, value in files: parts.extend(( b'--' + boundary, ('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)).encode('utf-8'), b'', value)) parts.extend((b'--' + boundary + b'--', b'')) body = b'\r\n'.join(parts) ct = b'multipart/form-data; boundary=' + boundary headers = { 'Content-type': ct, 'Content-length': str(len(body)) } return Request(self.url, body, headers)
[ "def", "encode_request", "(", "self", ",", "fields", ",", "files", ")", ":", "# Adapted from packaging, which in turn was adapted from", "# http://code.activestate.com/recipes/146306", "parts", "=", "[", "]", "boundary", "=", "self", ".", "boundary", "for", "k", ",", "values", "in", "fields", ":", "if", "not", "isinstance", "(", "values", ",", "(", "list", ",", "tuple", ")", ")", ":", "values", "=", "[", "values", "]", "for", "v", "in", "values", ":", "parts", ".", "extend", "(", "(", "b'--'", "+", "boundary", ",", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "k", ")", ".", "encode", "(", "'utf-8'", ")", ",", "b''", ",", "v", ".", "encode", "(", "'utf-8'", ")", ")", ")", "for", "key", ",", "filename", ",", "value", "in", "files", ":", "parts", ".", "extend", "(", "(", "b'--'", "+", "boundary", ",", "(", "'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"'", "%", "(", "key", ",", "filename", ")", ")", ".", "encode", "(", "'utf-8'", ")", ",", "b''", ",", "value", ")", ")", "parts", ".", "extend", "(", "(", "b'--'", "+", "boundary", "+", "b'--'", ",", "b''", ")", ")", "body", "=", "b'\\r\\n'", ".", "join", "(", "parts", ")", "ct", "=", "b'multipart/form-data; boundary='", "+", "boundary", "headers", "=", "{", "'Content-type'", ":", "ct", ",", "'Content-length'", ":", "str", "(", "len", "(", "body", ")", ")", "}", "return", "Request", "(", "self", ".", "url", ",", "body", ",", "headers", ")" ]
Encode fields and files for posting to an HTTP server. :param fields: The fields to send as a list of (fieldname, value) tuples. :param files: The files to send as a list of (fieldname, filename, file_bytes) tuple.
[ "Encode", "fields", "and", "files", "for", "posting", "to", "an", "HTTP", "server", "." ]
python
train
34.619048
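A sketch of the multipart/form-data layout the record builds, using only string/bytes operations; the boundary, field names, and file bytes are all invented, and this simplification skips the record's handling of list-valued fields:

boundary = b'df0291b6b4'  # hypothetical; the real class generates its own boundary
fields = [(':action', 'submit'), ('name', 'demo')]             # invented form fields
files = [('content', 'demo-1.0.tar.gz', b'<archive bytes>')]  # invented file tuple

parts = []
for key, value in fields:
    parts.extend((b'--' + boundary,
                  ('Content-Disposition: form-data; name="%s"' % key).encode('utf-8'),
                  b'', value.encode('utf-8')))
for key, filename, payload in files:
    parts.extend((b'--' + boundary,
                  ('Content-Disposition: form-data; name="%s"; filename="%s"'
                   % (key, filename)).encode('utf-8'),
                  b'', payload))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
print(body.decode('utf-8'))  # the wire format: boundary-delimited, CRLF line breaks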
by46/simplekit
simplekit/objson/dolphin2.py
https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/objson/dolphin2.py#L78-L95
def dumps(obj, *args, **kwargs): """Serialize an object to string Basic Usage: >>> import simplekit.objson >>> obj = {'name':'wendy'} >>> print simplekit.objson.dumps(obj) :param obj: an object which needs to be dumped :param args: Optional arguments that :func:`json.dumps` takes. :param kwargs: Keys arguments that :py:func:`json.dumps` takes. :return: string """ kwargs['default'] = object2dict return json.dumps(obj, *args, **kwargs)
[ "def", "dumps", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'default'", "]", "=", "object2dict", "return", "json", ".", "dumps", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Serialize an object to string Basic Usage: >>> import simplekit.objson >>> obj = {'name':'wendy'} >>> print simplekit.objson.dumps(obj) :param obj: an object which needs to be dumped :param args: Optional arguments that :func:`json.dumps` takes. :param kwargs: Keys arguments that :py:func:`json.dumps` takes. :return: string
[ "Serialize", "a", "object", "to", "string" ]
python
train
25.833333
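The key move in the dumps record is routing unknown objects through json's default hook; the same idea with a plain __dict__-based hook (the class and hook name are invented stand-ins for simplekit's object2dict):

import json

class User(object):
    def __init__(self, name):
        self.name = name

def object2dict_sketch(obj):
    """Hypothetical stand-in: serialize arbitrary objects via their __dict__."""
    return vars(obj)

print(json.dumps({'owner': User('wendy')}, default=object2dict_sketch))
# {"owner": {"name": "wendy"}}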
sassoo/goldman
goldman/deserializers/jsonapi.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/deserializers/jsonapi.py#L144-L189
def _parse_relationships(self, relationships): """ Ensure compliance with the spec's relationships section Specifically, the relationships object of the single resource object. For modifications we only support relationships via the `data` key referred to as Resource Linkage. :param relationships: dict JSON API relationships object """ link = 'jsonapi.org/format/#document-resource-object-relationships' if not isinstance(relationships, dict): self.fail('The JSON API resource object relationships key MUST ' 'be a hash & comply with the spec\'s resource linkage ' 'section.', link) for key, val in relationships.items(): if not isinstance(val, dict) or 'data' not in val: self.fail('Relationship key %s MUST be a hash & contain ' 'a `data` field compliant with the spec\'s ' 'resource linkage section.' % key, link) elif isinstance(val['data'], dict): data = val['data'] rid = isinstance(data.get('id'), unicode) rtype = isinstance(data.get('type'), unicode) if not rid or not rtype: self.fail('%s relationship\'s resource linkage MUST ' 'contain `id` & `type` fields. Additionally, ' 'they must both be strings.' % key, link) elif isinstance(val['data'], list): abort(exceptions.ModificationDenied(**{ 'detail': 'Modifying the %s relationship or any to-many ' 'relationships for that matter are is not ' 'currently supported. Instead, modify the ' 'to-one side directly.' % key, 'links': link, })) elif val['data']: self.fail('The relationship key %s is malformed & impossible ' 'for us to understand your intentions. It MUST be ' 'a hash & contain a `data` field compliant with ' 'the spec\'s resource linkage section or null if ' 'you want to unset the relationship.' % key, link)
[ "def", "_parse_relationships", "(", "self", ",", "relationships", ")", ":", "link", "=", "'jsonapi.org/format/#document-resource-object-relationships'", "if", "not", "isinstance", "(", "relationships", ",", "dict", ")", ":", "self", ".", "fail", "(", "'The JSON API resource object relationships key MUST '", "'be a hash & comply with the spec\\'s resource linkage '", "'section.'", ",", "link", ")", "for", "key", ",", "val", "in", "relationships", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "val", ",", "dict", ")", "or", "'data'", "not", "in", "val", ":", "self", ".", "fail", "(", "'Relationship key %s MUST be a hash & contain '", "'a `data` field compliant with the spec\\'s '", "'resource linkage section.'", "%", "key", ",", "link", ")", "elif", "isinstance", "(", "val", "[", "'data'", "]", ",", "dict", ")", ":", "data", "=", "val", "[", "'data'", "]", "rid", "=", "isinstance", "(", "data", ".", "get", "(", "'id'", ")", ",", "unicode", ")", "rtype", "=", "isinstance", "(", "data", ".", "get", "(", "'type'", ")", ",", "unicode", ")", "if", "not", "rid", "or", "not", "rtype", ":", "self", ".", "fail", "(", "'%s relationship\\'s resource linkage MUST '", "'contain `id` & `type` fields. Additionally, '", "'they must both be strings.'", "%", "key", ",", "link", ")", "elif", "isinstance", "(", "val", "[", "'data'", "]", ",", "list", ")", ":", "abort", "(", "exceptions", ".", "ModificationDenied", "(", "*", "*", "{", "'detail'", ":", "'Modifying the %s relationship or any to-many '", "'relationships for that matter are is not '", "'currently supported. Instead, modify the '", "'to-one side directly.'", "%", "key", ",", "'links'", ":", "link", ",", "}", ")", ")", "elif", "val", "[", "'data'", "]", ":", "self", ".", "fail", "(", "'The relationship key %s is malformed & impossible '", "'for us to understand your intentions. It MUST be '", "'a hash & contain a `data` field compliant with '", "'the spec\\'s resource linkage section or null if '", "'you want to unset the relationship.'", "%", "key", ",", "link", ")" ]
Ensure compliance with the spec's relationships section Specifically, the relationships object of the single resource object. For modifications we only support relationships via the `data` key referred to as Resource Linkage. :param relationships: dict JSON API relationships object
[ "Ensure", "compliance", "with", "the", "spec", "s", "relationships", "section" ]
python
train
50.630435
vertexproject/synapse
synapse/lib/urlhelp.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/urlhelp.py#L3-L69
def chopurl(url): ''' A sane "stand alone" url parser. Example: info = chopurl(url) ''' ret = {} if url.find('://') == -1: raise s_exc.BadUrl(':// not found in [{}]!'.format(url)) scheme, remain = url.split('://', 1) ret['scheme'] = scheme.lower() # carve query params from the end if remain.find('?') != -1: query = {} remain, queryrem = remain.split('?', 1) for qkey in queryrem.split('&'): qval = None if qkey.find('=') != -1: qkey, qval = qkey.split('=', 1) query[qkey] = qval ret['query'] = query pathrem = '' slashoff = remain.find('/') if slashoff != -1: pathrem = remain[slashoff:] remain = remain[:slashoff] # detect user[:passwd]@netloc syntax if remain.find('@') != -1: user, remain = remain.rsplit('@', 1) if user.find(':') != -1: user, passwd = user.split(':', 1) ret['passwd'] = passwd ret['user'] = user # remain should be down to host[:port] # detect ipv6 [addr]:port syntax if remain.startswith('['): hostrem, portstr = remain.rsplit(':', 1) ret['port'] = int(portstr) ret['host'] = hostrem[1:-1] # detect ipv6 without port syntax elif remain.count(':') > 1: ret['host'] = remain # regular old host or host:port syntax else: if remain.find(':') != -1: remain, portstr = remain.split(':', 1) ret['port'] = int(portstr) ret['host'] = remain ret['path'] = pathrem return ret
[ "def", "chopurl", "(", "url", ")", ":", "ret", "=", "{", "}", "if", "url", ".", "find", "(", "'://'", ")", "==", "-", "1", ":", "raise", "s_exc", ".", "BadUrl", "(", "':// not found in [{}]!'", ".", "format", "(", "url", ")", ")", "scheme", ",", "remain", "=", "url", ".", "split", "(", "'://'", ",", "1", ")", "ret", "[", "'scheme'", "]", "=", "scheme", ".", "lower", "(", ")", "# carve query params from the end", "if", "remain", ".", "find", "(", "'?'", ")", "!=", "-", "1", ":", "query", "=", "{", "}", "remain", ",", "queryrem", "=", "remain", ".", "split", "(", "'?'", ",", "1", ")", "for", "qkey", "in", "queryrem", ".", "split", "(", "'&'", ")", ":", "qval", "=", "None", "if", "qkey", ".", "find", "(", "'='", ")", "!=", "-", "1", ":", "qkey", ",", "qval", "=", "qkey", ".", "split", "(", "'='", ",", "1", ")", "query", "[", "qkey", "]", "=", "qval", "ret", "[", "'query'", "]", "=", "query", "pathrem", "=", "''", "slashoff", "=", "remain", ".", "find", "(", "'/'", ")", "if", "slashoff", "!=", "-", "1", ":", "pathrem", "=", "remain", "[", "slashoff", ":", "]", "remain", "=", "remain", "[", ":", "slashoff", "]", "# detect user[:passwd]@netloc syntax", "if", "remain", ".", "find", "(", "'@'", ")", "!=", "-", "1", ":", "user", ",", "remain", "=", "remain", ".", "rsplit", "(", "'@'", ",", "1", ")", "if", "user", ".", "find", "(", "':'", ")", "!=", "-", "1", ":", "user", ",", "passwd", "=", "user", ".", "split", "(", "':'", ",", "1", ")", "ret", "[", "'passwd'", "]", "=", "passwd", "ret", "[", "'user'", "]", "=", "user", "# remain should be down to host[:port]", "# detect ipv6 [addr]:port syntax", "if", "remain", ".", "startswith", "(", "'['", ")", ":", "hostrem", ",", "portstr", "=", "remain", ".", "rsplit", "(", "':'", ",", "1", ")", "ret", "[", "'port'", "]", "=", "int", "(", "portstr", ")", "ret", "[", "'host'", "]", "=", "hostrem", "[", "1", ":", "-", "1", "]", "# detect ipv6 without port syntax", "elif", "remain", ".", "count", "(", "':'", ")", ">", "1", ":", "ret", "[", "'host'", "]", "=", "remain", "# regular old host or host:port syntax", "else", ":", "if", "remain", ".", "find", "(", "':'", ")", "!=", "-", "1", ":", "remain", ",", "portstr", "=", "remain", ".", "split", "(", "':'", ",", "1", ")", "ret", "[", "'port'", "]", "=", "int", "(", "portstr", ")", "ret", "[", "'host'", "]", "=", "remain", "ret", "[", "'path'", "]", "=", "pathrem", "return", "ret" ]
A sane "stand alone" url parser. Example: info = chopurl(url)
[ "A", "sane", "stand", "alone", "url", "parser", "." ]
python
train
23.447761
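A usage sketch for chopurl above; the URL is invented to exercise the user/password, port, path, and query branches:

info = chopurl('tcp://visi:secret@cortex.vertex.link:27492/api/v1?limit=10&debug')
print(info['scheme'])                # tcp
print(info['user'], info['passwd'])  # visi secret
print(info['host'], info['port'])    # cortex.vertex.link 27492
print(info['path'])                  # /api/v1
print(info['query'])                 # {'limit': '10', 'debug': None}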
yhat/ggpy
ggplot/colors/palettes.py
https://github.com/yhat/ggpy/blob/b6d23c22d52557b983da8ce7a3a6992501dadcd6/ggplot/colors/palettes.py#L67-L165
def color_palette(name=None, n_colors=6, desat=None): """Return a list of colors defining a color palette. Availible seaborn palette names: deep, muted, bright, pastel, dark, colorblind Other options: hls, husl, any matplotlib palette Matplotlib paletes can be specified as reversed palettes by appending "_r" to the name or as dark palettes by appending "_d" to the name. This function can also be used in a ``with`` statement to temporarily set the color cycle for a plot or set of plots. Parameters ---------- name: None, string, or sequence Name of palette or None to return current palette. If a sequence, input colors are used but possibly cycled and desaturated. n_colors : int Number of colors in the palette. If larger than the number of colors in the palette, they will cycle. desat : float Value to desaturate each color by. Returns ------- palette : list of RGB tuples. Color palette. Examples -------- >>> p = color_palette("muted") >>> p = color_palette("Blues_d", 10) >>> p = color_palette("Set1", desat=.7) >>> import matplotlib.pyplot as plt >>> with color_palette("husl", 8): ... f, ax = plt.subplots() ... ax.plot(x, y) # doctest: +SKIP See Also -------- set_palette : set the default color cycle for all plots. axes_style : define parameters to set the style of plots plotting_context : define parameters to scale plot elements """ seaborn_palettes = dict( deep=["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"], muted=["#4878CF", "#6ACC65", "#D65F5F", "#B47CC7", "#C4AD66", "#77BEDB"], pastel=["#92C6FF", "#97F0AA", "#FF9F9A", "#D0BBFF", "#FFFEA3", "#B0E0E6"], bright=["#003FFF", "#03ED3A", "#E8000B", "#8A2BE2", "#FFC400", "#00D7FF"], dark=["#001C7F", "#017517", "#8C0900", "#7600A1", "#B8860B", "#006374"], colorblind=["#0072B2", "#009E73", "#D55E00", "#CC79A7", "#F0E442", "#56B4E9"], ) if name is None: palette = mpl.rcParams["axes.color_cycle"] elif not isinstance(name, string_types): palette = name elif name == "hls": palette = hls_palette(n_colors) elif name == "husl": palette = husl_palette(n_colors) elif name in seaborn_palettes: palette = seaborn_palettes[name] elif name in dir(mpl.cm): palette = mpl_palette(name, n_colors) elif name[:-2] in dir(mpl.cm): palette = mpl_palette(name, n_colors) else: raise ValueError("%s is not a valid palette name" % name) if desat is not None: palette = [desaturate(c, desat) for c in palette] # Always return as many colors as we asked for pal_cycle = cycle(palette) palette = [next(pal_cycle) for _ in range(n_colors)] # Always return in r, g, b tuple format try: palette = map(mpl.colors.colorConverter.to_rgb, palette) palette = _ColorPalette(palette) except ValueError: raise ValueError("Could not generate a palette for %s" % str(name)) return palette
[ "def", "color_palette", "(", "name", "=", "None", ",", "n_colors", "=", "6", ",", "desat", "=", "None", ")", ":", "seaborn_palettes", "=", "dict", "(", "deep", "=", "[", "\"#4C72B0\"", ",", "\"#55A868\"", ",", "\"#C44E52\"", ",", "\"#8172B2\"", ",", "\"#CCB974\"", ",", "\"#64B5CD\"", "]", ",", "muted", "=", "[", "\"#4878CF\"", ",", "\"#6ACC65\"", ",", "\"#D65F5F\"", ",", "\"#B47CC7\"", ",", "\"#C4AD66\"", ",", "\"#77BEDB\"", "]", ",", "pastel", "=", "[", "\"#92C6FF\"", ",", "\"#97F0AA\"", ",", "\"#FF9F9A\"", ",", "\"#D0BBFF\"", ",", "\"#FFFEA3\"", ",", "\"#B0E0E6\"", "]", ",", "bright", "=", "[", "\"#003FFF\"", ",", "\"#03ED3A\"", ",", "\"#E8000B\"", ",", "\"#8A2BE2\"", ",", "\"#FFC400\"", ",", "\"#00D7FF\"", "]", ",", "dark", "=", "[", "\"#001C7F\"", ",", "\"#017517\"", ",", "\"#8C0900\"", ",", "\"#7600A1\"", ",", "\"#B8860B\"", ",", "\"#006374\"", "]", ",", "colorblind", "=", "[", "\"#0072B2\"", ",", "\"#009E73\"", ",", "\"#D55E00\"", ",", "\"#CC79A7\"", ",", "\"#F0E442\"", ",", "\"#56B4E9\"", "]", ",", ")", "if", "name", "is", "None", ":", "palette", "=", "mpl", ".", "rcParams", "[", "\"axes.color_cycle\"", "]", "elif", "not", "isinstance", "(", "name", ",", "string_types", ")", ":", "palette", "=", "name", "elif", "name", "==", "\"hls\"", ":", "palette", "=", "hls_palette", "(", "n_colors", ")", "elif", "name", "==", "\"husl\"", ":", "palette", "=", "husl_palette", "(", "n_colors", ")", "elif", "name", "in", "seaborn_palettes", ":", "palette", "=", "seaborn_palettes", "[", "name", "]", "elif", "name", "in", "dir", "(", "mpl", ".", "cm", ")", ":", "palette", "=", "mpl_palette", "(", "name", ",", "n_colors", ")", "elif", "name", "[", ":", "-", "2", "]", "in", "dir", "(", "mpl", ".", "cm", ")", ":", "palette", "=", "mpl_palette", "(", "name", ",", "n_colors", ")", "else", ":", "raise", "ValueError", "(", "\"%s is not a valid palette name\"", "%", "name", ")", "if", "desat", "is", "not", "None", ":", "palette", "=", "[", "desaturate", "(", "c", ",", "desat", ")", "for", "c", "in", "palette", "]", "# Always return as many colors as we asked for", "pal_cycle", "=", "cycle", "(", "palette", ")", "palette", "=", "[", "next", "(", "pal_cycle", ")", "for", "_", "in", "range", "(", "n_colors", ")", "]", "# Always return in r, g, b tuple format", "try", ":", "palette", "=", "map", "(", "mpl", ".", "colors", ".", "colorConverter", ".", "to_rgb", ",", "palette", ")", "palette", "=", "_ColorPalette", "(", "palette", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Could not generate a palette for %s\"", "%", "str", "(", "name", ")", ")", "return", "palette" ]
Return a list of colors defining a color palette. Availible seaborn palette names: deep, muted, bright, pastel, dark, colorblind Other options: hls, husl, any matplotlib palette Matplotlib paletes can be specified as reversed palettes by appending "_r" to the name or as dark palettes by appending "_d" to the name. This function can also be used in a ``with`` statement to temporarily set the color cycle for a plot or set of plots. Parameters ---------- name: None, string, or sequence Name of palette or None to return current palette. If a sequence, input colors are used but possibly cycled and desaturated. n_colors : int Number of colors in the palette. If larger than the number of colors in the palette, they will cycle. desat : float Value to desaturate each color by. Returns ------- palette : list of RGB tuples. Color palette. Examples -------- >>> p = color_palette("muted") >>> p = color_palette("Blues_d", 10) >>> p = color_palette("Set1", desat=.7) >>> import matplotlib.pyplot as plt >>> with color_palette("husl", 8): ... f, ax = plt.subplots() ... ax.plot(x, y) # doctest: +SKIP See Also -------- set_palette : set the default color cycle for all plots. axes_style : define parameters to set the style of plots plotting_context : define parameters to scale plot elements
[ "Return", "a", "list", "of", "colors", "defining", "a", "color", "palette", "." ]
python
train
32.252525
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1800-L1810
def _get_cpu_virtualization(self): """get cpu virtualization status.""" try: cpu_vt = self._get_bios_setting('ProcVirtualization') except exception.IloCommandNotSupportedError: return False if cpu_vt == 'Enabled': vt_status = True else: vt_status = False return vt_status
[ "def", "_get_cpu_virtualization", "(", "self", ")", ":", "try", ":", "cpu_vt", "=", "self", ".", "_get_bios_setting", "(", "'ProcVirtualization'", ")", "except", "exception", ".", "IloCommandNotSupportedError", ":", "return", "False", "if", "cpu_vt", "==", "'Enabled'", ":", "vt_status", "=", "True", "else", ":", "vt_status", "=", "False", "return", "vt_status" ]
get cpu virtualization status.
[ "get", "cpu", "virtualization", "status", "." ]
python
train
32.454545
Hackerfleet/hfos
modules/maps/hfos/map/TileTools.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/maps/hfos/map/TileTools.py#L30-L38
def getGroundResolution(self, latitude, level):
        '''
        returns the ground resolution based on latitude and zoom level.
        '''
        latitude = self.clipValue(latitude, self.min_lat, self.max_lat)
        mapSize = self.getMapDimensionsByZoomLevel(level)
        return math.cos(
            latitude * math.pi / 180) * 2 * math.pi * self.earth_radius / \
            mapSize
[ "def", "getGroundResolution", "(", "self", ",", "latitude", ",", "level", ")", ":", "latitude", "=", "self", ".", "clipValue", "(", "latitude", ",", "self", ".", "min_lat", ",", "self", ".", "max_lat", ")", "mapSize", "=", "self", ".", "getMapDimensionsByZoomLevel", "(", "level", ")", "return", "math", ".", "cos", "(", "latitude", "*", "math", ".", "pi", "/", "180", ")", "*", "2", "*", "math", ".", "pi", "*", "self", ".", "earth_radius", "/", "mapSize" ]
returns the ground resolution based on latitude and zoom level.
[ "returns", "the", "ground", "resolution", "for", "based", "on", "latitude", "and", "zoom", "level", "." ]
python
train
43.777778
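The return value above is the standard Web Mercator ground resolution in metres per pixel. A minimal standalone sketch of the same computation, assuming a 256 * 2**level pixel map width and the WGS84 radius of 6378137 m (the class's own getMapDimensionsByZoomLevel and earth_radius may differ):

import math

EARTH_RADIUS = 6378137  # metres, WGS84 (assumed value)

def ground_resolution(latitude, level, tile_size=256):
    # metres per pixel at the given latitude and zoom level
    map_size = tile_size * 2 ** level  # assumed map dimension
    return math.cos(latitude * math.pi / 180) * 2 * math.pi * EARTH_RADIUS / map_size

print(ground_resolution(0.0, 0))  # ~156543 m/px at the equator, zoom 0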
proteanhq/protean
src/protean/utils/importlib.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/utils/importlib.py#L6-L17
def perform_import(val): """ If the given setting is a string import notation, then perform the necessary import or imports. """ if val is None: return None elif isinstance(val, str): return import_from_string(val) elif isinstance(val, (list, tuple)): return [import_from_string(item) for item in val] return val
[ "def", "perform_import", "(", "val", ")", ":", "if", "val", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "val", ",", "str", ")", ":", "return", "import_from_string", "(", "val", ")", "elif", "isinstance", "(", "val", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "import_from_string", "(", "item", ")", "for", "item", "in", "val", "]", "return", "val" ]
If the given setting is a string import notation, then perform the necessary import or imports.
[ "If", "the", "given", "setting", "is", "a", "string", "import", "notation", "then", "perform", "the", "necessary", "import", "or", "imports", "." ]
python
train
29.75
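A minimal usage sketch of perform_import; the dotted paths are illustrative stand-ins for configuration values, and the resolution semantics come from the companion import_from_string helper in the same module:

from protean.utils.importlib import perform_import  # assumes the package is installed

handler = perform_import('json.dumps')                   # string -> imported object
handlers = perform_import(['json.dumps', 'json.loads'])  # list -> list of objects
nothing = perform_import(None)                           # None passes through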
kivy/buildozer
buildozer/targets/android.py
https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/targets/android.py#L1159-L1174
def cmd_adb(self, *args): ''' Run adb from the Android SDK. Args must come after --, or use --alias to make an alias ''' self.check_requirements() self.install_platform() args = args[0] if args and args[0] == '--alias': print('To set up ADB in this shell session, execute:') print(' alias adb=$(buildozer {} adb --alias 2>&1 >/dev/null)' .format(self.targetname)) sys.stderr.write(self.adb_cmd + '\n') else: self.buildozer.cmd(' '.join([self.adb_cmd] + args))
[ "def", "cmd_adb", "(", "self", ",", "*", "args", ")", ":", "self", ".", "check_requirements", "(", ")", "self", ".", "install_platform", "(", ")", "args", "=", "args", "[", "0", "]", "if", "args", "and", "args", "[", "0", "]", "==", "'--alias'", ":", "print", "(", "'To set up ADB in this shell session, execute:'", ")", "print", "(", "' alias adb=$(buildozer {} adb --alias 2>&1 >/dev/null)'", ".", "format", "(", "self", ".", "targetname", ")", ")", "sys", ".", "stderr", ".", "write", "(", "self", ".", "adb_cmd", "+", "'\\n'", ")", "else", ":", "self", ".", "buildozer", ".", "cmd", "(", "' '", ".", "join", "(", "[", "self", ".", "adb_cmd", "]", "+", "args", ")", ")" ]
Run adb from the Android SDK. Args must come after --, or use --alias to make an alias
[ "Run", "adb", "from", "the", "Android", "SDK", ".", "Args", "must", "come", "after", "--", "or", "use", "--", "alias", "to", "make", "an", "alias" ]
python
train
37.0625
ChristianTremblay/BAC0
BAC0/core/io/Simulate.py
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/io/Simulate.py#L87-L111
def release(self, args):
        """
        Set the Out_Of_Service property to False - to release the I/O point back to
        the controller's control.

        :param args: String with <addr> <type> <inst>
        """
        if not self._started:
            raise ApplicationNotStarted("BACnet stack not running - use startApp()")
        args = args.split()
        addr, obj_type, obj_inst = args[:3]
        try:
            self.write("{} {} {} outOfService False".format(addr, obj_type, obj_inst))
        except NoResponseFromController:
            pass

        try:
            if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
                raise OutOfServiceSet()
            else:
                pass  # Everything is ok
        except NoResponseFromController:
            pass
[ "def", "release", "(", "self", ",", "args", ")", ":", "if", "not", "self", ".", "_started", ":", "raise", "ApplicationNotStarted", "(", "\"BACnet stack not running - use startApp()\"", ")", "args", "=", "args", ".", "split", "(", ")", "addr", ",", "obj_type", ",", "obj_inst", "=", "args", "[", ":", "3", "]", "try", ":", "self", ".", "write", "(", "\"{} {} {} outOfService False\"", ".", "format", "(", "addr", ",", "obj_type", ",", "obj_inst", ")", ")", "except", "NoResponseFromController", ":", "pass", "try", ":", "if", "self", ".", "read", "(", "\"{} {} {} outOfService\"", ".", "format", "(", "addr", ",", "obj_type", ",", "obj_inst", ")", ")", ":", "raise", "OutOfServiceSet", "(", ")", "else", ":", "pass", "# Everything is ok\"\r", "except", "NoResponseFromController", ":", "pass" ]
Set the Out_Of_Service property to False - to release the I/O point back to the controller's control. :param args: String with <addr> <type> <inst>
[ "Set", "the", "Out_Of_Service", "property", "to", "False", "-", "to", "release", "the", "I", "/", "O", "point", "back", "to", "the", "controller", "s", "control", ".", ":", "param", "args", ":", "String", "with", "<addr", ">", "<type", ">", "<inst", ">" ]
python
train
33
jtambasco/modesolverpy
modesolverpy/mode_solver.py
https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/mode_solver.py#L548-L610
def write_modes_to_file(self, filename="mode.dat", plot=True, analyse=True):
        """
        Writes the mode fields to a file and optionally plots them.

        Args:
            filename (str): The nominal filename to use for the saved
                data. The suffix will automatically be changed to
                identify each mode number. Default is 'mode.dat'
            plot (bool): `True` if plots should be generated,
                otherwise `False`. Default is `True`.
            analyse (bool): `True` if an analysis on the fundamental
                mode should be performed. The analysis adds to the
                plot of the fundamental mode the power mode-field
                diameter (MFD) and marks it on the output, and it
                marks with a cross the maximum E-field value.
                Default is `True`.

        Returns:
            dict: A dictionary containing the effective indices
            and mode field profiles (if solved for).
        """
        modes_directory = "./modes_semi_vec/"
        if not os.path.isdir(modes_directory):
            os.mkdir(modes_directory)
        filename = modes_directory + filename

        for i, mode in enumerate(self._ms.modes):
            filename_mode = self._get_mode_filename(
                self._semi_vectorial_method, i, filename
            )
            self._write_mode_to_file(np.real(mode), filename_mode)
            if plot:
                if i == 0 and analyse:
                    A, centre, sigma_2 = anal.fit_gaussian(
                        self._structure.xc, self._structure.yc, np.abs(mode)
                    )
                    subtitle = (
                        "E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, "
                        "MFD_{y} = %.3f"
                    ) % (A, centre[0], centre[1], sigma_2[0], sigma_2[1])
                    self._plot_mode(
                        self._semi_vectorial_method,
                        i,
                        filename_mode,
                        self.n_effs[i],
                        subtitle,
                        sigma_2[0],
                        sigma_2[1],
                        centre[0],
                        centre[1],
                        wavelength=self._structure._wl,
                    )
                else:
                    self._plot_mode(
                        self._semi_vectorial_method,
                        i,
                        filename_mode,
                        self.n_effs[i],
                        wavelength=self._structure._wl,
                    )

        return self.modes
[ "def", "write_modes_to_file", "(", "self", ",", "filename", "=", "\"mode.dat\"", ",", "plot", "=", "True", ",", "analyse", "=", "True", ")", ":", "modes_directory", "=", "\"./modes_semi_vec/\"", "if", "not", "os", ".", "path", ".", "isdir", "(", "modes_directory", ")", ":", "os", ".", "mkdir", "(", "modes_directory", ")", "filename", "=", "modes_directory", "+", "filename", "for", "i", ",", "mode", "in", "enumerate", "(", "self", ".", "_ms", ".", "modes", ")", ":", "filename_mode", "=", "self", ".", "_get_mode_filename", "(", "self", ".", "_semi_vectorial_method", ",", "i", ",", "filename", ")", "self", ".", "_write_mode_to_file", "(", "np", ".", "real", "(", "mode", ")", ",", "filename_mode", ")", "if", "plot", ":", "if", "i", "==", "0", "and", "analyse", ":", "A", ",", "centre", ",", "sigma_2", "=", "anal", ".", "fit_gaussian", "(", "self", ".", "_structure", ".", "xc", ",", "self", ".", "_structure", ".", "yc", ",", "np", ".", "abs", "(", "mode", ")", ")", "subtitle", "=", "(", "\"E_{max} = %.3f, (x_{max}, y_{max}) = (%.3f, %.3f), MFD_{x} = %.3f, \"", "\"MFD_{y} = %.3f\"", ")", "%", "(", "A", ",", "centre", "[", "0", "]", ",", "centre", "[", "1", "]", ",", "sigma_2", "[", "0", "]", ",", "sigma_2", "[", "1", "]", ")", "self", ".", "_plot_mode", "(", "self", ".", "_semi_vectorial_method", ",", "i", ",", "filename_mode", ",", "self", ".", "n_effs", "[", "i", "]", ",", "subtitle", ",", "sigma_2", "[", "0", "]", ",", "sigma_2", "[", "1", "]", ",", "centre", "[", "0", "]", ",", "centre", "[", "1", "]", ",", "wavelength", "=", "self", ".", "_structure", ".", "_wl", ",", ")", "else", ":", "self", ".", "_plot_mode", "(", "self", ".", "_semi_vectorial_method", ",", "i", ",", "filename_mode", ",", "self", ".", "n_effs", "[", "i", "]", ",", "wavelength", "=", "self", ".", "_structure", ".", "_wl", ",", ")", "return", "self", ".", "modes" ]
Writes the mode fields to a file and optionally plots them.

Args:
    filename (str): The nominal filename to use for the saved
        data. The suffix will automatically be changed to
        identify each mode number. Default is 'mode.dat'
    plot (bool): `True` if plots should be generated,
        otherwise `False`. Default is `True`.
    analyse (bool): `True` if an analysis on the fundamental
        mode should be performed. The analysis adds to the
        plot of the fundamental mode the power mode-field
        diameter (MFD) and marks it on the output, and it
        marks with a cross the maximum E-field value.
        Default is `True`.

Returns:
    dict: A dictionary containing the effective indices
    and mode field profiles (if solved for).
[ "Writes", "the", "mode", "fields", "to", "a", "file", "and", "optionally", "plots", "them", "." ]
python
train
41.269841
tensorflow/hub
tensorflow_hub/keras_layer.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/keras_layer.py#L138-L142
def _add_existing_weight(self, weight, trainable=None): """Calls add_weight() to register but not create an existing weight.""" if trainable is None: trainable = weight.trainable self.add_weight(name=weight.name, shape=weight.shape, dtype=weight.dtype, trainable=trainable, getter=lambda *_, **__: weight)
[ "def", "_add_existing_weight", "(", "self", ",", "weight", ",", "trainable", "=", "None", ")", ":", "if", "trainable", "is", "None", ":", "trainable", "=", "weight", ".", "trainable", "self", ".", "add_weight", "(", "name", "=", "weight", ".", "name", ",", "shape", "=", "weight", ".", "shape", ",", "dtype", "=", "weight", ".", "dtype", ",", "trainable", "=", "trainable", ",", "getter", "=", "lambda", "*", "_", ",", "*", "*", "__", ":", "weight", ")" ]
Calls add_weight() to register but not create an existing weight.
[ "Calls", "add_weight", "()", "to", "register", "but", "not", "create", "an", "existing", "weight", "." ]
python
train
66.6
OCHA-DAP/hdx-python-country
setup.py
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/setup.py#L11-L27
def script_dir(pyobject, follow_symlinks=True): """Get current script's directory Args: pyobject (Any): Any Python object in the script follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True. Returns: str: Current script's directory """ if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze path = abspath(sys.executable) else: path = inspect.getabsfile(pyobject) if follow_symlinks: path = realpath(path) return dirname(path)
[ "def", "script_dir", "(", "pyobject", ",", "follow_symlinks", "=", "True", ")", ":", "if", "getattr", "(", "sys", ",", "'frozen'", ",", "False", ")", ":", "# py2exe, PyInstaller, cx_Freeze", "path", "=", "abspath", "(", "sys", ".", "executable", ")", "else", ":", "path", "=", "inspect", ".", "getabsfile", "(", "pyobject", ")", "if", "follow_symlinks", ":", "path", "=", "realpath", "(", "path", ")", "return", "dirname", "(", "path", ")" ]
Get current script's directory Args: pyobject (Any): Any Python object in the script follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True. Returns: str: Current script's directory
[ "Get", "current", "script", "s", "directory" ]
python
train
30.941176
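A minimal usage sketch; any object defined in the running script works as the pyobject argument:

def main():
    pass

here = script_dir(main)  # absolute directory containing this script
print(here)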
saltstack/salt
salt/cloud/clouds/opennebula.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L3511-L3551
def vm_monitoring(name, call=None): ''' Returns the monitoring records for a given virtual machine. A VM name must be supplied. The monitoring information returned is a list of VM elements. Each VM element contains the complete dictionary of the VM with the updated information returned by the poll action. .. versionadded:: 2016.3.0 name The name of the VM for which to gather monitoring records. CLI Example: .. code-block:: bash salt-cloud -a vm_monitoring my-vm ''' if call != 'action': raise SaltCloudSystemExit( 'The vm_monitoring action must be called with -a or --action.' ) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) vm_id = int(get_vm_id(kwargs={'name': name})) response = server.one.vm.monitoring(auth, vm_id) if response[0] is False: log.error( 'There was an error retrieving the specified VM\'s monitoring ' 'information.' ) return {} else: info = {} for vm_ in _get_xml(response[1]): info[vm_.find('ID').text] = _xml_to_dict(vm_) return info
[ "def", "vm_monitoring", "(", "name", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The vm_monitoring action must be called with -a or --action.'", ")", "server", ",", "user", ",", "password", "=", "_get_xml_rpc", "(", ")", "auth", "=", "':'", ".", "join", "(", "[", "user", ",", "password", "]", ")", "vm_id", "=", "int", "(", "get_vm_id", "(", "kwargs", "=", "{", "'name'", ":", "name", "}", ")", ")", "response", "=", "server", ".", "one", ".", "vm", ".", "monitoring", "(", "auth", ",", "vm_id", ")", "if", "response", "[", "0", "]", "is", "False", ":", "log", ".", "error", "(", "'There was an error retrieving the specified VM\\'s monitoring '", "'information.'", ")", "return", "{", "}", "else", ":", "info", "=", "{", "}", "for", "vm_", "in", "_get_xml", "(", "response", "[", "1", "]", ")", ":", "info", "[", "vm_", ".", "find", "(", "'ID'", ")", ".", "text", "]", "=", "_xml_to_dict", "(", "vm_", ")", "return", "info" ]
Returns the monitoring records for a given virtual machine. A VM name must be supplied. The monitoring information returned is a list of VM elements. Each VM element contains the complete dictionary of the VM with the updated information returned by the poll action. .. versionadded:: 2016.3.0 name The name of the VM for which to gather monitoring records. CLI Example: .. code-block:: bash salt-cloud -a vm_monitoring my-vm
[ "Returns", "the", "monitoring", "records", "for", "a", "given", "virtual", "machine", ".", "A", "VM", "name", "must", "be", "supplied", "." ]
python
train
28.146341
materialsproject/pymatgen
pymatgen/io/abinit/qadapters.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/qadapters.py#L1100-L1120
def get_njobs_in_queue(self, username=None):
        """
        returns the number of jobs in the queue, probably using subprocess or shutil to
        call a command like 'qstat'. returns None when the number of jobs cannot be determined.

        Args:
            username: (str) the username of the jobs to count (default is to autodetect)
        """
        if username is None: username = getpass.getuser()

        njobs, process = self._get_njobs_in_queue(username=username)

        if process is not None and process.returncode != 0:
            # there's a problem talking to squeue server?
            err_msg = ('Error trying to get the number of jobs in the queue. ' +
                       'The error response reads:\n {}'.format(process.stderr.read()))
            logger.critical(err_msg)

        if not isinstance(self, ShellAdapter):
            logger.info('The number of jobs currently in the queue is: {}'.format(njobs))

        return njobs
[ "def", "get_njobs_in_queue", "(", "self", ",", "username", "=", "None", ")", ":", "if", "username", "is", "None", ":", "username", "=", "getpass", ".", "getuser", "(", ")", "njobs", ",", "process", "=", "self", ".", "_get_njobs_in_queue", "(", "username", "=", "username", ")", "if", "process", "is", "not", "None", "and", "process", ".", "returncode", "!=", "0", ":", "# there's a problem talking to squeue server?", "err_msg", "=", "(", "'Error trying to get the number of jobs in the queue'", "+", "'The error response reads:\\n {}'", ".", "format", "(", "process", ".", "stderr", ".", "read", "(", ")", ")", ")", "logger", ".", "critical", "(", "err_msg", ")", "if", "not", "isinstance", "(", "self", ",", "ShellAdapter", ")", ":", "logger", ".", "info", "(", "'The number of jobs currently in the queue is: {}'", ".", "format", "(", "njobs", ")", ")", "return", "njobs" ]
returns the number of jobs in the queue, probably using subprocess or shutil to call a command like 'qstat'. returns None when the number of jobs cannot be determined. Args: username: (str) the username of the jobs to count (default is to autodetect)
[ "returns", "the", "number", "of", "jobs", "in", "the", "queue", "probably", "using", "subprocess", "or", "shutil", "to", "call", "a", "command", "like", "qstat", ".", "returns", "None", "when", "the", "number", "of", "jobs", "cannot", "be", "determined", "." ]
python
train
45
Dynatrace/OneAgent-SDK-for-Python
src/oneagent/sdk/__init__.py
https://github.com/Dynatrace/OneAgent-SDK-for-Python/blob/f7b121b492f25b1c5b27316798e1a70b6be2bd01/src/oneagent/sdk/__init__.py#L357-L369
def trace_in_process_link(self, link_bytes): '''Creates a tracer for tracing asynchronous related processing in the same process. For more information see :meth:`create_in_process_link`. :param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`. :rtype: tracers.InProcessLinkTracer .. versionadded:: 1.1.0 ''' return tracers.InProcessLinkTracer(self._nsdk, self._nsdk.trace_in_process_link(link_bytes))
[ "def", "trace_in_process_link", "(", "self", ",", "link_bytes", ")", ":", "return", "tracers", ".", "InProcessLinkTracer", "(", "self", ".", "_nsdk", ",", "self", ".", "_nsdk", ".", "trace_in_process_link", "(", "link_bytes", ")", ")" ]
Creates a tracer for tracing asynchronous related processing in the same process. For more information see :meth:`create_in_process_link`. :param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`. :rtype: tracers.InProcessLinkTracer .. versionadded:: 1.1.0
[ "Creates", "a", "tracer", "for", "tracing", "asynchronous", "related", "processing", "in", "the", "same", "process", "." ]
python
train
40.307692
deepmind/sonnet
sonnet/examples/ptb_reader.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/examples/ptb_reader.py#L55-L82
def ptb_raw_data(data_path): """Load PTB raw data from data directory "data_path". Reads PTB text files, converts strings to integer ids, and performs mini-batching of the inputs. The PTB dataset comes from Tomas Mikolov's webpage: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz Args: data_path: string path to the directory where simple-examples.tgz has been extracted. Returns: tuple (train_data, valid_data, test_data, vocabulary) where each of the data objects can be passed to PTBIterator. """ train_path = os.path.join(data_path, "ptb.train.txt") valid_path = os.path.join(data_path, "ptb.valid.txt") test_path = os.path.join(data_path, "ptb.test.txt") word_to_id = _build_vocab(train_path) train_data = _file_to_word_ids(train_path, word_to_id) valid_data = _file_to_word_ids(valid_path, word_to_id) test_data = _file_to_word_ids(test_path, word_to_id) return train_data, valid_data, test_data, word_to_id
[ "def", "ptb_raw_data", "(", "data_path", ")", ":", "train_path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "\"ptb.train.txt\"", ")", "valid_path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "\"ptb.valid.txt\"", ")", "test_path", "=", "os", ".", "path", ".", "join", "(", "data_path", ",", "\"ptb.test.txt\"", ")", "word_to_id", "=", "_build_vocab", "(", "train_path", ")", "train_data", "=", "_file_to_word_ids", "(", "train_path", ",", "word_to_id", ")", "valid_data", "=", "_file_to_word_ids", "(", "valid_path", ",", "word_to_id", ")", "test_data", "=", "_file_to_word_ids", "(", "test_path", ",", "word_to_id", ")", "return", "train_data", ",", "valid_data", ",", "test_data", ",", "word_to_id" ]
Load PTB raw data from data directory "data_path". Reads PTB text files, converts strings to integer ids, and performs mini-batching of the inputs. The PTB dataset comes from Tomas Mikolov's webpage: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz Args: data_path: string path to the directory where simple-examples.tgz has been extracted. Returns: tuple (train_data, valid_data, test_data, vocabulary) where each of the data objects can be passed to PTBIterator.
[ "Load", "PTB", "raw", "data", "from", "data", "directory", "data_path", "." ]
python
train
34.107143
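A minimal usage sketch, assuming simple-examples.tgz has been downloaded and extracted so that ptb.train.txt and friends sit under ./simple-examples/data:

train_data, valid_data, test_data, word_to_id = ptb_raw_data('./simple-examples/data')
print(len(word_to_id))  # vocabulary size (10000 for the standard PTB split)
print(train_data[:10])  # first ten word ids of the training set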
django-py/django-doberman
doberman/contrib/captcha/client.py
https://github.com/django-py/django-doberman/blob/2e5959737a1b64234ed5a179c93f96a0de1c3e5c/doberman/contrib/captcha/client.py#L42-L71
def displayhtml(public_key, attrs, use_ssl=False, error=None): """Gets the HTML to display for reCAPTCHA public_key -- The public api key use_ssl -- Should the request be sent over ssl? error -- An error message to display (from RecaptchaResponse.error_code)""" error_param = '' if error: error_param = '&error=%s' % error if use_ssl: server = API_SSL_SERVER else: server = API_SERVER if 'lang' not in attrs: attrs['lang'] = get_language()[:2] return render_to_string( WIDGET_TEMPLATE, {'api_server': server, 'public_key': public_key, 'error_param': error_param, 'lang': attrs['lang'], 'options': mark_safe(json.dumps(attrs, indent=2)) })
[ "def", "displayhtml", "(", "public_key", ",", "attrs", ",", "use_ssl", "=", "False", ",", "error", "=", "None", ")", ":", "error_param", "=", "''", "if", "error", ":", "error_param", "=", "'&error=%s'", "%", "error", "if", "use_ssl", ":", "server", "=", "API_SSL_SERVER", "else", ":", "server", "=", "API_SERVER", "if", "'lang'", "not", "in", "attrs", ":", "attrs", "[", "'lang'", "]", "=", "get_language", "(", ")", "[", ":", "2", "]", "return", "render_to_string", "(", "WIDGET_TEMPLATE", ",", "{", "'api_server'", ":", "server", ",", "'public_key'", ":", "public_key", ",", "'error_param'", ":", "error_param", ",", "'lang'", ":", "attrs", "[", "'lang'", "]", ",", "'options'", ":", "mark_safe", "(", "json", ".", "dumps", "(", "attrs", ",", "indent", "=", "2", ")", ")", "}", ")" ]
Gets the HTML to display for reCAPTCHA public_key -- The public api key use_ssl -- Should the request be sent over ssl? error -- An error message to display (from RecaptchaResponse.error_code)
[ "Gets", "the", "HTML", "to", "display", "for", "reCAPTCHA" ]
python
train
26.5
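A minimal usage sketch; the public key is a placeholder, and rendering assumes a configured Django environment since the markup comes from WIDGET_TEMPLATE via render_to_string:

html = displayhtml(
    'your-recaptcha-public-key',   # placeholder, not a real key
    attrs={'theme': 'light'},
    use_ssl=True,
)
# html now holds the widget markup, pointing at the SSL API server,
# with the attrs dict JSON-encoded into the template's options.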
hollenstein/maspy
maspy/_proteindb_refactoring.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/_proteindb_refactoring.py#L512-L528
def _nameFromHeaderInfo(headerInfo, isDecoy, decoyTag):
    """Generates a protein name from headerInfo. If "isDecoy" is True, the
    "decoyTag" is added to the beginning of the generated protein name.

    :param headerInfo: dict, must contain a key "name" or "id"
    :param isDecoy: bool, determines if the "decoyTag" is added or not.
    :param decoyTag: str, a tag that identifies decoy / reverse protein entries.

    :returns: str, protein name
    """
    if 'name' in headerInfo:
        proteinName = headerInfo['name']
    else:
        proteinName = headerInfo['id']
    if isDecoy:
        proteinName = ''.join((decoyTag, proteinName))
    return proteinName
[ "def", "_nameFromHeaderInfo", "(", "headerInfo", ",", "isDecoy", ",", "decoyTag", ")", ":", "if", "'name'", "in", "headerInfo", ":", "proteinName", "=", "headerInfo", "[", "'name'", "]", "else", ":", "proteinName", "=", "headerInfo", "[", "'id'", "]", "if", "isDecoy", ":", "proteinName", "=", "''", ".", "join", "(", "(", "decoyTag", ",", "proteinName", ")", ")", "return", "proteinName" ]
Generates a protein name from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to the beginning of the generated protein name.

:param headerInfo: dict, must contain a key "name" or "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.

:returns: str, protein name
[ "Generates", "a", "protein", "name", "from", "headerInfo", ".", "If", "isDecoy", "is", "True", "the", "decoyTag", "is", "added", "to", "beginning", "of", "the", "generated", "protein", "name", "." ]
python
train
38.411765
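A worked example of the decoy handling; the '[decoy]_' tag is illustrative, not necessarily the library default:

header = {'id': 'sp|P60709|ACTB_HUMAN', 'name': 'ACTB_HUMAN'}
_nameFromHeaderInfo(header, isDecoy=False, decoyTag='[decoy]_')
# -> 'ACTB_HUMAN'        ('name' is preferred over 'id')
_nameFromHeaderInfo({'id': 'P60709'}, isDecoy=True, decoyTag='[decoy]_')
# -> '[decoy]_P60709'    (falls back to 'id', tag prepended)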
oemof/oemof.db
oemof/db/tools.py
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L294-L330
def create_empty_table_serial_primary(conn, schema, table, columns=None,
                                      id_col='id'):
    r"""New database table with primary key type serial and empty columns

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    columns : list, optional
        Columns that are to be created
    id_col : str, optional
        Name of index column of database table

    Notes
    -------
    Currently all created table columns will be of type `double precision`.
    Feel free to enhance this function by generalizing this aspect.
    """
    sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
        NOT NULL)
        """.format(schema=schema, table=table, id_col=id_col)

    conn.execute(sql_str)

    # define more columns
    if columns is not None:
        for col in columns:
            col_str = """alter table {schema}.{table} add column {col}
                double precision;
                """.format(schema=schema, table=table, col=col)
            conn.execute(col_str)
[ "def", "create_empty_table_serial_primary", "(", "conn", ",", "schema", ",", "table", ",", "columns", "=", "None", ",", "id_col", "=", "'id'", ")", ":", "sql_str", "=", "\"\"\"CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY\n NOT NULL)\n \"\"\"", ".", "format", "(", "schema", "=", "schema", ",", "table", "=", "table", ",", "id_col", "=", "id_col", ")", "conn", ".", "execute", "(", "sql_str", ")", "# define more columns", "if", "columns", "is", "not", "None", ":", "for", "col", "in", "columns", ":", "col_str", "=", "\"\"\"alter table {schema}.{table} add column {col}\n double precision;\n \"\"\"", ".", "format", "(", "schema", "=", "schema", ",", "table", "=", "table", ",", "col", "=", "col", ")", "conn", ".", "execute", "(", "col_str", ")" ]
r"""New database table with primary key type serial and empty columns Parameters ---------- conn : sqlalchemy connection object A valid connection to a database schema : str The database schema table : str The database table columns : list, optional Columns that are to be created id_col : str, optional Name of index column of database table Notes ------- Currently all created table columns will be of type `double precision`. Feel free to enhance this function by by generalizing this aspect.
[ "r", "New", "database", "table", "with", "primary", "key", "type", "serial", "and", "empty", "columns" ]
python
train
31.243243
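For a call like the sketch below, the function issues roughly the SQL shown in the comments; the SQLAlchemy DSN is hypothetical:

from sqlalchemy import create_engine

engine = create_engine('postgresql://user:secret@localhost/mydb')  # hypothetical DSN
with engine.connect() as conn:
    create_empty_table_serial_primary(conn, 'myschema', 'mytable',
                                      columns=['lat', 'lon'])
# Roughly equivalent to:
#   CREATE TABLE myschema.mytable (id SERIAL PRIMARY KEY NOT NULL);
#   ALTER TABLE myschema.mytable ADD COLUMN lat double precision;
#   ALTER TABLE myschema.mytable ADD COLUMN lon double precision;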
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/job.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/job.py#L1658-L1685
def to_api_repr(self): """Generate a resource for :meth:`_begin`.""" source_refs = [ { "projectId": table.project, "datasetId": table.dataset_id, "tableId": table.table_id, } for table in self.sources ] configuration = self._configuration.to_api_repr() _helpers._set_sub_prop(configuration, ["copy", "sourceTables"], source_refs) _helpers._set_sub_prop( configuration, ["copy", "destinationTable"], { "projectId": self.destination.project, "datasetId": self.destination.dataset_id, "tableId": self.destination.table_id, }, ) return { "jobReference": self._properties["jobReference"], "configuration": configuration, }
[ "def", "to_api_repr", "(", "self", ")", ":", "source_refs", "=", "[", "{", "\"projectId\"", ":", "table", ".", "project", ",", "\"datasetId\"", ":", "table", ".", "dataset_id", ",", "\"tableId\"", ":", "table", ".", "table_id", ",", "}", "for", "table", "in", "self", ".", "sources", "]", "configuration", "=", "self", ".", "_configuration", ".", "to_api_repr", "(", ")", "_helpers", ".", "_set_sub_prop", "(", "configuration", ",", "[", "\"copy\"", ",", "\"sourceTables\"", "]", ",", "source_refs", ")", "_helpers", ".", "_set_sub_prop", "(", "configuration", ",", "[", "\"copy\"", ",", "\"destinationTable\"", "]", ",", "{", "\"projectId\"", ":", "self", ".", "destination", ".", "project", ",", "\"datasetId\"", ":", "self", ".", "destination", ".", "dataset_id", ",", "\"tableId\"", ":", "self", ".", "destination", ".", "table_id", ",", "}", ",", ")", "return", "{", "\"jobReference\"", ":", "self", ".", "_properties", "[", "\"jobReference\"", "]", ",", "\"configuration\"", ":", "configuration", ",", "}" ]
Generate a resource for :meth:`_begin`.
[ "Generate", "a", "resource", "for", ":", "meth", ":", "_begin", "." ]
python
train
31.035714
mitsei/dlkit
dlkit/records/osid/base_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/osid/base_records.py#L2245-L2258
def set_asset(self, asset_id, asset_content_type=None): """stub""" if asset_id is None: raise NullArgument('asset_id cannot be None') if not isinstance(asset_id, Id): raise InvalidArgument('asset_id must be an instance of Id') if asset_content_type is not None and not isinstance(asset_content_type, Type): raise InvalidArgument('asset_content_type must be instance of Type') if asset_content_type is None: asset_content_type = '' self.my_osid_object_form._my_map['fileId'] = { 'assetId': str(asset_id), 'assetContentTypeId': str(asset_content_type) }
[ "def", "set_asset", "(", "self", ",", "asset_id", ",", "asset_content_type", "=", "None", ")", ":", "if", "asset_id", "is", "None", ":", "raise", "NullArgument", "(", "'asset_id cannot be None'", ")", "if", "not", "isinstance", "(", "asset_id", ",", "Id", ")", ":", "raise", "InvalidArgument", "(", "'asset_id must be an instance of Id'", ")", "if", "asset_content_type", "is", "not", "None", "and", "not", "isinstance", "(", "asset_content_type", ",", "Type", ")", ":", "raise", "InvalidArgument", "(", "'asset_content_type must be instance of Type'", ")", "if", "asset_content_type", "is", "None", ":", "asset_content_type", "=", "''", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'fileId'", "]", "=", "{", "'assetId'", ":", "str", "(", "asset_id", ")", ",", "'assetContentTypeId'", ":", "str", "(", "asset_content_type", ")", "}" ]
stub
[ "stub" ]
python
train
47.571429
open-mmlab/mmcv
mmcv/runner/runner.py
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/runner/runner.py#L315-L359
def run(self, data_loaders, workflow, max_epochs, **kwargs): """Start running. Args: data_loaders (list[:obj:`DataLoader`]): Dataloaders for training and validation. workflow (list[tuple]): A list of (phase, epochs) to specify the running order and epochs. E.g, [('train', 2), ('val', 1)] means running 2 epochs for training and 1 epoch for validation, iteratively. max_epochs (int): Total training epochs. """ assert isinstance(data_loaders, list) assert mmcv.is_list_of(workflow, tuple) assert len(data_loaders) == len(workflow) self._max_epochs = max_epochs work_dir = self.work_dir if self.work_dir is not None else 'NONE' self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir) self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs) self.call_hook('before_run') while self.epoch < max_epochs: for i, flow in enumerate(workflow): mode, epochs = flow if isinstance(mode, str): # self.train() if not hasattr(self, mode): raise ValueError( 'runner has no method named "{}" to run an epoch'. format(mode)) epoch_runner = getattr(self, mode) elif callable(mode): # custom train() epoch_runner = mode else: raise TypeError('mode in workflow must be a str or ' 'callable function, not {}'.format( type(mode))) for _ in range(epochs): if mode == 'train' and self.epoch >= max_epochs: return epoch_runner(data_loaders[i], **kwargs) time.sleep(1) # wait for some hooks like loggers to finish self.call_hook('after_run')
[ "def", "run", "(", "self", ",", "data_loaders", ",", "workflow", ",", "max_epochs", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "data_loaders", ",", "list", ")", "assert", "mmcv", ".", "is_list_of", "(", "workflow", ",", "tuple", ")", "assert", "len", "(", "data_loaders", ")", "==", "len", "(", "workflow", ")", "self", ".", "_max_epochs", "=", "max_epochs", "work_dir", "=", "self", ".", "work_dir", "if", "self", ".", "work_dir", "is", "not", "None", "else", "'NONE'", "self", ".", "logger", ".", "info", "(", "'Start running, host: %s, work_dir: %s'", ",", "get_host_info", "(", ")", ",", "work_dir", ")", "self", ".", "logger", ".", "info", "(", "'workflow: %s, max: %d epochs'", ",", "workflow", ",", "max_epochs", ")", "self", ".", "call_hook", "(", "'before_run'", ")", "while", "self", ".", "epoch", "<", "max_epochs", ":", "for", "i", ",", "flow", "in", "enumerate", "(", "workflow", ")", ":", "mode", ",", "epochs", "=", "flow", "if", "isinstance", "(", "mode", ",", "str", ")", ":", "# self.train()", "if", "not", "hasattr", "(", "self", ",", "mode", ")", ":", "raise", "ValueError", "(", "'runner has no method named \"{}\" to run an epoch'", ".", "format", "(", "mode", ")", ")", "epoch_runner", "=", "getattr", "(", "self", ",", "mode", ")", "elif", "callable", "(", "mode", ")", ":", "# custom train()", "epoch_runner", "=", "mode", "else", ":", "raise", "TypeError", "(", "'mode in workflow must be a str or '", "'callable function, not {}'", ".", "format", "(", "type", "(", "mode", ")", ")", ")", "for", "_", "in", "range", "(", "epochs", ")", ":", "if", "mode", "==", "'train'", "and", "self", ".", "epoch", ">=", "max_epochs", ":", "return", "epoch_runner", "(", "data_loaders", "[", "i", "]", ",", "*", "*", "kwargs", ")", "time", ".", "sleep", "(", "1", ")", "# wait for some hooks like loggers to finish", "self", ".", "call_hook", "(", "'after_run'", ")" ]
Start running. Args: data_loaders (list[:obj:`DataLoader`]): Dataloaders for training and validation. workflow (list[tuple]): A list of (phase, epochs) to specify the running order and epochs. E.g, [('train', 2), ('val', 1)] means running 2 epochs for training and 1 epoch for validation, iteratively. max_epochs (int): Total training epochs.
[ "Start", "running", "." ]
python
test
45.555556
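A minimal usage sketch of the workflow argument; construction of the runner and the two dataloaders is elided, and each workflow tuple must pair with the loader at the same position:

# Train for 2 epochs, then validate for 1, repeating until 12 training epochs:
runner.run([train_loader, val_loader],
           workflow=[('train', 2), ('val', 1)],
           max_epochs=12)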
PySimpleGUI/PySimpleGUI
PySimpleGUIWx/PySimpleGUIWx.py
https://github.com/PySimpleGUI/PySimpleGUI/blob/08184197f5bd4580ab5e5aca28bdda30f87b86fc/PySimpleGUIWx/PySimpleGUIWx.py#L2817-L2847
def Read(self, timeout=None): ''' Reads the context menu :param timeout: Optional. Any value other than None indicates a non-blocking read :return: ''' # if not self.Shown: # self.Shown = True # self.TrayIcon.show() timeout1 = timeout # if timeout1 == 0: # timeout1 = 1 # if wx.GetApp(): # wx.GetApp().ProcessPendingEvents() # self.App.ProcessPendingEvents() # self.App.ProcessIdle() # return self.MenuItemChosen if timeout1 is not None: try: self.timer = wx.Timer(self.TaskBarIcon) self.TaskBarIcon.Bind(wx.EVT_TIMER, self.timer_timeout) self.timer.Start(milliseconds=timeout1, oneShot=wx.TIMER_ONE_SHOT) except: print('*** Got error in Read ***') self.RunningMainLoop = True self.App.MainLoop() self.RunningMainLoop = False if self.timer: self.timer.Stop() self.MenuItemChosen = self.TaskBarIcon.menu_item_chosen return self.MenuItemChosen
[ "def", "Read", "(", "self", ",", "timeout", "=", "None", ")", ":", "# if not self.Shown:", "# self.Shown = True", "# self.TrayIcon.show()", "timeout1", "=", "timeout", "# if timeout1 == 0:", "# timeout1 = 1", "# if wx.GetApp():", "# wx.GetApp().ProcessPendingEvents()", "# self.App.ProcessPendingEvents()", "# self.App.ProcessIdle()", "# return self.MenuItemChosen", "if", "timeout1", "is", "not", "None", ":", "try", ":", "self", ".", "timer", "=", "wx", ".", "Timer", "(", "self", ".", "TaskBarIcon", ")", "self", ".", "TaskBarIcon", ".", "Bind", "(", "wx", ".", "EVT_TIMER", ",", "self", ".", "timer_timeout", ")", "self", ".", "timer", ".", "Start", "(", "milliseconds", "=", "timeout1", ",", "oneShot", "=", "wx", ".", "TIMER_ONE_SHOT", ")", "except", ":", "print", "(", "'*** Got error in Read ***'", ")", "self", ".", "RunningMainLoop", "=", "True", "self", ".", "App", ".", "MainLoop", "(", ")", "self", ".", "RunningMainLoop", "=", "False", "if", "self", ".", "timer", ":", "self", ".", "timer", ".", "Stop", "(", ")", "self", ".", "MenuItemChosen", "=", "self", ".", "TaskBarIcon", ".", "menu_item_chosen", "return", "self", ".", "MenuItemChosen" ]
Reads the context menu :param timeout: Optional. Any value other than None indicates a non-blocking read :return:
[ "Reads", "the", "context", "menu", ":", "param", "timeout", ":", "Optional", ".", "Any", "value", "other", "than", "None", "indicates", "a", "non", "-", "blocking", "read", ":", "return", ":" ]
python
train
36.516129
MillionIntegrals/vel
vel/api/learner.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/learner.py#L58-L74
def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True):
        """ Run a single training epoch """
        self.train()

        if interactive:
            iterator = tqdm.tqdm(source.train_loader(), desc="Training", unit="iter", file=sys.stdout)
        else:
            iterator = source.train_loader()

        for batch_idx, (data, target) in enumerate(iterator):
            batch_info = BatchInfo(epoch_info, batch_idx)

            batch_info.on_batch_begin()
            self.train_batch(batch_info, data, target)
            batch_info.on_batch_end()

            if interactive:
                iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss'))
[ "def", "train_epoch", "(", "self", ",", "epoch_info", ",", "source", ":", "'vel.api.Source'", ",", "interactive", "=", "True", ")", ":", "self", ".", "train", "(", ")", "if", "interactive", ":", "iterator", "=", "tqdm", ".", "tqdm", "(", "source", ".", "train_loader", "(", ")", ",", "desc", "=", "\"Training\"", ",", "unit", "=", "\"iter\"", ",", "file", "=", "sys", ".", "stdout", ")", "else", ":", "iterator", "=", "source", ".", "train_loader", "(", ")", "for", "batch_idx", ",", "(", "data", ",", "target", ")", "in", "enumerate", "(", "iterator", ")", ":", "batch_info", "=", "BatchInfo", "(", "epoch_info", ",", "batch_idx", ")", "batch_info", ".", "on_batch_begin", "(", ")", "self", ".", "train_batch", "(", "batch_info", ",", "data", ",", "target", ")", "batch_info", ".", "on_batch_end", "(", ")", "iterator", ".", "set_postfix", "(", "loss", "=", "epoch_info", ".", "result_accumulator", ".", "intermediate_value", "(", "'loss'", ")", ")" ]
Run a single training epoch
[ "Run", "a", "single", "training", "epoch" ]
python
train
39.176471
Unity-Technologies/ml-agents
ml-agents-envs/mlagents/envs/brain.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents-envs/mlagents/envs/brain.py#L206-L224
def from_proto(brain_param_proto): """ Converts brain parameter proto to BrainParameter object. :param brain_param_proto: protobuf object. :return: BrainParameter object. """ resolution = [{ "height": x.height, "width": x.width, "blackAndWhite": x.gray_scale } for x in brain_param_proto.camera_resolutions] brain_params = BrainParameters(brain_param_proto.brain_name, brain_param_proto.vector_observation_size, brain_param_proto.num_stacked_vector_observations, resolution, list(brain_param_proto.vector_action_size), list(brain_param_proto.vector_action_descriptions), brain_param_proto.vector_action_space_type) return brain_params
[ "def", "from_proto", "(", "brain_param_proto", ")", ":", "resolution", "=", "[", "{", "\"height\"", ":", "x", ".", "height", ",", "\"width\"", ":", "x", ".", "width", ",", "\"blackAndWhite\"", ":", "x", ".", "gray_scale", "}", "for", "x", "in", "brain_param_proto", ".", "camera_resolutions", "]", "brain_params", "=", "BrainParameters", "(", "brain_param_proto", ".", "brain_name", ",", "brain_param_proto", ".", "vector_observation_size", ",", "brain_param_proto", ".", "num_stacked_vector_observations", ",", "resolution", ",", "list", "(", "brain_param_proto", ".", "vector_action_size", ")", ",", "list", "(", "brain_param_proto", ".", "vector_action_descriptions", ")", ",", "brain_param_proto", ".", "vector_action_space_type", ")", "return", "brain_params" ]
Converts brain parameter proto to BrainParameter object. :param brain_param_proto: protobuf object. :return: BrainParameter object.
[ "Converts", "brain", "parameter", "proto", "to", "BrainParameter", "object", ".", ":", "param", "brain_param_proto", ":", "protobuf", "object", ".", ":", "return", ":", "BrainParameter", "object", "." ]
python
train
50.421053
dossier/dossier.store
dossier/store/elastic.py
https://github.com/dossier/dossier.store/blob/b22ffe2470bba9fcc98a30cb55b437bfa1521e7f/dossier/store/elastic.py#L368-L393
def fulltext_scan_ids(self, query_id=None, query_fc=None,
                          preserve_order=True, indexes=None):
        '''Fulltext search for identifiers.

        Yields an iterable of pairs (score, identifier)
        corresponding to the search results of the fulltext search
        in ``query``. This will only search text indexed under the
        given feature named ``fname``.

        Note that, unless ``preserve_order`` is set to True, the
        ``score`` will always be 0.0, and the results will be
        unordered. ``preserve_order`` set to True will cause the
        results to be scored and be ordered by score, but you should
        expect to see a decrease in performance.

        :param str fname: The feature to search.
        :param unicode query: The query.
        :rtype: Iterable of ``(score, content_id)``
        '''
        it = self._fulltext_scan(query_id, query_fc, feature_names=False,
                                 preserve_order=preserve_order,
                                 indexes=indexes)
        for hit in it:
            yield hit['_score'], did(hit['_id'])
[ "def", "fulltext_scan_ids", "(", "self", ",", "query_id", "=", "None", ",", "query_fc", "=", "None", ",", "preserve_order", "=", "True", ",", "indexes", "=", "None", ")", ":", "it", "=", "self", ".", "_fulltext_scan", "(", "query_id", ",", "query_fc", ",", "feature_names", "=", "False", ",", "preserve_order", "=", "preserve_order", ",", "indexes", "=", "indexes", ")", "for", "hit", "in", "it", ":", "yield", "hit", "[", "'_score'", "]", ",", "did", "(", "hit", "[", "'_id'", "]", ")" ]
Fulltext search for identifiers.

Yields an iterable of pairs (score, identifier)
corresponding to the search results of the fulltext search
in ``query``. This will only search text indexed under the
given feature named ``fname``.

Note that, unless ``preserve_order`` is set to True, the
``score`` will always be 0.0, and the results will be
unordered. ``preserve_order`` set to True will cause the
results to be scored and be ordered by score, but you should
expect to see a decrease in performance.

:param str fname: The feature to search.
:param unicode query: The query.
:rtype: Iterable of ``(score, content_id)``
[ "Fulltext", "search", "for", "identifiers", "." ]
python
test
42.923077
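A minimal consumption sketch; construction of the store and of the query feature collection is elided:

for score, content_id in store.fulltext_scan_ids(query_fc=query_fc):
    # with preserve_order=False the score would always be 0.0
    print(score, content_id)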
MolSSI-BSE/basis_set_exchange
basis_set_exchange/fileio.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/fileio.py#L232-L243
def read_notes_file(file_path): """ Returns the contents of a notes file. If the notes file does not exist, None is returned """ if not os.path.isfile(file_path): return None with open(file_path, 'r', encoding=_default_encoding) as f: return f.read()
[ "def", "read_notes_file", "(", "file_path", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "return", "None", "with", "open", "(", "file_path", ",", "'r'", ",", "encoding", "=", "_default_encoding", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Returns the contents of a notes file. If the notes file does not exist, None is returned
[ "Returns", "the", "contents", "of", "a", "notes", "file", "." ]
python
train
23.5
Qiskit/qiskit-terra
qiskit/providers/basicaer/qasm_simulator.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/providers/basicaer/qasm_simulator.py#L334-L372
def _validate_measure_sampling(self, experiment): """Determine if measure sampling is allowed for an experiment Args: experiment (QobjExperiment): a qobj experiment. """ # If shots=1 we should disable measure sampling. # This is also required for statevector simulator to return the # correct final statevector without silently dropping final measurements. if self._shots <= 1: self._sample_measure = False return # Check for config flag if hasattr(experiment.config, 'allows_measure_sampling'): self._sample_measure = experiment.config.allows_measure_sampling # If flag isn't found do a simple test to see if a circuit contains # no reset instructions, and no gates instructions after # the first measure. else: measure_flag = False for instruction in experiment.instructions: # If circuit contains reset operations we cannot sample if instruction.name == "reset": self._sample_measure = False return # If circuit contains a measure option then we can # sample only if all following operations are measures if measure_flag: # If we find a non-measure instruction # we cannot do measure sampling if instruction.name not in ["measure", "barrier", "id", "u0"]: self._sample_measure = False return elif instruction.name == "measure": measure_flag = True # If we made it to the end of the circuit without returning # measure sampling is allowed self._sample_measure = True
[ "def", "_validate_measure_sampling", "(", "self", ",", "experiment", ")", ":", "# If shots=1 we should disable measure sampling.", "# This is also required for statevector simulator to return the", "# correct final statevector without silently dropping final measurements.", "if", "self", ".", "_shots", "<=", "1", ":", "self", ".", "_sample_measure", "=", "False", "return", "# Check for config flag", "if", "hasattr", "(", "experiment", ".", "config", ",", "'allows_measure_sampling'", ")", ":", "self", ".", "_sample_measure", "=", "experiment", ".", "config", ".", "allows_measure_sampling", "# If flag isn't found do a simple test to see if a circuit contains", "# no reset instructions, and no gates instructions after", "# the first measure.", "else", ":", "measure_flag", "=", "False", "for", "instruction", "in", "experiment", ".", "instructions", ":", "# If circuit contains reset operations we cannot sample", "if", "instruction", ".", "name", "==", "\"reset\"", ":", "self", ".", "_sample_measure", "=", "False", "return", "# If circuit contains a measure option then we can", "# sample only if all following operations are measures", "if", "measure_flag", ":", "# If we find a non-measure instruction", "# we cannot do measure sampling", "if", "instruction", ".", "name", "not", "in", "[", "\"measure\"", ",", "\"barrier\"", ",", "\"id\"", ",", "\"u0\"", "]", ":", "self", ".", "_sample_measure", "=", "False", "return", "elif", "instruction", ".", "name", "==", "\"measure\"", ":", "measure_flag", "=", "True", "# If we made it to the end of the circuit without returning", "# measure sampling is allowed", "self", ".", "_sample_measure", "=", "True" ]
Determine if measure sampling is allowed for an experiment Args: experiment (QobjExperiment): a qobj experiment.
[ "Determine", "if", "measure", "sampling", "is", "allowed", "for", "an", "experiment" ]
python
test
46.358974
useblocks/groundwork
groundwork/pluginmanager.py
https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L255-L265
def is_active(self, name):
        """
        Returns True if plugin exists and is active.
        If plugin does not exist, it returns None

        :param name: plugin name
        :return: boolean or None
        """
        if name in self._plugins.keys():
            return self._plugins[name].active
        return None
[ "def", "is_active", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_plugins", ".", "keys", "(", ")", ":", "return", "self", ".", "_plugins", "[", "\"name\"", "]", ".", "active", "return", "None" ]
Returns True if plugin exists and is active. If plugin does not exist, it returns None :param name: plugin name :return: boolean or None
[ "Returns", "True", "if", "plugin", "exists", "and", "is", "active", ".", "If", "plugin", "does", "not", "exist", "it", "returns", "None" ]
python
train
29
Jajcus/pyxmpp2
pyxmpp2/ext/disco.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/disco.py#L464-L474
def get_items(self): """Get the items contained in `self`. :return: the items contained. :returntype: `list` of `DiscoItem`""" ret=[] l=self.xpath_ctxt.xpathEval("d:item") if l is not None: for i in l: ret.append(DiscoItem(self, i)) return ret
[ "def", "get_items", "(", "self", ")", ":", "ret", "=", "[", "]", "l", "=", "self", ".", "xpath_ctxt", ".", "xpathEval", "(", "\"d:item\"", ")", "if", "l", "is", "not", "None", ":", "for", "i", "in", "l", ":", "ret", ".", "append", "(", "DiscoItem", "(", "self", ",", "i", ")", ")", "return", "ret" ]
Get the items contained in `self`. :return: the items contained. :returntype: `list` of `DiscoItem`
[ "Get", "the", "items", "contained", "in", "self", "." ]
python
valid
28.909091
vladsaveliev/TargQC
targqc/utilz/jsontemplate/_jsontemplate.py
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/utilz/jsontemplate/_jsontemplate.py#L741-L744
def _Pairs(data): """dictionary -> list of pairs""" keys = sorted(data) return [{'@key': k, '@value': data[k]} for k in keys]
[ "def", "_Pairs", "(", "data", ")", ":", "keys", "=", "sorted", "(", "data", ")", "return", "[", "{", "'@key'", ":", "k", ",", "'@value'", ":", "data", "[", "k", "]", "}", "for", "k", "in", "keys", "]" ]
dictionary -> list of pairs
[ "dictionary", "-", ">", "list", "of", "pairs" ]
python
train
33.5
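A worked example; note that the keys come back sorted:

_Pairs({'b': 2, 'a': 1})
# -> [{'@key': 'a', '@value': 1}, {'@key': 'b', '@value': 2}]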
UDST/urbansim
urbansim/models/dcm.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L474-L560
def probabilities(self, choosers, alternatives, filter_tables=True):
        """
        Returns the probabilities for a set of choosers to choose
        from among a set of alternatives.

        Parameters
        ----------
        choosers : pandas.DataFrame
            Table describing the agents making choices, e.g. households.
        alternatives : pandas.DataFrame
            Table describing the things from which agents are choosing.
        filter_tables : bool, optional
            If True, filter `choosers` and `alternatives` with prediction
            filters before calculating probabilities.

        Returns
        -------
        probabilities : pandas.Series
            Probability of selection associated with each chooser
            and alternative. Index will be a MultiIndex with alternative
            IDs in the inner index and chooser IDs in the outer index.

        """
        logger.debug('start: calculate probabilities for LCM model {}'.format(
            self.name))
        self.assert_fitted()

        if filter_tables:
            choosers, alternatives = self.apply_predict_filters(
                choosers, alternatives)

        if self.prediction_sample_size is not None:
            sample_size = self.prediction_sample_size
        else:
            sample_size = len(alternatives)

        if self.probability_mode == 'single_chooser':
            _, merged, _ = interaction.mnl_interaction_dataset(
                choosers.head(1), alternatives, sample_size)
        elif self.probability_mode == 'full_product':
            _, merged, _ = interaction.mnl_interaction_dataset(
                choosers, alternatives, sample_size)
        else:
            raise ValueError(
                'Unrecognized probability_mode option: {}'.format(
                    self.probability_mode))

        merged = util.apply_filter_query(
            merged, self.interaction_predict_filters)
        model_design = dmatrix(
            self.str_model_expression, data=merged, return_type='dataframe')

        if len(merged) != model_design.as_matrix().shape[0]:
            raise ModelEvaluationError(
                'Simulated data does not have the same length as input. '
                'This suggests there are null values in one or more of '
                'the input columns.')

        # get the order of the coefficients in the same order as the
        # columns in the design matrix
        coeffs = [self.fit_parameters['Coefficient'][x]
                  for x in model_design.columns]

        # probabilities are returned from mnl_simulate as a 2d array
        # with choosers along rows and alternatives along columns
        if self.probability_mode == 'single_chooser':
            numalts = len(merged)
        else:
            numalts = sample_size

        probabilities = mnl.mnl_simulate(
            model_design.as_matrix(),
            coeffs,
            numalts=numalts, returnprobs=True)

        # want to turn probabilities into a Series with a MultiIndex
        # of chooser IDs and alternative IDs.
        # indexing by chooser ID will get you the probabilities
        # across alternatives for that chooser
        mi = pd.MultiIndex.from_arrays(
            [merged['join_index'].values, merged.index.values],
            names=('chooser_id', 'alternative_id'))
        probabilities = pd.Series(probabilities.flatten(), index=mi)

        logger.debug('finish: calculate probabilities for LCM model {}'.format(
            self.name))
        return probabilities
[ "def", "probabilities", "(", "self", ",", "choosers", ",", "alternatives", ",", "filter_tables", "=", "True", ")", ":", "logger", ".", "debug", "(", "'start: calculate probabilities for LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "self", ".", "assert_fitted", "(", ")", "if", "filter_tables", ":", "choosers", ",", "alternatives", "=", "self", ".", "apply_predict_filters", "(", "choosers", ",", "alternatives", ")", "if", "self", ".", "prediction_sample_size", "is", "not", "None", ":", "sample_size", "=", "self", ".", "prediction_sample_size", "else", ":", "sample_size", "=", "len", "(", "alternatives", ")", "if", "self", ".", "probability_mode", "==", "'single_chooser'", ":", "_", ",", "merged", ",", "_", "=", "interaction", ".", "mnl_interaction_dataset", "(", "choosers", ".", "head", "(", "1", ")", ",", "alternatives", ",", "sample_size", ")", "elif", "self", ".", "probability_mode", "==", "'full_product'", ":", "_", ",", "merged", ",", "_", "=", "interaction", ".", "mnl_interaction_dataset", "(", "choosers", ",", "alternatives", ",", "sample_size", ")", "else", ":", "raise", "ValueError", "(", "'Unrecognized probability_mode option: {}'", ".", "format", "(", "self", ".", "probability_mode", ")", ")", "merged", "=", "util", ".", "apply_filter_query", "(", "merged", ",", "self", ".", "interaction_predict_filters", ")", "model_design", "=", "dmatrix", "(", "self", ".", "str_model_expression", ",", "data", "=", "merged", ",", "return_type", "=", "'dataframe'", ")", "if", "len", "(", "merged", ")", "!=", "model_design", ".", "as_matrix", "(", ")", ".", "shape", "[", "0", "]", ":", "raise", "ModelEvaluationError", "(", "'Simulated data does not have the same length as input. '", "'This suggests there are null values in one or more of '", "'the input columns.'", ")", "# get the order of the coefficients in the same order as the", "# columns in the design matrix", "coeffs", "=", "[", "self", ".", "fit_parameters", "[", "'Coefficient'", "]", "[", "x", "]", "for", "x", "in", "model_design", ".", "columns", "]", "# probabilities are returned from mnl_simulate as a 2d array", "# with choosers along rows and alternatives along columns", "if", "self", ".", "probability_mode", "==", "'single_chooser'", ":", "numalts", "=", "len", "(", "merged", ")", "else", ":", "numalts", "=", "sample_size", "probabilities", "=", "mnl", ".", "mnl_simulate", "(", "model_design", ".", "as_matrix", "(", ")", ",", "coeffs", ",", "numalts", "=", "numalts", ",", "returnprobs", "=", "True", ")", "# want to turn probabilities into a Series with a MultiIndex", "# of chooser IDs and alternative IDs.", "# indexing by chooser ID will get you the probabilities", "# across alternatives for that chooser", "mi", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "[", "merged", "[", "'join_index'", "]", ".", "values", ",", "merged", ".", "index", ".", "values", "]", ",", "names", "=", "(", "'chooser_id'", ",", "'alternative_id'", ")", ")", "probabilities", "=", "pd", ".", "Series", "(", "probabilities", ".", "flatten", "(", ")", ",", "index", "=", "mi", ")", "logger", ".", "debug", "(", "'finish: calculate probabilities for LCM model {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "probabilities" ]
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.

Parameters
----------
choosers : pandas.DataFrame
    Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
    Table describing the things from which agents are choosing.
filter_tables : bool, optional
    If True, filter `choosers` and `alternatives` with prediction
    filters before calculating probabilities.

Returns
-------
probabilities : pandas.Series
    Probability of selection associated with each chooser
    and alternative. Index will be a MultiIndex with alternative
    IDs in the inner index and chooser IDs in the outer index.
[ "Returns", "the", "probabilities", "for", "a", "set", "of", "choosers", "to", "choose", "from", "among", "a", "set", "of", "alternatives", "." ]
python
train
39.885057
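A minimal sketch of how the MultiIndex Series documented above can be consumed. The toy Series below only mimics the documented return shape; it is not produced by the model itself.

import pandas as pd

# Mimic the documented return value: probabilities indexed by
# (chooser_id, alternative_id).
mi = pd.MultiIndex.from_arrays(
    [[1, 1, 2, 2], [10, 11, 10, 11]],
    names=('chooser_id', 'alternative_id'))
probs = pd.Series([0.7, 0.3, 0.4, 0.6], index=mi)

# Indexing by chooser ID yields that chooser's probabilities across
# alternatives, which sum to one.
print(probs.loc[1])
print(probs.groupby(level='chooser_id').sum())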
msuozzo/Lector
lector/reader.py
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L410-L424
def get_book_progress(self, asin): """Returns the progress data available for a book. NOTE: A summary of the two progress formats can be found in the docstring for `ReadingProgress`. Args: asin: The asin of the book to be queried. Returns: A `ReadingProgress` instance corresponding to the book associated with `asin`. """ kbp = self._get_api_call('get_book_progress', '"%s"' % asin) return KindleCloudReaderAPI._kbp_to_progress(kbp)
[ "def", "get_book_progress", "(", "self", ",", "asin", ")", ":", "kbp", "=", "self", ".", "_get_api_call", "(", "'get_book_progress'", ",", "'\"%s\"'", "%", "asin", ")", "return", "KindleCloudReaderAPI", ".", "_kbp_to_progress", "(", "kbp", ")" ]
Returns the progress data available for a book. NOTE: A summary of the two progress formats can be found in the docstring for `ReadingProgress`. Args: asin: The asin of the book to be queried. Returns: A `ReadingProgress` instance corresponding to the book associated with `asin`.
[ "Returns", "the", "progress", "data", "available", "for", "a", "book", "." ]
python
train
31.466667
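A hedged usage sketch for get_book_progress above; the constructor arguments are assumptions, since only the method itself is documented here.

from lector.reader import KindleCloudReaderAPI

# Assumed constructor signature and placeholder credentials/ASIN.
api = KindleCloudReaderAPI('user@example.com', 'password')
progress = api.get_book_progress('B00EXAMPLE0')
print(progress)  # a ReadingProgress instance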
datadotworld/data.world-py
datadotworld/__init__.py
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/__init__.py#L104-L143
def query(dataset_key, query, query_type='sql', profile='default', parameters=None, **kwargs): """Query an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param query: SQL or SPARQL query :type query: str :param query_type: The type of the query. Must be either 'sql' or 'sparql'. (Default value = 'sql') :type query_type: {'sql', 'sparql'}, optional :param parameters: parameters to the query - if SPARQL query, this should be a dict containing named parameters, if SQL query, then this should be a list containing positional parameters. Boolean values will be converted to xsd:boolean, Integer values to xsd:integer, and other Numeric values to xsd:decimal. anything else is treated as a String literal (Default value = None) :type parameters: query parameters, optional :param profile: Configuration profile (account) to use. (Default value = 'default') :type profile: str, optional :returns: Object containing the results of the query :rtype: Results :raises RuntimeError: If a server error occurs Examples -------- >>> import datadotworld as dw >>> results = dw.query( ... 'jonloyens/an-intro-to-dataworld-dataset', ... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` ' ... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name') >>> df = results.dataframe >>> df.shape (8, 6) """ return _get_instance(profile, **kwargs).query(dataset_key, query, query_type=query_type, parameters=parameters, **kwargs)
[ "def", "query", "(", "dataset_key", ",", "query", ",", "query_type", "=", "'sql'", ",", "profile", "=", "'default'", ",", "parameters", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "_get_instance", "(", "profile", ",", "*", "*", "kwargs", ")", ".", "query", "(", "dataset_key", ",", "query", ",", "query_type", "=", "query_type", ",", "parameters", "=", "parameters", ",", "*", "*", "kwargs", ")" ]
Query an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param query: SQL or SPARQL query :type query: str :param query_type: The type of the query. Must be either 'sql' or 'sparql'. (Default value = 'sql') :type query_type: {'sql', 'sparql'}, optional :param parameters: parameters to the query - if SPARQL query, this should be a dict containing named parameters, if SQL query, then this should be a list containing positional parameters. Boolean values will be converted to xsd:boolean, Integer values to xsd:integer, and other Numeric values to xsd:decimal. anything else is treated as a String literal (Default value = None) :type parameters: query parameters, optional :param profile: Configuration profile (account) to use. (Default value = 'default') :type profile: str, optional :returns: Object containing the results of the query :rtype: Results :raises RuntimeError: If a server error occurs Examples -------- >>> import datadotworld as dw >>> results = dw.query( ... 'jonloyens/an-intro-to-dataworld-dataset', ... 'SELECT * FROM `DataDotWorldBBallStats`, `DataDotWorldBBallTeam` ' ... 'WHERE DataDotWorldBBallTeam.Name = DataDotWorldBBallStats.Name') >>> df = results.dataframe >>> df.shape (8, 6)
[ "Query", "an", "existing", "dataset" ]
python
train
44.975
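The docstring above already shows a plain SQL query; this sketch exercises the documented `parameters` argument. The '?' placeholder syntax for positional SQL parameters is an assumption about the query dialect.

import datadotworld as dw

# Positional parameters are passed as a list for SQL queries.
results = dw.query(
    'jonloyens/an-intro-to-dataworld-dataset',
    'SELECT * FROM DataDotWorldBBallStats WHERE PointsPerGame > ?',
    parameters=[20])
print(results.dataframe.head())

# For SPARQL, parameters would instead be a dict of named values.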
smartfile/client-python
smartfile/sync.py
https://github.com/smartfile/client-python/blob/f9ccc40a2870df447c65b53dc0747e37cab62d63/smartfile/sync.py#L116-L121
def download(self, local, remote): """ Performs synchronization from a remote file to a local file. The remote path is the source and the local path is the destination. """ self.sync(RemoteFile(remote, self.api), LocalFile(local))
[ "def", "download", "(", "self", ",", "local", ",", "remote", ")", ":", "self", ".", "sync", "(", "RemoteFile", "(", "remote", ",", "self", ".", "api", ")", ",", "LocalFile", "(", "local", ")", ")" ]
Performs synchronization from a remote file to a local file. The remote path is the source and the local path is the destination.
[ "Performs", "synchronization", "from", "a", "remote", "file", "to", "a", "local", "file", ".", "The", "remote", "path", "is", "the", "source", "and", "the", "local", "path", "is", "the", "destination", "." ]
python
train
44.166667
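A hypothetical sketch for the download method above; the sync class name and its constructor are assumptions based on the module path.

from smartfile import BasicClient
from smartfile.sync import Syncer  # assumed name of the class defining download()

api = BasicClient('api_key', 'api_password')  # placeholder credentials
syncer = Syncer(api)
# Remote path is the source, local path the destination.
syncer.download('/tmp/report.pdf', '/remote/report.pdf')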
cokelaer/spectrum
doc/sphinxext/sphinx_gallery/gen_gallery.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/doc/sphinxext/sphinx_gallery/gen_gallery.py#L114-L123
def setup(app): """Setup sphinx-gallery sphinx extension""" app.add_config_value('plot_gallery', True, 'html') app.add_config_value('abort_on_example_error', False, 'html') app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html') app.add_stylesheet('gallery.css') app.connect('builder-inited', generate_gallery_rst) app.connect('build-finished', embed_code_links)
[ "def", "setup", "(", "app", ")", ":", "app", ".", "add_config_value", "(", "'plot_gallery'", ",", "True", ",", "'html'", ")", "app", ".", "add_config_value", "(", "'abort_on_example_error'", ",", "False", ",", "'html'", ")", "app", ".", "add_config_value", "(", "'sphinx_gallery_conf'", ",", "gallery_conf", ",", "'html'", ")", "app", ".", "add_stylesheet", "(", "'gallery.css'", ")", "app", ".", "connect", "(", "'builder-inited'", ",", "generate_gallery_rst", ")", "app", ".", "connect", "(", "'build-finished'", ",", "embed_code_links", ")" ]
Setup sphinx-gallery sphinx extension
[ "Setup", "sphinx", "-", "gallery", "sphinx", "extension" ]
python
valid
39.3
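A conf.py fragment using the three config values registered by setup() above; the gallery paths are illustrative.

# Illustrative Sphinx conf.py snippet.
extensions = ['sphinx_gallery.gen_gallery']

plot_gallery = True              # actually execute and plot the examples
abort_on_example_error = False   # keep building when one example fails
sphinx_gallery_conf = {
    # Keys depend on the bundled gallery_conf defaults; these two are
    # the usual input/output directories.
    'examples_dirs': '../examples',
    'gallery_dirs': 'auto_examples',
}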
MSchnei/pyprf_feature
pyprf_feature/analysis/model_creation_utils.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/model_creation_utils.py#L88-L144
def rmp_pixel_deg_xys(vecX, vecY, vecPrfSd, tplPngSize, varExtXmin, varExtXmax, varExtYmin, varExtYmax): """Remap x, y, sigma parameters from pixel to degree. Parameters ---------- vecX : 1D numpy array Array with possible x parametrs in pixels vecY : 1D numpy array Array with possible y parametrs in pixels vecPrfSd : 1D numpy array Array with possible sd parametrs in pixels tplPngSize : tuple, 2 Pixel dimensions of the visual space in pixel (width, height). varExtXmin : float Extent of visual space from centre in negative x-direction (width) varExtXmax : float Extent of visual space from centre in positive x-direction (width) varExtYmin : int Extent of visual space from centre in negative y-direction (height) varExtYmax : float Extent of visual space from centre in positive y-direction (height) Returns ------- vecX : 1D numpy array Array with possible x parametrs in degree vecY : 1D numpy array Array with possible y parametrs in degree vecPrfSd : 1D numpy array Array with possible sd parametrs in degree """ # Remap modelled x-positions of the pRFs: vecXdgr = rmp_rng(vecX, varExtXmin, varExtXmax, varOldThrMin=0.0, varOldAbsMax=(tplPngSize[0] - 1)) # Remap modelled y-positions of the pRFs: vecYdgr = rmp_rng(vecY, varExtYmin, varExtYmax, varOldThrMin=0.0, varOldAbsMax=(tplPngSize[1] - 1)) # We calculate the scaling factor from pixels to degrees of visual angle to # separately for the x- and the y-directions (the two should be the same). varPix2DgrX = np.divide((varExtXmax - varExtXmin), tplPngSize[0]) varPix2DgrY = np.divide((varExtYmax - varExtYmin), tplPngSize[1]) # Check whether varDgr2PixX and varDgr2PixY are similar: strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \ 'stimulus space (in pixels) do not agree' assert 0.5 > np.absolute((varPix2DgrX - varPix2DgrY)), strErrMsg # Convert prf sizes from degrees of visual angles to pixel vecPrfSdDgr = np.multiply(vecPrfSd, varPix2DgrX) # Return new values. return vecXdgr, vecYdgr, vecPrfSdDgr
[ "def", "rmp_pixel_deg_xys", "(", "vecX", ",", "vecY", ",", "vecPrfSd", ",", "tplPngSize", ",", "varExtXmin", ",", "varExtXmax", ",", "varExtYmin", ",", "varExtYmax", ")", ":", "# Remap modelled x-positions of the pRFs:", "vecXdgr", "=", "rmp_rng", "(", "vecX", ",", "varExtXmin", ",", "varExtXmax", ",", "varOldThrMin", "=", "0.0", ",", "varOldAbsMax", "=", "(", "tplPngSize", "[", "0", "]", "-", "1", ")", ")", "# Remap modelled y-positions of the pRFs:", "vecYdgr", "=", "rmp_rng", "(", "vecY", ",", "varExtYmin", ",", "varExtYmax", ",", "varOldThrMin", "=", "0.0", ",", "varOldAbsMax", "=", "(", "tplPngSize", "[", "1", "]", "-", "1", ")", ")", "# We calculate the scaling factor from pixels to degrees of visual angle to", "# separately for the x- and the y-directions (the two should be the same).", "varPix2DgrX", "=", "np", ".", "divide", "(", "(", "varExtXmax", "-", "varExtXmin", ")", ",", "tplPngSize", "[", "0", "]", ")", "varPix2DgrY", "=", "np", ".", "divide", "(", "(", "varExtYmax", "-", "varExtYmin", ")", ",", "tplPngSize", "[", "1", "]", ")", "# Check whether varDgr2PixX and varDgr2PixY are similar:", "strErrMsg", "=", "'ERROR. The ratio of X and Y dimensions in '", "+", "'stimulus space (in pixels) do not agree'", "assert", "0.5", ">", "np", ".", "absolute", "(", "(", "varPix2DgrX", "-", "varPix2DgrY", ")", ")", ",", "strErrMsg", "# Convert prf sizes from degrees of visual angles to pixel", "vecPrfSdDgr", "=", "np", ".", "multiply", "(", "vecPrfSd", ",", "varPix2DgrX", ")", "# Return new values.", "return", "vecXdgr", ",", "vecYdgr", ",", "vecPrfSdDgr" ]
Remap x, y, sigma parameters from pixel to degree.

Parameters
----------
vecX : 1D numpy array
    Array with possible x parameters in pixels
vecY : 1D numpy array
    Array with possible y parameters in pixels
vecPrfSd : 1D numpy array
    Array with possible sd parameters in pixels
tplPngSize : tuple, 2
    Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
    Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
    Extent of visual space from centre in positive x-direction (width)
varExtYmin : float
    Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
    Extent of visual space from centre in positive y-direction (height)

Returns
-------
vecX : 1D numpy array
    Array with possible x parameters in degrees
vecY : 1D numpy array
    Array with possible y parameters in degrees
vecPrfSd : 1D numpy array
    Array with possible sd parameters in degrees
[ "Remap", "x", "y", "sigma", "parameters", "from", "pixel", "to", "degree", "." ]
python
train
39.017544
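A toy call of the remapping function above; the import path is inferred from the record's module path and the numbers are illustrative (a 128x128 stimulus spanning +/-8 degrees).

import numpy as np
from pyprf_feature.analysis.model_creation_utils import rmp_pixel_deg_xys

vecX = np.array([0.0, 63.0, 127.0])    # pixel x positions
vecY = np.array([0.0, 63.0, 127.0])    # pixel y positions
vecSd = np.array([5.0, 10.0, 20.0])    # pixel pRF sizes

xDgr, yDgr, sdDgr = rmp_pixel_deg_xys(
    vecX, vecY, vecSd, tplPngSize=(128, 128),
    varExtXmin=-8.0, varExtXmax=8.0,
    varExtYmin=-8.0, varExtYmax=8.0)
print(xDgr, sdDgr)  # coordinates and sizes now in degrees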
twilio/twilio-python
twilio/rest/api/v2010/account/message/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/message/__init__.py#L162-L199
def page(self, to=values.unset, from_=values.unset, date_sent_before=values.unset, date_sent=values.unset, date_sent_after=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of MessageInstance records from the API. Request is executed immediately :param unicode to: Filter by messages sent to this number :param unicode from_: Filter by from number :param datetime date_sent_before: Filter by date sent :param datetime date_sent: Filter by date sent :param datetime date_sent_after: Filter by date sent :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of MessageInstance :rtype: twilio.rest.api.v2010.account.message.MessagePage """ params = values.of({ 'To': to, 'From': from_, 'DateSent<': serialize.iso8601_datetime(date_sent_before), 'DateSent': serialize.iso8601_datetime(date_sent), 'DateSent>': serialize.iso8601_datetime(date_sent_after), 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return MessagePage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "to", "=", "values", ".", "unset", ",", "from_", "=", "values", ".", "unset", ",", "date_sent_before", "=", "values", ".", "unset", ",", "date_sent", "=", "values", ".", "unset", ",", "date_sent_after", "=", "values", ".", "unset", ",", "page_token", "=", "values", ".", "unset", ",", "page_number", "=", "values", ".", "unset", ",", "page_size", "=", "values", ".", "unset", ")", ":", "params", "=", "values", ".", "of", "(", "{", "'To'", ":", "to", ",", "'From'", ":", "from_", ",", "'DateSent<'", ":", "serialize", ".", "iso8601_datetime", "(", "date_sent_before", ")", ",", "'DateSent'", ":", "serialize", ".", "iso8601_datetime", "(", "date_sent", ")", ",", "'DateSent>'", ":", "serialize", ".", "iso8601_datetime", "(", "date_sent_after", ")", ",", "'PageToken'", ":", "page_token", ",", "'Page'", ":", "page_number", ",", "'PageSize'", ":", "page_size", ",", "}", ")", "response", "=", "self", ".", "_version", ".", "page", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "MessagePage", "(", "self", ".", "_version", ",", "response", ",", "self", ".", "_solution", ")" ]
Retrieve a single page of MessageInstance records from the API. Request is executed immediately :param unicode to: Filter by messages sent to this number :param unicode from_: Filter by from number :param datetime date_sent_before: Filter by date sent :param datetime date_sent: Filter by date sent :param datetime date_sent_after: Filter by date sent :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of MessageInstance :rtype: twilio.rest.api.v2010.account.message.MessagePage
[ "Retrieve", "a", "single", "page", "of", "MessageInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
python
train
41.052632
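A sketch of fetching one page of messages with the filters documented above; the credentials are placeholders.

from datetime import datetime
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'auth_token')
page = client.messages.page(
    date_sent_after=datetime(2018, 1, 1),
    page_size=50)
for message in page:  # a Page is iterable over MessageInstance records
    print(message.sid)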
edaniszewski/colorutils
colorutils/colorutils.py
https://github.com/edaniszewski/colorutils/blob/bdff54091cb5d62aa8628ce39bc09abd40fb8dd0/colorutils/colorutils.py#L230-L248
def offset_random_rgb(seed, amount=1): """ Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized offset from the seed. :param seed: :param amount: :return: """ r, g, b = seed results = [] for _ in range(amount): base_val = ((r + g + b) / 3) + 1 # Add one to eliminate case where the base value would otherwise be 0 new_val = base_val + (random.random() * rgb_max_val / 5) # Randomly offset with an arbitrary multiplier ratio = new_val / base_val results.append((min(int(r*ratio), rgb_max_val), min(int(g*ratio), rgb_max_val), min(int(b*ratio), rgb_max_val))) return results[0] if len(results) > 1 else results
[ "def", "offset_random_rgb", "(", "seed", ",", "amount", "=", "1", ")", ":", "r", ",", "g", ",", "b", "=", "seed", "results", "=", "[", "]", "for", "_", "in", "range", "(", "amount", ")", ":", "base_val", "=", "(", "(", "r", "+", "g", "+", "b", ")", "/", "3", ")", "+", "1", "# Add one to eliminate case where the base value would otherwise be 0", "new_val", "=", "base_val", "+", "(", "random", ".", "random", "(", ")", "*", "rgb_max_val", "/", "5", ")", "# Randomly offset with an arbitrary multiplier", "ratio", "=", "new_val", "/", "base_val", "results", ".", "append", "(", "(", "min", "(", "int", "(", "r", "*", "ratio", ")", ",", "rgb_max_val", ")", ",", "min", "(", "int", "(", "g", "*", "ratio", ")", ",", "rgb_max_val", ")", ",", "min", "(", "int", "(", "b", "*", "ratio", ")", ",", "rgb_max_val", ")", ")", ")", "return", "results", "[", "0", "]", "if", "len", "(", "results", ")", ">", "1", "else", "results" ]
Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized offset from the seed. :param seed: :param amount: :return:
[ "Given", "a", "seed", "color", "generate", "a", "specified", "number", "of", "random", "colors", "(", "1", "color", "by", "default", ")", "determined", "by", "a", "randomized", "offset", "from", "the", "seed", "." ]
python
valid
38.473684
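Example call for offset_random_rgb above; the top-level import is an assumption about what the package re-exports. Note that, as written, the function returns a single (r, g, b) tuple when amount > 1 and a one-element list when amount == 1.

from colorutils import offset_random_rgb  # assumed re-export

near_red = offset_random_rgb((200, 30, 30), amount=3)
print(near_red)  # one randomly offset (r, g, b) tuple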
b3j0f/task
b3j0f/task/condition.py
https://github.com/b3j0f/task/blob/3e3e48633b1c9a52911c19df3a44fba4b744f60e/b3j0f/task/condition.py#L168-L197
def condition(condition=None, statement=None, _else=None, **kwargs):
    """
    Run a statement if input condition is checked and return statement
        result.

    :param condition: condition to check.
    :type condition: str or dict

    :param statement: statement to process if condition is checked.
    :type statement: str or dict

    :param _else: else statement.
    :type _else: str or dict

    :param kwargs: condition and statement additional parameters.

    :return: statement result.
    """

    result = None

    checked = False

    if condition is not None:
        checked = run(condition, **kwargs)

    if checked:  # if condition is checked
        if statement is not None:  # process statement
            result = run(statement, **kwargs)

    elif _else is not None:  # else process _else statement
        result = run(_else, **kwargs)

    return result
[ "def", "condition", "(", "condition", "=", "None", ",", "statement", "=", "None", ",", "_else", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "None", "checked", "=", "False", "if", "condition", "is", "not", "None", ":", "checked", "=", "run", "(", "condition", ",", "*", "*", "kwargs", ")", "if", "checked", ":", "# if condition is checked", "if", "statement", "is", "not", "None", ":", "# process statement", "result", "=", "run", "(", "statement", ",", "*", "*", "kwargs", ")", "elif", "_else", "is", "not", "None", ":", "# else process _else statement", "result", "=", "run", "(", "_else", ",", "*", "*", "kwargs", ")", "return", "result" ]
Run a statement if input condition is checked and return statement
    result.

:param condition: condition to check.
:type condition: str or dict

:param statement: statement to process if condition is checked.
:type statement: str or dict

:param _else: else statement.
:type _else: str or dict

:param kwargs: condition and statement additional parameters.

:return: statement result.
[ "Run", "an", "statement", "if", "input", "condition", "is", "checked", "and", "return", "statement", "result", "." ]
python
train
28.333333
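A hypothetical sketch for condition() above. Per the docstring, the condition and statements are str/dict specifications dispatched through run(); the dotted paths below are made up for illustration.

from b3j0f.task.condition import condition

result = condition(
    condition='mymodule.is_ready',   # assumed run()-resolvable path
    statement='mymodule.do_work',
    _else='mymodule.wait',
    retries=3)                       # extra kwargs reach run()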
deepmind/sonnet
sonnet/python/modules/rnn_core.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/rnn_core.py#L331-L349
def _build(self): """Connects the module to the graph. Returns: The learnable state, which has the same type, structure and shape as the `initial_state` passed to the constructor. """ flat_initial_state = nest.flatten(self._initial_state) if self._mask is not None: flat_mask = nest.flatten(self._mask) flat_learnable_state = [ _single_learnable_state(state, state_id=i, learnable=mask) for i, (state, mask) in enumerate(zip(flat_initial_state, flat_mask))] else: flat_learnable_state = [_single_learnable_state(state, state_id=i) for i, state in enumerate(flat_initial_state)] return nest.pack_sequence_as(structure=self._initial_state, flat_sequence=flat_learnable_state)
[ "def", "_build", "(", "self", ")", ":", "flat_initial_state", "=", "nest", ".", "flatten", "(", "self", ".", "_initial_state", ")", "if", "self", ".", "_mask", "is", "not", "None", ":", "flat_mask", "=", "nest", ".", "flatten", "(", "self", ".", "_mask", ")", "flat_learnable_state", "=", "[", "_single_learnable_state", "(", "state", ",", "state_id", "=", "i", ",", "learnable", "=", "mask", ")", "for", "i", ",", "(", "state", ",", "mask", ")", "in", "enumerate", "(", "zip", "(", "flat_initial_state", ",", "flat_mask", ")", ")", "]", "else", ":", "flat_learnable_state", "=", "[", "_single_learnable_state", "(", "state", ",", "state_id", "=", "i", ")", "for", "i", ",", "state", "in", "enumerate", "(", "flat_initial_state", ")", "]", "return", "nest", ".", "pack_sequence_as", "(", "structure", "=", "self", ".", "_initial_state", ",", "flat_sequence", "=", "flat_learnable_state", ")" ]
Connects the module to the graph. Returns: The learnable state, which has the same type, structure and shape as the `initial_state` passed to the constructor.
[ "Connects", "the", "module", "to", "the", "graph", "." ]
python
train
42.105263
codelv/enaml-native
src/enamlnative/android/android_fragment.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_fragment.py#L73-L81
def init_widget(self): """ Initialize the underlying widget. """ super(AndroidFragment, self).init_widget() f = self.fragment f.setFragmentListener(f.getId()) f.onCreateView.connect(self.on_create_view) f.onDestroyView.connect(self.on_destroy_view)
[ "def", "init_widget", "(", "self", ")", ":", "super", "(", "AndroidFragment", ",", "self", ")", ".", "init_widget", "(", ")", "f", "=", "self", ".", "fragment", "f", ".", "setFragmentListener", "(", "f", ".", "getId", "(", ")", ")", "f", ".", "onCreateView", ".", "connect", "(", "self", ".", "on_create_view", ")", "f", ".", "onDestroyView", ".", "connect", "(", "self", ".", "on_destroy_view", ")" ]
Initialize the underlying widget.
[ "Initialize", "the", "underlying", "widget", "." ]
python
train
33
bitly/asyncmongo
asyncmongo/cursor.py
https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/cursor.py#L157-L253
def update(self, spec, document, upsert=False, manipulate=False, safe=True, multi=False, callback=None, **kwargs): """Update a document(s) in this collection. Raises :class:`TypeError` if either `spec` or `document` is not an instance of ``dict`` or `upsert` is not an instance of ``bool``. If `safe` is ``True`` then the update will be checked for errors, raising :class:`~pymongo.errors.OperationFailure` if one occurred. Safe updates require a response from the database, while normal updates do not - thus, setting `safe` to ``True`` will negatively impact performance. There are many useful `update modifiers`_ which can be used when performing updates. For example, here we use the ``"$set"`` modifier to modify some fields in a matching document: .. doctest:: >>> db.test.insert({"x": "y", "a": "b"}) ObjectId('...') >>> list(db.test.find()) [{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}] >>> db.test.update({"x": "y"}, {"$set": {"a": "c"}}) >>> list(db.test.find()) [{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}] If `safe` is ``True`` returns the response to the *lastError* command. Otherwise, returns ``None``. # Any additional keyword arguments imply ``safe=True``, and will # be used as options for the resultant `getLastError` # command. For example, to wait for replication to 3 nodes, pass # ``w=3``. :Parameters: - `spec`: a ``dict`` or :class:`~bson.son.SON` instance specifying elements which must be present for a document to be updated - `document`: a ``dict`` or :class:`~bson.son.SON` instance specifying the document to be used for the update or (in the case of an upsert) insert - see docs on MongoDB `update modifiers`_ - `upsert` (optional): perform an upsert if ``True`` - `manipulate` (optional): manipulate the document before updating? If ``True`` all instances of :mod:`~pymongo.son_manipulator.SONManipulator` added to this :class:`~pymongo.database.Database` will be applied to the document before performing the update. - `safe` (optional): check that the update succeeded? - `multi` (optional): update all documents that match `spec`, rather than just the first matching document. The default value for `multi` is currently ``False``, but this might eventually change to ``True``. It is recommended that you specify this argument explicitly for all update operations in order to prepare your code for that change. - `**kwargs` (optional): any additional arguments imply ``safe=True``, and will be used as options for the `getLastError` command .. _update modifiers: http://www.mongodb.org/display/DOCS/Updating .. 
mongodoc:: update """ if not isinstance(spec, dict): raise TypeError("spec must be an instance of dict") if not isinstance(document, dict): raise TypeError("document must be an instance of dict") if not isinstance(upsert, bool): raise TypeError("upsert must be an instance of bool") if not isinstance(safe, bool): raise TypeError("safe must be an instance of bool") # TODO: apply SON manipulators # if upsert and manipulate: # document = self.__database._fix_incoming(document, self) if kwargs: safe = True if safe and not callable(callback): raise TypeError("callback must be callable") if not safe and callback is not None: raise TypeError("callback can not be used with safe=False") if callback: callback = functools.partial(self._handle_response, orig_callback=callback) self.__limit = None connection = self.__pool.connection() try: connection.send_message( message.update(self.full_collection_name, upsert, multi, spec, document, safe, kwargs), callback=callback) except: connection.close() raise
[ "def", "update", "(", "self", ",", "spec", ",", "document", ",", "upsert", "=", "False", ",", "manipulate", "=", "False", ",", "safe", "=", "True", ",", "multi", "=", "False", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "spec", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"spec must be an instance of dict\"", ")", "if", "not", "isinstance", "(", "document", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"document must be an instance of dict\"", ")", "if", "not", "isinstance", "(", "upsert", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"upsert must be an instance of bool\"", ")", "if", "not", "isinstance", "(", "safe", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"safe must be an instance of bool\"", ")", "# TODO: apply SON manipulators", "# if upsert and manipulate:", "# document = self.__database._fix_incoming(document, self)", "if", "kwargs", ":", "safe", "=", "True", "if", "safe", "and", "not", "callable", "(", "callback", ")", ":", "raise", "TypeError", "(", "\"callback must be callable\"", ")", "if", "not", "safe", "and", "callback", "is", "not", "None", ":", "raise", "TypeError", "(", "\"callback can not be used with safe=False\"", ")", "if", "callback", ":", "callback", "=", "functools", ".", "partial", "(", "self", ".", "_handle_response", ",", "orig_callback", "=", "callback", ")", "self", ".", "__limit", "=", "None", "connection", "=", "self", ".", "__pool", ".", "connection", "(", ")", "try", ":", "connection", ".", "send_message", "(", "message", ".", "update", "(", "self", ".", "full_collection_name", ",", "upsert", ",", "multi", ",", "spec", ",", "document", ",", "safe", ",", "kwargs", ")", ",", "callback", "=", "callback", ")", "except", ":", "connection", ".", "close", "(", ")", "raise" ]
Update a document(s) in this collection. Raises :class:`TypeError` if either `spec` or `document` is not an instance of ``dict`` or `upsert` is not an instance of ``bool``. If `safe` is ``True`` then the update will be checked for errors, raising :class:`~pymongo.errors.OperationFailure` if one occurred. Safe updates require a response from the database, while normal updates do not - thus, setting `safe` to ``True`` will negatively impact performance. There are many useful `update modifiers`_ which can be used when performing updates. For example, here we use the ``"$set"`` modifier to modify some fields in a matching document: .. doctest:: >>> db.test.insert({"x": "y", "a": "b"}) ObjectId('...') >>> list(db.test.find()) [{u'a': u'b', u'x': u'y', u'_id': ObjectId('...')}] >>> db.test.update({"x": "y"}, {"$set": {"a": "c"}}) >>> list(db.test.find()) [{u'a': u'c', u'x': u'y', u'_id': ObjectId('...')}] If `safe` is ``True`` returns the response to the *lastError* command. Otherwise, returns ``None``. # Any additional keyword arguments imply ``safe=True``, and will # be used as options for the resultant `getLastError` # command. For example, to wait for replication to 3 nodes, pass # ``w=3``. :Parameters: - `spec`: a ``dict`` or :class:`~bson.son.SON` instance specifying elements which must be present for a document to be updated - `document`: a ``dict`` or :class:`~bson.son.SON` instance specifying the document to be used for the update or (in the case of an upsert) insert - see docs on MongoDB `update modifiers`_ - `upsert` (optional): perform an upsert if ``True`` - `manipulate` (optional): manipulate the document before updating? If ``True`` all instances of :mod:`~pymongo.son_manipulator.SONManipulator` added to this :class:`~pymongo.database.Database` will be applied to the document before performing the update. - `safe` (optional): check that the update succeeded? - `multi` (optional): update all documents that match `spec`, rather than just the first matching document. The default value for `multi` is currently ``False``, but this might eventually change to ``True``. It is recommended that you specify this argument explicitly for all update operations in order to prepare your code for that change. - `**kwargs` (optional): any additional arguments imply ``safe=True``, and will be used as options for the `getLastError` command .. _update modifiers: http://www.mongodb.org/display/DOCS/Updating .. mongodoc:: update
[ "Update", "a", "document", "(", "s", ")", "in", "this", "collection", ".", "Raises", ":", "class", ":", "TypeError", "if", "either", "spec", "or", "document", "is", "not", "an", "instance", "of", "dict", "or", "upsert", "is", "not", "an", "instance", "of", "bool", ".", "If", "safe", "is", "True", "then", "the", "update", "will", "be", "checked", "for", "errors", "raising", ":", "class", ":", "~pymongo", ".", "errors", ".", "OperationFailure", "if", "one", "occurred", ".", "Safe", "updates", "require", "a", "response", "from", "the", "database", "while", "normal", "updates", "do", "not", "-", "thus", "setting", "safe", "to", "True", "will", "negatively", "impact", "performance", ".", "There", "are", "many", "useful", "update", "modifiers", "_", "which", "can", "be", "used", "when", "performing", "updates", ".", "For", "example", "here", "we", "use", "the", "$set", "modifier", "to", "modify", "some", "fields", "in", "a", "matching", "document", ":", "..", "doctest", "::", ">>>", "db", ".", "test", ".", "insert", "(", "{", "x", ":", "y", "a", ":", "b", "}", ")", "ObjectId", "(", "...", ")", ">>>", "list", "(", "db", ".", "test", ".", "find", "()", ")", "[", "{", "u", "a", ":", "u", "b", "u", "x", ":", "u", "y", "u", "_id", ":", "ObjectId", "(", "...", ")", "}", "]", ">>>", "db", ".", "test", ".", "update", "(", "{", "x", ":", "y", "}", "{", "$set", ":", "{", "a", ":", "c", "}}", ")", ">>>", "list", "(", "db", ".", "test", ".", "find", "()", ")", "[", "{", "u", "a", ":", "u", "c", "u", "x", ":", "u", "y", "u", "_id", ":", "ObjectId", "(", "...", ")", "}", "]", "If", "safe", "is", "True", "returns", "the", "response", "to", "the", "*", "lastError", "*", "command", ".", "Otherwise", "returns", "None", ".", "#", "Any", "additional", "keyword", "arguments", "imply", "safe", "=", "True", "and", "will", "#", "be", "used", "as", "options", "for", "the", "resultant", "getLastError", "#", "command", ".", "For", "example", "to", "wait", "for", "replication", "to", "3", "nodes", "pass", "#", "w", "=", "3", ".", ":", "Parameters", ":", "-", "spec", ":", "a", "dict", "or", ":", "class", ":", "~bson", ".", "son", ".", "SON", "instance", "specifying", "elements", "which", "must", "be", "present", "for", "a", "document", "to", "be", "updated", "-", "document", ":", "a", "dict", "or", ":", "class", ":", "~bson", ".", "son", ".", "SON", "instance", "specifying", "the", "document", "to", "be", "used", "for", "the", "update", "or", "(", "in", "the", "case", "of", "an", "upsert", ")", "insert", "-", "see", "docs", "on", "MongoDB", "update", "modifiers", "_", "-", "upsert", "(", "optional", ")", ":", "perform", "an", "upsert", "if", "True", "-", "manipulate", "(", "optional", ")", ":", "manipulate", "the", "document", "before", "updating?", "If", "True", "all", "instances", "of", ":", "mod", ":", "~pymongo", ".", "son_manipulator", ".", "SONManipulator", "added", "to", "this", ":", "class", ":", "~pymongo", ".", "database", ".", "Database", "will", "be", "applied", "to", "the", "document", "before", "performing", "the", "update", ".", "-", "safe", "(", "optional", ")", ":", "check", "that", "the", "update", "succeeded?", "-", "multi", "(", "optional", ")", ":", "update", "all", "documents", "that", "match", "spec", "rather", "than", "just", "the", "first", "matching", "document", ".", "The", "default", "value", "for", "multi", "is", "currently", "False", "but", "this", "might", "eventually", "change", "to", "True", ".", "It", "is", "recommended", "that", "you", "specify", "this", "argument", "explicitly", "for", 
"all", "update", "operations", "in", "order", "to", "prepare", "your", "code", "for", "that", "change", ".", "-", "**", "kwargs", "(", "optional", ")", ":", "any", "additional", "arguments", "imply", "safe", "=", "True", "and", "will", "be", "used", "as", "options", "for", "the", "getLastError", "command", "..", "_update", "modifiers", ":", "http", ":", "//", "www", ".", "mongodb", ".", "org", "/", "display", "/", "DOCS", "/", "Updating", "..", "mongodoc", "::", "update" ]
python
train
45.43299
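A sketch of a safe update with a callback, per the docstring above; the Client arguments follow asyncmongo's connection-pool interface but the values are placeholders.

import asyncmongo

def on_update(response, error):
    if error:
        raise error
    print(response)  # the getLastError response document

db = asyncmongo.Client(pool_id='mydb', host='127.0.0.1', port=27017,
                       dbname='test')
db.test.update({'x': 'y'}, {'$set': {'a': 'c'}}, safe=True,
               callback=on_update)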
genialis/resolwe
resolwe/elastic/builder.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/builder.py#L397-L416
def _connect_signal(self, index): """Create signals for building indexes.""" post_save_signal = ElasticSignal(index, 'build') post_save_signal.connect(post_save, sender=index.object_type) self.signals.append(post_save_signal) post_delete_signal = ElasticSignal(index, 'remove_object') post_delete_signal.connect(post_delete, sender=index.object_type) self.signals.append(post_delete_signal) # Connect signals for all dependencies. for dependency in index.get_dependencies(): # Automatically convert m2m fields to dependencies. if isinstance(dependency, (models.ManyToManyField, ManyToManyDescriptor)): dependency = ManyToManyDependency(dependency) elif not isinstance(dependency, Dependency): raise TypeError("Unsupported dependency type: {}".format(repr(dependency))) signal = dependency.connect(index) self.signals.extend(signal)
[ "def", "_connect_signal", "(", "self", ",", "index", ")", ":", "post_save_signal", "=", "ElasticSignal", "(", "index", ",", "'build'", ")", "post_save_signal", ".", "connect", "(", "post_save", ",", "sender", "=", "index", ".", "object_type", ")", "self", ".", "signals", ".", "append", "(", "post_save_signal", ")", "post_delete_signal", "=", "ElasticSignal", "(", "index", ",", "'remove_object'", ")", "post_delete_signal", ".", "connect", "(", "post_delete", ",", "sender", "=", "index", ".", "object_type", ")", "self", ".", "signals", ".", "append", "(", "post_delete_signal", ")", "# Connect signals for all dependencies.", "for", "dependency", "in", "index", ".", "get_dependencies", "(", ")", ":", "# Automatically convert m2m fields to dependencies.", "if", "isinstance", "(", "dependency", ",", "(", "models", ".", "ManyToManyField", ",", "ManyToManyDescriptor", ")", ")", ":", "dependency", "=", "ManyToManyDependency", "(", "dependency", ")", "elif", "not", "isinstance", "(", "dependency", ",", "Dependency", ")", ":", "raise", "TypeError", "(", "\"Unsupported dependency type: {}\"", ".", "format", "(", "repr", "(", "dependency", ")", ")", ")", "signal", "=", "dependency", ".", "connect", "(", "index", ")", "self", ".", "signals", ".", "extend", "(", "signal", ")" ]
Create signals for building indexes.
[ "Create", "signals", "for", "building", "indexes", "." ]
python
train
48.95
dgomes/pymediaroom
pymediaroom/notify.py
https://github.com/dgomes/pymediaroom/blob/f4f2686c8d5622dd5ae1bcdd76900ba35e148529/pymediaroom/notify.py#L60-L69
def tune(self): """XML node representing tune.""" if self._node.get('activities'): tune = self._node['activities'].get('tune') if type(tune) is collections.OrderedDict: return tune elif type(tune) is list: return tune[0] return tune return None
[ "def", "tune", "(", "self", ")", ":", "if", "self", ".", "_node", ".", "get", "(", "'activities'", ")", ":", "tune", "=", "self", ".", "_node", "[", "'activities'", "]", ".", "get", "(", "'tune'", ")", "if", "type", "(", "tune", ")", "is", "collections", ".", "OrderedDict", ":", "return", "tune", "elif", "type", "(", "tune", ")", "is", "list", ":", "return", "tune", "[", "0", "]", "return", "tune", "return", "None" ]
XML node representing tune.
[ "XML", "node", "representing", "tune", "." ]
python
train
33.9
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2282-L2289
def moveEvent(self, event): """Reimplement Qt method""" if not self.isMaximized() and not self.fullscreen_flag: self.window_position = self.pos() QMainWindow.moveEvent(self, event) # To be used by the tour to be able to move self.sig_moved.emit(event)
[ "def", "moveEvent", "(", "self", ",", "event", ")", ":", "if", "not", "self", ".", "isMaximized", "(", ")", "and", "not", "self", ".", "fullscreen_flag", ":", "self", ".", "window_position", "=", "self", ".", "pos", "(", ")", "QMainWindow", ".", "moveEvent", "(", "self", ",", "event", ")", "# To be used by the tour to be able to move\r", "self", ".", "sig_moved", ".", "emit", "(", "event", ")" ]
Reimplement Qt method
[ "Reimplement", "Qt", "method" ]
python
train
38
pyblish/pyblish-qml
pyblish_qml/models.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/models.py#L798-L807
def add_inclusion(self, role, value):
    """Include item if `role` equals `value`

    Attributes:
        role (int): Qt role to compare `value` to
        value (object): Value to include

    """

    self._add_rule(self.includes, role, value)
[ "def", "add_inclusion", "(", "self", ",", "role", ",", "value", ")", ":", "self", ".", "_add_rule", "(", "self", ".", "includes", ",", "role", ",", "value", ")" ]
Include item if `role` equals `value`

Attributes:
    role (int): Qt role to compare `value` to
    value (object): Value to include
[ "Include", "item", "if", "role", "equals", "value" ]
python
train
26.2
totalgood/nlpia
src/nlpia/utils.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/utils.py#L14-L23
def stdout_logging(loglevel=logging.INFO): """Setup basic logging Args: loglevel (int): minimum loglevel for emitting messages """ logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(lineno)d: %(message)s" logging.config.dictConfig(level=loglevel, stream=sys.stdout, format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
[ "def", "stdout_logging", "(", "loglevel", "=", "logging", ".", "INFO", ")", ":", "logformat", "=", "\"[%(asctime)s] %(levelname)s:%(name)s:%(lineno)d: %(message)s\"", "logging", ".", "config", ".", "dictConfig", "(", "level", "=", "loglevel", ",", "stream", "=", "sys", ".", "stdout", ",", "format", "=", "logformat", ",", "datefmt", "=", "\"%Y-%m-%d %H:%M:%S\"", ")" ]
Setup basic logging Args: loglevel (int): minimum loglevel for emitting messages
[ "Setup", "basic", "logging" ]
python
train
36.2
materialsproject/pymatgen
pymatgen/core/structure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L1402-L1602
def get_primitive_structure(self, tolerance=0.25, use_site_props=False, constrain_latt=None): """ This finds a smaller unit cell than the input. Sometimes it doesn"t find the smallest possible one, so this method is recursively called until it is unable to find a smaller cell. NOTE: if the tolerance is greater than 1/2 the minimum inter-site distance in the primitive cell, the algorithm will reject this lattice. Args: tolerance (float), Angstroms: Tolerance for each coordinate of a particular site. For example, [0.1, 0, 0.1] in cartesian coordinates will be considered to be on the same coordinates as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25. use_site_props (bool): Whether to account for site properties in differntiating sites. constrain_latt (list/dict): List of lattice parameters we want to preserve, e.g. ["alpha", "c"] or dict with the lattice parameter names as keys and values we want the parameters to be e.g. {"alpha": 90, "c": 2.5}. Returns: The most primitive structure found. """ if constrain_latt is None: constrain_latt = [] def site_label(site): if not use_site_props: return site.species_string else: d = [site.species_string] for k in sorted(site.properties.keys()): d.append(k + "=" + str(site.properties[k])) return ", ".join(d) # group sites by species string sites = sorted(self._sites, key=site_label) grouped_sites = [ list(a[1]) for a in itertools.groupby(sites, key=site_label)] grouped_fcoords = [np.array([s.frac_coords for s in g]) for g in grouped_sites] # min_vecs are approximate periodicities of the cell. The exact # periodicities from the supercell matrices are checked against these # first min_fcoords = min(grouped_fcoords, key=lambda x: len(x)) min_vecs = min_fcoords - min_fcoords[0] # fractional tolerance in the supercell super_ftol = np.divide(tolerance, self.lattice.abc) super_ftol_2 = super_ftol * 2 def pbc_coord_intersection(fc1, fc2, tol): """ Returns the fractional coords in fc1 that have coordinates within tolerance to some coordinate in fc2 """ d = fc1[:, None, :] - fc2[None, :, :] d -= np.round(d) np.abs(d, d) return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)] # here we reduce the number of min_vecs by enforcing that every # vector in min_vecs approximately maps each site onto a similar site. # The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no # reduction. # This reduction is O(n^3) so usually is an improvement. Using double # the tolerance because both vectors are approximate for g in sorted(grouped_fcoords, key=lambda x: len(x)): for f in g: min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2) def get_hnf(fu): """ Returns all possible distinct supercell matrices given a number of formula units in the supercell. Batches the matrices by the values in the diagonal (for less numpy overhead). Computational complexity is O(n^3), and difficult to improve. Might be able to do something smart with checking combinations of a and b first, though unlikely to reduce to O(n^2). 
""" def factors(n): for i in range(1, n + 1): if n % i == 0: yield i for det in factors(fu): if det == 1: continue for a in factors(det): for e in factors(det // a): g = det // a // e yield det, np.array( [[[a, b, c], [0, e, f], [0, 0, g]] for b, c, f in itertools.product(range(a), range(a), range(e))]) # we cant let sites match to their neighbors in the supercell grouped_non_nbrs = [] for gfcoords in grouped_fcoords: fdist = gfcoords[None, :, :] - gfcoords[:, None, :] fdist -= np.round(fdist) np.abs(fdist, fdist) non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1) # since we want sites to match to themselves np.fill_diagonal(non_nbrs, True) grouped_non_nbrs.append(non_nbrs) num_fu = functools.reduce(gcd, map(len, grouped_sites)) for size, ms in get_hnf(num_fu): inv_ms = np.linalg.inv(ms) # find sets of lattice vectors that are are present in min_vecs dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :] dist -= np.round(dist) np.abs(dist, dist) is_close = np.all(dist < super_ftol, axis=-1) any_close = np.any(is_close, axis=-1) inds = np.all(any_close, axis=-1) for inv_m, m in zip(inv_ms[inds], ms[inds]): new_m = np.dot(inv_m, self.lattice.matrix) ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1))) valid = True new_coords = [] new_sp = [] new_props = collections.defaultdict(list) for gsites, gfcoords, non_nbrs in zip(grouped_sites, grouped_fcoords, grouped_non_nbrs): all_frac = np.dot(gfcoords, m) # calculate grouping of equivalent sites, represented by # adjacency matrix fdist = all_frac[None, :, :] - all_frac[:, None, :] fdist = np.abs(fdist - np.round(fdist)) close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1) groups = np.logical_and(close_in_prim, non_nbrs) # check that groups are correct if not np.all(np.sum(groups, axis=0) == size): valid = False break # check that groups are all cliques for g in groups: if not np.all(groups[g][:, g]): valid = False break if not valid: break # add the new sites, averaging positions added = np.zeros(len(gsites)) new_fcoords = all_frac % 1 for i, group in enumerate(groups): if not added[i]: added[group] = True inds = np.where(group)[0] coords = new_fcoords[inds[0]] for n, j in enumerate(inds[1:]): offset = new_fcoords[j] - coords coords += (offset - np.round(offset)) / (n + 2) new_sp.append(gsites[inds[0]].species) for k in gsites[inds[0]].properties: new_props[k].append(gsites[inds[0]].properties[k]) new_coords.append(coords) if valid: inv_m = np.linalg.inv(m) new_l = Lattice(np.dot(inv_m, self.lattice.matrix)) s = Structure(new_l, new_sp, new_coords, site_properties=new_props, coords_are_cartesian=False) # Default behavior p = s.get_primitive_structure( tolerance=tolerance, use_site_props=use_site_props, constrain_latt=constrain_latt ).get_reduced_structure() if not constrain_latt: return p # Only return primitive structures that # satisfy the restriction condition p_latt, s_latt = p.lattice, self.lattice if type(constrain_latt).__name__ == "list": if all([getattr(p_latt, p) == getattr(s_latt, p) for p in constrain_latt]): return p elif type(constrain_latt).__name__ == "dict": if all([getattr(p_latt, p) == constrain_latt[p] for p in constrain_latt.keys()]): return p return self.copy()
[ "def", "get_primitive_structure", "(", "self", ",", "tolerance", "=", "0.25", ",", "use_site_props", "=", "False", ",", "constrain_latt", "=", "None", ")", ":", "if", "constrain_latt", "is", "None", ":", "constrain_latt", "=", "[", "]", "def", "site_label", "(", "site", ")", ":", "if", "not", "use_site_props", ":", "return", "site", ".", "species_string", "else", ":", "d", "=", "[", "site", ".", "species_string", "]", "for", "k", "in", "sorted", "(", "site", ".", "properties", ".", "keys", "(", ")", ")", ":", "d", ".", "append", "(", "k", "+", "\"=\"", "+", "str", "(", "site", ".", "properties", "[", "k", "]", ")", ")", "return", "\", \"", ".", "join", "(", "d", ")", "# group sites by species string", "sites", "=", "sorted", "(", "self", ".", "_sites", ",", "key", "=", "site_label", ")", "grouped_sites", "=", "[", "list", "(", "a", "[", "1", "]", ")", "for", "a", "in", "itertools", ".", "groupby", "(", "sites", ",", "key", "=", "site_label", ")", "]", "grouped_fcoords", "=", "[", "np", ".", "array", "(", "[", "s", ".", "frac_coords", "for", "s", "in", "g", "]", ")", "for", "g", "in", "grouped_sites", "]", "# min_vecs are approximate periodicities of the cell. The exact", "# periodicities from the supercell matrices are checked against these", "# first", "min_fcoords", "=", "min", "(", "grouped_fcoords", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", ")", ")", "min_vecs", "=", "min_fcoords", "-", "min_fcoords", "[", "0", "]", "# fractional tolerance in the supercell", "super_ftol", "=", "np", ".", "divide", "(", "tolerance", ",", "self", ".", "lattice", ".", "abc", ")", "super_ftol_2", "=", "super_ftol", "*", "2", "def", "pbc_coord_intersection", "(", "fc1", ",", "fc2", ",", "tol", ")", ":", "\"\"\"\n Returns the fractional coords in fc1 that have coordinates\n within tolerance to some coordinate in fc2\n \"\"\"", "d", "=", "fc1", "[", ":", ",", "None", ",", ":", "]", "-", "fc2", "[", "None", ",", ":", ",", ":", "]", "d", "-=", "np", ".", "round", "(", "d", ")", "np", ".", "abs", "(", "d", ",", "d", ")", "return", "fc1", "[", "np", ".", "any", "(", "np", ".", "all", "(", "d", "<", "tol", ",", "axis", "=", "-", "1", ")", ",", "axis", "=", "-", "1", ")", "]", "# here we reduce the number of min_vecs by enforcing that every", "# vector in min_vecs approximately maps each site onto a similar site.", "# The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no", "# reduction.", "# This reduction is O(n^3) so usually is an improvement. Using double", "# the tolerance because both vectors are approximate", "for", "g", "in", "sorted", "(", "grouped_fcoords", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", ")", ")", ":", "for", "f", "in", "g", ":", "min_vecs", "=", "pbc_coord_intersection", "(", "min_vecs", ",", "g", "-", "f", ",", "super_ftol_2", ")", "def", "get_hnf", "(", "fu", ")", ":", "\"\"\"\n Returns all possible distinct supercell matrices given a\n number of formula units in the supercell. 
Batches the matrices\n by the values in the diagonal (for less numpy overhead).\n Computational complexity is O(n^3), and difficult to improve.\n Might be able to do something smart with checking combinations of a\n and b first, though unlikely to reduce to O(n^2).\n \"\"\"", "def", "factors", "(", "n", ")", ":", "for", "i", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "if", "n", "%", "i", "==", "0", ":", "yield", "i", "for", "det", "in", "factors", "(", "fu", ")", ":", "if", "det", "==", "1", ":", "continue", "for", "a", "in", "factors", "(", "det", ")", ":", "for", "e", "in", "factors", "(", "det", "//", "a", ")", ":", "g", "=", "det", "//", "a", "//", "e", "yield", "det", ",", "np", ".", "array", "(", "[", "[", "[", "a", ",", "b", ",", "c", "]", ",", "[", "0", ",", "e", ",", "f", "]", ",", "[", "0", ",", "0", ",", "g", "]", "]", "for", "b", ",", "c", ",", "f", "in", "itertools", ".", "product", "(", "range", "(", "a", ")", ",", "range", "(", "a", ")", ",", "range", "(", "e", ")", ")", "]", ")", "# we cant let sites match to their neighbors in the supercell", "grouped_non_nbrs", "=", "[", "]", "for", "gfcoords", "in", "grouped_fcoords", ":", "fdist", "=", "gfcoords", "[", "None", ",", ":", ",", ":", "]", "-", "gfcoords", "[", ":", ",", "None", ",", ":", "]", "fdist", "-=", "np", ".", "round", "(", "fdist", ")", "np", ".", "abs", "(", "fdist", ",", "fdist", ")", "non_nbrs", "=", "np", ".", "any", "(", "fdist", ">", "2", "*", "super_ftol", "[", "None", ",", "None", ",", ":", "]", ",", "axis", "=", "-", "1", ")", "# since we want sites to match to themselves", "np", ".", "fill_diagonal", "(", "non_nbrs", ",", "True", ")", "grouped_non_nbrs", ".", "append", "(", "non_nbrs", ")", "num_fu", "=", "functools", ".", "reduce", "(", "gcd", ",", "map", "(", "len", ",", "grouped_sites", ")", ")", "for", "size", ",", "ms", "in", "get_hnf", "(", "num_fu", ")", ":", "inv_ms", "=", "np", ".", "linalg", ".", "inv", "(", "ms", ")", "# find sets of lattice vectors that are are present in min_vecs", "dist", "=", "inv_ms", "[", ":", ",", ":", ",", "None", ",", ":", "]", "-", "min_vecs", "[", "None", ",", "None", ",", ":", ",", ":", "]", "dist", "-=", "np", ".", "round", "(", "dist", ")", "np", ".", "abs", "(", "dist", ",", "dist", ")", "is_close", "=", "np", ".", "all", "(", "dist", "<", "super_ftol", ",", "axis", "=", "-", "1", ")", "any_close", "=", "np", ".", "any", "(", "is_close", ",", "axis", "=", "-", "1", ")", "inds", "=", "np", ".", "all", "(", "any_close", ",", "axis", "=", "-", "1", ")", "for", "inv_m", ",", "m", "in", "zip", "(", "inv_ms", "[", "inds", "]", ",", "ms", "[", "inds", "]", ")", ":", "new_m", "=", "np", ".", "dot", "(", "inv_m", ",", "self", ".", "lattice", ".", "matrix", ")", "ftol", "=", "np", ".", "divide", "(", "tolerance", ",", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "new_m", "**", "2", ",", "axis", "=", "1", ")", ")", ")", "valid", "=", "True", "new_coords", "=", "[", "]", "new_sp", "=", "[", "]", "new_props", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "gsites", ",", "gfcoords", ",", "non_nbrs", "in", "zip", "(", "grouped_sites", ",", "grouped_fcoords", ",", "grouped_non_nbrs", ")", ":", "all_frac", "=", "np", ".", "dot", "(", "gfcoords", ",", "m", ")", "# calculate grouping of equivalent sites, represented by", "# adjacency matrix", "fdist", "=", "all_frac", "[", "None", ",", ":", ",", ":", "]", "-", "all_frac", "[", ":", ",", "None", ",", ":", "]", "fdist", "=", "np", ".", "abs", "(", "fdist", "-", "np", ".", "round", "(", "fdist", ")", 
")", "close_in_prim", "=", "np", ".", "all", "(", "fdist", "<", "ftol", "[", "None", ",", "None", ",", ":", "]", ",", "axis", "=", "-", "1", ")", "groups", "=", "np", ".", "logical_and", "(", "close_in_prim", ",", "non_nbrs", ")", "# check that groups are correct", "if", "not", "np", ".", "all", "(", "np", ".", "sum", "(", "groups", ",", "axis", "=", "0", ")", "==", "size", ")", ":", "valid", "=", "False", "break", "# check that groups are all cliques", "for", "g", "in", "groups", ":", "if", "not", "np", ".", "all", "(", "groups", "[", "g", "]", "[", ":", ",", "g", "]", ")", ":", "valid", "=", "False", "break", "if", "not", "valid", ":", "break", "# add the new sites, averaging positions", "added", "=", "np", ".", "zeros", "(", "len", "(", "gsites", ")", ")", "new_fcoords", "=", "all_frac", "%", "1", "for", "i", ",", "group", "in", "enumerate", "(", "groups", ")", ":", "if", "not", "added", "[", "i", "]", ":", "added", "[", "group", "]", "=", "True", "inds", "=", "np", ".", "where", "(", "group", ")", "[", "0", "]", "coords", "=", "new_fcoords", "[", "inds", "[", "0", "]", "]", "for", "n", ",", "j", "in", "enumerate", "(", "inds", "[", "1", ":", "]", ")", ":", "offset", "=", "new_fcoords", "[", "j", "]", "-", "coords", "coords", "+=", "(", "offset", "-", "np", ".", "round", "(", "offset", ")", ")", "/", "(", "n", "+", "2", ")", "new_sp", ".", "append", "(", "gsites", "[", "inds", "[", "0", "]", "]", ".", "species", ")", "for", "k", "in", "gsites", "[", "inds", "[", "0", "]", "]", ".", "properties", ":", "new_props", "[", "k", "]", ".", "append", "(", "gsites", "[", "inds", "[", "0", "]", "]", ".", "properties", "[", "k", "]", ")", "new_coords", ".", "append", "(", "coords", ")", "if", "valid", ":", "inv_m", "=", "np", ".", "linalg", ".", "inv", "(", "m", ")", "new_l", "=", "Lattice", "(", "np", ".", "dot", "(", "inv_m", ",", "self", ".", "lattice", ".", "matrix", ")", ")", "s", "=", "Structure", "(", "new_l", ",", "new_sp", ",", "new_coords", ",", "site_properties", "=", "new_props", ",", "coords_are_cartesian", "=", "False", ")", "# Default behavior", "p", "=", "s", ".", "get_primitive_structure", "(", "tolerance", "=", "tolerance", ",", "use_site_props", "=", "use_site_props", ",", "constrain_latt", "=", "constrain_latt", ")", ".", "get_reduced_structure", "(", ")", "if", "not", "constrain_latt", ":", "return", "p", "# Only return primitive structures that", "# satisfy the restriction condition", "p_latt", ",", "s_latt", "=", "p", ".", "lattice", ",", "self", ".", "lattice", "if", "type", "(", "constrain_latt", ")", ".", "__name__", "==", "\"list\"", ":", "if", "all", "(", "[", "getattr", "(", "p_latt", ",", "p", ")", "==", "getattr", "(", "s_latt", ",", "p", ")", "for", "p", "in", "constrain_latt", "]", ")", ":", "return", "p", "elif", "type", "(", "constrain_latt", ")", ".", "__name__", "==", "\"dict\"", ":", "if", "all", "(", "[", "getattr", "(", "p_latt", ",", "p", ")", "==", "constrain_latt", "[", "p", "]", "for", "p", "in", "constrain_latt", ".", "keys", "(", ")", "]", ")", ":", "return", "p", "return", "self", ".", "copy", "(", ")" ]
This finds a smaller unit cell than the input. Sometimes it doesn't find the smallest possible one, so this method is recursively called until it is unable to find a smaller cell. NOTE: if the tolerance is greater than 1/2 the minimum inter-site distance in the primitive cell, the algorithm will reject this lattice. Args: tolerance (float), Angstroms: Tolerance for each coordinate of a particular site. For example, [0.1, 0, 0.1] in cartesian coordinates will be considered to be on the same coordinates as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25. use_site_props (bool): Whether to account for site properties in differentiating sites. constrain_latt (list/dict): List of lattice parameters we want to preserve, e.g. ["alpha", "c"] or dict with the lattice parameter names as keys and values we want the parameters to be e.g. {"alpha": 90, "c": 2.5}. Returns: The most primitive structure found.
[ "This", "finds", "a", "smaller", "unit", "cell", "than", "the", "input", ".", "Sometimes", "it", "doesn", "t", "find", "the", "smallest", "possible", "one", "so", "this", "method", "is", "recursively", "called", "until", "it", "is", "unable", "to", "find", "a", "smaller", "cell", "." ]
python
train
44.597015
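A hedged usage sketch of get_primitive_structure() above (assumes pymatgen is installed; "POSCAR" is a placeholder input file, and the import path may differ across pymatgen versions):

# Reduce a conventional cell to its primitive cell, optionally keeping c fixed.
from pymatgen.core import Structure

structure = Structure.from_file("POSCAR")  # placeholder file name
primitive = structure.get_primitive_structure(tolerance=0.25)
constrained = structure.get_primitive_structure(constrain_latt=["c"])
print(len(primitive), "sites in primitive vs", len(structure), "in input")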
xeroc/python-graphenelib
graphenecommon/account.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/account.py#L51-L77
def refresh(self): """ Refresh/Obtain an account's data from the API server """ import re if re.match(r"^1\.2\.[0-9]*$", self.identifier): account = self.blockchain.rpc.get_objects([self.identifier])[0] else: account = self.blockchain.rpc.lookup_account_names([self.identifier])[0] if not account: raise AccountDoesNotExistsException(self.identifier) self.store(account, "name") if self.full: # pragma: no cover accounts = self.blockchain.rpc.get_full_accounts([account["id"]], False) if accounts and isinstance(accounts, list): account = accounts[0][1] else: raise AccountDoesNotExistsException(self.identifier) super(Account, self).__init__( account["account"], blockchain_instance=self.blockchain ) for k, v in account.items(): if k != "account": self[k] = v else: super(Account, self).__init__(account, blockchain_instance=self.blockchain)
[ "def", "refresh", "(", "self", ")", ":", "import", "re", "if", "re", ".", "match", "(", "r\"^1\\.2\\.[0-9]*$\"", ",", "self", ".", "identifier", ")", ":", "account", "=", "self", ".", "blockchain", ".", "rpc", ".", "get_objects", "(", "[", "self", ".", "identifier", "]", ")", "[", "0", "]", "else", ":", "account", "=", "self", ".", "blockchain", ".", "rpc", ".", "lookup_account_names", "(", "[", "self", ".", "identifier", "]", ")", "[", "0", "]", "if", "not", "account", ":", "raise", "AccountDoesNotExistsException", "(", "self", ".", "identifier", ")", "self", ".", "store", "(", "account", ",", "\"name\"", ")", "if", "self", ".", "full", ":", "# pragma: no cover", "accounts", "=", "self", ".", "blockchain", ".", "rpc", ".", "get_full_accounts", "(", "[", "account", "[", "\"id\"", "]", "]", ",", "False", ")", "if", "accounts", "and", "isinstance", "(", "accounts", ",", "list", ")", ":", "account", "=", "accounts", "[", "0", "]", "[", "1", "]", "else", ":", "raise", "AccountDoesNotExistsException", "(", "self", ".", "identifier", ")", "super", "(", "Account", ",", "self", ")", ".", "__init__", "(", "account", "[", "\"account\"", "]", ",", "blockchain_instance", "=", "self", ".", "blockchain", ")", "for", "k", ",", "v", "in", "account", ".", "items", "(", ")", ":", "if", "k", "!=", "\"account\"", ":", "self", "[", "k", "]", "=", "v", "else", ":", "super", "(", "Account", ",", "self", ")", ".", "__init__", "(", "account", ",", "blockchain_instance", "=", "self", ".", "blockchain", ")" ]
Refresh/Obtain an account's data from the API server
[ "Refresh", "/", "Obtain", "an", "account", "s", "data", "from", "the", "API", "server" ]
python
valid
40.592593
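A self-contained sketch of the identifier dispatch refresh() performs: strings matching the 1.2.<n> pattern are treated as object ids (fetched via get_objects), anything else as an account name (fetched via lookup_account_names). The helper name below is illustrative, not from the record:

import re

def lookup_kind(identifier):
    # Mirrors the regex used in refresh() above.
    return "object-id" if re.match(r"^1\.2\.[0-9]*$", identifier) else "name"

print(lookup_kind("1.2.12345"))  # object-id
print(lookup_kind("init0"))      # name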
DMSC-Instrument-Data/lewis
src/lewis/core/devices.py
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/core/devices.py#L93-L102
def is_device(obj): """ Returns True if obj is a device type (derived from DeviceBase), but not defined in :mod:`lewis.core.devices` or :mod:`lewis.devices`. :param obj: Object to test. :return: True if obj is a device type. """ return isinstance(obj, type) and issubclass( obj, DeviceBase) and obj.__module__ not in ('lewis.devices', 'lewis.core.devices')
[ "def", "is_device", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "type", ")", "and", "issubclass", "(", "obj", ",", "DeviceBase", ")", "and", "obj", ".", "__module__", "not", "in", "(", "'lewis.devices'", ",", "'lewis.core.devices'", ")" ]
Returns True if obj is a device type (derived from DeviceBase), but not defined in :mod:`lewis.core.devices` or :mod:`lewis.devices`. :param obj: Object to test. :return: True if obj is a device type.
[ "Returns", "True", "if", "obj", "is", "a", "device", "type", "(", "derived", "from", "DeviceBase", ")", "but", "not", "defined", "in", ":", "mod", ":", "lewis", ".", "core", ".", "devices", "or", ":", "mod", ":", "lewis", ".", "devices", "." ]
python
train
38.4
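A runnable sketch of is_device() (assuming lewis is installed): a DeviceBase subclass defined in user code qualifies, while DeviceBase itself is excluded because its module is lewis.core.devices.

from lewis.core.devices import DeviceBase, is_device

class MyDevice(DeviceBase):
    pass

print(is_device(MyDevice))      # True: user-defined subclass
print(is_device(DeviceBase))    # False: defined in lewis.core.devices
print(is_device("not a type"))  # False: not a type at all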
thiezn/iperf3-python
iperf3/iperf3.py
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L586-L598
def reverse(self): """Toggles direction of test :rtype: bool """ enabled = self.lib.iperf_get_test_reverse(self._test) if enabled: self._reverse = True else: self._reverse = False return self._reverse
[ "def", "reverse", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_test_reverse", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_reverse", "=", "True", "else", ":", "self", ".", "_reverse", "=", "False", "return", "self", ".", "_reverse" ]
Toggles direction of test :rtype: bool
[ "Toggles", "direction", "of", "test" ]
python
train
20.846154
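A hedged usage sketch of the reverse property (assumes the iperf3 package plus the libiperf shared library are installed, and an iperf3 server at the placeholder address):

import iperf3

client = iperf3.Client()
client.server_hostname = "192.0.2.10"  # placeholder test address
client.port = 5201
client.reverse = True   # ask the server to send, the client to receive
print(client.reverse)   # the getter above queries libiperf's test object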
titusjan/argos
argos/repo/repotreemodel.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/repotreemodel.py#L53-L113
def itemData(self, treeItem, column, role=Qt.DisplayRole): """ Returns the data stored under the given role for the item. O """ if role == Qt.DisplayRole: if column == self.COL_NODE_NAME: return treeItem.nodeName elif column == self.COL_NODE_PATH: return treeItem.nodePath elif column == self.COL_SHAPE: if treeItem.isSliceable: return " x ".join(str(elem) for elem in treeItem.arrayShape) else: return "" elif column == self.COL_IS_OPEN: # Only show for RTIs that actually open resources. # TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default? if treeItem.hasChildren(): return str(treeItem.isOpen) else: return "" elif column == self.COL_ELEM_TYPE: return treeItem.elementTypeName elif column == self.COL_FILE_NAME: return treeItem.fileName if hasattr(treeItem, 'fileName') else '' elif column == self.COL_UNIT: return treeItem.unit elif column == self.COL_MISSING_DATA: return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones elif column == self.COL_RTI_TYPE: return type_name(treeItem) elif column == self.COL_EXCEPTION: return str(treeItem.exception) if treeItem.exception else '' else: raise ValueError("Invalid column: {}".format(column)) elif role == Qt.ToolTipRole: if treeItem.exception: return str(treeItem.exception) if column == self.COL_NODE_NAME: return treeItem.nodePath # Also path when hovering over the name elif column == self.COL_NODE_PATH: return treeItem.nodePath elif column == self.COL_SHAPE: if treeItem.isSliceable: return " x ".join(str(elem) for elem in treeItem.arrayShape) else: return "" elif column == self.COL_UNIT: return treeItem.unit elif column == self.COL_MISSING_DATA: return to_string(treeItem.missingDataValue, noneFormat='') # empty str for Nones elif column == self.COL_RTI_TYPE: return type_name(treeItem) elif column == self.COL_ELEM_TYPE: return treeItem.elementTypeName elif column == self.COL_FILE_NAME: return treeItem.fileName if hasattr(treeItem, 'fileName') else '' else: return None else: return super(RepoTreeModel, self).itemData(treeItem, column, role=role)
[ "def", "itemData", "(", "self", ",", "treeItem", ",", "column", ",", "role", "=", "Qt", ".", "DisplayRole", ")", ":", "if", "role", "==", "Qt", ".", "DisplayRole", ":", "if", "column", "==", "self", ".", "COL_NODE_NAME", ":", "return", "treeItem", ".", "nodeName", "elif", "column", "==", "self", ".", "COL_NODE_PATH", ":", "return", "treeItem", ".", "nodePath", "elif", "column", "==", "self", ".", "COL_SHAPE", ":", "if", "treeItem", ".", "isSliceable", ":", "return", "\" x \"", ".", "join", "(", "str", "(", "elem", ")", "for", "elem", "in", "treeItem", ".", "arrayShape", ")", "else", ":", "return", "\"\"", "elif", "column", "==", "self", ".", "COL_IS_OPEN", ":", "# Only show for RTIs that actually open resources.", "# TODO: this must be clearer. Use CanFetchChildren? Set is Open to None by default?", "if", "treeItem", ".", "hasChildren", "(", ")", ":", "return", "str", "(", "treeItem", ".", "isOpen", ")", "else", ":", "return", "\"\"", "elif", "column", "==", "self", ".", "COL_ELEM_TYPE", ":", "return", "treeItem", ".", "elementTypeName", "elif", "column", "==", "self", ".", "COL_FILE_NAME", ":", "return", "treeItem", ".", "fileName", "if", "hasattr", "(", "treeItem", ",", "'fileName'", ")", "else", "''", "elif", "column", "==", "self", ".", "COL_UNIT", ":", "return", "treeItem", ".", "unit", "elif", "column", "==", "self", ".", "COL_MISSING_DATA", ":", "return", "to_string", "(", "treeItem", ".", "missingDataValue", ",", "noneFormat", "=", "''", ")", "# empty str for Nones", "elif", "column", "==", "self", ".", "COL_RTI_TYPE", ":", "return", "type_name", "(", "treeItem", ")", "elif", "column", "==", "self", ".", "COL_EXCEPTION", ":", "return", "str", "(", "treeItem", ".", "exception", ")", "if", "treeItem", ".", "exception", "else", "''", "else", ":", "raise", "ValueError", "(", "\"Invalid column: {}\"", ".", "format", "(", "column", ")", ")", "elif", "role", "==", "Qt", ".", "ToolTipRole", ":", "if", "treeItem", ".", "exception", ":", "return", "str", "(", "treeItem", ".", "exception", ")", "if", "column", "==", "self", ".", "COL_NODE_NAME", ":", "return", "treeItem", ".", "nodePath", "# Also path when hovering over the name", "elif", "column", "==", "self", ".", "COL_NODE_PATH", ":", "return", "treeItem", ".", "nodePath", "elif", "column", "==", "self", ".", "COL_SHAPE", ":", "if", "treeItem", ".", "isSliceable", ":", "return", "\" x \"", ".", "join", "(", "str", "(", "elem", ")", "for", "elem", "in", "treeItem", ".", "arrayShape", ")", "else", ":", "return", "\"\"", "elif", "column", "==", "self", ".", "COL_UNIT", ":", "return", "treeItem", ".", "unit", "elif", "column", "==", "self", ".", "COL_MISSING_DATA", ":", "return", "to_string", "(", "treeItem", ".", "missingDataValue", ",", "noneFormat", "=", "''", ")", "# empty str for Nones", "elif", "column", "==", "self", ".", "COL_RTI_TYPE", ":", "return", "type_name", "(", "treeItem", ")", "elif", "column", "==", "self", ".", "COL_ELEM_TYPE", ":", "return", "treeItem", ".", "elementTypeName", "elif", "column", "==", "self", ".", "COL_FILE_NAME", ":", "return", "treeItem", ".", "fileName", "if", "hasattr", "(", "treeItem", ",", "'fileName'", ")", "else", "''", "else", ":", "return", "None", "else", ":", "return", "super", "(", "RepoTreeModel", ",", "self", ")", ".", "itemData", "(", "treeItem", ",", "column", ",", "role", "=", "role", ")" ]
Returns the data stored under the given role for the item.
[ "Returns", "the", "data", "stored", "under", "the", "given", "role", "for", "the", "item", ".", "O" ]
python
train
46.704918
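A minimal sketch of the role-dispatch pattern itemData() follows: branch on the Qt role first, then on the column, and fall back to None. PyQt5 is an assumption here; argos routes Qt imports through its own binding layer.

from PyQt5.QtCore import Qt

def item_data(item, column, role=Qt.DisplayRole):
    # DisplayRole shows the name; ToolTipRole shows the full path.
    if role == Qt.DisplayRole:
        return str(getattr(item, "nodeName", ""))
    if role == Qt.ToolTipRole:
        return str(getattr(item, "nodePath", ""))
    return None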
phoebe-project/phoebe2
phoebe/distortions/roche.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/distortions/roche.py#L170-L179
def dBinaryRochedz(r, D, q, F): """ Computes a derivative of the potential with respect to z. @param r: relative radius vector (3 components) @param D: instantaneous separation @param q: mass ratio @param F: synchronicity parameter """ return -r[2]*(r[0]*r[0]+r[1]*r[1]+r[2]*r[2])**-1.5 -q*r[2]*((r[0]-D)*(r[0]-D)+r[1]*r[1]+r[2]*r[2])**-1.5
[ "def", "dBinaryRochedz", "(", "r", ",", "D", ",", "q", ",", "F", ")", ":", "return", "-", "r", "[", "2", "]", "*", "(", "r", "[", "0", "]", "*", "r", "[", "0", "]", "+", "r", "[", "1", "]", "*", "r", "[", "1", "]", "+", "r", "[", "2", "]", "*", "r", "[", "2", "]", ")", "**", "-", "1.5", "-", "q", "*", "r", "[", "2", "]", "*", "(", "(", "r", "[", "0", "]", "-", "D", ")", "*", "(", "r", "[", "0", "]", "-", "D", ")", "+", "r", "[", "1", "]", "*", "r", "[", "1", "]", "+", "r", "[", "2", "]", "*", "r", "[", "2", "]", ")", "**", "-", "1.5" ]
Computes a derivative of the potential with respect to z. @param r: relative radius vector (3 components) @param D: instantaneous separation @param q: mass ratio @param F: synchronicity parameter
[ "Computes", "a", "derivative", "of", "the", "potential", "with", "respect", "to", "z", "." ]
python
train
38.5
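Written out from the code (a reconstruction, with \Omega the binary Roche potential): the synchronicity parameter F enters only the centrifugal term, which has no z dependence and therefore drops out of this derivative.

% z-derivative of the Roche potential, matching the expression in the code
\frac{\partial \Omega}{\partial z}
  = -\frac{z}{\left(x^{2}+y^{2}+z^{2}\right)^{3/2}}
  - \frac{q\,z}{\left((x-D)^{2}+y^{2}+z^{2}\right)^{3/2}}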
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L8672-L8725
def read_micromanager_metadata(fh): """Read MicroManager non-TIFF settings from open file and return as dict. The settings can be used to read image data without parsing the TIFF file. Raise ValueError if the file does not contain valid MicroManager metadata. """ fh.seek(0) try: byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)] except IndexError: raise ValueError('not a MicroManager TIFF file') result = {} fh.seek(8) (index_header, index_offset, display_header, display_offset, comments_header, comments_offset, summary_header, summary_length ) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32)) if summary_header != 2355492: raise ValueError('invalid MicroManager summary header') result['Summary'] = read_json(fh, byteorder, None, summary_length, None) if index_header != 54773648: raise ValueError('invalid MicroManager index header') fh.seek(index_offset) header, count = struct.unpack(byteorder + 'II', fh.read(8)) if header != 3453623: raise ValueError('invalid MicroManager index header') data = struct.unpack(byteorder + 'IIIII'*count, fh.read(20*count)) result['IndexMap'] = {'Channel': data[::5], 'Slice': data[1::5], 'Frame': data[2::5], 'Position': data[3::5], 'Offset': data[4::5]} if display_header != 483765892: raise ValueError('invalid MicroManager display header') fh.seek(display_offset) header, count = struct.unpack(byteorder + 'II', fh.read(8)) if header != 347834724: raise ValueError('invalid MicroManager display header') result['DisplaySettings'] = read_json(fh, byteorder, None, count, None) if comments_header != 99384722: raise ValueError('invalid MicroManager comments header') fh.seek(comments_offset) header, count = struct.unpack(byteorder + 'II', fh.read(8)) if header != 84720485: raise ValueError('invalid MicroManager comments header') result['Comments'] = read_json(fh, byteorder, None, count, None) return result
[ "def", "read_micromanager_metadata", "(", "fh", ")", ":", "fh", ".", "seek", "(", "0", ")", "try", ":", "byteorder", "=", "{", "b'II'", ":", "'<'", ",", "b'MM'", ":", "'>'", "}", "[", "fh", ".", "read", "(", "2", ")", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "'not a MicroManager TIFF file'", ")", "result", "=", "{", "}", "fh", ".", "seek", "(", "8", ")", "(", "index_header", ",", "index_offset", ",", "display_header", ",", "display_offset", ",", "comments_header", ",", "comments_offset", ",", "summary_header", ",", "summary_length", ")", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'IIIIIIII'", ",", "fh", ".", "read", "(", "32", ")", ")", "if", "summary_header", "!=", "2355492", ":", "raise", "ValueError", "(", "'invalid MicroManager summary header'", ")", "result", "[", "'Summary'", "]", "=", "read_json", "(", "fh", ",", "byteorder", ",", "None", ",", "summary_length", ",", "None", ")", "if", "index_header", "!=", "54773648", ":", "raise", "ValueError", "(", "'invalid MicroManager index header'", ")", "fh", ".", "seek", "(", "index_offset", ")", "header", ",", "count", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'II'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "header", "!=", "3453623", ":", "raise", "ValueError", "(", "'invalid MicroManager index header'", ")", "data", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'IIIII'", "*", "count", ",", "fh", ".", "read", "(", "20", "*", "count", ")", ")", "result", "[", "'IndexMap'", "]", "=", "{", "'Channel'", ":", "data", "[", ":", ":", "5", "]", ",", "'Slice'", ":", "data", "[", "1", ":", ":", "5", "]", ",", "'Frame'", ":", "data", "[", "2", ":", ":", "5", "]", ",", "'Position'", ":", "data", "[", "3", ":", ":", "5", "]", ",", "'Offset'", ":", "data", "[", "4", ":", ":", "5", "]", "}", "if", "display_header", "!=", "483765892", ":", "raise", "ValueError", "(", "'invalid MicroManager display header'", ")", "fh", ".", "seek", "(", "display_offset", ")", "header", ",", "count", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'II'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "header", "!=", "347834724", ":", "raise", "ValueError", "(", "'invalid MicroManager display header'", ")", "result", "[", "'DisplaySettings'", "]", "=", "read_json", "(", "fh", ",", "byteorder", ",", "None", ",", "count", ",", "None", ")", "if", "comments_header", "!=", "99384722", ":", "raise", "ValueError", "(", "'invalid MicroManager comments header'", ")", "fh", ".", "seek", "(", "comments_offset", ")", "header", ",", "count", "=", "struct", ".", "unpack", "(", "byteorder", "+", "'II'", ",", "fh", ".", "read", "(", "8", ")", ")", "if", "header", "!=", "84720485", ":", "raise", "ValueError", "(", "'invalid MicroManager comments header'", ")", "result", "[", "'Comments'", "]", "=", "read_json", "(", "fh", ",", "byteorder", ",", "None", ",", "count", ",", "None", ")", "return", "result" ]
Read MicroManager non-TIFF settings from open file and return as dict. The settings can be used to read image data without parsing the TIFF file. Raise ValueError if the file does not contain valid MicroManager metadata.
[ "Read", "MicroManager", "non", "-", "TIFF", "settings", "from", "open", "file", "and", "return", "as", "dict", "." ]
python
train
39.148148
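A hedged usage sketch of read_micromanager_metadata() ("mm_stack.tif" is a placeholder; the standalone tifffile package exposes the same function, while the record's copy is bundled inside the plugin):

from tifffile import read_micromanager_metadata  # assumption: standalone package

with open("mm_stack.tif", "rb") as fh:  # must be opened in binary mode
    meta = read_micromanager_metadata(fh)
print(meta["Summary"])
print(len(meta["IndexMap"]["Frame"]), "frames indexed")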
mozilla/puente
puente/commands.py
https://github.com/mozilla/puente/blob/4379a7717d28a2490d47939800f5d6e695b70def/puente/commands.py#L67-L129
def extract_command(outputdir, domain_methods, text_domain, keywords, comment_tags, base_dir, project, version, msgid_bugs_address): """Extracts strings into .pot files :arg domain: domains to generate strings for or 'all' for all domains :arg outputdir: output dir for .pot files; usually locale/templates/LC_MESSAGES/ :arg domain_methods: DOMAIN_METHODS setting :arg text_domain: TEXT_DOMAIN settings :arg keywords: KEYWORDS setting :arg comment_tags: COMMENT_TAGS setting :arg base_dir: BASE_DIR setting :arg project: PROJECT setting :arg version: VERSION setting :arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting """ # Must monkeypatch first to fix i18n extensions stomping issues! monkeypatch_i18n() # Create the outputdir if it doesn't exist outputdir = os.path.abspath(outputdir) if not os.path.isdir(outputdir): print('Creating output dir %s ...' % outputdir) os.makedirs(outputdir) domains = domain_methods.keys() def callback(filename, method, options): if method != 'ignore': print(' %s' % filename) # Extract string for each domain for domain in domains: print('Extracting all strings in domain %s...' % domain) methods = domain_methods[domain] catalog = Catalog( header_comment='', project=project, version=version, msgid_bugs_address=msgid_bugs_address, charset='utf-8', ) extracted = extract_from_dir( base_dir, method_map=methods, options_map=generate_options_map(), keywords=keywords, comment_tags=comment_tags, callback=callback, ) for filename, lineno, msg, cmts, ctxt in extracted: catalog.add(msg, None, [(filename, lineno)], auto_comments=cmts, context=ctxt) with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp: write_po(fp, catalog, width=80) print('Done')
[ "def", "extract_command", "(", "outputdir", ",", "domain_methods", ",", "text_domain", ",", "keywords", ",", "comment_tags", ",", "base_dir", ",", "project", ",", "version", ",", "msgid_bugs_address", ")", ":", "# Must monkeypatch first to fix i18n extensions stomping issues!", "monkeypatch_i18n", "(", ")", "# Create the outputdir if it doesn't exist", "outputdir", "=", "os", ".", "path", ".", "abspath", "(", "outputdir", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "outputdir", ")", ":", "print", "(", "'Creating output dir %s ...'", "%", "outputdir", ")", "os", ".", "makedirs", "(", "outputdir", ")", "domains", "=", "domain_methods", ".", "keys", "(", ")", "def", "callback", "(", "filename", ",", "method", ",", "options", ")", ":", "if", "method", "!=", "'ignore'", ":", "print", "(", "' %s'", "%", "filename", ")", "# Extract string for each domain", "for", "domain", "in", "domains", ":", "print", "(", "'Extracting all strings in domain %s...'", "%", "domain", ")", "methods", "=", "domain_methods", "[", "domain", "]", "catalog", "=", "Catalog", "(", "header_comment", "=", "''", ",", "project", "=", "project", ",", "version", "=", "version", ",", "msgid_bugs_address", "=", "msgid_bugs_address", ",", "charset", "=", "'utf-8'", ",", ")", "extracted", "=", "extract_from_dir", "(", "base_dir", ",", "method_map", "=", "methods", ",", "options_map", "=", "generate_options_map", "(", ")", ",", "keywords", "=", "keywords", ",", "comment_tags", "=", "comment_tags", ",", "callback", "=", "callback", ",", ")", "for", "filename", ",", "lineno", ",", "msg", ",", "cmts", ",", "ctxt", "in", "extracted", ":", "catalog", ".", "add", "(", "msg", ",", "None", ",", "[", "(", "filename", ",", "lineno", ")", "]", ",", "auto_comments", "=", "cmts", ",", "context", "=", "ctxt", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "outputdir", ",", "'%s.pot'", "%", "domain", ")", ",", "'wb'", ")", "as", "fp", ":", "write_po", "(", "fp", ",", "catalog", ",", "width", "=", "80", ")", "print", "(", "'Done'", ")" ]
Extracts strings into .pot files :arg outputdir: output dir for .pot files; usually locale/templates/LC_MESSAGES/ :arg domain_methods: DOMAIN_METHODS setting :arg text_domain: TEXT_DOMAIN setting :arg keywords: KEYWORDS setting :arg comment_tags: COMMENT_TAGS setting :arg base_dir: BASE_DIR setting :arg project: PROJECT setting :arg version: VERSION setting :arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
[ "Extracts", "strings", "into", ".", "pot", "files" ]
python
train
32.714286
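A hedged call sketch of extract_command(): all values below are placeholders mirroring the settings the docstring names; the method map follows Babel's (pattern, method) convention and the keyword map Babel's keyword specs.

extract_command(
    outputdir="locale/templates/LC_MESSAGES",
    domain_methods={"django": [("**.py", "python")]},
    text_domain="django",
    keywords={"_": None},  # Babel keyword spec: msgid is the first argument
    comment_tags=["Translators:"],
    base_dir=".",
    project="myproject",   # placeholder project name
    version="1.0",
    msgid_bugs_address="dev@example.com",
)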
peopledoc/workalendar
workalendar/registry.py
https://github.com/peopledoc/workalendar/blob/d044d5dfc1709ec388db34dab583dd554cc66c4e/workalendar/registry.py#L69-L88
def items(self, region_codes, include_subregions=False): """ Returns calendar classes for regions :param region_codes list of ISO codes for selected regions :param include_subregions boolean if subregions of selected regions should be included in result :rtype dict :return dict where keys are ISO codes strings and values are calendar classes """ items = OrderedDict() for code in region_codes: try: items[code] = self.region_registry[code] except KeyError: continue if include_subregions: items.update(self.get_subregions(code)) return items
[ "def", "items", "(", "self", ",", "region_codes", ",", "include_subregions", "=", "False", ")", ":", "items", "=", "OrderedDict", "(", ")", "for", "code", "in", "region_codes", ":", "try", ":", "items", "[", "code", "]", "=", "self", ".", "region_registry", "[", "code", "]", "except", "KeyError", ":", "continue", "if", "include_subregions", ":", "items", ".", "update", "(", "self", ".", "get_subregions", "(", "code", ")", ")", "return", "items" ]
Returns calendar classes for regions :param region_codes: list of ISO codes for selected regions :param include_subregions: boolean; if True, subregions of selected regions are included in the result :rtype: dict :return: dict where keys are ISO code strings and values are calendar classes
[ "Returns", "calendar", "classes", "for", "regions" ]
python
train
35.25
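A hedged usage sketch of items() (in the version shown the registry method is items(); newer workalendar releases expose the same lookup as get_calendars()):

from workalendar.registry import registry  # assumption: module-level registry

calendars = registry.items(["FR", "US"], include_subregions=True)
for code, cls in calendars.items():
    print(code, cls.__name__)  # US subregions (states) pulled in by the flag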
anti1869/sunhead
src/sunhead/utils.py
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/utils.py#L99-L113
async def parallel_results(future_map: Sequence[Tuple]) -> Dict: """ Run parallel execution of futures and return mapping of their results to the provided keys. Just a neat shortcut around ``asyncio.gather()`` :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content, get_content()) ) :return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'} """ ctx_methods = OrderedDict(future_map) fs = list(ctx_methods.values()) results = await asyncio.gather(*fs) results = { key: results[idx] for idx, key in enumerate(ctx_methods.keys()) } return results
[ "async", "def", "parallel_results", "(", "future_map", ":", "Sequence", "[", "Tuple", "]", ")", "->", "Dict", ":", "ctx_methods", "=", "OrderedDict", "(", "future_map", ")", "fs", "=", "list", "(", "ctx_methods", ".", "values", "(", ")", ")", "results", "=", "await", "asyncio", ".", "gather", "(", "*", "fs", ")", "results", "=", "{", "key", ":", "results", "[", "idx", "]", "for", "idx", ",", "key", "in", "enumerate", "(", "ctx_methods", ".", "keys", "(", ")", ")", "}", "return", "results" ]
Run parallel execution of futures and return mapping of their results to the provided keys. Just a neat shortcut around ``asyncio.gather()`` :param future_map: Keys to futures mapping, e.g.: ( ('nav', get_nav()), ('content', get_content()) ) :return: Dict with futures results mapped to keys {'nav': {1:2}, 'content': 'xyz'}
[ "Run", "parallel", "execution", "of", "futures", "and", "return", "mapping", "of", "their", "results", "to", "the", "provided", "keys", ".", "Just", "a", "neat", "shortcut", "around", "asyncio", ".", "gather", "()" ]
python
train
42.533333
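A self-contained usage sketch of parallel_results() matching the docstring's example (the coroutine names are illustrative; the import path follows the record's module path):

import asyncio

from sunhead.utils import parallel_results  # per the record's path

async def get_nav():
    return {1: 2}

async def get_content():
    return "xyz"

async def main():
    # Both coroutines run concurrently; results come back keyed by name.
    ctx = await parallel_results((("nav", get_nav()), ("content", get_content())))
    print(ctx)  # {'nav': {1: 2}, 'content': 'xyz'}

asyncio.run(main())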