Dataset schema (column name, type, observed value range):

    repo              string   (7 to 55 chars)
    path              string   (4 to 223 chars)
    url               string   (87 to 315 chars)
    code              string   (75 to 104k chars)
    code_tokens       list     (tokenization of code)
    docstring         string   (1 to 46.9k chars)
    docstring_tokens  list     (tokenization of docstring)
    language          string   (1 distinct value)
    partition         string   (3 distinct values)
    avg_line_len      float64  (7.91 to 980)
guaix-ucm/pyemir
emirdrp/util/sextractor.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L533-L565
def run(self, file, updateconfig=True, clean=False, path=None):
    """
    Run SExtractor.

    If updateconfig is True (default), the configuration files will be
    updated before running SExtractor.

    If clean is True (default: False), configuration files (if any)
    will be deleted after SExtractor terminates.
    """
    if updateconfig:
        self.update_config()

    # Try to find SExtractor program
    # This will raise an exception if it failed
    self.program, self.version = self.setup(path)

    commandline = (
        self.program + " -c " + self.config['CONFIG_FILE'] + " " + file)
    # print commandline

    rcode = os.system(commandline)

    if (rcode):
        raise SExtractorException(
            "SExtractor command [%s] failed." % commandline
        )

    if clean:
        self.clean()
[ "def", "run", "(", "self", ",", "file", ",", "updateconfig", "=", "True", ",", "clean", "=", "False", ",", "path", "=", "None", ")", ":", "if", "updateconfig", ":", "self", ".", "update_config", "(", ")", "# Try to find SExtractor program", "# This will raise an exception if it failed", "self", ".", "program", ",", "self", ".", "version", "=", "self", ".", "setup", "(", "path", ")", "commandline", "=", "(", "self", ".", "program", "+", "\" -c \"", "+", "self", ".", "config", "[", "'CONFIG_FILE'", "]", "+", "\" \"", "+", "file", ")", "# print commandline", "rcode", "=", "os", ".", "system", "(", "commandline", ")", "if", "(", "rcode", ")", ":", "raise", "SExtractorException", "(", "\"SExtractor command [%s] failed.\"", "%", "commandline", ")", "if", "clean", ":", "self", ".", "clean", "(", ")" ]
Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates.
[ "Run", "SExtractor", "." ]
python
train
26.818182
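The run method above shells out through os.system, so the file argument passes through the shell. A minimal sketch of the same invocation via subprocess (the program, config, and image names below are hypothetical stand-ins for self.program, self.config['CONFIG_FILE'], and file), which skips shell parsing and raises on a nonzero exit status much like the SExtractorException above:

import subprocess

# Hypothetical stand-ins for self.program, self.config['CONFIG_FILE'] and file
program = "sex"
config_file = "default.sex"
image = "image.fits"

# check=True raises CalledProcessError on a nonzero exit status,
# mirroring the SExtractorException raised in run().
subprocess.run([program, "-c", config_file, image], check=True)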
apache/spark
python/pyspark/ml/param/__init__.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L261-L271
def params(self):
    """
    Returns all params ordered by name. The default implementation
    uses :py:func:`dir` to get all attributes of type
    :py:class:`Param`.
    """
    if self._params is None:
        self._params = list(filter(lambda attr: isinstance(attr, Param),
                                   [getattr(self, x) for x in dir(self)
                                    if x != "params" and
                                    not isinstance(getattr(type(self), x, None), property)]))
    return self._params
[ "def", "params", "(", "self", ")", ":", "if", "self", ".", "_params", "is", "None", ":", "self", ".", "_params", "=", "list", "(", "filter", "(", "lambda", "attr", ":", "isinstance", "(", "attr", ",", "Param", ")", ",", "[", "getattr", "(", "self", ",", "x", ")", "for", "x", "in", "dir", "(", "self", ")", "if", "x", "!=", "\"params\"", "and", "not", "isinstance", "(", "getattr", "(", "type", "(", "self", ")", ",", "x", ",", "None", ")", ",", "property", ")", "]", ")", ")", "return", "self", ".", "_params" ]
Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`.
[ "Returns", "all", "params", "ordered", "by", "name", ".", "The", "default", "implementation", "uses", ":", "py", ":", "func", ":", "dir", "to", "get", "all", "attributes", "of", "type", ":", "py", ":", "class", ":", "Param", "." ]
python
train
47.272727
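The ordering guarantee in the docstring comes from dir(), which returns attribute names sorted alphabetically; the property check keeps the scan from firing property getters (params itself is one). A self-contained sketch of the same scan, with a stand-in Param class rather than pyspark's:

class Param(object):
    def __init__(self, name):
        self.name = name

class Model(object):
    def __init__(self):
        self.regParam = Param("regParam")
        self.maxIter = Param("maxIter")

    @property
    def params(self):
        # dir() sorts names, so the result is ordered by name
        return list(filter(lambda attr: isinstance(attr, Param),
                           [getattr(self, x) for x in dir(self)
                            if x != "params" and
                            not isinstance(getattr(type(self), x, None), property)]))

print([p.name for p in Model().params])  # ['maxIter', 'regParam']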
ray-project/ray
python/ray/services.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/services.py#L196-L226
def get_node_ip_address(address="8.8.8.8:53"):
    """Determine the IP address of the local node.

    Args:
        address (str): The IP address and port of any known live service on
            the network you care about.

    Returns:
        The IP address of the current node.
    """
    ip_address, port = address.split(":")
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # This command will raise an exception if there is no internet
        # connection.
        s.connect((ip_address, int(port)))
        node_ip_address = s.getsockname()[0]
    except Exception as e:
        node_ip_address = "127.0.0.1"
        # [Errno 101] Network is unreachable
        if e.errno == 101:
            try:
                # try to get the node IP address from the host name
                host_name = socket.getfqdn(socket.gethostname())
                node_ip_address = socket.gethostbyname(host_name)
            except Exception:
                pass
    finally:
        s.close()

    return node_ip_address
[ "def", "get_node_ip_address", "(", "address", "=", "\"8.8.8.8:53\"", ")", ":", "ip_address", ",", "port", "=", "address", ".", "split", "(", "\":\"", ")", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "try", ":", "# This command will raise an exception if there is no internet", "# connection.", "s", ".", "connect", "(", "(", "ip_address", ",", "int", "(", "port", ")", ")", ")", "node_ip_address", "=", "s", ".", "getsockname", "(", ")", "[", "0", "]", "except", "Exception", "as", "e", ":", "node_ip_address", "=", "\"127.0.0.1\"", "# [Errno 101] Network is unreachable", "if", "e", ".", "errno", "==", "101", ":", "try", ":", "# try get node ip address from host name", "host_name", "=", "socket", ".", "getfqdn", "(", "socket", ".", "gethostname", "(", ")", ")", "node_ip_address", "=", "socket", ".", "gethostbyname", "(", "host_name", ")", "except", "Exception", ":", "pass", "finally", ":", "s", ".", "close", "(", ")", "return", "node_ip_address" ]
Determine the IP address of the local node. Args: address (str): The IP address and port of any known live service on the network you care about. Returns: The IP address of the current node.
[ "Determine", "the", "IP", "address", "of", "the", "local", "node", "." ]
python
train
32.290323
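The trick in get_node_ip_address is that connect() on a UDP socket sends no packets; it only asks the kernel which local interface would route to the target, and getsockname() reads that interface's address. A standalone sketch of just that mechanism (the probe address is an assumption, any routable host works):

import socket

def local_ip(probe="8.8.8.8", port=53):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # No datagram is sent; the kernel just picks the outgoing interface.
        s.connect((probe, port))
        return s.getsockname()[0]
    except OSError:
        # No route (e.g. offline): fall back to loopback.
        return "127.0.0.1"
    finally:
        s.close()

print(local_ip())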
alefnula/tea
tea/shell/__init__.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L316-L331
def __rmfile(path):
    """Delete a file.

    Args:
        path (str): Path to the file that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("rmfile: %s" % path)
    try:
        os.remove(path)
        return True
    except Exception as e:
        logger.error("rmfile: %s failed! Error: %s" % (path, e))
        return False
[ "def", "__rmfile", "(", "path", ")", ":", "logger", ".", "info", "(", "\"rmfile: %s\"", "%", "path", ")", "try", ":", "os", ".", "remove", "(", "path", ")", "return", "True", "except", "Exception", "as", "e", ":", "logger", ".", "error", "(", "\"rmfile: %s failed! Error: %s\"", "%", "(", "path", ",", "e", ")", ")", "return", "False" ]
Delete a file. Args: path (str): Path to the file that needs to be deleted. Returns: bool: True if the operation is successful, False otherwise.
[ "Delete", "a", "file", "." ]
python
train
24.5625
Erotemic/utool
utool/util_path.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L437-L461
def remove_existing_fpaths(fpath_list, verbose=VERBOSE, quiet=QUIET,
                           strict=False, print_caller=PRINT_CALLER,
                           lbl='files'):
    """ Checks existence before removing, then tries to remove existing paths """
    import utool as ut
    if print_caller:
        print(util_dbg.get_caller_name(range(1, 4)) +
              ' called remove_existing_fpaths')
    fpath_list_ = ut.filter_Nones(fpath_list)
    exists_list = list(map(exists, fpath_list_))
    if verbose:
        n_total = len(fpath_list)
        n_valid = len(fpath_list_)
        n_exist = sum(exists_list)
        print('[util_path.remove_existing_fpaths] request delete of %d %s' % (
            n_total, lbl))
        if n_valid != n_total:
            print(('[util_path.remove_existing_fpaths] '
                   'trying to delete %d/%d non None %s ') %
                  (n_valid, n_total, lbl))
        print(('[util_path.remove_existing_fpaths] '
               ' %d/%d exist and need to be deleted') % (n_exist, n_valid))
    existing_fpath_list = ut.compress(fpath_list_, exists_list)
    return remove_fpaths(existing_fpath_list, verbose=verbose, quiet=quiet,
                         strict=strict, print_caller=False, lbl=lbl)
[ "def", "remove_existing_fpaths", "(", "fpath_list", ",", "verbose", "=", "VERBOSE", ",", "quiet", "=", "QUIET", ",", "strict", "=", "False", ",", "print_caller", "=", "PRINT_CALLER", ",", "lbl", "=", "'files'", ")", ":", "import", "utool", "as", "ut", "if", "print_caller", ":", "print", "(", "util_dbg", ".", "get_caller_name", "(", "range", "(", "1", ",", "4", ")", ")", "+", "' called remove_existing_fpaths'", ")", "fpath_list_", "=", "ut", ".", "filter_Nones", "(", "fpath_list", ")", "exists_list", "=", "list", "(", "map", "(", "exists", ",", "fpath_list_", ")", ")", "if", "verbose", ":", "n_total", "=", "len", "(", "fpath_list", ")", "n_valid", "=", "len", "(", "fpath_list_", ")", "n_exist", "=", "sum", "(", "exists_list", ")", "print", "(", "'[util_path.remove_existing_fpaths] request delete of %d %s'", "%", "(", "n_total", ",", "lbl", ")", ")", "if", "n_valid", "!=", "n_total", ":", "print", "(", "(", "'[util_path.remove_existing_fpaths] '", "'trying to delete %d/%d non None %s '", ")", "%", "(", "n_valid", ",", "n_total", ",", "lbl", ")", ")", "print", "(", "(", "'[util_path.remove_existing_fpaths] '", "' %d/%d exist and need to be deleted'", ")", "%", "(", "n_exist", ",", "n_valid", ")", ")", "existing_fpath_list", "=", "ut", ".", "compress", "(", "fpath_list_", ",", "exists_list", ")", "return", "remove_fpaths", "(", "existing_fpath_list", ",", "verbose", "=", "verbose", ",", "quiet", "=", "quiet", ",", "strict", "=", "strict", ",", "print_caller", "=", "False", ",", "lbl", "=", "lbl", ")" ]
Checks existence before removing, then tries to remove existing paths
[ "checks", "existance", "before", "removing", ".", "then", "tries", "to", "remove", "exisint", "paths" ]
python
train
49.32
square/pylink
examples/rtt.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/examples/rtt.py#L37-L65
def read_rtt(jlink):
    """Reads the JLink RTT buffer #0 at 10Hz and prints to stdout.

    This method is a polling loop against the connected JLink unit. If
    the JLink is disconnected, it will exit. Additionally, if any exceptions
    are raised, they will be caught and re-raised after interrupting the
    main thread.

    sys.stdout.write and sys.stdout.flush are used since target terminals
    are expected to transmit newlines, which may or may not line up with the
    arbitrarily-chosen 1024-byte buffer that this loop uses to read.

    Args:
        jlink (pylink.JLink): The JLink to read.

    Raises:
        Exception on error.
    """
    try:
        while jlink.connected():
            terminal_bytes = jlink.rtt_read(0, 1024)
            if terminal_bytes:
                sys.stdout.write("".join(map(chr, terminal_bytes)))
                sys.stdout.flush()
            time.sleep(0.1)
    except Exception:
        print("IO read thread exception, exiting...")
        thread.interrupt_main()
        raise
[ "def", "read_rtt", "(", "jlink", ")", ":", "try", ":", "while", "jlink", ".", "connected", "(", ")", ":", "terminal_bytes", "=", "jlink", ".", "rtt_read", "(", "0", ",", "1024", ")", "if", "terminal_bytes", ":", "sys", ".", "stdout", ".", "write", "(", "\"\"", ".", "join", "(", "map", "(", "chr", ",", "terminal_bytes", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "time", ".", "sleep", "(", "0.1", ")", "except", "Exception", ":", "print", "(", "\"IO read thread exception, exiting...\"", ")", "thread", ".", "interrupt_main", "(", ")", "raise" ]
Reads the JLink RTT buffer #0 at 10Hz and prints to stdout. This method is a polling loop against the connected JLink unit. If the JLink is disconnected, it will exit. Additionally, if any exceptions are raised, they will be caught and re-raised after interrupting the main thread. sys.stdout.write and sys.stdout.flush are used since target terminals are expected to transmit newlines, which may or may not line up with the arbitrarily-chosen 1024-byte buffer that this loop uses to read. Args: jlink (pylink.JLink): The JLink to read. Raises: Exception on error.
[ "Reads", "the", "JLink", "RTT", "buffer", "#0", "at", "10Hz", "and", "prints", "to", "stdout", "." ]
python
train
34.586207
portfors-lab/sparkle
sparkle/run/calibration_runner.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/calibration_runner.py#L200-L207
def set_reps(self, reps):
    """set the number of repetitions for the stimuli
    (reference tone and cal stim)

    :param reps: number of times to present the same stimulus
    :type reps: int
    """
    self.stimulus.setRepCount(reps)
    self.refstim.setRepCount(reps)
[ "def", "set_reps", "(", "self", ",", "reps", ")", ":", "self", ".", "stimulus", ".", "setRepCount", "(", "reps", ")", "self", ".", "refstim", ".", "setRepCount", "(", "reps", ")" ]
set the number of repetitions for the stimuli (reference tone and cal stim) :param reps: number of times to present the same stimulus :type reps: int
[ "set", "the", "number", "of", "repetitions", "for", "the", "stimuli", "(", "reference", "tone", "and", "cal", "stim", ")" ]
python
train
35.875
InspectorMustache/base16-builder-python
pybase16_builder/updater.py
https://github.com/InspectorMustache/base16-builder-python/blob/586f1f87ee9f70696ab19c542af6ef55c6548a2e/pybase16_builder/updater.py#L72-L95
def git_clone_job_list(job_list):
    """Deal with all git clone jobs in $job_list."""
    queue = Queue()
    for job in job_list:
        queue.put(job)

    if len(job_list) < 20:
        thread_num = len(job_list)
    else:
        thread_num = 20

    threads = []
    for _ in range(thread_num):
        thread = Thread(target=git_clone_worker, args=(queue, ))
        thread.start()
        threads.append(thread)

    queue.join()

    for _ in range(thread_num):
        queue.put(None)
    for thread in threads:
        thread.join()
[ "def", "git_clone_job_list", "(", "job_list", ")", ":", "queue", "=", "Queue", "(", ")", "for", "job", "in", "job_list", ":", "queue", ".", "put", "(", "job", ")", "if", "len", "(", "job_list", ")", "<", "20", ":", "thread_num", "=", "len", "(", "job_list", ")", "else", ":", "thread_num", "=", "20", "threads", "=", "[", "]", "for", "_", "in", "range", "(", "thread_num", ")", ":", "thread", "=", "Thread", "(", "target", "=", "git_clone_worker", ",", "args", "=", "(", "queue", ",", ")", ")", "thread", ".", "start", "(", ")", "threads", ".", "append", "(", "thread", ")", "queue", ".", "join", "(", ")", "for", "_", "in", "range", "(", "thread_num", ")", ":", "queue", ".", "put", "(", "None", ")", "for", "thread", "in", "threads", ":", "thread", ".", "join", "(", ")" ]
Deal with all git clone jobs in $job_list.
[ "Deal", "with", "all", "git", "clone", "jobs", "in", "$job_list", "." ]
python
train
21.75
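git_clone_job_list uses a classic bounded worker pool: queue.join() waits until every job has been marked task_done(), after which one None sentinel per thread unblocks the workers so they can exit. A self-contained sketch of that pattern with a dummy worker standing in for git_clone_worker:

from queue import Queue
from threading import Thread

def worker(queue):
    while True:
        job = queue.get()
        if job is None:  # sentinel: no more work
            break
        print("processing", job)
        queue.task_done()

queue = Queue()
jobs = ["job-a", "job-b", "job-c"]
for job in jobs:
    queue.put(job)

threads = [Thread(target=worker, args=(queue,)) for _ in range(min(len(jobs), 20))]
for t in threads:
    t.start()

queue.join()             # all real jobs finished
for _ in threads:
    queue.put(None)      # release the workers
for t in threads:
    t.join()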
aio-libs/aioredis
aioredis/commands/streams.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/streams.py#L150-L153
def xgroup_setid(self, stream, group_name, latest_id='$'):
    """Set the latest ID for a consumer group"""
    fut = self.execute(b'XGROUP', b'SETID', stream, group_name, latest_id)
    return wait_ok(fut)
[ "def", "xgroup_setid", "(", "self", ",", "stream", ",", "group_name", ",", "latest_id", "=", "'$'", ")", ":", "fut", "=", "self", ".", "execute", "(", "b'XGROUP'", ",", "b'SETID'", ",", "stream", ",", "group_name", ",", "latest_id", ")", "return", "wait_ok", "(", "fut", ")" ]
Set the latest ID for a consumer group
[ "Set", "the", "latest", "ID", "for", "a", "consumer", "group" ]
python
train
53.75
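A hedged usage sketch for xgroup_setid with aioredis 1.x (assumes a running Redis server and that the stream and consumer group already exist; create_redis_pool is the usual 1.x entry point):

import asyncio
import aioredis

async def main():
    redis = await aioredis.create_redis_pool("redis://localhost")
    # Rewind the group to redeliver the whole stream ('0'),
    # or use '$' (the default) to skip to new entries only.
    await redis.xgroup_setid("mystream", "mygroup", latest_id="0")
    redis.close()
    await redis.wait_closed()

asyncio.run(main())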
ejeschke/ginga
ginga/util/wcs.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/wcs.py#L46-L49
def dmsToDeg(sign, deg, min, sec):
    """Convert dec sign, degrees, minutes, seconds into a signed angle
    in degrees."""
    return sign * (deg + min * degPerDmsMin + sec * degPerDmsSec)
[ "def", "dmsToDeg", "(", "sign", ",", "deg", ",", "min", ",", "sec", ")", ":", "return", "sign", "*", "(", "deg", "+", "min", "*", "degPerDmsMin", "+", "sec", "*", "degPerDmsSec", ")" ]
Convert dec sign, degrees, minutes, seconds into a signed angle in degrees.
[ "Convert", "dec", "sign", "degrees", "minutes", "seconds", "into", "a", "signed", "angle", "in", "degrees", "." ]
python
train
46.75
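dmsToDeg relies on two module-level constants; presumably degPerDmsMin = 1/60 and degPerDmsSec = 1/3600, the usual minute and second fractions of a degree. A self-contained version with a worked example (the signature mirrors the original, so the min parameter shadows the builtin):

# Assumed values of the module-level constants
degPerDmsMin = 1.0 / 60.0
degPerDmsSec = 1.0 / 3600.0

def dmsToDeg(sign, deg, min, sec):
    return sign * (deg + min * degPerDmsMin + sec * degPerDmsSec)

# -12 deg 30' 36" -> -(12 + 0.5 + 0.01) = -12.51
print(dmsToDeg(-1, 12, 30, 36.0))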
quantopian/zipline
zipline/pipeline/graph.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/graph.py#L451-L486
def mask_and_dates_for_term(self,
                            term,
                            root_mask_term,
                            workspace,
                            all_dates):
    """
    Load mask and mask row labels for term.

    Parameters
    ----------
    term : Term
        The term to load the mask and labels for.
    root_mask_term : Term
        The term that represents the root asset exists mask.
    workspace : dict[Term, any]
        The values that have been computed for each term.
    all_dates : pd.DatetimeIndex
        All of the dates that are being computed for in the pipeline.

    Returns
    -------
    mask : np.ndarray
        The correct mask for this term.
    dates : np.ndarray
        The slice of dates for this term.
    """
    mask = term.mask
    mask_offset = self.extra_rows[mask] - self.extra_rows[term]

    # This offset is computed against root_mask_term because that is what
    # determines the shape of the top-level dates array.
    dates_offset = (
        self.extra_rows[root_mask_term] - self.extra_rows[term]
    )

    return workspace[mask][mask_offset:], all_dates[dates_offset:]
[ "def", "mask_and_dates_for_term", "(", "self", ",", "term", ",", "root_mask_term", ",", "workspace", ",", "all_dates", ")", ":", "mask", "=", "term", ".", "mask", "mask_offset", "=", "self", ".", "extra_rows", "[", "mask", "]", "-", "self", ".", "extra_rows", "[", "term", "]", "# This offset is computed against root_mask_term because that is what", "# determines the shape of the top-level dates array.", "dates_offset", "=", "(", "self", ".", "extra_rows", "[", "root_mask_term", "]", "-", "self", ".", "extra_rows", "[", "term", "]", ")", "return", "workspace", "[", "mask", "]", "[", "mask_offset", ":", "]", ",", "all_dates", "[", "dates_offset", ":", "]" ]
Load mask and mask row labels for term. Parameters ---------- term : Term The term to load the mask and labels for. root_mask_term : Term The term that represents the root asset exists mask. workspace : dict[Term, any] The values that have been computed for each term. all_dates : pd.DatetimeIndex All of the dates that are being computed for in the pipeline. Returns ------- mask : np.ndarray The correct mask for this term. dates : np.ndarray The slice of dates for this term.
[ "Load", "mask", "and", "mask", "row", "labels", "for", "term", "." ]
python
train
34.555556
jepegit/cellpy
cellpy/readers/cellreader.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/cellreader.py#L2501-L2547
def get_timestamp(self, cycle=None, dataset_number=None, in_minutes=False,
                  full=True):
    """Returns timestamps (in sec or minutes (if in_minutes==True)).

    Args:
        cycle: cycle number (all if None)
        dataset_number: first dataset if None
        in_minutes: return values in minutes instead of seconds if True
        full: valid only for cycle=None (i.e. all cycles), returns the full
            pandas.Series if True, else a list of pandas.Series

    Returns:
        pandas.Series (or list of pandas.Series if cycle=None or full=False)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return

    cycle_index_header = self.headers_normal.cycle_index_txt
    timestamp_header = self.headers_normal.test_time_txt

    v = pd.Series()
    test = self.datasets[dataset_number].dfdata
    if cycle:
        c = test[(test[cycle_index_header] == cycle)]
        if not self.is_empty(c):
            v = c[timestamp_header]
    else:
        if not full:
            self.logger.debug("getting timestamp for all cycles")
            v = []
            no_cycles = np.amax(test[cycle_index_header])
            for j in range(1, no_cycles + 1):
                txt = "Cycle %i: " % j
                self.logger.debug(txt)
                c = test[(test[cycle_index_header] == j)]
                v.append(c[timestamp_header])
        else:
            self.logger.debug("returning full timestamp col")
            v = test[timestamp_header]

    # The original divided by 60 twice here, which would have converted to
    # hours; a single division converts seconds to minutes as documented.
    if in_minutes and v is not None:
        v /= 60.0
    return v
[ "def", "get_timestamp", "(", "self", ",", "cycle", "=", "None", ",", "dataset_number", "=", "None", ",", "in_minutes", "=", "False", ",", "full", "=", "True", ")", ":", "dataset_number", "=", "self", ".", "_validate_dataset_number", "(", "dataset_number", ")", "if", "dataset_number", "is", "None", ":", "self", ".", "_report_empty_dataset", "(", ")", "return", "cycle_index_header", "=", "self", ".", "headers_normal", ".", "cycle_index_txt", "timestamp_header", "=", "self", ".", "headers_normal", ".", "test_time_txt", "v", "=", "pd", ".", "Series", "(", ")", "test", "=", "self", ".", "datasets", "[", "dataset_number", "]", ".", "dfdata", "if", "cycle", ":", "c", "=", "test", "[", "(", "test", "[", "cycle_index_header", "]", "==", "cycle", ")", "]", "if", "not", "self", ".", "is_empty", "(", "c", ")", ":", "v", "=", "c", "[", "timestamp_header", "]", "else", ":", "if", "not", "full", ":", "self", ".", "logger", ".", "debug", "(", "\"getting timestapm for all cycles\"", ")", "v", "=", "[", "]", "no_cycles", "=", "np", ".", "amax", "(", "test", "[", "cycle_index_header", "]", ")", "for", "j", "in", "range", "(", "1", ",", "no_cycles", "+", "1", ")", ":", "txt", "=", "\"Cycle %i: \"", "%", "j", "self", ".", "logger", ".", "debug", "(", "txt", ")", "c", "=", "test", "[", "(", "test", "[", "cycle_index_header", "]", "==", "j", ")", "]", "v", ".", "append", "(", "c", "[", "timestamp_header", "]", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"returning full timestamp col\"", ")", "v", "=", "test", "[", "timestamp_header", "]", "if", "in_minutes", "and", "v", "is", "not", "None", ":", "v", "/=", "60.0", "if", "in_minutes", "and", "v", "is", "not", "None", ":", "v", "/=", "60.0", "return", "v" ]
Returns timestamps (in sec or minutes (if in_minutes==True)). Args: cycle: cycle number (all if None) dataset_number: first dataset if None in_minutes: return values in minutes instead of seconds if True full: valid only for cycle=None (i.e. all cycles), returns the full pandas.Series if True, else a list of pandas.Series Returns: pandas.Series (or list of pandas.Series if cycle=None or full=False)
[ "Returns", "timestamps", "(", "in", "sec", "or", "minutes", "(", "if", "in_minutes", "==", "True", "))", "." ]
python
train
39.021277
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_map/mp_slipmap_util.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_map/mp_slipmap_util.py#L257-L266
def draw_line(self, img, pixmapper, pt1, pt2, colour, linewidth):
    '''draw a line on the image'''
    pix1 = pixmapper(pt1)
    pix2 = pixmapper(pt2)
    (width, height) = image_shape(img)
    (ret, pix1, pix2) = cv2.clipLine((0, 0, width, height), pix1, pix2)
    if ret is False:
        return
    cv2.line(img, pix1, pix2, colour, linewidth)
    cv2.circle(img, pix2, linewidth * 2, colour)
[ "def", "draw_line", "(", "self", ",", "img", ",", "pixmapper", ",", "pt1", ",", "pt2", ",", "colour", ",", "linewidth", ")", ":", "pix1", "=", "pixmapper", "(", "pt1", ")", "pix2", "=", "pixmapper", "(", "pt2", ")", "(", "width", ",", "height", ")", "=", "image_shape", "(", "img", ")", "(", "ret", ",", "pix1", ",", "pix2", ")", "=", "cv2", ".", "clipLine", "(", "(", "0", ",", "0", ",", "width", ",", "height", ")", ",", "pix1", ",", "pix2", ")", "if", "ret", "is", "False", ":", "return", "cv2", ".", "line", "(", "img", ",", "pix1", ",", "pix2", ",", "colour", ",", "linewidth", ")", "cv2", ".", "circle", "(", "img", ",", "pix2", ",", "linewidth", "*", "2", ",", "colour", ")" ]
draw a line on the image
[ "draw", "a", "line", "on", "the", "image" ]
python
train
42.2
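cv2.clipLine takes a rectangle as (x, y, width, height) and returns False when the segment lies entirely outside it, which is why draw_line bails out early. A self-contained sketch (assumes opencv-python and numpy are installed):

import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)

# Endpoints straddle the image; clipLine trims them to the border.
ret, p1, p2 = cv2.clipLine((0, 0, 100, 100), (-50, 20), (150, 20))
print(ret, p1, p2)  # True, with both endpoints clipped into the rectangle
if ret:
    cv2.line(img, p1, p2, (0, 255, 0), 2)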
ask/redish
redish/serialization.py
https://github.com/ask/redish/blob/4845f8d5e12fd953ecad624b4e1e89f79a082a3e/redish/serialization.py#L39-L43
def decode(self, value):
    """Decode value."""
    if self.encoding:
        value = value.decode(self.encoding)
    return self.deserialize(value)
[ "def", "decode", "(", "self", ",", "value", ")", ":", "if", "self", ".", "encoding", ":", "value", "=", "value", ".", "decode", "(", "self", ".", "encoding", ")", "return", "self", ".", "deserialize", "(", "value", ")" ]
Decode value.
[ "Decode", "value", "." ]
python
train
32.2
crackinglandia/pype32
pype32/directories.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/directories.py#L215-L236
def parse(readDataInstance):
    """
    Returns a new L{ImageBoundImportDescriptorEntry} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create
        a new L{ImageBoundImportDescriptorEntry}.

    @rtype: L{ImageBoundImportDescriptorEntry}
    @return: A new {ImageBoundImportDescriptorEntry} object.
    """
    boundEntry = ImageBoundImportDescriptorEntry()
    boundEntry.timeDateStamp.value = readDataInstance.readDword()
    boundEntry.offsetModuleName.value = readDataInstance.readWord()
    boundEntry.numberOfModuleForwarderRefs.value = readDataInstance.readWord()

    numberOfForwarderRefsEntries = boundEntry.numberOfModuleForwarderRefs.value
    if numberOfForwarderRefsEntries:
        bytesToRead = numberOfForwarderRefsEntries * ImageBoundForwarderRefEntry().sizeof()
        rd = utils.ReadData(readDataInstance.read(bytesToRead))
        boundEntry.forwarderRefsList = ImageBoundForwarderRef.parse(rd, numberOfForwarderRefsEntries)
    return boundEntry
[ "def", "parse", "(", "readDataInstance", ")", ":", "boundEntry", "=", "ImageBoundImportDescriptorEntry", "(", ")", "boundEntry", ".", "timeDateStamp", ".", "value", "=", "readDataInstance", ".", "readDword", "(", ")", "boundEntry", ".", "offsetModuleName", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "boundEntry", ".", "numberOfModuleForwarderRefs", ".", "value", "=", "readDataInstance", ".", "readWord", "(", ")", "numberOfForwarderRefsEntries", "=", "boundEntry", ".", "numberOfModuleForwarderRefs", ".", "value", "if", "numberOfForwarderRefsEntries", ":", "bytesToRead", "=", "numberOfForwarderRefsEntries", "*", "ImageBoundForwarderRefEntry", "(", ")", ".", "sizeof", "(", ")", "rd", "=", "utils", ".", "ReadData", "(", "readDataInstance", ".", "read", "(", "bytesToRead", ")", ")", "boundEntry", ".", "forwarderRefsList", "=", "ImageBoundForwarderRef", ".", "parse", "(", "rd", ",", "numberOfForwarderRefsEntries", ")", "return", "boundEntry" ]
Returns a new L{ImageBoundImportDescriptorEntry} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}. @rtype: L{ImageBoundImportDescriptorEntry} @return: A new {ImageBoundImportDescriptorEntry} object.
[ "Returns", "a", "new", "L", "{", "ImageBoundImportDescriptorEntry", "}", "object", "." ]
python
train
50.954545
datosgobar/pydatajson
pydatajson/indicators.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/indicators.py#L160-L209
def _federation_indicators(catalog, central_catalog,
                           identifier_search=False):
    """Counts how many datasets are included both in the 'catalogs' list
    and in the central catalog, and generates indicators from that
    information.

    Args:
        catalog (dict): already-parsed catalog
        central_catalog (str or dict): path to the central catalog, or a
            dict with the catalog already parsed
    """
    result = {
        'datasets_federados_cant': None,
        'datasets_federados_pct': None,
        'datasets_no_federados_cant': None,
        'datasets_federados_eliminados_cant': None,
        'distribuciones_federadas_cant': None,
        'datasets_federados_eliminados': [],
        'datasets_no_federados': [],
        'datasets_federados': [],
    }
    try:
        central_catalog = readers.read_catalog(central_catalog)
    except Exception as e:
        msg = u'Error leyendo el catálogo central: {}'.format(str(e))
        logger.error(msg)
        return result

    generator = FederationIndicatorsGenerator(central_catalog,
                                              catalog,
                                              id_based=identifier_search)
    result.update({
        'datasets_federados_cant': generator.datasets_federados_cant(),
        'datasets_no_federados_cant': generator.datasets_no_federados_cant(),
        'datasets_federados_eliminados_cant':
            generator.datasets_federados_eliminados_cant(),
        'datasets_federados_eliminados':
            generator.datasets_federados_eliminados(),
        'datasets_no_federados': generator.datasets_no_federados(),
        'datasets_federados': generator.datasets_federados(),
        'datasets_federados_pct': generator.datasets_federados_pct(),
        'distribuciones_federadas_cant':
            generator.distribuciones_federadas_cant()
    })
    return result
[ "def", "_federation_indicators", "(", "catalog", ",", "central_catalog", ",", "identifier_search", "=", "False", ")", ":", "result", "=", "{", "'datasets_federados_cant'", ":", "None", ",", "'datasets_federados_pct'", ":", "None", ",", "'datasets_no_federados_cant'", ":", "None", ",", "'datasets_federados_eliminados_cant'", ":", "None", ",", "'distribuciones_federadas_cant'", ":", "None", ",", "'datasets_federados_eliminados'", ":", "[", "]", ",", "'datasets_no_federados'", ":", "[", "]", ",", "'datasets_federados'", ":", "[", "]", ",", "}", "try", ":", "central_catalog", "=", "readers", ".", "read_catalog", "(", "central_catalog", ")", "except", "Exception", "as", "e", ":", "msg", "=", "u'Error leyendo el catálogo central: {}'.", "f", "ormat(", "s", "tr(", "e", ")", ")", "", "logger", ".", "error", "(", "msg", ")", "return", "result", "generator", "=", "FederationIndicatorsGenerator", "(", "central_catalog", ",", "catalog", ",", "id_based", "=", "identifier_search", ")", "result", ".", "update", "(", "{", "'datasets_federados_cant'", ":", "generator", ".", "datasets_federados_cant", "(", ")", ",", "'datasets_no_federados_cant'", ":", "generator", ".", "datasets_no_federados_cant", "(", ")", ",", "'datasets_federados_eliminados_cant'", ":", "generator", ".", "datasets_federados_eliminados_cant", "(", ")", ",", "'datasets_federados_eliminados'", ":", "generator", ".", "datasets_federados_eliminados", "(", ")", ",", "'datasets_no_federados'", ":", "generator", ".", "datasets_no_federados", "(", ")", ",", "'datasets_federados'", ":", "generator", ".", "datasets_federados", "(", ")", ",", "'datasets_federados_pct'", ":", "generator", ".", "datasets_federados_pct", "(", ")", ",", "'distribuciones_federadas_cant'", ":", "generator", ".", "distribuciones_federadas_cant", "(", ")", "}", ")", "return", "result" ]
Counts how many datasets are included both in the 'catalogs' list and in the central catalog, and generates indicators from that information. Args: catalog (dict): already-parsed catalog central_catalog (str or dict): path to the central catalog, or a dict with the catalog already parsed
[ "Cuenta", "la", "cantidad", "de", "datasets", "incluídos", "tanto", "en", "la", "lista", "catalogs", "como", "en", "el", "catálogo", "central", "y", "genera", "indicadores", "a", "partir", "de", "esa", "información", "." ]
python
train
38.24
openid/python-openid
openid/extensions/draft/pape2.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/draft/pape2.py#L211-L247
def parseExtensionArgs(self, args, strict=False):
    """Parse the provider authentication policy arguments into the
    internal state of this object

    @param args: unqualified provider authentication policy arguments

    @param strict: Whether to raise an exception when bad data is
        encountered

    @returns: None. The data is parsed into the internal fields of
        this object.
    """
    policies_str = args.get('auth_policies')
    if policies_str and policies_str != 'none':
        self.auth_policies = policies_str.split(' ')

    nist_level_str = args.get('nist_auth_level')
    if nist_level_str:
        try:
            nist_level = int(nist_level_str)
        except ValueError:
            if strict:
                raise ValueError('nist_auth_level must be an integer between '
                                 'zero and four, inclusive')
            else:
                self.nist_auth_level = None
        else:
            if 0 <= nist_level < 5:
                self.nist_auth_level = nist_level

    auth_time = args.get('auth_time')
    if auth_time:
        if TIME_VALIDATOR.match(auth_time):
            self.auth_time = auth_time
        elif strict:
            raise ValueError("auth_time must be in RFC3339 format")
[ "def", "parseExtensionArgs", "(", "self", ",", "args", ",", "strict", "=", "False", ")", ":", "policies_str", "=", "args", ".", "get", "(", "'auth_policies'", ")", "if", "policies_str", "and", "policies_str", "!=", "'none'", ":", "self", ".", "auth_policies", "=", "policies_str", ".", "split", "(", "' '", ")", "nist_level_str", "=", "args", ".", "get", "(", "'nist_auth_level'", ")", "if", "nist_level_str", ":", "try", ":", "nist_level", "=", "int", "(", "nist_level_str", ")", "except", "ValueError", ":", "if", "strict", ":", "raise", "ValueError", "(", "'nist_auth_level must be an integer between '", "'zero and four, inclusive'", ")", "else", ":", "self", ".", "nist_auth_level", "=", "None", "else", ":", "if", "0", "<=", "nist_level", "<", "5", ":", "self", ".", "nist_auth_level", "=", "nist_level", "auth_time", "=", "args", ".", "get", "(", "'auth_time'", ")", "if", "auth_time", ":", "if", "TIME_VALIDATOR", ".", "match", "(", "auth_time", ")", ":", "self", ".", "auth_time", "=", "auth_time", "elif", "strict", ":", "raise", "ValueError", "(", "\"auth_time must be in RFC3339 format\"", ")" ]
Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object.
[ "Parse", "the", "provider", "authentication", "policy", "arguments", "into", "the", "internal", "state", "of", "this", "object" ]
python
train
36.72973
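A hedged usage sketch of parseExtensionArgs (it assumes pape2's Response class, where this method lives, accepts no constructor arguments; the argument values are illustrative):

from openid.extensions.draft import pape2

resp = pape2.Response()
resp.parseExtensionArgs({
    'auth_policies': 'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant',
    'nist_auth_level': '2',
    'auth_time': '2005-05-15T17:11:51Z',
}, strict=True)
print(resp.auth_policies, resp.nist_auth_level, resp.auth_time)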
tensorflow/tensor2tensor
tensor2tensor/data_generators/gym_env.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/gym_env.py#L264-L301
def step(self, actions):
    """Makes a step in all environments.

    Does any preprocessing and records frames.

    Args:
        actions: Batch of actions.

    Returns:
        (obs, rewards, dones) - batches of observations, rewards and done
        flags respectively.

    Raises:
        ValueError: when the data for current epoch has already been loaded.
    """
    if self._store_rollouts and \
            self._rollouts_by_epoch_and_split[self.current_epoch]:
        raise ValueError(
            "Data for current epoch has already been loaded from disk."
        )
    (obs, unclipped_rewards, dones) = self._step(actions)
    obs = self._preprocess_observations(obs)
    (min_reward, max_reward) = self.reward_range
    rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
    if self._store_rollouts:
        unclipped_rewards = unclipped_rewards.astype(np.float64)
        encoded_obs = self._encode_observations(obs)
        for (rollout, frame, action) in zip(
            self._current_batch_rollouts, self._current_batch_frames, actions
        ):
            rollout.append(frame._replace(action=action))

        # orud = (observation, reward, unclipped_reward, done)
        self._current_batch_frames = [
            Frame(*orud, action=None)
            for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)
        ]
    return (obs, rewards, dones)
[ "def", "step", "(", "self", ",", "actions", ")", ":", "if", "self", ".", "_store_rollouts", "and", "self", ".", "_rollouts_by_epoch_and_split", "[", "self", ".", "current_epoch", "]", ":", "raise", "ValueError", "(", "\"Data for current epoch has already been loaded from disk.\"", ")", "(", "obs", ",", "unclipped_rewards", ",", "dones", ")", "=", "self", ".", "_step", "(", "actions", ")", "obs", "=", "self", ".", "_preprocess_observations", "(", "obs", ")", "(", "min_reward", ",", "max_reward", ")", "=", "self", ".", "reward_range", "rewards", "=", "np", ".", "around", "(", "np", ".", "clip", "(", "unclipped_rewards", ",", "min_reward", ",", "max_reward", ")", ")", "if", "self", ".", "_store_rollouts", ":", "unclipped_rewards", "=", "unclipped_rewards", ".", "astype", "(", "np", ".", "float64", ")", "encoded_obs", "=", "self", ".", "_encode_observations", "(", "obs", ")", "for", "(", "rollout", ",", "frame", ",", "action", ")", "in", "zip", "(", "self", ".", "_current_batch_rollouts", ",", "self", ".", "_current_batch_frames", ",", "actions", ")", ":", "rollout", ".", "append", "(", "frame", ".", "_replace", "(", "action", "=", "action", ")", ")", "# orud = (observation, reward, unclipped_reward, done)", "self", ".", "_current_batch_frames", "=", "[", "Frame", "(", "*", "orud", ",", "action", "=", "None", ")", "for", "orud", "in", "zip", "(", "encoded_obs", ",", "rewards", ",", "unclipped_rewards", ",", "dones", ")", "]", "return", "(", "obs", ",", "rewards", ",", "dones", ")" ]
Makes a step in all environments. Does any preprocessing and records frames. Args: actions: Batch of actions. Returns: (obs, rewards, dones) - batches of observations, rewards and done flags respectively. Raises: ValueError: when the data for current epoch has already been loaded.
[ "Makes", "a", "step", "in", "all", "environments", "." ]
python
train
35
DataDog/integrations-core
datadog_checks_dev/datadog_checks/dev/tooling/commands/release.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_dev/datadog_checks/dev/tooling/commands/release.py#L553-L690
def make(ctx, check, version, initial_release, skip_sign, sign_only):
    """Perform a set of operations needed to release a single check:

    \b
    * update the version in __about__.py
    * update the changelog
    * update the requirements-agent-release.txt file
    * update in-toto metadata
    * commit the above changes

    You can release everything at once by setting the check to `all`.

    \b
    If you run into issues signing:

    \b
    - Ensure you did `gpg --import <YOUR_KEY_ID>.gpg.pub`
    """
    # Import lazily since in-toto runs a subprocess to check for gpg2 on load
    from ..signing import update_link_metadata, YubikeyException

    releasing_all = check == 'all'

    valid_checks = get_valid_checks()
    if not releasing_all and check not in valid_checks:
        abort('Check `{}` is not an Agent-based Integration'.format(check))

    # don't run the task on the master branch
    if get_current_branch() == 'master':
        abort('This task will commit, you do not want to add commits to master directly')

    if releasing_all:
        if version:
            abort('You cannot bump every check to the same version')
        checks = sorted(valid_checks)
    else:
        checks = [check]

    if initial_release:
        version = '1.0.0'

    for check in checks:
        if sign_only:
            break
        elif initial_release and check in BETA_PACKAGES:
            continue

        # Initial releases will only bump if not already 1.0.0 so no need to always output
        if not initial_release:
            echo_success('Check `{}`'.format(check))

        if version:
            # sanity check on the version provided
            cur_version = get_version_string(check)

            if version == 'final':
                # Remove any pre-release metadata
                version = finalize_version(cur_version)
            else:
                # Keep track of intermediate version bumps
                prev_version = cur_version
                for method in version.split(','):
                    # Apply any supported version bumping methods. Chaining is required for going
                    # from mainline releases to development releases since e.g. x.y.z > x.y.z-rc.A.
                    # So for an initial bug fix dev release you can do `fix,rc`.
                    if method in VERSION_BUMP:
                        version = VERSION_BUMP[method](prev_version)
                        prev_version = version

            p_version = parse_version_info(version)
            p_current = parse_version_info(cur_version)
            if p_version <= p_current:
                if initial_release:
                    continue
                else:
                    abort('Current version is {}, cannot bump to {}'.format(cur_version, version))
        else:
            cur_version, changelog_types = ctx.invoke(changes, check=check, dry_run=True)
            if not changelog_types:
                echo_warning('No changes for {}, skipping...'.format(check))
                continue
            bump_function = get_bump_function(changelog_types)
            version = bump_function(cur_version)

        if initial_release:
            echo_success('Check `{}`'.format(check))

        # update the version number
        echo_info('Current version of check {}: {}'.format(check, cur_version))
        echo_waiting('Bumping to {}... '.format(version), nl=False)
        update_version_module(check, cur_version, version)
        echo_success('success!')

        # update the CHANGELOG
        echo_waiting('Updating the changelog... ', nl=False)
        # TODO: Avoid double GitHub API calls when bumping all checks at once
        ctx.invoke(
            changelog,
            check=check,
            version=version,
            old_version=cur_version,
            initial=initial_release,
            quiet=True,
            dry_run=False,
        )
        echo_success('success!')

        commit_targets = [check]
        # update the list of integrations to be shipped with the Agent
        if check not in NOT_CHECKS:
            req_file = get_agent_release_requirements()
            commit_targets.append(os.path.basename(req_file))
            echo_waiting('Updating the Agent requirements file... ', nl=False)
            update_agent_requirements(req_file, check, get_agent_requirement_line(check, version))
            echo_success('success!')

        echo_waiting('Committing files...')

        # commit the changes.
        # do not use [ci skip] so releases get built https://docs.gitlab.com/ee/ci/yaml/#skipping-jobs
        msg = '[Release] Bumped {} version to {}'.format(check, version)
        git_commit(commit_targets, msg)

        if not initial_release:
            # Reset version
            version = None

    if sign_only or not skip_sign:
        echo_waiting('Updating release metadata...')
        echo_info('Please touch your Yubikey immediately after entering your PIN!')
        try:
            commit_targets = update_link_metadata(checks)
            git_commit(commit_targets, '[Release] Update metadata', force=True)
        except YubikeyException as e:
            abort('A problem occurred while signing metadata: {}'.format(e))

    # done
    echo_success('All done, remember to push to origin and open a PR to merge these changes on master')
[ "def", "make", "(", "ctx", ",", "check", ",", "version", ",", "initial_release", ",", "skip_sign", ",", "sign_only", ")", ":", "# Import lazily since in-toto runs a subprocess to check for gpg2 on load", "from", ".", ".", "signing", "import", "update_link_metadata", ",", "YubikeyException", "releasing_all", "=", "check", "==", "'all'", "valid_checks", "=", "get_valid_checks", "(", ")", "if", "not", "releasing_all", "and", "check", "not", "in", "valid_checks", ":", "abort", "(", "'Check `{}` is not an Agent-based Integration'", ".", "format", "(", "check", ")", ")", "# don't run the task on the master branch", "if", "get_current_branch", "(", ")", "==", "'master'", ":", "abort", "(", "'This task will commit, you do not want to add commits to master directly'", ")", "if", "releasing_all", ":", "if", "version", ":", "abort", "(", "'You cannot bump every check to the same version'", ")", "checks", "=", "sorted", "(", "valid_checks", ")", "else", ":", "checks", "=", "[", "check", "]", "if", "initial_release", ":", "version", "=", "'1.0.0'", "for", "check", "in", "checks", ":", "if", "sign_only", ":", "break", "elif", "initial_release", "and", "check", "in", "BETA_PACKAGES", ":", "continue", "# Initial releases will only bump if not already 1.0.0 so no need to always output", "if", "not", "initial_release", ":", "echo_success", "(", "'Check `{}`'", ".", "format", "(", "check", ")", ")", "if", "version", ":", "# sanity check on the version provided", "cur_version", "=", "get_version_string", "(", "check", ")", "if", "version", "==", "'final'", ":", "# Remove any pre-release metadata", "version", "=", "finalize_version", "(", "cur_version", ")", "else", ":", "# Keep track of intermediate version bumps", "prev_version", "=", "cur_version", "for", "method", "in", "version", ".", "split", "(", "','", ")", ":", "# Apply any supported version bumping methods. Chaining is required for going", "# from mainline releases to development releases since e.g. x.y.z > x.y.z-rc.A.", "# So for an initial bug fix dev release you can do `fix,rc`.", "if", "method", "in", "VERSION_BUMP", ":", "version", "=", "VERSION_BUMP", "[", "method", "]", "(", "prev_version", ")", "prev_version", "=", "version", "p_version", "=", "parse_version_info", "(", "version", ")", "p_current", "=", "parse_version_info", "(", "cur_version", ")", "if", "p_version", "<=", "p_current", ":", "if", "initial_release", ":", "continue", "else", ":", "abort", "(", "'Current version is {}, cannot bump to {}'", ".", "format", "(", "cur_version", ",", "version", ")", ")", "else", ":", "cur_version", ",", "changelog_types", "=", "ctx", ".", "invoke", "(", "changes", ",", "check", "=", "check", ",", "dry_run", "=", "True", ")", "if", "not", "changelog_types", ":", "echo_warning", "(", "'No changes for {}, skipping...'", ".", "format", "(", "check", ")", ")", "continue", "bump_function", "=", "get_bump_function", "(", "changelog_types", ")", "version", "=", "bump_function", "(", "cur_version", ")", "if", "initial_release", ":", "echo_success", "(", "'Check `{}`'", ".", "format", "(", "check", ")", ")", "# update the version number", "echo_info", "(", "'Current version of check {}: {}'", ".", "format", "(", "check", ",", "cur_version", ")", ")", "echo_waiting", "(", "'Bumping to {}... '", ".", "format", "(", "version", ")", ",", "nl", "=", "False", ")", "update_version_module", "(", "check", ",", "cur_version", ",", "version", ")", "echo_success", "(", "'success!'", ")", "# update the CHANGELOG", "echo_waiting", "(", "'Updating the changelog... 
'", ",", "nl", "=", "False", ")", "# TODO: Avoid double GitHub API calls when bumping all checks at once", "ctx", ".", "invoke", "(", "changelog", ",", "check", "=", "check", ",", "version", "=", "version", ",", "old_version", "=", "cur_version", ",", "initial", "=", "initial_release", ",", "quiet", "=", "True", ",", "dry_run", "=", "False", ",", ")", "echo_success", "(", "'success!'", ")", "commit_targets", "=", "[", "check", "]", "# update the list of integrations to be shipped with the Agent", "if", "check", "not", "in", "NOT_CHECKS", ":", "req_file", "=", "get_agent_release_requirements", "(", ")", "commit_targets", ".", "append", "(", "os", ".", "path", ".", "basename", "(", "req_file", ")", ")", "echo_waiting", "(", "'Updating the Agent requirements file... '", ",", "nl", "=", "False", ")", "update_agent_requirements", "(", "req_file", ",", "check", ",", "get_agent_requirement_line", "(", "check", ",", "version", ")", ")", "echo_success", "(", "'success!'", ")", "echo_waiting", "(", "'Committing files...'", ")", "# commit the changes.", "# do not use [ci skip] so releases get built https://docs.gitlab.com/ee/ci/yaml/#skipping-jobs", "msg", "=", "'[Release] Bumped {} version to {}'", ".", "format", "(", "check", ",", "version", ")", "git_commit", "(", "commit_targets", ",", "msg", ")", "if", "not", "initial_release", ":", "# Reset version", "version", "=", "None", "if", "sign_only", "or", "not", "skip_sign", ":", "echo_waiting", "(", "'Updating release metadata...'", ")", "echo_info", "(", "'Please touch your Yubikey immediately after entering your PIN!'", ")", "try", ":", "commit_targets", "=", "update_link_metadata", "(", "checks", ")", "git_commit", "(", "commit_targets", ",", "'[Release] Update metadata'", ",", "force", "=", "True", ")", "except", "YubikeyException", "as", "e", ":", "abort", "(", "'A problem occurred while signing metadata: {}'", ".", "format", "(", "e", ")", ")", "# done", "echo_success", "(", "'All done, remember to push to origin and open a PR to merge these changes on master'", ")" ]
Perform a set of operations needed to release a single check: \b * update the version in __about__.py * update the changelog * update the requirements-agent-release.txt file * update in-toto metadata * commit the above changes You can release everything at once by setting the check to `all`. \b If you run into issues signing: \b - Ensure you did `gpg --import <YOUR_KEY_ID>.gpg.pub`
[ "Perform", "a", "set", "of", "operations", "needed", "to", "release", "a", "single", "check", ":" ]
python
train
38.021739
apache/incubator-mxnet
python/mxnet/gluon/block.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L522-L533
def cast(self, dtype):
    """Cast this Block to use another data type.

    Parameters
    ----------
    dtype : str or numpy.dtype
        The new data type.
    """
    for child in self._children.values():
        child.cast(dtype)
    for _, param in self.params.items():
        param.cast(dtype)
[ "def", "cast", "(", "self", ",", "dtype", ")", ":", "for", "child", "in", "self", ".", "_children", ".", "values", "(", ")", ":", "child", ".", "cast", "(", "dtype", ")", "for", "_", ",", "param", "in", "self", ".", "params", ".", "items", "(", ")", ":", "param", ".", "cast", "(", "dtype", ")" ]
Cast this Block to use another data type. Parameters ---------- dtype : str or numpy.dtype The new data type.
[ "Cast", "this", "Block", "to", "use", "another", "data", "type", "." ]
python
train
27.666667
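A hedged usage sketch of Block.cast with Gluon (assumes mxnet is installed): because cast() recurses through children and parameters, one call moves a whole network to half precision, after which inputs must match the new dtype:

import mxnet as mx
from mxnet.gluon import nn

net = nn.Dense(2)
net.initialize()
net.cast('float16')

# Inputs must now be float16 as well.
out = net(mx.nd.ones((1, 4), dtype='float16'))
print(out.dtype)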
trevisanj/f311
f311/hapi.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/hapi.py#L11690-L11726
def radianceSpectrum(Omegas, AbsorptionCoefficient,
                     Environment={'l': 100., 'T': 296.},
                     File=None, Format='%e %e', Wavenumber=None):
    """
    INPUT PARAMETERS:
        Wavenumber/Omegas:     wavenumber grid                 (required)
        AbsorptionCoefficient: absorption coefficient on grid  (required)
        Environment:           dictionary containing path length in cm.
                               and temperature in Kelvin.
                               Default={'l':100.,'T':296.}
        File:                  name of the output file         (optional)
        Format:                c format used in file output,
                               default '%e %e'                 (optional)
    OUTPUT PARAMETERS:
        Wavenum: wavenumber grid
        Xsect:   radiance spectrum calculated on the grid
    ---
    DESCRIPTION:
        Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based on
        previously calculated absorption coefficient.
        Radiance spectrum is calculated at an arbitrary optical path
        length 'l' (1 m by default) and temperature 'T' (296 K by
        default). For obtaining a physically meaningful result 'T'
        must be the same as a temperature which was used in
        absorption coefficient.
    ---
    EXAMPLE OF USAGE:
        nu,radi = radianceSpectrum(nu,coef)
    ---
    """
    # compatibility with older versions
    if Wavenumber:
        Omegas = Wavenumber
    l = Environment['l']
    T = Environment['T']
    Alw = 1 - exp(-AbsorptionCoefficient * l)
    LBBTw = 2 * hh * cc ** 2 * Omegas ** 3 / (exp(hh * cc * Omegas / (cBolts * T)) - 1) * 1.0E-7
    Xsect = Alw * LBBTw  # W/sr/cm**2/cm**-1
    if File:
        save_to_file(File, Format, Omegas, Xsect)
    return Omegas, Xsect
[ "def", "radianceSpectrum", "(", "Omegas", ",", "AbsorptionCoefficient", ",", "Environment", "=", "{", "'l'", ":", "100.", ",", "'T'", ":", "296.", "}", ",", "File", "=", "None", ",", "Format", "=", "'%e %e'", ",", "Wavenumber", "=", "None", ")", ":", "# compatibility with older versions", "if", "Wavenumber", ":", "Omegas", "=", "Wavenumber", "l", "=", "Environment", "[", "'l'", "]", "T", "=", "Environment", "[", "'T'", "]", "Alw", "=", "1", "-", "exp", "(", "-", "AbsorptionCoefficient", "*", "l", ")", "LBBTw", "=", "2", "*", "hh", "*", "cc", "**", "2", "*", "Omegas", "**", "3", "/", "(", "exp", "(", "hh", "*", "cc", "*", "Omegas", "/", "(", "cBolts", "*", "T", ")", ")", "-", "1", ")", "*", "1.0E-7", "Xsect", "=", "Alw", "*", "LBBTw", "# W/sr/cm**2/cm**-1", "if", "File", ":", "save_to_file", "(", "File", ",", "Format", ",", "Omegas", ",", "Xsect", ")", "return", "Omegas", ",", "Xsect" ]
INPUT PARAMETERS: Wavenumber/Omegas: wavenumber grid (required) AbsorptionCoefficient: absorption coefficient on grid (required) Environment: dictionary containing path length in cm. and temperature in Kelvin. Default={'l':100.,'T':296.} File: name of the output file (optional) Format: c format used in file output, default '%e %e' (optional) OUTPUT PARAMETERS: Wavenum: wavenumber grid Xsect: radiance spectrum calculated on the grid --- DESCRIPTION: Calculate a radiance spectrum (in W/sr/cm^2/cm-1) based on previously calculated absorption coefficient. Radiance spectrum is calculated at an arbitrary optical path length 'l' (1 m by default) and temperature 'T' (296 K by default). For obtaining a physically meaningful result 'T' must be the same as a temperature which was used in absorption coefficient. --- EXAMPLE OF USAGE: nu,radi = radianceSpectrum(nu,coef) ---
[ "INPUT", "PARAMETERS", ":", "Wavenumber", "/", "Omegas", ":", "wavenumber", "grid", "(", "required", ")", "AbsorptionCoefficient", ":", "absorption", "coefficient", "on", "grid", "(", "required", ")", "Environment", ":", "dictionary", "containing", "path", "length", "in", "cm", ".", "and", "temperature", "in", "Kelvin", ".", "Default", "=", "{", "l", ":", "100", ".", "T", ":", "296", ".", "}", "File", ":", "name", "of", "the", "output", "file", "(", "optional", ")", "Format", ":", "c", "format", "used", "in", "file", "output", "default", "%e", "%e", "(", "optional", ")", "OUTPUT", "PARAMETERS", ":", "Wavenum", ":", "wavenumber", "grid", "Xsect", ":", "radiance", "spectrum", "calculated", "on", "the", "grid", "---", "DESCRIPTION", ":", "Calculate", "a", "radiance", "spectrum", "(", "in", "W", "/", "sr", "/", "cm^2", "/", "cm", "-", "1", ")", "based", "on", "previously", "calculated", "absorption", "coefficient", ".", "Radiance", "spectrum", "is", "calculated", "at", "an", "arbitrary", "optical", "path", "length", "l", "(", "1", "m", "by", "default", ")", "and", "temperature", "T", "(", "296", "K", "by", "default", ")", ".", "For", "obtaining", "a", "physically", "meaningful", "result", "T", "must", "be", "the", "same", "as", "a", "temperature", "which", "was", "used", "in", "absorption", "coefficient", ".", "---", "EXAMPLE", "OF", "USAGE", ":", "nu", "radi", "=", "radianceSpectrum", "(", "nu", "coef", ")", "---" ]
python
train
43.567568
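For reference, radianceSpectrum implements the emission of a homogeneous gas layer: a Planck function in wavenumber units weighted by the layer's emissivity. A sketch of the math, matching the hh, cc, and cBolts constants the code pulls from module scope (the 1.0E-7 factor converts the CGS erg/s result to W):

\[
  L(\tilde{\nu}) \;=\;
  \underbrace{\left(1 - e^{-k(\tilde{\nu})\,l}\right)}_{\text{emissivity } A_l}
  \cdot
  \underbrace{\frac{2 h c^{2}\,\tilde{\nu}^{3}}{e^{h c \tilde{\nu}/(k_B T)} - 1}}_{\text{Planck function } B(\tilde{\nu},\,T)}
\]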
otto-torino/django-baton
baton/views.py
https://github.com/otto-torino/django-baton/blob/e791b5db3a0814bb49d8dfbdfb989d45e03594b7/baton/views.py#L121-L132
def get_app_model_voice(self, app_model_item):
    """ App Model voice
    Returns the js menu compatible voice dict if the user can see it,
    None otherwise
    """
    if app_model_item.get('name', None) is None:
        raise ImproperlyConfigured('Model menu voices must have a name key')  # noqa
    if app_model_item.get('app', None) is None:
        raise ImproperlyConfigured('Model menu voices must have an app key')  # noqa

    return self.get_model_voice(app_model_item.get('app'), app_model_item)
[ "def", "get_app_model_voice", "(", "self", ",", "app_model_item", ")", ":", "if", "app_model_item", ".", "get", "(", "'name'", ",", "None", ")", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "'Model menu voices must have a name key'", ")", "# noqa", "if", "app_model_item", ".", "get", "(", "'app'", ",", "None", ")", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "'Model menu voices must have an app key'", ")", "# noqa", "return", "self", ".", "get_model_voice", "(", "app_model_item", ".", "get", "(", "'app'", ")", ",", "app_model_item", ")" ]
App Model voice Returns the js menu compatible voice dict if the user can see it, None otherwise
[ "App", "Model", "voice", "Returns", "the", "js", "menu", "compatible", "voice", "dict", "if", "the", "user", "can", "see", "it", "None", "otherwise" ]
python
train
45.166667
af/turrentine
turrentine/models.py
https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/models.py#L41-L49
def create(self, *args, **kwargs):
    """
    Allow an 'author' kwarg to automatically fill in the created_by
    and last_modified_by fields.
    """
    # 'in' replaces the Python 2-only dict.has_key()
    if 'author' in kwargs:
        kwargs['created_by'] = kwargs['author']
        kwargs['last_modified_by'] = kwargs['author']
        del kwargs['author']
    return super(CMSPageManager, self).create(*args, **kwargs)
[ "def", "create", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "has_key", "(", "'author'", ")", ":", "kwargs", "[", "'created_by'", "]", "=", "kwargs", "[", "'author'", "]", "kwargs", "[", "'last_modified_by'", "]", "=", "kwargs", "[", "'author'", "]", "del", "kwargs", "[", "'author'", "]", "return", "super", "(", "CMSPageManager", ",", "self", ")", ".", "create", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Allow an 'author' kwarg to automatically fill in the created_by and last_modified_by fields.
[ "Allow", "an", "author", "kwarg", "to", "automatically", "fill", "in", "the", "created_by", "and", "last_modified_by", "fields", "." ]
python
train
44.222222
newville/asteval
asteval/asteval.py
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L267-L277
def parse(self, text):
    """Parse statement/expression to Ast representation."""
    self.expr = text
    try:
        out = ast.parse(text)
    except SyntaxError:
        self.raise_exception(None, msg='Syntax Error', expr=text)
    except:
        self.raise_exception(None, msg='Runtime Error', expr=text)
    return out
[ "def", "parse", "(", "self", ",", "text", ")", ":", "self", ".", "expr", "=", "text", "try", ":", "out", "=", "ast", ".", "parse", "(", "text", ")", "except", "SyntaxError", ":", "self", ".", "raise_exception", "(", "None", ",", "msg", "=", "'Syntax Error'", ",", "expr", "=", "text", ")", "except", ":", "self", ".", "raise_exception", "(", "None", ",", "msg", "=", "'Runtime Error'", ",", "expr", "=", "text", ")", "return", "out" ]
Parse statement/expression to Ast representation.
[ "Parse", "statement", "/", "expression", "to", "Ast", "representation", "." ]
python
train
32.090909
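A hedged usage sketch (assumes the asteval package is installed): Interpreter pairs parse() with run(), which evaluates the resulting AST node:

from asteval import Interpreter

aeval = Interpreter()
tree = aeval.parse("1 + 2 * 3")
print(aeval.run(tree))  # 7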
pyrogram/pyrogram
pyrogram/client/client.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/client.py#L263-L355
def start(self):
    """Use this method to start the Client after creating it.
    Requires no parameters.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``ConnectionError`` in case you try to start an already started Client.
    """
    if self.is_started:
        raise ConnectionError("Client has already been started")

    if self.BOT_TOKEN_RE.match(self.session_name):
        self.is_bot = True
        self.bot_token = self.session_name
        self.session_name = self.session_name.split(":")[0]
        log.warning('\nWARNING: You are using a bot token as session name!\n'
                    'This usage will be deprecated soon. Please use a session file name to load '
                    'an existing session and the bot_token argument to create new sessions.\n'
                    'More info: https://docs.pyrogram.ml/start/Setup#bot-authorization\n')

    self.load_config()
    self.load_session()
    self.load_plugins()

    self.session = Session(
        self,
        self.dc_id,
        self.auth_key
    )

    self.session.start()
    self.is_started = True

    try:
        if self.user_id is None:
            if self.bot_token is None:
                self.is_bot = False
                self.authorize_user()
            else:
                self.is_bot = True
                self.authorize_bot()

            self.save_session()

        if not self.is_bot:
            if self.takeout:
                self.takeout_id = self.send(functions.account.InitTakeoutSession()).id
                log.warning("Takeout session {} initiated".format(self.takeout_id))

            now = time.time()

            if abs(now - self.date) > Client.OFFLINE_SLEEP:
                self.peers_by_username = {}
                self.peers_by_phone = {}

                self.get_initial_dialogs()
                self.get_contacts()
            else:
                self.send(functions.messages.GetPinnedDialogs())
                self.get_initial_dialogs_chunk()
        else:
            self.send(functions.updates.GetState())
    except Exception as e:
        self.is_started = False
        self.session.stop()
        raise e

    for i in range(self.UPDATES_WORKERS):
        self.updates_workers_list.append(
            Thread(
                target=self.updates_worker,
                name="UpdatesWorker#{}".format(i + 1)
            )
        )
        self.updates_workers_list[-1].start()

    for i in range(self.DOWNLOAD_WORKERS):
        self.download_workers_list.append(
            Thread(
                target=self.download_worker,
                name="DownloadWorker#{}".format(i + 1)
            )
        )
        self.download_workers_list[-1].start()

    self.dispatcher.start()

    mimetypes.init()
    Syncer.add(self)

    return self
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "is_started", ":", "raise", "ConnectionError", "(", "\"Client has already been started\"", ")", "if", "self", ".", "BOT_TOKEN_RE", ".", "match", "(", "self", ".", "session_name", ")", ":", "self", ".", "is_bot", "=", "True", "self", ".", "bot_token", "=", "self", ".", "session_name", "self", ".", "session_name", "=", "self", ".", "session_name", ".", "split", "(", "\":\"", ")", "[", "0", "]", "log", ".", "warning", "(", "'\\nWARNING: You are using a bot token as session name!\\n'", "'This usage will be deprecated soon. Please use a session file name to load '", "'an existing session and the bot_token argument to create new sessions.\\n'", "'More info: https://docs.pyrogram.ml/start/Setup#bot-authorization\\n'", ")", "self", ".", "load_config", "(", ")", "self", ".", "load_session", "(", ")", "self", ".", "load_plugins", "(", ")", "self", ".", "session", "=", "Session", "(", "self", ",", "self", ".", "dc_id", ",", "self", ".", "auth_key", ")", "self", ".", "session", ".", "start", "(", ")", "self", ".", "is_started", "=", "True", "try", ":", "if", "self", ".", "user_id", "is", "None", ":", "if", "self", ".", "bot_token", "is", "None", ":", "self", ".", "is_bot", "=", "False", "self", ".", "authorize_user", "(", ")", "else", ":", "self", ".", "is_bot", "=", "True", "self", ".", "authorize_bot", "(", ")", "self", ".", "save_session", "(", ")", "if", "not", "self", ".", "is_bot", ":", "if", "self", ".", "takeout", ":", "self", ".", "takeout_id", "=", "self", ".", "send", "(", "functions", ".", "account", ".", "InitTakeoutSession", "(", ")", ")", ".", "id", "log", ".", "warning", "(", "\"Takeout session {} initiated\"", ".", "format", "(", "self", ".", "takeout_id", ")", ")", "now", "=", "time", ".", "time", "(", ")", "if", "abs", "(", "now", "-", "self", ".", "date", ")", ">", "Client", ".", "OFFLINE_SLEEP", ":", "self", ".", "peers_by_username", "=", "{", "}", "self", ".", "peers_by_phone", "=", "{", "}", "self", ".", "get_initial_dialogs", "(", ")", "self", ".", "get_contacts", "(", ")", "else", ":", "self", ".", "send", "(", "functions", ".", "messages", ".", "GetPinnedDialogs", "(", ")", ")", "self", ".", "get_initial_dialogs_chunk", "(", ")", "else", ":", "self", ".", "send", "(", "functions", ".", "updates", ".", "GetState", "(", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "is_started", "=", "False", "self", ".", "session", ".", "stop", "(", ")", "raise", "e", "for", "i", "in", "range", "(", "self", ".", "UPDATES_WORKERS", ")", ":", "self", ".", "updates_workers_list", ".", "append", "(", "Thread", "(", "target", "=", "self", ".", "updates_worker", ",", "name", "=", "\"UpdatesWorker#{}\"", ".", "format", "(", "i", "+", "1", ")", ")", ")", "self", ".", "updates_workers_list", "[", "-", "1", "]", ".", "start", "(", ")", "for", "i", "in", "range", "(", "self", ".", "DOWNLOAD_WORKERS", ")", ":", "self", ".", "download_workers_list", ".", "append", "(", "Thread", "(", "target", "=", "self", ".", "download_worker", ",", "name", "=", "\"DownloadWorker#{}\"", ".", "format", "(", "i", "+", "1", ")", ")", ")", "self", ".", "download_workers_list", "[", "-", "1", "]", ".", "start", "(", ")", "self", ".", "dispatcher", ".", "start", "(", ")", "mimetypes", ".", "init", "(", ")", "Syncer", ".", "add", "(", "self", ")", "return", "self" ]
Use this method to start the Client after creating it. Requires no parameters. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ConnectionError`` in case you try to start an already started Client.
[ "Use", "this", "method", "to", "start", "the", "Client", "after", "creating", "it", ".", "Requires", "no", "parameters", "." ]
python
train
32.763441
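A hedged start/stop sketch for this era of the API, assuming api_id and api_hash are supplied through Pyrogram's config file; 'my_account' is a placeholder session name.

from pyrogram import Client

app = Client('my_account')  # session name; credentials read from config.ini
app.start()
print(app.get_me())  # any API call is available once started
app.stop()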
LuqueDaniel/pybooru
pybooru/api_danbooru.py
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L774-L796
def note_update(self, note_id, coor_x=None, coor_y=None, width=None, height=None, body=None): """Function to update a note (Requires login) (UNTESTED). Parameters: note_id (int): Where note_id is the note id. coor_x (int): The x coordinates of the note in pixels, with respect to the top-left corner of the image. coor_y (int): The y coordinates of the note in pixels, with respect to the top-left corner of the image. width (int): The width of the note in pixels. height (int): The height of the note in pixels. body (str): The body of the note. """ params = { 'note[x]': coor_x, 'note[y]': coor_y, 'note[width]': width, 'note[height]': height, 'note[body]': body } return self._get('notes/{0}.json'.format(note_id), params, method='PUT', auth=True)
[ "def", "note_update", "(", "self", ",", "note_id", ",", "coor_x", "=", "None", ",", "coor_y", "=", "None", ",", "width", "=", "None", ",", "height", "=", "None", ",", "body", "=", "None", ")", ":", "params", "=", "{", "'note[x]'", ":", "coor_x", ",", "'note[y]'", ":", "coor_y", ",", "'note[width]'", ":", "width", ",", "'note[height]'", ":", "height", ",", "'note[body]'", ":", "body", "}", "return", "self", ".", "_get", "(", "'notes/{0}.json'", ".", "format", "(", "note_id", ")", ",", "params", ",", "method", "=", "'PUT'", ",", "auth", "=", "True", ")" ]
Function to update a note (Requires login) (UNTESTED). Parameters: note_id (int): Where note_id is the note id. coor_x (int): The x coordinates of the note in pixels, with respect to the top-left corner of the image. coor_y (int): The y coordinates of the note in pixels, with respect to the top-left corner of the image. width (int): The width of the note in pixels. height (int): The height of the note in pixels. body (str): The body of the note.
[ "Function", "to", "update", "a", "note", "(", "Requires", "login", ")", "(", "UNTESTED", ")", "." ]
python
train
43.869565
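A hedged call sketch. The Danbooru client construction follows pybooru's usual pattern; the note id, credentials, and coordinates are placeholders.

from pybooru import Danbooru

client = Danbooru('danbooru', username='your-name', api_key='your-key')
client.note_update(1234, coor_x=10, coor_y=20, width=120, height=40,
                   body='Translated text')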
vatlab/SoS
misc/vim-ipython/vim_ipython.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L646-L673
def set_pid(): """ Explicitly ask the ipython kernel for its pid """ global pid lines = '\n'.join(['import os', '_pid = os.getpid()']) try: msg_id = send(lines, silent=True, user_variables=['_pid']) except TypeError: # change in IPython 3.0+ msg_id = send(lines, silent=True, user_expressions={'_pid':'_pid'}) # wait to get message back from kernel try: child = get_child_msg(msg_id) except Empty: echo("no reply from IPython kernel") return try: pid = int(child['content']['user_variables']['_pid']) except TypeError: # change in IPython 1.0.dev moved this out pid = int(child['content']['user_variables']['_pid']['data']['text/plain']) except KeyError: # change in IPython 3.0+ try: pid = int( child['content']['user_expressions']['_pid']['data']['text/plain']) except KeyError: echo("Could not get PID information, kernel not running Python?") return pid
[ "def", "set_pid", "(", ")", ":", "global", "pid", "lines", "=", "'\\n'", ".", "join", "(", "[", "'import os'", ",", "'_pid = os.getpid()'", "]", ")", "try", ":", "msg_id", "=", "send", "(", "lines", ",", "silent", "=", "True", ",", "user_variables", "=", "[", "'_pid'", "]", ")", "except", "TypeError", ":", "# change in IPython 3.0+", "msg_id", "=", "send", "(", "lines", ",", "silent", "=", "True", ",", "user_expressions", "=", "{", "'_pid'", ":", "'_pid'", "}", ")", "# wait to get message back from kernel", "try", ":", "child", "=", "get_child_msg", "(", "msg_id", ")", "except", "Empty", ":", "echo", "(", "\"no reply from IPython kernel\"", ")", "return", "try", ":", "pid", "=", "int", "(", "child", "[", "'content'", "]", "[", "'user_variables'", "]", "[", "'_pid'", "]", ")", "except", "TypeError", ":", "# change in IPython 1.0.dev moved this out", "pid", "=", "int", "(", "child", "[", "'content'", "]", "[", "'user_variables'", "]", "[", "'_pid'", "]", "[", "'data'", "]", "[", "'text/plain'", "]", ")", "except", "KeyError", ":", "# change in IPython 3.0+", "try", ":", "pid", "=", "int", "(", "child", "[", "'content'", "]", "[", "'user_expressions'", "]", "[", "'_pid'", "]", "[", "'data'", "]", "[", "'text/plain'", "]", ")", "except", "KeyError", ":", "echo", "(", "\"Could not get PID information, kernel not running Python?\"", ")", "return", "pid" ]
Explicitly ask the ipython kernel for its pid
[ "Explicitly", "ask", "the", "ipython", "kernel", "for", "its", "pid" ]
python
train
36.321429
PaloAltoNetworks/pancloud
pancloud/logging.py
https://github.com/PaloAltoNetworks/pancloud/blob/c51e4c8aca3c988c60f062291007534edcb55285/pancloud/logging.py#L62-L87
def delete(self, query_id=None, **kwargs): # pragma: no cover """Delete a query job. Uses the DELETE HTTP method to delete a query job. After calling this endpoint, it is an error to poll for query results using the queryId specified here. Args: query_id (str): Specifies the ID of the query job. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``logging_query.py`` example. """ path = "/logging-service/v1/queries/{}".format(query_id) r = self._httpclient.request( method="DELETE", url=self.url, path=path, **kwargs ) return r
[ "def", "delete", "(", "self", ",", "query_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "path", "=", "\"/logging-service/v1/queries/{}\"", ".", "format", "(", "query_id", ")", "r", "=", "self", ".", "_httpclient", ".", "request", "(", "method", "=", "\"DELETE\"", ",", "url", "=", "self", ".", "url", ",", "path", "=", "path", ",", "*", "*", "kwargs", ")", "return", "r" ]
Delete a query job. Uses the DELETE HTTP method to delete a query job. After calling this endpoint, it is an error to poll for query results using the queryId specified here. Args: query_id (str): Specifies the ID of the query job. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``logging_query.py`` example.
[ "Delete", "a", "query", "job", "." ]
python
train
31.038462
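A hedged sketch of deleting a finished query job, assuming a LoggingService constructed with a url and prebuilt credentials as in pancloud's examples; the query id is a placeholder from an earlier create call.

from pancloud import LoggingService

ls = LoggingService(url='https://api.us.paloaltonetworks.com',
                    credentials=credentials)  # assumed prebuilt Credentials
r = ls.delete(query_id='0cc3c8c9-8e51-4f97-a2c7-4a1e17a86a25')  # placeholder id
print(r.status_code)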
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/categories.py
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/categories.py#L1-L37
def check_categories(lines): ''' find out how many row and col categories are available ''' # count the number of row categories rcat_line = lines[0].split('\t') # calc the number of row names and categories num_rc = 0 found_end = False # skip first tab for inst_string in rcat_line[1:]: if inst_string == '': if found_end is False: num_rc = num_rc + 1 else: found_end = True max_rcat = 15 if max_rcat > len(lines): max_rcat = len(lines) - 1 num_cc = 0 for i in range(max_rcat): ccat_line = lines[i + 1].split('\t') # make sure that line has length greater than one to prevent false cats from # trailing new lines at end of matrix if ccat_line[0] == '' and len(ccat_line) > 1: num_cc = num_cc + 1 num_labels = {} num_labels['row'] = num_rc + 1 num_labels['col'] = num_cc + 1 return num_labels
[ "def", "check_categories", "(", "lines", ")", ":", "# count the number of row categories", "rcat_line", "=", "lines", "[", "0", "]", ".", "split", "(", "'\\t'", ")", "# calc the number of row names and categories", "num_rc", "=", "0", "found_end", "=", "False", "# skip first tab", "for", "inst_string", "in", "rcat_line", "[", "1", ":", "]", ":", "if", "inst_string", "==", "''", ":", "if", "found_end", "is", "False", ":", "num_rc", "=", "num_rc", "+", "1", "else", ":", "found_end", "=", "True", "max_rcat", "=", "15", "if", "max_rcat", ">", "len", "(", "lines", ")", ":", "max_rcat", "=", "len", "(", "lines", ")", "-", "1", "num_cc", "=", "0", "for", "i", "in", "range", "(", "max_rcat", ")", ":", "ccat_line", "=", "lines", "[", "i", "+", "1", "]", ".", "split", "(", "'\\t'", ")", "# make sure that line has length greater than one to prevent false cats from", "# trailing new lines at end of matrix", "if", "ccat_line", "[", "0", "]", "==", "''", "and", "len", "(", "ccat_line", ")", ">", "1", ":", "num_cc", "=", "num_cc", "+", "1", "num_labels", "=", "{", "}", "num_labels", "[", "'row'", "]", "=", "num_rc", "+", "1", "num_labels", "[", "'col'", "]", "=", "num_cc", "+", "1", "return", "num_labels" ]
find out how many row and col categories are available
[ "find", "out", "how", "many", "row", "and", "col", "categories", "are", "available" ]
python
train
23.054054
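A worked example of the layout check_categories expects: one extra empty cell at the start of the header row yields one row category, and each following line whose first cell is empty counts as one column category.

lines = [
    '\t\tcol-A\tcol-B',         # header: one extra empty cell -> 1 row category
    '\tgender\tmale\tfemale',   # first cell empty -> 1 column category
    'row-1\ttype-x\t1.0\t2.0',  # data row
]
check_categories(lines)  # -> {'row': 2, 'col': 2}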
onelogin/python3-saml
src/onelogin/saml2/utils.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/utils.py#L458-L486
def get_expire_time(cache_duration=None, valid_until=None): """ Compares 2 dates and returns the earliest. :param cache_duration: The duration, as a string. :type: string :param valid_until: The valid until date, as a string or as a timestamp :type: string :return: The expiration time. :rtype: int """ expire_time = None if cache_duration is not None: expire_time = OneLogin_Saml2_Utils.parse_duration(cache_duration) if valid_until is not None: if isinstance(valid_until, int): valid_until_time = valid_until else: valid_until_time = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until) if expire_time is None or expire_time > valid_until_time: expire_time = valid_until_time if expire_time is not None: return '%d' % expire_time return None
[ "def", "get_expire_time", "(", "cache_duration", "=", "None", ",", "valid_until", "=", "None", ")", ":", "expire_time", "=", "None", "if", "cache_duration", "is", "not", "None", ":", "expire_time", "=", "OneLogin_Saml2_Utils", ".", "parse_duration", "(", "cache_duration", ")", "if", "valid_until", "is", "not", "None", ":", "if", "isinstance", "(", "valid_until", ",", "int", ")", ":", "valid_until_time", "=", "valid_until", "else", ":", "valid_until_time", "=", "OneLogin_Saml2_Utils", ".", "parse_SAML_to_time", "(", "valid_until", ")", "if", "expire_time", "is", "None", "or", "expire_time", ">", "valid_until_time", ":", "expire_time", "=", "valid_until_time", "if", "expire_time", "is", "not", "None", ":", "return", "'%d'", "%", "expire_time", "return", "None" ]
Compares 2 dates and returns the earliest. :param cache_duration: The duration, as a string. :type: string :param valid_until: The valid until date, as a string or as a timestamp :type: string :return: The expiration time. :rtype: int
[ "Compares", "2", "dates", "and", "returns", "the", "earliest", "." ]
python
train
32.448276
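A small sketch of the comparison, assuming the usual ISO 8601 duration and SAML datetime formats accepted by parse_duration and parse_SAML_to_time; whichever expires first wins.

from onelogin.saml2.utils import OneLogin_Saml2_Utils

# cacheDuration of one day versus an absolute validUntil timestamp
expire = OneLogin_Saml2_Utils.get_expire_time('PT86400S',
                                              '2024-12-10T04:39:31Z')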
estnltk/estnltk
estnltk/wordnet/eurown.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/eurown.py#L2102-L2131
def named_eq_relations(self, name, neg=False): '''Returns list of named eqLinks. <name> may be string or list. ''' if self.eqLinks and not neg: if isinstance(name, six.string_types): return filter(lambda x: x.relation.name == name, self.eqLinks) elif isinstance(name, list): return filter(lambda x: x.relation.name in name, self.eqLinks) else: return None #should raise error elif self.eqLinks and neg: if isinstance(name, six.string_types): return filter(lambda x: x.relation.name != name, self.eqLinks) elif isinstance(name, list): return filter(lambda x: x.relation.name not in name, self.eqLinks) else: return None #should raise error else: return None
[ "def", "named_eq_relations", "(", "self", ",", "name", ",", "neg", "=", "False", ")", ":", "if", "self", ".", "eqLinks", "and", "not", "neg", ":", "if", "isinstance", "(", "name", ",", "six", ".", "string_types", ")", ":", "return", "filter", "(", "lambda", "x", ":", "x", ".", "relation", ".", "name", "==", "name", ",", "self", ".", "eqLinks", ")", "elif", "isinstance", "(", "name", ",", "list", ")", ":", "return", "filter", "(", "lambda", "x", ":", "x", ".", "relation", ".", "name", "in", "name", ",", "self", ".", "eqLinks", ")", "else", ":", "return", "None", "#should raise error", "elif", "self", ".", "eqLinks", "and", "neg", ":", "if", "isinstance", "(", "name", ",", "six", ".", "string_types", ")", ":", "return", "filter", "(", "lambda", "x", ":", "x", ".", "relation", ".", "name", "!=", "name", ",", "self", ".", "eqLinks", ")", "elif", "isinstance", "(", "name", ",", "list", ")", ":", "return", "filter", "(", "lambda", "x", ":", "x", ".", "relation", ".", "name", "not", "in", "name", ",", "self", ".", "eqLinks", ")", "else", ":", "return", "None", "#should raise error", "else", ":", "return", "None" ]
Returns list of named eqLinks. <name> may be string or list.
[ "Returns", "list", "of", "named", "eqLinks", "." ]
python
train
32.633333
cloudtools/stacker
stacker/util.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/util.py#L682-L720
def fetch_git_package(self, config): """Make a remote git repository available for local use. Args: config (dict): git config dictionary """ # only loading git here when needed to avoid load errors on systems # without git installed from git import Repo ref = self.determine_git_ref(config) dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref) cached_dir_path = os.path.join(self.package_cache_dir, dir_name) # We can skip cloning the repo if it's already been cached if not os.path.isdir(cached_dir_path): logger.debug("Remote repo %s does not appear to have been " "previously downloaded - starting clone to %s", config['uri'], cached_dir_path) tmp_dir = tempfile.mkdtemp(prefix='stacker') try: tmp_repo_path = os.path.join(tmp_dir, dir_name) with Repo.clone_from(config['uri'], tmp_repo_path) as repo: repo.head.reference = ref repo.head.reset(index=True, working_tree=True) shutil.move(tmp_repo_path, self.package_cache_dir) finally: shutil.rmtree(tmp_dir) else: logger.debug("Remote repo %s appears to have been previously " "cloned to %s -- bypassing download", config['uri'], cached_dir_path) # Update sys.path & merge in remote configs (if necessary) self.update_paths_and_config(config=config, pkg_dir_name=dir_name)
[ "def", "fetch_git_package", "(", "self", ",", "config", ")", ":", "# only loading git here when needed to avoid load errors on systems", "# without git installed", "from", "git", "import", "Repo", "ref", "=", "self", ".", "determine_git_ref", "(", "config", ")", "dir_name", "=", "self", ".", "sanitize_git_path", "(", "uri", "=", "config", "[", "'uri'", "]", ",", "ref", "=", "ref", ")", "cached_dir_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "package_cache_dir", ",", "dir_name", ")", "# We can skip cloning the repo if it's already been cached", "if", "not", "os", ".", "path", ".", "isdir", "(", "cached_dir_path", ")", ":", "logger", ".", "debug", "(", "\"Remote repo %s does not appear to have been \"", "\"previously downloaded - starting clone to %s\"", ",", "config", "[", "'uri'", "]", ",", "cached_dir_path", ")", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'stacker'", ")", "try", ":", "tmp_repo_path", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "dir_name", ")", "with", "Repo", ".", "clone_from", "(", "config", "[", "'uri'", "]", ",", "tmp_repo_path", ")", "as", "repo", ":", "repo", ".", "head", ".", "reference", "=", "ref", "repo", ".", "head", ".", "reset", "(", "index", "=", "True", ",", "working_tree", "=", "True", ")", "shutil", ".", "move", "(", "tmp_repo_path", ",", "self", ".", "package_cache_dir", ")", "finally", ":", "shutil", ".", "rmtree", "(", "tmp_dir", ")", "else", ":", "logger", ".", "debug", "(", "\"Remote repo %s appears to have been previously \"", "\"cloned to %s -- bypassing download\"", ",", "config", "[", "'uri'", "]", ",", "cached_dir_path", ")", "# Update sys.path & merge in remote configs (if necessary)", "self", ".", "update_paths_and_config", "(", "config", "=", "config", ",", "pkg_dir_name", "=", "dir_name", ")" ]
Make a remote git repository available for local use. Args: config (dict): git config dictionary
[ "Make", "a", "remote", "git", "repository", "available", "for", "local", "use", "." ]
python
train
42.974359
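A hedged sketch of the config dict this method consumes. Only the uri key is read directly above; using tag as the ref selector is an assumption based on determine_git_ref's name, and the repository URL is a placeholder.

config = {
    'uri': 'git://github.com/example/blueprints.git',  # placeholder repo
    'tag': 'v1.0.0',  # assumed ref key handled by determine_git_ref
}
resolver.fetch_git_package(config)  # resolver: an already-built source handler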
zimeon/iiif
iiif/error.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L100-L116
def as_txt(self): """Text rendering of error response. Designed for use with Image API version 1.1 and above where the error response is suggested to be text or html but not otherwise specified. Intended to provide useful information for debugging. """ s = "IIIF Image Server Error\n\n" s += self.text if (self.text) else 'UNKNOWN_ERROR' s += "\n\n" if (self.parameter): s += "parameter=%s\n" % self.parameter if (self.code): s += "code=%d\n\n" % self.code for header in sorted(self.headers): s += "header %s=%s\n" % (header, self.headers[header]) return s
[ "def", "as_txt", "(", "self", ")", ":", "s", "=", "\"IIIF Image Server Error\\n\\n\"", "s", "+=", "self", ".", "text", "if", "(", "self", ".", "text", ")", "else", "'UNKNOWN_ERROR'", "s", "+=", "\"\\n\\n\"", "if", "(", "self", ".", "parameter", ")", ":", "s", "+=", "\"parameter=%s\\n\"", "%", "self", ".", "parameter", "if", "(", "self", ".", "code", ")", ":", "s", "+=", "\"code=%d\\n\\n\"", "%", "self", ".", "code", "for", "header", "in", "sorted", "(", "self", ".", "headers", ")", ":", "s", "+=", "\"header %s=%s\\n\"", "%", "(", "header", ",", "self", ".", "headers", "[", "header", "]", ")", "return", "s" ]
Text rendering of error response. Designed for use with Image API version 1.1 and above where the error response is suggested to be text or html but not otherwise specified. Intended to provide useful information for debugging.
[ "Text", "rendering", "of", "error", "response", "." ]
python
train
39.588235
rgs1/zk_shell
zk_shell/xclient.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/xclient.py#L516-L534
def sessions_info(self, hosts): """Returns ClientInfo per session. :param hosts: comma separated list of members of the ZK ensemble. :returns: A dictionary of (session_id, ClientInfo). """ info_by_id = {} for server_endpoint, dump in self.dump_by_server(hosts).items(): server_ip, server_port = server_endpoint for line in dump.split("\n"): mat = self.IP_PORT_REGEX.match(line) if mat is None: continue ip, port, sid = mat.groups() info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port) return info_by_id
[ "def", "sessions_info", "(", "self", ",", "hosts", ")", ":", "info_by_id", "=", "{", "}", "for", "server_endpoint", ",", "dump", "in", "self", ".", "dump_by_server", "(", "hosts", ")", ".", "items", "(", ")", ":", "server_ip", ",", "server_port", "=", "server_endpoint", "for", "line", "in", "dump", ".", "split", "(", "\"\\n\"", ")", ":", "mat", "=", "self", ".", "IP_PORT_REGEX", ".", "match", "(", "line", ")", "if", "mat", "is", "None", ":", "continue", "ip", ",", "port", ",", "sid", "=", "mat", ".", "groups", "(", ")", "info_by_id", "[", "sid", "]", "=", "ClientInfo", "(", "sid", ",", "ip", ",", "port", ",", "server_ip", ",", "server_port", ")", "return", "info_by_id" ]
Returns ClientInfo per session. :param hosts: comma separated list of members of the ZK ensemble. :returns: A dictionary of (session_id, ClientInfo).
[ "Returns", "ClientInfo", "per", "session", "." ]
python
train
35.157895
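Usage sketch, assuming client is the XClient instance this method belongs to and the comma separated host string names a reachable ensemble.

info = client.sessions_info('zk1:2181,zk2:2181,zk3:2181')
for sid, client_info in info.items():
    print(sid, client_info)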
fstab50/metal
metal/script_utils.py
https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/script_utils.py#L335-L361
def json_integrity(baseline, suspect): """ Summary: Validates baseline dict against suspect dict to ensure it contains USERNAME k,v parameters. Args: baseline (dict): baseline json structure suspect (dict): json object validated against baseline structure Returns: Success (matches baseline) | Failure (no match), TYPE: bool """ try: for k,v in baseline.items(): for ks, vs in suspect.items(): keys_baseline = set(v.keys()) keys_suspect = set(vs.keys()) intersect_keys = keys_baseline.intersection(keys_suspect) added = keys_baseline - keys_suspect rm = keys_suspect - keys_baseline logger.info('keys added: %s, keys removed %s' % (str(added), str(rm))) if keys_baseline != keys_suspect: return False except KeyError as e: logger.info( 'KeyError parsing pre-existing config (%s). Replacing config file' % str(e)) return True
[ "def", "json_integrity", "(", "baseline", ",", "suspect", ")", ":", "try", ":", "for", "k", ",", "v", "in", "baseline", ".", "items", "(", ")", ":", "for", "ks", ",", "vs", "in", "suspect", ".", "items", "(", ")", ":", "keys_baseline", "=", "set", "(", "v", ".", "keys", "(", ")", ")", "keys_suspect", "=", "set", "(", "vs", ".", "keys", "(", ")", ")", "intersect_keys", "=", "keys_baseline", ".", "intersection", "(", "keys_suspect", ")", "added", "=", "keys_baseline", "-", "keys_suspect", "rm", "=", "keys_suspect", "-", "keys_baseline", "logger", ".", "info", "(", "'keys added: %s, keys removed %s'", "%", "(", "str", "(", "added", ")", ",", "str", "(", "rm", ")", ")", ")", "if", "keys_baseline", "!=", "keys_suspect", ":", "return", "False", "except", "KeyError", "as", "e", ":", "logger", ".", "info", "(", "'KeyError parsing pre-existing config (%s). Replacing config file'", "%", "str", "(", "e", ")", ")", "return", "True" ]
Summary: Validates baseline dict against suspect dict to ensure it contains USERNAME k,v parameters. Args: baseline (dict): baseline json structure suspect (dict): json object validated against baseline structure Returns: Success (matches baseline) | Failure (no match), TYPE: bool
[ "Summary", ":", "Validates", "baseline", "dict", "against", "suspect", "dict", "to", "ensure", "it", "contains", "USERNAME", "k", "v", "parameters", ".", "Args", ":", "baseline", "(", "dict", ")", ":", "baseline", "json", "structure", "suspect", "(", "dict", ")", ":", "json", "object", "validated", "against", "baseline", "structure", "Returns", ":", "Success", "(", "matches", "baseline", ")", "|", "Failure", "(", "no", "match", ")", "TYPE", ":", "bool" ]
python
train
38.888889
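A worked example of the second-level key comparison: a suspect dict missing one nested key fails the check, while an identical dict passes.

baseline = {'default': {'aws_access_key_id': 'A', 'aws_secret_access_key': 'B'}}
suspect = {'default': {'aws_access_key_id': 'A'}}
json_integrity(baseline, suspect)   # -> False: nested key sets differ
json_integrity(baseline, baseline)  # -> True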
BD2KGenomics/toil-lib
src/toil_lib/spark.py
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/spark.py#L28-L64
def spawn_spark_cluster(job, numWorkers, cores=None, memory=None, disk=None, overrideLeaderIP=None): ''' :param numWorkers: The number of worker nodes to have in the cluster. \ Must be greater than or equal to 1. :param cores: Optional parameter to set the number of cores per node. \ If not provided, we use the number of cores on the node that launches \ the service. :param memory: Optional parameter to set the memory requested per node. :param disk: Optional parameter to set the disk requested per node. :type numWorkers: int :type cores: int :type memory: int or string convertible by bd2k.util.humanize.human2bytes to an int :type disk: int or string convertible by bd2k.util.humanize.human2bytes to an int ''' if numWorkers < 1: raise ValueError("Must have more than one worker. %d given." % numWorkers) leaderService = SparkService(cores=cores, memory=memory, disk=disk, overrideLeaderIP=overrideLeaderIP) leaderIP = job.addService(leaderService) for i in range(numWorkers): job.addService(WorkerService(leaderIP, cores=cores, disk=disk, memory=memory), parentService=leaderService) return leaderIP
[ "def", "spawn_spark_cluster", "(", "job", ",", "numWorkers", ",", "cores", "=", "None", ",", "memory", "=", "None", ",", "disk", "=", "None", ",", "overrideLeaderIP", "=", "None", ")", ":", "if", "numWorkers", "<", "1", ":", "raise", "ValueError", "(", "\"Must have more than one worker. %d given.\"", "%", "numWorkers", ")", "leaderService", "=", "SparkService", "(", "cores", "=", "cores", ",", "memory", "=", "memory", ",", "disk", "=", "disk", ",", "overrideLeaderIP", "=", "overrideLeaderIP", ")", "leaderIP", "=", "job", ".", "addService", "(", "leaderService", ")", "for", "i", "in", "range", "(", "numWorkers", ")", ":", "job", ".", "addService", "(", "WorkerService", "(", "leaderIP", ",", "cores", "=", "cores", ",", "disk", "=", "disk", ",", "memory", "=", "memory", ")", ",", "parentService", "=", "leaderService", ")", "return", "leaderIP" ]
:param numWorkers: The number of worker nodes to have in the cluster. \ Must be greater than or equal to 1. :param cores: Optional parameter to set the number of cores per node. \ If not provided, we use the number of cores on the node that launches \ the service. :param memory: Optional parameter to set the memory requested per node. :param disk: Optional parameter to set the disk requested per node. :type numWorkers: int :type cores: int :type memory: int or string convertible by bd2k.util.humanize.human2bytes to an int :type disk: int or string convertible by bd2k.util.humanize.human2bytes to an int
[ ":", "param", "numWorkers", ":", "The", "number", "of", "worker", "nodes", "to", "have", "in", "the", "cluster", ".", "\\", "Must", "be", "greater", "than", "or", "equal", "to", "1", ".", ":", "param", "cores", ":", "Optional", "parameter", "to", "set", "the", "number", "of", "cores", "per", "node", ".", "\\", "If", "not", "provided", "we", "use", "the", "number", "of", "cores", "on", "the", "node", "that", "launches", "\\", "the", "service", ".", ":", "param", "memory", ":", "Optional", "parameter", "to", "set", "the", "memory", "requested", "per", "node", ".", ":", "param", "disk", ":", "Optional", "parameter", "to", "set", "the", "disk", "requested", "per", "node", ".", ":", "type", "numWorkers", ":", "int", ":", "type", "cores", ":", "int", ":", "type", "memory", ":", "int", "or", "string", "convertible", "by", "bd2k", ".", "util", ".", "humanize", ".", "human2bytes", "to", "an", "int", ":", "type", "disk", ":", "int", "or", "string", "convertible", "by", "bd2k", ".", "util", ".", "humanize", ".", "human2bytes", "to", "an", "int" ]
python
test
43.513514
dictatorlib/dictator
dictator/__init__.py
https://github.com/dictatorlib/dictator/blob/b77b1709b6fff174f13b0f0c5dbe740b4c07d712/dictator/__init__.py#L275-L296
def keys(self, pattern=None): """Returns a list of keys matching ``pattern``. By default return all keys. >>> dc = Dictator() >>> dc['l0'] = [1, 2, 3, 4] >>> dc['s0'] = 'string value' >>> dc.keys() ['l0', 's0'] >>> dc.keys('h*') [] >>> dc.clear() :param pattern: key pattern :type pattern: str :return: list of keys in db :rtype: list of str """ logger.debug('call keys %s', pattern) if pattern is None: pattern = '*' return self._redis.keys(pattern=pattern)
[ "def", "keys", "(", "self", ",", "pattern", "=", "None", ")", ":", "logger", ".", "debug", "(", "'call keys %s'", ",", "pattern", ")", "if", "pattern", "is", "None", ":", "pattern", "=", "'*'", "return", "self", ".", "_redis", ".", "keys", "(", "pattern", "=", "pattern", ")" ]
Returns a list of keys matching ``pattern``. By default return all keys. >>> dc = Dictator() >>> dc['l0'] = [1, 2, 3, 4] >>> dc['s0'] = 'string value' >>> dc.keys() ['l0', 's0'] >>> dc.keys('h*') [] >>> dc.clear() :param pattern: key pattern :type pattern: str :return: list of keys in db :rtype: list of str
[ "Returns", "a", "list", "of", "keys", "matching", "pattern", ".", "By", "default", "return", "all", "keys", "." ]
python
train
27
noahbenson/pimms
pimms/calculation.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/calculation.py#L713-L758
def planfn(*args, **kwargs): ''' planfn(val1=fn1, val2=fn2...) uses the pimms plan mechanism to yield a function f that produces an immutable imap when called with the correct parameters. Unlike in plan(), planfn() does not care about the names of the calculation units; instead the val1, val2, etc. are names of the efferent values while the given fn objects are the functions that calculate them (they are automatically converted into calc objects, and names are automatically generated for the items required). planfn(res, val1=fn1, val2=fn2...) yields a function f that is equivalent to the function lambda *args,**kw: g(*args,**kw)[res] where g = planfn(val1=fn1, val2=fn2...). planfn((p1, p2...), val1=fn1, val2=fn2...) yields a function f that is equivalent to the function lambda *a: g({p1:a[0], p2:a[1]...}) where g = planfn(val1=fn1, val2=fn2...). planfn((p1, p2...), res, val1=fn1, val2=fn2...) applies both of the above translations to the resulting function; note that either the first or second argument may be None to specify default behavior. Additionally, the first argument may be a string s equivalent to (s,). ''' # first: parse options if len(args) > 2: raise ValueError('no more than 2 non-keyword arguments allowed') elif len(args) == 0: (params,result) = (None,None) elif len(args) == 2: (params,result) = args elif is_str(args[0]): (params,result) = (None,args[0]) elif is_vector(args[0], str): (params,result) = (args[0],None) else: raise ValueError('cannot interpret argument: %s' % args[0]) # okay, now, we make the calcs and the calc plan: calcs = {} for (k,v) in six.iteritems(kwargs): # if v is already a calc object, we can only use this if it has exactly one efferent if is_calc(v): if len(v.efferents) != 1: raise ValueError('calc objects given to planfn must have exactly 1 efferent') eff = v.efferents[0] if eff != k: v = v.tr({eff:k}) else: v = calc(k)(v) # otherwise convert it to a calc with the output we want # it's possible that the names of the calc objects are not unique, so we want to auto- # generate unique names based on their names... calcs['calc_' + k] = v p0 = plan(calcs) # see if we need to fix the input if params is None: p = p0 elif is_str(params): p = lambda arg: p0({params:arg}) elif is_vector(params, str): p = lambda *args: p0({k:v for (k,v) in zip(params,args)}) else: raise ValueError('params must be a string or list of strings or None') # and if we need to fix the output if result is None: return p def f(*args, **kw): return p(*args, **kw)[result] return f
[ "def", "planfn", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# first: parse options", "if", "len", "(", "args", ")", ">", "2", ":", "raise", "ValueError", "(", "'no more than 2 non-keyword arguments allowed'", ")", "elif", "len", "(", "args", ")", "==", "0", ":", "(", "params", ",", "result", ")", "=", "(", "None", ",", "None", ")", "elif", "len", "(", "args", ")", "==", "2", ":", "(", "params", ",", "result", ")", "=", "args", "elif", "is_str", "(", "args", "[", "0", "]", ")", ":", "(", "params", ",", "result", ")", "=", "(", "None", ",", "args", "[", "0", "]", ")", "elif", "is_vector", "(", "args", "[", "0", "]", ",", "str", ")", ":", "(", "params", ",", "result", ")", "=", "(", "args", "[", "0", "]", ",", "None", ")", "else", ":", "raise", "ValueError", "(", "'cannot interpret argument: %s'", "%", "args", "[", "0", "]", ")", "# okay, now, we make the calcs and the calc plan:", "calcs", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "kwargs", ")", ":", "# if v is already a calc object, we can only use this if it has exactly one efferent", "if", "is_calc", "(", "v", ")", ":", "if", "len", "(", "v", ".", "efferents", ")", "!=", "1", ":", "raise", "ValueError", "(", "'calc objects given to planfn must have exactly 1 efferent'", ")", "eff", "=", "v", ".", "efferents", "[", "0", "]", "if", "eff", "!=", "k", ":", "v", "=", "v", ".", "tr", "(", "{", "eff", ":", "k", "}", ")", "else", ":", "v", "=", "calc", "(", "k", ")", "(", "v", ")", "# otherwise convert it to a calc with the output we want", "# it's possible that the names of the calc objects are not unique, so we want to auto-", "# generate unique names based on their names...", "calcs", "[", "'calc_'", "+", "k", "]", "=", "v", "p0", "=", "plan", "(", "calcs", ")", "# see if we need to fix the input", "if", "params", "is", "None", ":", "p", "=", "p0", "elif", "is_str", "(", "params", ")", ":", "p", "=", "lambda", "arg", ":", "p0", "(", "{", "params", ":", "arg", "}", ")", "elif", "is_vector", "(", "params", ",", "str", ")", ":", "p", "=", "lambda", "*", "args", ":", "p0", "(", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "zip", "(", "params", ",", "args", ")", "}", ")", "else", ":", "raise", "ValueError", "(", "'params must be a string or list of strings or None'", ")", "# and if we need to fix the output", "if", "result", "is", "None", ":", "return", "p", "def", "f", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "p", "(", "*", "args", ",", "*", "*", "kw", ")", "[", "result", "]", "return", "f" ]
planfn(val1=fn1, val2=fn2...) uses the pimms plan mechanism to yield a function f that produces an immutable imap when called with the correct parameters. Unlike in plan(), planfn() does not care about the names of the calculation units; instead the val1, val2, etc. are names of the efferent values while the given fn objects are the functions that calculate them (they are automatically converted into calc objects, and names are automatically generated for the items required). planfn(res, val1=fn1, val2=fn2...) yields a function f that is equivalent to the function lambda *args,**kw: g(*args,**kw)[res] where g = planfn(val1=fn1, val2=fn2...). planfn((p1, p2...), val1=fn1, val2=fn2...) yields a function f that is equivalent to the function lambda *a: g({p1:a[0], p2:a[1]...}) where g = planfn(val1=fn1, val2=fn2...). planfn((p1, p2...), res, val1=fn1, val2=fn2...) applies both of the above translations to the resulting function; note that either the first or second argument may be None to specify default behavior. Additionally, the first argument may be a string s equivalent to (s,).
[ "planfn", "(", "val1", "=", "fn1", "val2", "=", "fn2", "...", ")", "uses", "the", "pimms", "plan", "mechanism", "to", "yield", "a", "function", "f", "that", "produces", "an", "immutable", "imap", "when", "called", "with", "the", "correct", "parameters", ".", "Unlike", "in", "plan", "()", "planfn", "()", "does", "not", "care", "about", "the", "names", "of", "the", "calculation", "units", ";", "instead", "the", "val1", "val2", "etc", ".", "are", "names", "of", "the", "efferent", "values", "while", "the", "given", "fn", "objects", "are", "the", "functions", "that", "calculate", "them", "(", "they", "are", "automatically", "converted", "into", "calc", "objects", "and", "names", "are", "automatically", "generated", "for", "the", "items", "required", ")", ".", "planfn", "(", "res", "val1", "=", "fn1", "val2", "=", "fn2", "...", ")", "yields", "a", "function", "f", "that", "is", "equivalent", "to", "the", "function", "lambda", "*", "args", "**", "kw", ":", "g", "(", "*", "args", "**", "kw", ")", "[", "res", "]", "where", "g", "=", "planfn", "(", "val1", "=", "fn1", "val2", "=", "fn2", "...", ")", ".", "planfn", "((", "p1", "p2", "...", ")", "val1", "=", "fn1", "val2", "=", "fn2", "...", ")", "yields", "a", "function", "f", "that", "is", "equivalent", "to", "the", "function", "lambda", "*", "a", ":", "g", "(", "{", "p1", ":", "a", "[", "0", "]", "p2", ":", "a", "[", "1", "]", "...", "}", ")", "where", "g", "=", "planfn", "(", "val1", "=", "fn1", "val2", "=", "fn2", "...", ")", ".", "planfn", "((", "p1", "p2", "...", ")", "res", "val1", "=", "fn1", "val2", "=", "fn2", "...", ")", "applies", "both", "of", "the", "above", "translations", "to", "the", "resulting", "function", ";", "note", "that", "either", "the", "first", "or", "second", "argument", "may", "be", "None", "to", "specify", "default", "behavior", ".", "Additionally", "the", "first", "argument", "may", "be", "a", "string", "s", "equivalent", "to", "(", "s", ")", "." ]
python
train
61.217391
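A minimal worked example of the calling conventions described above, assuming planfn is re-exported at the top-level pimms namespace.

import pimms

def add(a, b):
    return a + b

f = pimms.planfn(('a', 'b'), 'c', c=add)  # params (a, b), efferent 'c'
f(2, 3)  # -> 5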
MisterY/pydatum
pydatum/datum.py
https://github.com/MisterY/pydatum/blob/4b39f43040e31a95bcf219603b6429078a9ba3c2/pydatum/datum.py#L190-L194
def to_iso_time_string(self) -> str: """ Return the iso time string only """ short_time = self.to_short_time_string() second = self.time.second return f"{short_time}:{second:02}"
[ "def", "to_iso_time_string", "(", "self", ")", "->", "str", ":", "short_time", "=", "self", ".", "to_short_time_string", "(", ")", "second", "=", "self", ".", "time", ".", "second", "return", "f\"{short_time}:{second:02}\"" ]
Return the iso time string only
[ "Return", "the", "iso", "time", "string", "only" ]
python
train
41.2
thombashi/pytablereader
pytablereader/_logger/_logger.py
https://github.com/thombashi/pytablereader/blob/bc3c057a2cc775bcce690e0e9019c2907b638101/pytablereader/_logger/_logger.py#L51-L85
def set_log_level(log_level): """ Set logging level of this module. Using `logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging. :param int log_level: One of the log level of `logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__. Disabled logging if ``log_level`` is ``logbook.NOTSET``. :raises LookupError: If ``log_level`` is an invalid value. """ if not LOGBOOK_INSTALLED: return # validate log level logbook.get_level_name(log_level) if log_level == logger.level: return if log_level == logbook.NOTSET: set_logger(is_enable=False) else: set_logger(is_enable=True) logger.level = log_level dataproperty.set_log_level(log_level) try: import simplesqlite simplesqlite.set_log_level(log_level) except ImportError: pass
[ "def", "set_log_level", "(", "log_level", ")", ":", "if", "not", "LOGBOOK_INSTALLED", ":", "return", "# validate log level", "logbook", ".", "get_level_name", "(", "log_level", ")", "if", "log_level", "==", "logger", ".", "level", ":", "return", "if", "log_level", "==", "logbook", ".", "NOTSET", ":", "set_logger", "(", "is_enable", "=", "False", ")", "else", ":", "set_logger", "(", "is_enable", "=", "True", ")", "logger", ".", "level", "=", "log_level", "dataproperty", ".", "set_log_level", "(", "log_level", ")", "try", ":", "import", "simplesqlite", "simplesqlite", ".", "set_log_level", "(", "log_level", ")", "except", "ImportError", ":", "pass" ]
Set logging level of this module. Using `logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging. :param int log_level: One of the log level of `logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__. Disabled logging if ``log_level`` is ``logbook.NOTSET``. :raises LookupError: If ``log_level`` is an invalid value.
[ "Set", "logging", "level", "of", "this", "module", ".", "Using", "logbook", "<https", ":", "//", "logbook", ".", "readthedocs", ".", "io", "/", "en", "/", "stable", "/", ">", "__", "module", "for", "logging", "." ]
python
train
24.914286
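Usage sketch, assuming set_log_level is exported at the package top level as in the author's other libraries.

import logbook
import pytablereader

pytablereader.set_log_level(logbook.DEBUG)   # verbose logging
pytablereader.set_log_level(logbook.NOTSET)  # disable logging again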
awslabs/aws-sam-cli
samcli/local/docker/manager.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/docker/manager.py#L108-L142
def pull_image(self, image_name, stream=None): """ Ask Docker to pull the container image with given name. Parameters ---------- image_name str Name of the image stream samcli.lib.utils.stream_writer.StreamWriter Optional stream writer to output to. Defaults to stderr Raises ------ DockerImagePullFailedException If the Docker image was not available in the server """ stream_writer = stream or StreamWriter(sys.stderr) try: result_itr = self.docker_client.api.pull(image_name, stream=True, decode=True) except docker.errors.APIError as ex: LOG.debug("Failed to download image with name %s", image_name) raise DockerImagePullFailedException(str(ex)) # io streams, especially StringIO, work only with unicode strings stream_writer.write(u"\nFetching {} Docker container image...".format(image_name)) # Each line contains information on progress of the pull. Each line is a JSON string for _ in result_itr: # For every line, print a dot to show progress stream_writer.write(u'.') stream_writer.flush() # We are done. Go to the next line stream_writer.write(u"\n")
[ "def", "pull_image", "(", "self", ",", "image_name", ",", "stream", "=", "None", ")", ":", "stream_writer", "=", "stream", "or", "StreamWriter", "(", "sys", ".", "stderr", ")", "try", ":", "result_itr", "=", "self", ".", "docker_client", ".", "api", ".", "pull", "(", "image_name", ",", "stream", "=", "True", ",", "decode", "=", "True", ")", "except", "docker", ".", "errors", ".", "APIError", "as", "ex", ":", "LOG", ".", "debug", "(", "\"Failed to download image with name %s\"", ",", "image_name", ")", "raise", "DockerImagePullFailedException", "(", "str", "(", "ex", ")", ")", "# io streams, especially StringIO, work only with unicode strings", "stream_writer", ".", "write", "(", "u\"\\nFetching {} Docker container image...\"", ".", "format", "(", "image_name", ")", ")", "# Each line contains information on progress of the pull. Each line is a JSON string", "for", "_", "in", "result_itr", ":", "# For every line, print a dot to show progress", "stream_writer", ".", "write", "(", "u'.'", ")", "stream_writer", ".", "flush", "(", ")", "# We are done. Go to the next line", "stream_writer", ".", "write", "(", "u\"\\n\"", ")" ]
Ask Docker to pull the container image with given name. Parameters ---------- image_name str Name of the image stream samcli.lib.utils.stream_writer.StreamWriter Optional stream writer to output to. Defaults to stderr Raises ------ DockerImagePullFailedException If the Docker image was not available in the server
[ "Ask", "Docker", "to", "pull", "the", "container", "image", "with", "given", "name", "." ]
python
train
36.971429
myusuf3/delorean
delorean/interface.py
https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L108-L113
def range_daily(start=None, stop=None, timezone='UTC', count=None): """ This is an alternative way to generate sets of Delorean objects with DAILY stops """ return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count)
[ "def", "range_daily", "(", "start", "=", "None", ",", "stop", "=", "None", ",", "timezone", "=", "'UTC'", ",", "count", "=", "None", ")", ":", "return", "stops", "(", "start", "=", "start", ",", "stop", "=", "stop", ",", "freq", "=", "DAILY", ",", "timezone", "=", "timezone", ",", "count", "=", "count", ")" ]
This is an alternative way to generate sets of Delorean objects with DAILY stops
[ "This", "is", "an", "alternative", "way", "to", "generate", "sets", "of", "Delorean", "objects", "with", "DAILY", "stops" ]
python
train
41.833333
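A quick sketch; range_daily is importable from the delorean package top level and accepts a datetime start plus a count.

from datetime import datetime
from delorean import range_daily

for d in range_daily(start=datetime(2019, 1, 1), count=3):
    print(d)  # three consecutive Delorean objects, one day apart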
resonai/ybt
yabt/yabt.py
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/yabt.py#L60-L71
def cmd_list(unused_conf: Config): """Print out information on loaded builders and hooks.""" for name, builder in sorted(Plugin.builders.items()): if builder.func: print('+- {0:16s} implemented in {1.__module__}.{1.__name__}()' .format(name, builder.func)) else: print('+- {0:16s} loaded with no builder function'.format(name)) for hook_name, hook_func in sorted(Plugin.get_hooks_for_builder(name)): print(' +- {0} hook implemented in ' '{1.__module__}.{1.__name__}()' .format(hook_name, hook_func))
[ "def", "cmd_list", "(", "unused_conf", ":", "Config", ")", ":", "for", "name", ",", "builder", "in", "sorted", "(", "Plugin", ".", "builders", ".", "items", "(", ")", ")", ":", "if", "builder", ".", "func", ":", "print", "(", "'+- {0:16s} implemented in {1.__module__}.{1.__name__}()'", ".", "format", "(", "name", ",", "builder", ".", "func", ")", ")", "else", ":", "print", "(", "'+- {0:16s} loaded with no builder function'", ".", "format", "(", "name", ")", ")", "for", "hook_name", ",", "hook_func", "in", "sorted", "(", "Plugin", ".", "get_hooks_for_builder", "(", "name", ")", ")", ":", "print", "(", "' +- {0} hook implemented in '", "'{1.__module__}.{1.__name__}()'", ".", "format", "(", "hook_name", ",", "hook_func", ")", ")" ]
Print out information on loaded builders and hooks.
[ "Print", "out", "information", "on", "loaded", "builders", "and", "hooks", "." ]
python
train
50.916667
pantsbuild/pants
src/python/pants/base/workunit.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/workunit.py#L179-L198
def output(self, name): """Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if necessary. :API: public """ m = WorkUnit._valid_name_re.match(name) if not m or m.group(0) != name: raise Exception('Invalid output name: {}'.format(name)) if name not in self._outputs: workunit_name = re.sub(r'\W', '_', self.name) path = os.path.join(self.run_info_dir, 'tool_outputs', '{workunit_name}-{id}.{output_name}' .format(workunit_name=workunit_name, id=self.id, output_name=name)) safe_mkdir_for(path) self._outputs[name] = FileBackedRWBuf(path) self._output_paths[name] = path return self._outputs[name]
[ "def", "output", "(", "self", ",", "name", ")", ":", "m", "=", "WorkUnit", ".", "_valid_name_re", ".", "match", "(", "name", ")", "if", "not", "m", "or", "m", ".", "group", "(", "0", ")", "!=", "name", ":", "raise", "Exception", "(", "'Invalid output name: {}'", ".", "format", "(", "name", ")", ")", "if", "name", "not", "in", "self", ".", "_outputs", ":", "workunit_name", "=", "re", ".", "sub", "(", "r'\\W'", ",", "'_'", ",", "self", ".", "name", ")", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "run_info_dir", ",", "'tool_outputs'", ",", "'{workunit_name}-{id}.{output_name}'", ".", "format", "(", "workunit_name", "=", "workunit_name", ",", "id", "=", "self", ".", "id", ",", "output_name", "=", "name", ")", ")", "safe_mkdir_for", "(", "path", ")", "self", ".", "_outputs", "[", "name", "]", "=", "FileBackedRWBuf", "(", "path", ")", "self", ".", "_output_paths", "[", "name", "]", "=", "path", "return", "self", ".", "_outputs", "[", "name", "]" ]
Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if necessary. :API: public
[ "Returns", "the", "output", "buffer", "for", "the", "specified", "output", "name", "(", "e", ".", "g", ".", "stdout", ")", "creating", "it", "if", "necessary", "." ]
python
train
40.05
FutunnOpen/futuquant
futuquant/examples/TinyQuant/TinyQuantBase.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/TinyQuantBase.py#L179-L187
def boll(self, n, dev, array=False): """Bollinger Bands""" mid = self.sma(n, array) std = self.std(n, array) up = mid + std * dev down = mid - std * dev return up, down
[ "def", "boll", "(", "self", ",", "n", ",", "dev", ",", "array", "=", "False", ")", ":", "mid", "=", "self", ".", "sma", "(", "n", ",", "array", ")", "std", "=", "self", ".", "std", "(", "n", ",", "array", ")", "up", "=", "mid", "+", "std", "*", "dev", "down", "=", "mid", "-", "std", "*", "dev", "return", "up", ",", "down" ]
Bollinger Bands
[ "Bollinger", "Bands" ]
python
train
22.111111
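The arithmetic above in isolation: mid is an n-period simple moving average and the bands sit dev standard deviations either side. Plain numpy, independent of the surrounding ArrayManager-style class.

import numpy as np

close = np.array([10.0, 10.5, 10.2, 10.8, 11.0])
n, dev = 5, 2
mid = close[-n:].mean()
std = close[-n:].std()
up, down = mid + std * dev, mid - std * dev  # upper/lower Bollinger band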
mathiasertl/django-ca
ca/django_ca/admin.py
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/admin.py#L126-L134
def get_actions(self, request): """Disable the "delete selected" admin action. Otherwise the action is present even though has_delete_permission is False; it just doesn't work. """ actions = super(CertificateMixin, self).get_actions(request) actions.pop('delete_selected', '') return actions
[ "def", "get_actions", "(", "self", ",", "request", ")", ":", "actions", "=", "super", "(", "CertificateMixin", ",", "self", ")", ".", "get_actions", "(", "request", ")", "actions", ".", "pop", "(", "'delete_selected'", ",", "''", ")", "return", "actions" ]
Disable the "delete selected" admin action. Otherwise the action is present even though has_delete_permission is False; it just doesn't work.
[ "Disable", "the", "delete", "selected", "admin", "action", "." ]
python
train
37.777778
saltstack/salt
salt/client/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L954-L993
def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return the full return data. ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub()
[ "def", "cmd_full_return", "(", "self", ",", "tgt", ",", "fun", ",", "arg", "=", "(", ")", ",", "timeout", "=", "None", ",", "tgt_type", "=", "'glob'", ",", "ret", "=", "''", ",", "verbose", "=", "False", ",", "kwarg", "=", "None", ",", "*", "*", "kwargs", ")", ":", "was_listening", "=", "self", ".", "event", ".", "cpub", "try", ":", "pub_data", "=", "self", ".", "run_job", "(", "tgt", ",", "fun", ",", "arg", ",", "tgt_type", ",", "ret", ",", "timeout", ",", "kwarg", "=", "kwarg", ",", "listen", "=", "True", ",", "*", "*", "kwargs", ")", "if", "not", "pub_data", ":", "return", "pub_data", "return", "(", "self", ".", "get_cli_static_event_returns", "(", "pub_data", "[", "'jid'", "]", ",", "pub_data", "[", "'minions'", "]", ",", "timeout", ",", "tgt", ",", "tgt_type", ",", "verbose", ")", ")", "finally", ":", "if", "not", "was_listening", ":", "self", ".", "event", ".", "close_pub", "(", ")" ]
Execute a salt command and return the full return data.
[ "Execute", "a", "salt", "command", "and", "return", "the", "full", "return", "data", "." ]
python
train
28.175
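Usage sketch, assuming a running salt master; LocalClient is the standard entry point this method belongs to.

import salt.client

local = salt.client.LocalClient()
ret = local.cmd_full_return('*', 'test.ping')  # full event returns per minion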
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1457-L1575
def detect_events(self, data, method, params, label): """Detect events and display on signal. Parameters ---------- data : instance of ChanTime one segment with all channels of interest method : str Method used for detection. params : dict Parameters used for detection. label : str Name of event type, on event labels """ if self.annot is None: # remove if buttons are disabled self.parent.statusBar().showMessage('No score file loaded') return lg.info('Adding event type ' + label) self.annot.add_event_type(label) self.display_eventtype() n_eventtype = self.idx_eventtype.count() self.idx_eventtype.setCurrentIndex(n_eventtype - 1) if params['max_dur'] in [0, 'None']: params['max_dur'] = None freq = (float(params['f1']), float(params['f2'])) duration = (params['min_dur'], params['max_dur']) if method in SPINDLE_METHODS: detector = DetectSpindle(method=method, frequency=freq, duration=duration, merge=params['merge']) detector.rolloff = params['rolloff'] detector.min_interval = params['interval'] if 'Ferrarelli2007' == method: detector.det_thresh = params['0'] detector.sel_thresh = params['1'] if 'Nir2011' == method: detector.smooth['dur'] = params['0'] detector.det_thresh = params['1'] detector.sel_thresh = params['2'] if 'Moelle2011' == method: detector.moving_rms['dur'] = params['0'] detector.smooth['dur'] = params['1'] detector.det_thresh = params['2'] if 'Wamsley2012' == method: detector.det_wavelet['dur'] = params['0'] detector.det_wavelet['sd'] = params['1'] detector.smooth['dur'] = params['2'] detector.det_thresh = params['3'] if 'Martin2013' == method: detector.moving_rms['dur'] = params['0'] detector.moving_rms['step'] = params['1'] detector.det_thresh = params['2'] if 'Ray2015' == method: detector.smooth['dur'] = params['0'] detector.zscore['step'] = params['1'] detector.det_thresh = params['2'] detector.sel_thresh = params['3'] if 'Lacourse2018' == method: detector.windowing['dur'] = params['0'] detector.windowing['step'] = params['1'] detector.abs_pow_thresh = params['2'] detector.rel_pow_thresh = params['3'] detector.covar_thresh = params['4'] detector.corr_thresh = params['5'] if 'FASST' == method: detector.det_thresh = params['0'] detector.smooth['dur'] = params['1'] if 'FASST2' == method: detector.det_thresh = params['0'] detector.moving_rms['dur'] = params['1'] detector.smooth['dur'] = params['2'] if 'UCSD' == method: detector.det_wavelet['dur'] = params['0'] detector.det_wavelet['width'] = params['1'] detector.det_wavelet['win'] = params['2'] detector.det_thresh = params['3'] detector.sel_thresh = params['4'] if 'Concordia' == method: detector.moving_rms['dur'] = params['0'] detector.smooth['dur'] = params['1'] detector.det_thresh = params['2'] detector.det_thresh_hi = params['3'] detector.tolerance = params['4'] detector.sel_thresh = params['5'] elif method in SLOW_WAVE_METHODS: detector = DetectSlowWave(method=method, duration=duration) detector.det_filt['freq'] = freq detector.trough_duration = (params['min_trough_dur'], params['max_trough_dur']) detector.max_trough_amp = params['max_trough_amp'] detector.min_ptp = params['min_ptp'] detector.invert = params['invert'] else: lg.info('Method not recognized: ' + method) return events = detector(data, parent=self) if events: self.annot.add_events(events, name=label) self.update_annotations()
[ "def", "detect_events", "(", "self", ",", "data", ",", "method", ",", "params", ",", "label", ")", ":", "if", "self", ".", "annot", "is", "None", ":", "# remove if buttons are disabled", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "'No score file loaded'", ")", "return", "lg", ".", "info", "(", "'Adding event type '", "+", "label", ")", "self", ".", "annot", ".", "add_event_type", "(", "label", ")", "self", ".", "display_eventtype", "(", ")", "n_eventtype", "=", "self", ".", "idx_eventtype", ".", "count", "(", ")", "self", ".", "idx_eventtype", ".", "setCurrentIndex", "(", "n_eventtype", "-", "1", ")", "if", "params", "[", "'max_dur'", "]", "in", "[", "0", ",", "'None'", "]", ":", "params", "[", "'max_dur'", "]", "=", "None", "freq", "=", "(", "float", "(", "params", "[", "'f1'", "]", ")", ",", "float", "(", "params", "[", "'f2'", "]", ")", ")", "duration", "=", "(", "params", "[", "'min_dur'", "]", ",", "params", "[", "'max_dur'", "]", ")", "if", "method", "in", "SPINDLE_METHODS", ":", "detector", "=", "DetectSpindle", "(", "method", "=", "method", ",", "frequency", "=", "freq", ",", "duration", "=", "duration", ",", "merge", "=", "params", "[", "'merge'", "]", ")", "detector", ".", "rolloff", "=", "params", "[", "'rolloff'", "]", "detector", ".", "min_interval", "=", "params", "[", "'interval'", "]", "if", "'Ferrarelli2007'", "==", "method", ":", "detector", ".", "det_thresh", "=", "params", "[", "'0'", "]", "detector", ".", "sel_thresh", "=", "params", "[", "'1'", "]", "if", "'Nir2011'", "==", "method", ":", "detector", ".", "smooth", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "det_thresh", "=", "params", "[", "'1'", "]", "detector", ".", "sel_thresh", "=", "params", "[", "'2'", "]", "if", "'Moelle2011'", "==", "method", ":", "detector", ".", "moving_rms", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "smooth", "[", "'dur'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "det_thresh", "=", "params", "[", "'2'", "]", "if", "'Wamsley2012'", "==", "method", ":", "detector", ".", "det_wavelet", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "det_wavelet", "[", "'sd'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "smooth", "[", "'dur'", "]", "=", "params", "[", "'2'", "]", "detector", ".", "det_thresh", "=", "params", "[", "'3'", "]", "if", "'Martin2013'", "==", "method", ":", "detector", ".", "moving_rms", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "moving_rms", "[", "'step'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "det_thresh", "=", "params", "[", "'2'", "]", "if", "'Ray2015'", "==", "method", ":", "detector", ".", "smooth", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "zscore", "[", "'step'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "det_thresh", "=", "params", "[", "'2'", "]", "detector", ".", "sel_thresh", "=", "params", "[", "'3'", "]", "if", "'Lacourse2018'", "==", "method", ":", "detector", ".", "windowing", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "windowing", "[", "'step'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "abs_pow_thresh", "=", "params", "[", "'2'", "]", "detector", ".", "rel_pow_thresh", "=", "params", "[", "'3'", "]", "detector", ".", "covar_thresh", "=", "params", "[", "'4'", "]", "detector", ".", "corr_thresh", "=", "params", "[", "'5'", "]", "if", "'FASST'", "==", "method", ":", "detector", ".", "det_thresh", "=", "params", "[", "'0'", 
"]", "detector", ".", "smooth", "[", "'dur'", "]", "=", "params", "[", "'1'", "]", "if", "'FASST2'", "==", "method", ":", "detector", ".", "det_thresh", "=", "params", "[", "'0'", "]", "detector", ".", "moving_rms", "[", "'dur'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "smooth", "[", "'dur'", "]", "=", "params", "[", "'2'", "]", "if", "'UCSD'", "==", "method", ":", "detector", ".", "det_wavelet", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "det_wavelet", "[", "'width'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "det_wavelet", "[", "'win'", "]", "=", "params", "[", "'2'", "]", "detector", ".", "det_thresh", "=", "params", "[", "'3'", "]", "detector", ".", "sel_thresh", "=", "params", "[", "'4'", "]", "if", "'Concordia'", "==", "method", ":", "detector", ".", "moving_rms", "[", "'dur'", "]", "=", "params", "[", "'0'", "]", "detector", ".", "smooth", "[", "'dur'", "]", "=", "params", "[", "'1'", "]", "detector", ".", "det_thresh", "=", "params", "[", "'2'", "]", "detector", ".", "det_thresh_hi", "=", "params", "[", "'3'", "]", "detector", ".", "tolerance", "=", "params", "[", "'4'", "]", "detector", ".", "sel_thresh", "=", "params", "[", "'5'", "]", "elif", "method", "in", "SLOW_WAVE_METHODS", ":", "detector", "=", "DetectSlowWave", "(", "method", "=", "method", ",", "duration", "=", "duration", ")", "detector", ".", "det_filt", "[", "'freq'", "]", "=", "freq", "detector", ".", "trough_duration", "=", "(", "params", "[", "'min_trough_dur'", "]", ",", "params", "[", "'max_trough_dur'", "]", ")", "detector", ".", "max_trough_amp", "=", "params", "[", "'max_trough_amp'", "]", "detector", ".", "min_ptp", "=", "params", "[", "'min_ptp'", "]", "detector", ".", "invert", "=", "params", "[", "'invert'", "]", "else", ":", "lg", ".", "info", "(", "'Method not recognized: '", "+", "method", ")", "return", "events", "=", "detector", "(", "data", ",", "parent", "=", "self", ")", "if", "events", ":", "self", ".", "annot", ".", "add_events", "(", "events", ",", "name", "=", "label", ")", "self", ".", "update_annotations", "(", ")" ]
Detect events and display on signal.

Parameters
----------
data : instance of ChanTime
    one segment with all channels of interest
method : str
    Method used for detection.
params : dict
    Parameters used for detection.
label : str
    Name of event type, on event labels
[ "Detect", "events", "and", "display", "on", "signal", "." ]
python
train
39.756303
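A note on the `params` dict consumed above: besides the named keys ('f1', 'f2', 'min_dur', ...), the method-specific settings travel under positional string keys '0'..'5', and each branch maps them onto different detector attributes. A minimal sketch for the 'Moelle2011' branch — every value below is illustrative only, not a recommended setting:

params = {
    'f1': 11, 'f2': 16,            # spindle frequency band (Hz)
    'min_dur': 0.5, 'max_dur': 3,  # duration bounds (s); 0 or 'None' means unbounded
    'merge': False, 'rolloff': 0.5, 'interval': 0.5,
    '0': 0.2,  # -> detector.moving_rms['dur'] for this method
    '1': 0.2,  # -> detector.smooth['dur']
    '2': 1.5,  # -> detector.det_thresh
}
# notes.detect_events(data, 'Moelle2011', params, label='spindle')  # hypothetical call site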
tensorflow/tensor2tensor
tensor2tensor/data_generators/text_problems.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_problems.py#L831-L851
def text_filepaths_for_task(self, tmp_dir, task_id):
    """List of input filepaths for a particular training or dev shard.

    Args:
      tmp_dir: a string
      task_id: an integer less than self.num_shards

    Returns:
      a list of tuples (filepath, start_pos, num_bytes)
    """
    assert task_id >= 0
    assert task_id < self.num_train_shards + self.num_dev_shards
    if task_id < self.num_train_shards:
        return [
            f for i, f in enumerate(self.train_text_filepaths(tmp_dir))
            if i % self.num_train_shards == task_id
        ]
    else:
        return [
            f for i, f in enumerate(self.dev_text_filepaths(tmp_dir))
            if i % self.num_dev_shards == task_id - self.num_train_shards
        ]
[ "def", "text_filepaths_for_task", "(", "self", ",", "tmp_dir", ",", "task_id", ")", ":", "assert", "task_id", ">=", "0", "assert", "task_id", "<", "self", ".", "num_train_shards", "+", "self", ".", "num_dev_shards", "if", "task_id", "<", "self", ".", "num_train_shards", ":", "return", "[", "f", "for", "i", ",", "f", "in", "enumerate", "(", "self", ".", "train_text_filepaths", "(", "tmp_dir", ")", ")", "if", "i", "%", "self", ".", "num_train_shards", "==", "task_id", "]", "else", ":", "return", "[", "f", "for", "i", ",", "f", "in", "enumerate", "(", "self", ".", "dev_text_filepaths", "(", "tmp_dir", ")", ")", "if", "i", "%", "self", ".", "num_dev_shards", "==", "task_id", "-", "self", ".", "num_train_shards", "]" ]
List of input filepaths for a particular training or dev shard.

Args:
  tmp_dir: a string
  task_id: an integer less than self.num_shards

Returns:
  a list of tuples (filepath, start_pos, num_bytes)
[ "List", "of", "input", "filepaths", "for", "a", "particular", "training", "or", "dev", "shard", "." ]
python
train
33.904762
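The shard selection above is plain round-robin over the enumerated filepaths (note the docstring promises (filepath, start_pos, num_bytes) tuples while the body returns bare filepaths). A standalone sketch of the selection logic, with made-up filenames:

files = ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6']
num_train_shards, task_id = 3, 1
shard = [f for i, f in enumerate(files) if i % num_train_shards == task_id]
print(shard)  # ['f1', 'f4']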
chrisrink10/basilisp
src/basilisp/lang/list.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/list.py#L93-L97
def l(*members, meta=None) -> List:
    """Creates a new list from members."""
    return List(  # pylint: disable=abstract-class-instantiated
        plist(iterable=members), meta=meta
    )
[ "def", "l", "(", "*", "members", ",", "meta", "=", "None", ")", "->", "List", ":", "return", "List", "(", "# pylint: disable=abstract-class-instantiated", "plist", "(", "iterable", "=", "members", ")", ",", "meta", "=", "meta", ")" ]
Creates a new list from members.
[ "Creates", "a", "new", "list", "from", "members", "." ]
python
test
37.4
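A quick hypothetical usage of `l`, which wraps its arguments in a persistent (pyrsistent-backed) Lisp list; the values are illustrative:

xs = l(1, 2, 3)                  # roughly the Lisp list (1 2 3)
tagged = l('a', 'b', meta=None)  # meta carries optional metadata, as above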
CI-WATER/gsshapy
gsshapy/orm/idx.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/idx.py#L96-L135
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Index Map Read from File Method
    """
    # Set file extension property
    self.fileExtension = extension

    # Open file and read plain text into text field
    with open(path, 'r') as f:
        self.rasterText = f.read()

    # Retrieve metadata from header
    lines = self.rasterText.split('\n')
    for line in lines[0:6]:
        spline = line.split()

        if 'north' in spline[0].lower():
            self.north = float(spline[1])
        elif 'south' in spline[0].lower():
            self.south = float(spline[1])
        elif 'east' in spline[0].lower():
            self.east = float(spline[1])
        elif 'west' in spline[0].lower():
            self.west = float(spline[1])
        elif 'rows' in spline[0].lower():
            self.rows = int(spline[1])
        elif 'cols' in spline[0].lower():
            self.columns = int(spline[1])

    if spatial:
        # Get well known binary from the raster file using the MapKit RasterLoader
        wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session,
                                                       grassRasterPath=path,
                                                       srid=str(spatialReferenceID),
                                                       noData='-1')
        self.raster = wkbRaster
        self.srid = spatialReferenceID

    # Assign other properties
    self.filename = filename
[ "def", "_read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "path", ",", "name", ",", "extension", ",", "spatial", ",", "spatialReferenceID", ",", "replaceParamFile", ")", ":", "# Set file extension property", "self", ".", "fileExtension", "=", "extension", "# Open file and read plain text into text field", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "self", ".", "rasterText", "=", "f", ".", "read", "(", ")", "# Retrieve metadata from header", "lines", "=", "self", ".", "rasterText", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", "[", "0", ":", "6", "]", ":", "spline", "=", "line", ".", "split", "(", ")", "if", "'north'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "north", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'south'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "south", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'east'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "east", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'west'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "west", "=", "float", "(", "spline", "[", "1", "]", ")", "elif", "'rows'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "rows", "=", "int", "(", "spline", "[", "1", "]", ")", "elif", "'cols'", "in", "spline", "[", "0", "]", ".", "lower", "(", ")", ":", "self", ".", "columns", "=", "int", "(", "spline", "[", "1", "]", ")", "if", "spatial", ":", "# Get well known binary from the raster file using the MapKit RasterLoader", "wkbRaster", "=", "RasterLoader", ".", "grassAsciiRasterToWKB", "(", "session", "=", "session", ",", "grassRasterPath", "=", "path", ",", "srid", "=", "str", "(", "spatialReferenceID", ")", ",", "noData", "=", "'-1'", ")", "self", ".", "raster", "=", "wkbRaster", "self", ".", "srid", "=", "spatialReferenceID", "# Assign other properties", "self", ".", "filename", "=", "filename" ]
Index Map Read from File Method
[ "Index", "Map", "Read", "from", "File", "Method" ]
python
train
40.525
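The header loop above expects the first six lines of a GRASS ASCII raster. A standalone sketch on a made-up header showing what gets extracted (note that 'cols' lands in `self.columns`):

sample = "north: 100.0\nsouth: 0.0\neast: 50.0\nwest: 0.0\nrows: 10\ncols: 5"
meta = {}
for line in sample.split('\n')[0:6]:
    spline = line.split()
    meta[spline[0].rstrip(':').lower()] = spline[1]
print(meta)  # {'north': '100.0', 'south': '0.0', ..., 'rows': '10', 'cols': '5'}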
chimera0/accel-brain-code
Algorithmic-Composition/pycomposer/noisesampler/bar_noise_sampler.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Algorithmic-Composition/pycomposer/noisesampler/bar_noise_sampler.py#L54-L82
def generate(self):
    '''
    Generate noise samples.

    Returns:
        `np.ndarray` of samples.
    '''
    sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim))

    for batch in range(self.__batch_size):
        for i in range(len(self.__program_list)):
            program_key = self.__program_list[i]
            key = np.random.randint(low=0, high=len(self.__midi_df_list))
            midi_df = self.__midi_df_list[key]
            midi_df = midi_df[midi_df.program == program_key]
            if midi_df.shape[0] < self.__seq_len:
                continue

            row = np.random.uniform(
                low=midi_df.start.min(),
                high=midi_df.end.max() - (self.__seq_len * self.__time_fraction)
            )
            for seq in range(self.__seq_len):
                start = row + (seq * self.__time_fraction)
                end = row + ((seq+1) * self.__time_fraction)
                df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)]
                sampled_arr[batch, i, seq] = self.__convert_into_feature(df)

    return sampled_arr
[ "def", "generate", "(", "self", ")", ":", "sampled_arr", "=", "np", ".", "zeros", "(", "(", "self", ".", "__batch_size", ",", "self", ".", "__channel", ",", "self", ".", "__seq_len", ",", "self", ".", "__dim", ")", ")", "for", "batch", "in", "range", "(", "self", ".", "__batch_size", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "__program_list", ")", ")", ":", "program_key", "=", "self", ".", "__program_list", "[", "i", "]", "key", "=", "np", ".", "random", ".", "randint", "(", "low", "=", "0", ",", "high", "=", "len", "(", "self", ".", "__midi_df_list", ")", ")", "midi_df", "=", "self", ".", "__midi_df_list", "[", "key", "]", "midi_df", "=", "midi_df", "[", "midi_df", ".", "program", "==", "program_key", "]", "if", "midi_df", ".", "shape", "[", "0", "]", "<", "self", ".", "__seq_len", ":", "continue", "row", "=", "np", ".", "random", ".", "uniform", "(", "low", "=", "midi_df", ".", "start", ".", "min", "(", ")", ",", "high", "=", "midi_df", ".", "end", ".", "max", "(", ")", "-", "(", "self", ".", "__seq_len", "*", "self", ".", "__time_fraction", ")", ")", "for", "seq", "in", "range", "(", "self", ".", "__seq_len", ")", ":", "start", "=", "row", "+", "(", "seq", "*", "self", ".", "__time_fraction", ")", "end", "=", "row", "+", "(", "(", "seq", "+", "1", ")", "*", "self", ".", "__time_fraction", ")", "df", "=", "midi_df", "[", "(", "start", "<=", "midi_df", ".", "start", ")", "&", "(", "midi_df", ".", "start", "<=", "end", ")", "]", "sampled_arr", "[", "batch", ",", "i", ",", "seq", "]", "=", "self", ".", "__convert_into_feature", "(", "df", ")", "return", "sampled_arr" ]
Generate noise samples.

Returns:
    `np.ndarray` of samples.
[ "Generate", "noise", "samples", ".", "Returns", ":", "np", ".", "ndarray", "of", "samples", "." ]
python
train
42.37931
OzymandiasTheGreat/python-libinput
libinput/event.py
https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L799-L829
def delta_unaccelerated(self):
    """The relative delta of the unaccelerated motion vector of
    the current event.

    For gesture events that are not of type
    :attr:`~libinput.constant.EventType.GESTURE_SWIPE_UPDATE` or
    :attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE`, this
    property raises :exc:`AttributeError`.

    Relative unaccelerated motion deltas are normalized to represent
    those of a device with 1000dpi resolution. See
    `Normalization of relative motion`_ for more details.
    Note that unaccelerated events are not equivalent to 'raw' events
    as read from the device.

    Any rotation applied to the device also applies to gesture motion
    (see :meth:`~libinput.define.DeviceConfigRotation.set_angle`).

    Returns:
        (float, float): The unaccelerated relative (x, y) movement
        since the last event.
    """
    if self.type not in {EventType.GESTURE_SWIPE_UPDATE,
                         EventType.GESTURE_PINCH_UPDATE}:
        raise AttributeError(_wrong_prop.format(self.type))
    delta_x = self._libinput.libinput_event_gesture_get_dx_unaccelerated(
        self._handle)
    delta_y = self._libinput.libinput_event_gesture_get_dy_unaccelerated(
        self._handle)
    return delta_x, delta_y
[ "def", "delta_unaccelerated", "(", "self", ")", ":", "if", "self", ".", "type", "not", "in", "{", "EventType", ".", "GESTURE_SWIPE_UPDATE", ",", "EventType", ".", "GESTURE_PINCH_UPDATE", "}", ":", "raise", "AttributeError", "(", "_wrong_prop", ".", "format", "(", "self", ".", "type", ")", ")", "delta_x", "=", "self", ".", "_libinput", ".", "libinput_event_gesture_get_dx_unaccelerated", "(", "self", ".", "_handle", ")", "delta_y", "=", "self", ".", "_libinput", ".", "libinput_event_gesture_get_dy_unaccelerated", "(", "self", ".", "_handle", ")", "return", "delta_x", ",", "delta_y" ]
The relative delta of the unaccelerated motion vector of the current event.

For gesture events that are not of type
:attr:`~libinput.constant.EventType.GESTURE_SWIPE_UPDATE` or
:attr:`~libinput.constant.EventType.GESTURE_PINCH_UPDATE`, this
property raises :exc:`AttributeError`.

Relative unaccelerated motion deltas are normalized to represent those
of a device with 1000dpi resolution. See `Normalization of relative
motion`_ for more details. Note that unaccelerated events are not
equivalent to 'raw' events as read from the device. Any rotation
applied to the device also applies to gesture motion (see
:meth:`~libinput.define.DeviceConfigRotation.set_angle`).

Returns:
    (float, float): The unaccelerated relative (x, y) movement since
    the last event.
[ "The", "relative", "delta", "of", "the", "unaccelerated", "motion", "vector", "of", "the", "current", "event", "." ]
python
train
37.258065
JnyJny/Geometry
Geometry/point.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/point.py#L1260-L1281
def cross(self, other):
    '''
    :other: Point or point equivalent
    :return: float

    Vector cross product of points U (self) and V (other), computed:

    U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)

    s1 = u2v3 - u3v2
    s2 = u3v1 - u1v3
    s3 = u1v2 - u2v1

    U x V = s1 + s2 + s3

    Returns a float.
    '''
    b = self.__class__._convert(other)

    return sum([(self.y * b.z) - (self.z * b.y),
                (self.z * b.x) - (self.x * b.z),
                (self.x * b.y) - (self.y * b.x)])
[ "def", "cross", "(", "self", ",", "other", ")", ":", "b", "=", "self", ".", "__class__", ".", "_convert", "(", "other", ")", "return", "sum", "(", "[", "(", "self", ".", "y", "*", "b", ".", "z", ")", "-", "(", "self", ".", "z", "*", "b", ".", "y", ")", ",", "(", "self", ".", "z", "*", "b", ".", "x", ")", "-", "(", "self", ".", "x", "*", "b", ".", "z", ")", ",", "(", "self", ".", "x", "*", "b", ".", "y", ")", "-", "(", "self", ".", "y", "*", "b", ".", "x", ")", "]", ")" ]
:other: Point or point equivalent
:return: float

Vector cross product of points U (self) and V (other), computed:

U x V = (u1*i + u2*j + u3*k) x (v1*i + v2*j + v3*k)

s1 = u2v3 - u3v2
s2 = u3v1 - u1v3
s3 = u1v2 - u2v1

U x V = s1 + s2 + s3

Returns a float.
[ ":", "other", ":", "Point", "or", "point", "equivalent", ":", "return", ":", "float" ]
python
train
25.545455
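Despite the name, `cross` returns a scalar: the sum s1 + s2 + s3 of the components of the 3-D cross-product vector. A worked check with concrete points:

u, v = (1, 2, 3), (4, 5, 6)
s1 = u[1]*v[2] - u[2]*v[1]  # 2*6 - 3*5 = -3
s2 = u[2]*v[0] - u[0]*v[2]  # 3*4 - 1*6 =  6
s3 = u[0]*v[1] - u[1]*v[0]  # 1*5 - 2*4 = -3
print(s1 + s2 + s3)         # 0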
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1438-L1453
def console_get_height_rect(
    con: tcod.console.Console, x: int, y: int, w: int, h: int, fmt: str
) -> int:
    """Return the height of this text once word-wrapped into this rectangle.

    Returns:
        int: The number of lines of text once word-wrapped.

    .. deprecated:: 8.5
        Use :any:`Console.get_height_rect` instead.
    """
    return int(
        lib.TCOD_console_get_height_rect_fmt(
            _console(con), x, y, w, h, _fmt(fmt)
        )
    )
[ "def", "console_get_height_rect", "(", "con", ":", "tcod", ".", "console", ".", "Console", ",", "x", ":", "int", ",", "y", ":", "int", ",", "w", ":", "int", ",", "h", ":", "int", ",", "fmt", ":", "str", ")", "->", "int", ":", "return", "int", "(", "lib", ".", "TCOD_console_get_height_rect_fmt", "(", "_console", "(", "con", ")", ",", "x", ",", "y", ",", "w", ",", "h", ",", "_fmt", "(", "fmt", ")", ")", ")" ]
Return the height of this text once word-wrapped into this rectangle.

Returns:
    int: The number of lines of text once word-wrapped.

.. deprecated:: 8.5
    Use :any:`Console.get_height_rect` instead.
[ "Return", "the", "height", "of", "this", "text", "once", "word", "-", "wrapped", "into", "this", "rectangle", "." ]
python
train
28.625
xtuml/pyxtuml
xtuml/consistency_check.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/consistency_check.py#L31-L47
def pretty_to_link(inst, link):
    '''
    Create a human-readable representation of a link on the 'TO'-side
    '''
    values = ''
    prefix = ''

    metaclass = xtuml.get_metaclass(inst)
    for name, ty in metaclass.attributes:
        if name in link.key_map:
            value = getattr(inst, name)
            value = xtuml.serialize_value(value, ty)
            name = link.key_map[name]
            values += '%s%s=%s' % (prefix, name, value)
            prefix = ', '

    return '%s(%s)' % (link.kind, values)
[ "def", "pretty_to_link", "(", "inst", ",", "link", ")", ":", "values", "=", "''", "prefix", "=", "''", "metaclass", "=", "xtuml", ".", "get_metaclass", "(", "inst", ")", "for", "name", ",", "ty", "in", "metaclass", ".", "attributes", ":", "if", "name", "in", "link", ".", "key_map", ":", "value", "=", "getattr", "(", "inst", ",", "name", ")", "value", "=", "xtuml", ".", "serialize_value", "(", "value", ",", "ty", ")", "name", "=", "link", ".", "key_map", "[", "name", "]", "values", "+=", "'%s%s=%s'", "%", "(", "prefix", ",", "name", ",", "value", ")", "prefix", "=", "', '", "return", "'%s(%s)'", "%", "(", "link", ".", "kind", ",", "values", ")" ]
Create a human-readable representation of a link on the 'TO'-side
[ "Create", "a", "human", "-", "readable", "representation", "of", "a", "link", "on", "the", "TO", "-", "side" ]
python
test
30.764706
PythonCharmers/python-future
src/future/backports/http/server.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/server.py#L675-L680
def do_GET(self):
    """Serve a GET request."""
    f = self.send_head()
    if f:
        self.copyfile(f, self.wfile)
        f.close()
[ "def", "do_GET", "(", "self", ")", ":", "f", "=", "self", ".", "send_head", "(", ")", "if", "f", ":", "self", ".", "copyfile", "(", "f", ",", "self", ".", "wfile", ")", "f", ".", "close", "(", ")" ]
Serve a GET request.
[ "Serve", "a", "GET", "request", "." ]
python
train
25.5
StackStorm/pybind
pybind/nos/v6_0_2f/interface/port_channel/qos/flowcontrol/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/port_channel/qos/flowcontrol/__init__.py#L127-L148
def _set_pfc(self, v, load=False):
    """
    Setter method for pfc, mapped from YANG variable /interface/port_channel/qos/flowcontrol/pfc (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_pfc is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_pfc() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("pfc_cos",pfc.pfc, yang_name="pfc", rest_name="pfc", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pfc-cos', extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="pfc", rest_name="pfc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """pfc must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("pfc_cos",pfc.pfc, yang_name="pfc", rest_name="pfc", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pfc-cos', extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name="pfc", rest_name="pfc", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
        })

    self.__pfc = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_pfc", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"pfc_cos\"", ",", "pfc", ".", "pfc", ",", "yang_name", "=", "\"pfc\"", ",", "rest_name", "=", "\"pfc\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'pfc-cos'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Priority-based Flow Control (PFC)'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'callpoint'", ":", "u'pfc_flowcontrol_po'", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"pfc\"", ",", "rest_name", "=", "\"pfc\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Priority-based Flow Control (PFC)'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'callpoint'", ":", "u'pfc_flowcontrol_po'", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-qos'", ",", "defining_module", "=", "'brocade-qos'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"pfc must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"pfc_cos\",pfc.pfc, yang_name=\"pfc\", rest_name=\"pfc\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pfc-cos', extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}), is_container='list', yang_name=\"pfc\", rest_name=\"pfc\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Priority-based Flow Control (PFC)', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'callpoint': u'pfc_flowcontrol_po', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__pfc", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for pfc, mapped from YANG variable /interface/port_channel/qos/flowcontrol/pfc (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pfc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pfc() directly.
[ "Setter", "method", "for", "pfc", "mapped", "from", "YANG", "variable", "/", "interface", "/", "port_channel", "/", "qos", "/", "flowcontrol", "/", "pfc", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_pfc", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_pfc", "()", "directly", "." ]
python
train
134.590909
pyviz/holoviews
holoviews/operation/datashader.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/operation/datashader.py#L270-L340
def get_agg_data(cls, obj, category=None):
    """
    Reduces any Overlay or NdOverlay of Elements into a single
    xarray Dataset that can be aggregated.
    """
    paths = []
    if isinstance(obj, Graph):
        obj = obj.edgepaths
    kdims = list(obj.kdims)
    vdims = list(obj.vdims)
    dims = obj.dimensions()[:2]
    if isinstance(obj, Path):
        glyph = 'line'
        for p in obj.split(datatype='dataframe'):
            paths.append(p)
    elif isinstance(obj, CompositeOverlay):
        element = None
        for key, el in obj.data.items():
            x, y, element, glyph = cls.get_agg_data(el)
            dims = (x, y)
            df = PandasInterface.as_dframe(element)
            if isinstance(obj, NdOverlay):
                df = df.assign(**dict(zip(obj.dimensions('key', True), key)))
            paths.append(df)
        if element is None:
            dims = None
        else:
            kdims += element.kdims
            vdims = element.vdims
    elif isinstance(obj, Element):
        glyph = 'line' if isinstance(obj, Curve) else 'points'
        paths.append(PandasInterface.as_dframe(obj))

    if dims is None or len(dims) != 2:
        return None, None, None, None
    else:
        x, y = dims

    if len(paths) > 1:
        if glyph == 'line':
            path = paths[0][:1]
            if isinstance(path, dd.DataFrame):
                path = path.compute()
            empty = path.copy()
            empty.iloc[0, :] = (np.NaN,) * empty.shape[1]
            paths = [elem for p in paths for elem in (p, empty)][:-1]
        if all(isinstance(path, dd.DataFrame) for path in paths):
            df = dd.concat(paths)
        else:
            paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths]
            df = pd.concat(paths)
    else:
        df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name])

    if category and df[category].dtype.name != 'category':
        df[category] = df[category].astype('category')

    is_dask = isinstance(df, dd.DataFrame)
    if any((not is_dask and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or
           df[d.name].dtype.kind == 'M' for d in (x, y)):
        df = df.copy()
    for d in (x, y):
        vals = df[d.name]
        if not is_dask and len(vals) and isinstance(vals.values[0], cftime_types):
            vals = cftime_to_timestamp(vals, 'ns')
        elif df[d.name].dtype.kind == 'M':
            vals = vals.astype('datetime64[ns]')
        else:
            continue
        df[d.name] = vals.astype('int64')
    return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph
[ "def", "get_agg_data", "(", "cls", ",", "obj", ",", "category", "=", "None", ")", ":", "paths", "=", "[", "]", "if", "isinstance", "(", "obj", ",", "Graph", ")", ":", "obj", "=", "obj", ".", "edgepaths", "kdims", "=", "list", "(", "obj", ".", "kdims", ")", "vdims", "=", "list", "(", "obj", ".", "vdims", ")", "dims", "=", "obj", ".", "dimensions", "(", ")", "[", ":", "2", "]", "if", "isinstance", "(", "obj", ",", "Path", ")", ":", "glyph", "=", "'line'", "for", "p", "in", "obj", ".", "split", "(", "datatype", "=", "'dataframe'", ")", ":", "paths", ".", "append", "(", "p", ")", "elif", "isinstance", "(", "obj", ",", "CompositeOverlay", ")", ":", "element", "=", "None", "for", "key", ",", "el", "in", "obj", ".", "data", ".", "items", "(", ")", ":", "x", ",", "y", ",", "element", ",", "glyph", "=", "cls", ".", "get_agg_data", "(", "el", ")", "dims", "=", "(", "x", ",", "y", ")", "df", "=", "PandasInterface", ".", "as_dframe", "(", "element", ")", "if", "isinstance", "(", "obj", ",", "NdOverlay", ")", ":", "df", "=", "df", ".", "assign", "(", "*", "*", "dict", "(", "zip", "(", "obj", ".", "dimensions", "(", "'key'", ",", "True", ")", ",", "key", ")", ")", ")", "paths", ".", "append", "(", "df", ")", "if", "element", "is", "None", ":", "dims", "=", "None", "else", ":", "kdims", "+=", "element", ".", "kdims", "vdims", "=", "element", ".", "vdims", "elif", "isinstance", "(", "obj", ",", "Element", ")", ":", "glyph", "=", "'line'", "if", "isinstance", "(", "obj", ",", "Curve", ")", "else", "'points'", "paths", ".", "append", "(", "PandasInterface", ".", "as_dframe", "(", "obj", ")", ")", "if", "dims", "is", "None", "or", "len", "(", "dims", ")", "!=", "2", ":", "return", "None", ",", "None", ",", "None", ",", "None", "else", ":", "x", ",", "y", "=", "dims", "if", "len", "(", "paths", ")", ">", "1", ":", "if", "glyph", "==", "'line'", ":", "path", "=", "paths", "[", "0", "]", "[", ":", "1", "]", "if", "isinstance", "(", "path", ",", "dd", ".", "DataFrame", ")", ":", "path", "=", "path", ".", "compute", "(", ")", "empty", "=", "path", ".", "copy", "(", ")", "empty", ".", "iloc", "[", "0", ",", ":", "]", "=", "(", "np", ".", "NaN", ",", ")", "*", "empty", ".", "shape", "[", "1", "]", "paths", "=", "[", "elem", "for", "p", "in", "paths", "for", "elem", "in", "(", "p", ",", "empty", ")", "]", "[", ":", "-", "1", "]", "if", "all", "(", "isinstance", "(", "path", ",", "dd", ".", "DataFrame", ")", "for", "path", "in", "paths", ")", ":", "df", "=", "dd", ".", "concat", "(", "paths", ")", "else", ":", "paths", "=", "[", "p", ".", "compute", "(", ")", "if", "isinstance", "(", "p", ",", "dd", ".", "DataFrame", ")", "else", "p", "for", "p", "in", "paths", "]", "df", "=", "pd", ".", "concat", "(", "paths", ")", "else", ":", "df", "=", "paths", "[", "0", "]", "if", "paths", "else", "pd", ".", "DataFrame", "(", "[", "]", ",", "columns", "=", "[", "x", ".", "name", ",", "y", ".", "name", "]", ")", "if", "category", "and", "df", "[", "category", "]", ".", "dtype", ".", "name", "!=", "'category'", ":", "df", "[", "category", "]", "=", "df", "[", "category", "]", ".", "astype", "(", "'category'", ")", "is_dask", "=", "isinstance", "(", "df", ",", "dd", ".", "DataFrame", ")", "if", "any", "(", "(", "not", "is_dask", "and", "len", "(", "df", "[", "d", ".", "name", "]", ")", "and", "isinstance", "(", "df", "[", "d", ".", "name", "]", ".", "values", "[", "0", "]", ",", "cftime_types", ")", ")", "or", "df", "[", "d", ".", "name", "]", ".", "dtype", ".", "kind", "==", "'M'", "for", "d", "in", "(", "x", ",", "y", ")", ")", 
":", "df", "=", "df", ".", "copy", "(", ")", "for", "d", "in", "(", "x", ",", "y", ")", ":", "vals", "=", "df", "[", "d", ".", "name", "]", "if", "not", "is_dask", "and", "len", "(", "vals", ")", "and", "isinstance", "(", "vals", ".", "values", "[", "0", "]", ",", "cftime_types", ")", ":", "vals", "=", "cftime_to_timestamp", "(", "vals", ",", "'ns'", ")", "elif", "df", "[", "d", ".", "name", "]", ".", "dtype", ".", "kind", "==", "'M'", ":", "vals", "=", "vals", ".", "astype", "(", "'datetime64[ns]'", ")", "else", ":", "continue", "df", "[", "d", ".", "name", "]", "=", "vals", ".", "astype", "(", "'int64'", ")", "return", "x", ",", "y", ",", "Dataset", "(", "df", ",", "kdims", "=", "kdims", ",", "vdims", "=", "vdims", ")", ",", "glyph" ]
Reduces any Overlay or NdOverlay of Elements into a single xarray Dataset that can be aggregated.
[ "Reduces", "any", "Overlay", "or", "NdOverlay", "of", "Elements", "into", "a", "single", "xarray", "Dataset", "that", "can", "be", "aggregated", "." ]
python
train
40.014085
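The `empty` row spliced between paths above is the standard NaN-separator trick: a line glyph breaks at NaN, so concatenated paths do not get joined end-to-start. A minimal standalone pandas illustration (column names are made up):

import numpy as np
import pandas as pd

p1 = pd.DataFrame({'x': [0, 1], 'y': [0, 1]})
p2 = pd.DataFrame({'x': [5, 6], 'y': [5, 6]})
empty = p1[:1].copy()
empty.iloc[0, :] = (np.nan,) * empty.shape[1]
df = pd.concat([p1, empty, p2])  # the NaN row keeps the two lines separate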
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L274-L289
def getNamespace(self, prefix):
    """prefix -- deference namespace prefix in node's context.
       Ascends parent nodes until found.
    """
    namespace = None
    if prefix == 'xmlns':
        namespace = DOM.findDefaultNS(prefix, self.__node)
    else:
        try:
            namespace = DOM.findNamespaceURI(prefix, self.__node)
        except DOMException, ex:
            if prefix != 'xml':
                raise SchemaError, '%s namespace not declared for %s'\
                    %(prefix, self.__node._get_tagName())
            namespace = XMLNS.XML
    return namespace
[ "def", "getNamespace", "(", "self", ",", "prefix", ")", ":", "namespace", "=", "None", "if", "prefix", "==", "'xmlns'", ":", "namespace", "=", "DOM", ".", "findDefaultNS", "(", "prefix", ",", "self", ".", "__node", ")", "else", ":", "try", ":", "namespace", "=", "DOM", ".", "findNamespaceURI", "(", "prefix", ",", "self", ".", "__node", ")", "except", "DOMException", ",", "ex", ":", "if", "prefix", "!=", "'xml'", ":", "raise", "SchemaError", ",", "'%s namespace not declared for %s'", "%", "(", "prefix", ",", "self", ".", "__node", ".", "_get_tagName", "(", ")", ")", "namespace", "=", "XMLNS", ".", "XML", "return", "namespace" ]
prefix -- deference namespace prefix in node's context. Ascends parent nodes until found.
[ "prefix", "--", "deference", "namespace", "prefix", "in", "node", "s", "context", ".", "Ascends", "parent", "nodes", "until", "found", "." ]
python
train
39.5
garenchan/policy
policy/enforcer.py
https://github.com/garenchan/policy/blob/7709ae5f371146f8c90380d0877a5e59d731f644/policy/enforcer.py#L133-L158
def enforce(self, rule, target, creds, exc=None, *args, **kwargs):
    """Checks authorization of a rule against the target and credentials."""
    self.load_rules()

    if isinstance(rule, checks.BaseCheck):
        result = rule(target, creds, self, rule)
    elif not self.rules:
        # No rules means we're going to fail closed.
        result = False
    else:
        try:
            # Evaluate the rule
            result = self.rules[rule](target, creds, self, rule)
        except KeyError:
            LOG.debug('Rule [%s] does not exist', rule)
            # If the rule doesn't exist, fail closed
            result = False

    if self.raise_error and not result:
        if exc:
            raise exc(*args, **kwargs)
        else:
            raise PolicyNotAuthorized(rule, target, creds)

    return result
[ "def", "enforce", "(", "self", ",", "rule", ",", "target", ",", "creds", ",", "exc", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "load_rules", "(", ")", "if", "isinstance", "(", "rule", ",", "checks", ".", "BaseCheck", ")", ":", "result", "=", "rule", "(", "target", ",", "creds", ",", "self", ",", "rule", ")", "elif", "not", "self", ".", "rules", ":", "# No rules means we're going to fail closed.", "result", "=", "False", "else", ":", "try", ":", "# Evaluate the rule", "result", "=", "self", ".", "rules", "[", "rule", "]", "(", "target", ",", "creds", ",", "self", ",", "rule", ")", "except", "KeyError", ":", "LOG", ".", "debug", "(", "'Rule [%s] does not exist'", ",", "rule", ")", "# If the rule doesn't exist, fail closed", "result", "=", "False", "if", "self", ".", "raise_error", "and", "not", "result", ":", "if", "exc", ":", "raise", "exc", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "PolicyNotAuthorized", "(", "rule", ",", "target", ",", "creds", ")", "return", "result" ]
Checks authorization of a rule against the target and credentials.
[ "Checks", "authorization", "of", "a", "rule", "against", "the", "target", "and", "credentials", "." ]
python
train
34.153846
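A hypothetical call against an `Enforcer` instance (the rule name and dicts are illustrative, and how the enforcer is constructed and its rules loaded is not shown here):

target = {'project_id': 'p1'}
creds = {'roles': ['admin'], 'project_id': 'p1'}
allowed = enforcer.enforce('compute:start', target, creds)
# With raise_error set, a failed check raises PolicyNotAuthorized
# (or the custom exc, if one is passed) instead of returning False.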
tjcsl/ion
intranet/apps/users/models.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/users/models.py#L747-L752
def absence_info(self):
    """Return information about the user's absences."""
    # FIXME: remove recursive dep
    from ..eighth.models import EighthSignup

    return EighthSignup.objects.filter(user=self, was_absent=True,
                                       scheduled_activity__attendance_taken=True)
[ "def", "absence_info", "(", "self", ")", ":", "# FIXME: remove recursive dep", "from", ".", ".", "eighth", ".", "models", "import", "EighthSignup", "return", "EighthSignup", ".", "objects", ".", "filter", "(", "user", "=", "self", ",", "was_absent", "=", "True", ",", "scheduled_activity__attendance_taken", "=", "True", ")" ]
Return information about the user's absences.
[ "Return", "information", "about", "the", "user", "s", "absences", "." ]
python
train
46.666667
KE-works/pykechain
pykechain/client.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L788-L816
def users(self, username=None, pk=None, **kwargs):
    """
    Users of KE-chain.

    Provide a list of :class:`User`s of KE-chain. You can filter on username or id or any other
    advanced filter.

    :param username: (optional) username to filter
    :type username: basestring or None
    :param pk: (optional) id of the user to filter
    :type pk: basestring or None
    :param kwargs: Additional filtering keyword=value arguments
    :type kwargs: dict or None
    :return: List of :class:`Users`
    :raises NotFoundError: when a user could not be found
    """
    request_params = {
        'username': username,
        'pk': pk,
    }
    if kwargs:
        request_params.update(**kwargs)

    r = self._request('GET', self._build_url('users'), params=request_params)

    if r.status_code != requests.codes.ok:  # pragma: no cover
        raise NotFoundError("Could not find users: '{}'".format(r.json()))

    data = r.json()
    return [User(user, client=self) for user in data['results']]
[ "def", "users", "(", "self", ",", "username", "=", "None", ",", "pk", "=", "None", ",", "*", "*", "kwargs", ")", ":", "request_params", "=", "{", "'username'", ":", "username", ",", "'pk'", ":", "pk", ",", "}", "if", "kwargs", ":", "request_params", ".", "update", "(", "*", "*", "kwargs", ")", "r", "=", "self", ".", "_request", "(", "'GET'", ",", "self", ".", "_build_url", "(", "'users'", ")", ",", "params", "=", "request_params", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "# pragma: no cover", "raise", "NotFoundError", "(", "\"Could not find users: '{}'\"", ".", "format", "(", "r", ".", "json", "(", ")", ")", ")", "data", "=", "r", ".", "json", "(", ")", "return", "[", "User", "(", "user", ",", "client", "=", "self", ")", "for", "user", "in", "data", "[", "'results'", "]", "]" ]
Users of KE-chain.

Provide a list of :class:`User`s of KE-chain. You can filter on username or id or any other advanced filter.

:param username: (optional) username to filter
:type username: basestring or None
:param pk: (optional) id of the user to filter
:type pk: basestring or None
:param kwargs: Additional filtering keyword=value arguments
:type kwargs: dict or None
:return: List of :class:`Users`
:raises NotFoundError: when a user could not be found
[ "Users", "of", "KE", "-", "chain", "." ]
python
train
36.793103
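Given an authenticated pykechain `client` (setup omitted), the filter keywords map straight onto the request params above; the values here are made up:

jdoe = client.users(username='jdoe')  # filter by username
one = client.users(pk='7')            # filter by user id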
mozilla/socorrolib
socorrolib/lib/datetimeutil.py
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/datetimeutil.py#L94-L120
def date_to_string(date):
    """Transform a date or datetime object into a string and return it.

    Examples:
    >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))
    '2012-01-03T12:23:34+00:00'
    >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))
    '2012-01-03T12:23:34'
    >>> date_to_string(datetime.date(2012, 1, 3))
    '2012-01-03'
    """
    if isinstance(date, datetime.datetime):
        # Create an ISO 8601 datetime string
        date_str = date.strftime('%Y-%m-%dT%H:%M:%S')
        tzstr = date.strftime('%z')
        if tzstr:
            # Yes, this is ugly. And no, I haven't found a better way to have a
            # truly ISO 8601 datetime with timezone in Python.
            date_str = '%s%s:%s' % (date_str, tzstr[0:3], tzstr[3:5])
    elif isinstance(date, datetime.date):
        # Create an ISO 8601 date string
        date_str = date.strftime('%Y-%m-%d')
    else:
        raise TypeError('Argument is not a date or datetime. ')

    return date_str
[ "def", "date_to_string", "(", "date", ")", ":", "if", "isinstance", "(", "date", ",", "datetime", ".", "datetime", ")", ":", "# Create an ISO 8601 datetime string", "date_str", "=", "date", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", "tzstr", "=", "date", ".", "strftime", "(", "'%z'", ")", "if", "tzstr", ":", "# Yes, this is ugly. And no, I haven't found a better way to have a", "# truly ISO 8601 datetime with timezone in Python.", "date_str", "=", "'%s%s:%s'", "%", "(", "date_str", ",", "tzstr", "[", "0", ":", "3", "]", ",", "tzstr", "[", "3", ":", "5", "]", ")", "elif", "isinstance", "(", "date", ",", "datetime", ".", "date", ")", ":", "# Create an ISO 8601 date string", "date_str", "=", "date", ".", "strftime", "(", "'%Y-%m-%d'", ")", "else", ":", "raise", "TypeError", "(", "'Argument is not a date or datetime. '", ")", "return", "date_str" ]
Transform a date or datetime object into a string and return it.

Examples:
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))
'2012-01-03T12:23:34+00:00'
>>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))
'2012-01-03T12:23:34'
>>> date_to_string(datetime.date(2012, 1, 3))
'2012-01-03'
[ "Transform", "a", "date", "or", "datetime", "object", "into", "a", "string", "and", "return", "it", "." ]
python
train
36.925926
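The slicing in the datetime branch splices a colon into the `%z` offset, which Python emits without one; a one-line check:

tzstr = '+0000'                            # what strftime('%z') yields for UTC
print('%s:%s' % (tzstr[0:3], tzstr[3:5]))  # '+00:00'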
google/grr
grr/core/grr_response_core/lib/rdfvalues/crypto.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/crypto.py#L163-L205
def ClientCertFromCSR(cls, csr):
    """Creates a new cert for the given common name.

    Args:
      csr: A CertificateSigningRequest.

    Returns:
      The signed cert.
    """
    builder = x509.CertificateBuilder()
    # Use the client CN for a cert serial_id. This will ensure we do
    # not have clashing cert id.
    common_name = csr.GetCN()
    serial = int(common_name.split(".")[1], 16)
    builder = builder.serial_number(serial)
    builder = builder.subject_name(
        x509.Name(
            [x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))

    now = rdfvalue.RDFDatetime.Now()
    now_plus_year = now + rdfvalue.Duration("52w")
    builder = builder.not_valid_after(now_plus_year.AsDatetime())
    now_minus_ten = now - rdfvalue.Duration("10s")
    builder = builder.not_valid_before(now_minus_ten.AsDatetime())
    # TODO(user): dependency loop with
    # grr/core/grr_response_core/config/client.py.
    # pylint: disable=protected-access
    ca_cert = config_lib._CONFIG["CA.certificate"]
    # pylint: enable=protected-access
    builder = builder.issuer_name(ca_cert.GetIssuer())
    builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())

    # TODO(user): dependency loop with
    # grr/core/grr_response_core/config/client.py.
    # pylint: disable=protected-access
    ca_key = config_lib._CONFIG["PrivateKeys.ca_key"]
    # pylint: enable=protected-access

    return RDFX509Cert(
        builder.sign(
            private_key=ca_key.GetRawPrivateKey(),
            algorithm=hashes.SHA256(),
            backend=openssl.backend))
[ "def", "ClientCertFromCSR", "(", "cls", ",", "csr", ")", ":", "builder", "=", "x509", ".", "CertificateBuilder", "(", ")", "# Use the client CN for a cert serial_id. This will ensure we do", "# not have clashing cert id.", "common_name", "=", "csr", ".", "GetCN", "(", ")", "serial", "=", "int", "(", "common_name", ".", "split", "(", "\".\"", ")", "[", "1", "]", ",", "16", ")", "builder", "=", "builder", ".", "serial_number", "(", "serial", ")", "builder", "=", "builder", ".", "subject_name", "(", "x509", ".", "Name", "(", "[", "x509", ".", "NameAttribute", "(", "oid", ".", "NameOID", ".", "COMMON_NAME", ",", "str", "(", "common_name", ")", ")", "]", ")", ")", "now", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "now_plus_year", "=", "now", "+", "rdfvalue", ".", "Duration", "(", "\"52w\"", ")", "builder", "=", "builder", ".", "not_valid_after", "(", "now_plus_year", ".", "AsDatetime", "(", ")", ")", "now_minus_ten", "=", "now", "-", "rdfvalue", ".", "Duration", "(", "\"10s\"", ")", "builder", "=", "builder", ".", "not_valid_before", "(", "now_minus_ten", ".", "AsDatetime", "(", ")", ")", "# TODO(user): dependency loop with", "# grr/core/grr_response_core/config/client.py.", "# pylint: disable=protected-access", "ca_cert", "=", "config_lib", ".", "_CONFIG", "[", "\"CA.certificate\"", "]", "# pylint: enable=protected-access", "builder", "=", "builder", ".", "issuer_name", "(", "ca_cert", ".", "GetIssuer", "(", ")", ")", "builder", "=", "builder", ".", "public_key", "(", "csr", ".", "GetPublicKey", "(", ")", ".", "GetRawPublicKey", "(", ")", ")", "# TODO(user): dependency loop with", "# grr/core/grr_response_core/config/client.py.", "# pylint: disable=protected-access", "ca_key", "=", "config_lib", ".", "_CONFIG", "[", "\"PrivateKeys.ca_key\"", "]", "# pylint: enable=protected-access", "return", "RDFX509Cert", "(", "builder", ".", "sign", "(", "private_key", "=", "ca_key", ".", "GetRawPrivateKey", "(", ")", ",", "algorithm", "=", "hashes", ".", "SHA256", "(", ")", ",", "backend", "=", "openssl", ".", "backend", ")", ")" ]
Creates a new cert for the given common name.

Args:
  csr: A CertificateSigningRequest.

Returns:
  The signed cert.
[ "Creates", "a", "new", "cert", "for", "the", "given", "common", "name", "." ]
python
train
36.046512
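The serial number is just the hex part of the client common name parsed as an integer; a worked example with a made-up CN:

common_name = 'C.1a2b3c4d'                   # hypothetical client id
serial = int(common_name.split('.')[1], 16)
print(serial)                                # 439041101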
karjaljo/hiisi
hiisi/hiisi.py
https://github.com/karjaljo/hiisi/blob/de6a64df5dcbcb37d5d3d5468663e65a7794f9a8/hiisi/hiisi.py#L102-L132
def attr_gen(self, attr):
    """Returns attribute generator that yields namedtuples containing
    path value pairs

    Parameters
    ----------
    attr : str
        Name of the search attribute

    Returns
    -------
    attr_generator : generator
        Returns a generator that yields named tuples with field names
        path and value.

    Examples
    --------
    >>> gen = h5f.attr_gen('elangle')
    >>> pair = next(gen)
    >>> print(pair.path)
    '/dataset1/where'
    >>> print(pair.value)
    0.5
    """
    HiisiHDF._clear_cache()
    HiisiHDF.CACHE['search_attribute'] = attr
    HiisiHDF._find_attr_paths('/', self['/'])  # Check root attributes
    self.visititems(HiisiHDF._find_attr_paths)
    path_attr_gen = (PathValue(attr_path, self[attr_path].attrs.get(attr))
                     for attr_path in HiisiHDF.CACHE['attribute_paths'])
    return path_attr_gen
[ "def", "attr_gen", "(", "self", ",", "attr", ")", ":", "HiisiHDF", ".", "_clear_cache", "(", ")", "HiisiHDF", ".", "CACHE", "[", "'search_attribute'", "]", "=", "attr", "HiisiHDF", ".", "_find_attr_paths", "(", "'/'", ",", "self", "[", "'/'", "]", ")", "# Check root attributes", "self", ".", "visititems", "(", "HiisiHDF", ".", "_find_attr_paths", ")", "path_attr_gen", "=", "(", "PathValue", "(", "attr_path", ",", "self", "[", "attr_path", "]", ".", "attrs", ".", "get", "(", "attr", ")", ")", "for", "attr_path", "in", "HiisiHDF", ".", "CACHE", "[", "'attribute_paths'", "]", ")", "return", "path_attr_gen" ]
Returns attribute generator that yields namedtuples containing
path value pairs

Parameters
----------
attr : str
    Name of the search attribute

Returns
-------
attr_generator : generator
    Returns a generator that yields named tuples with field names
    path and value.

Examples
--------
>>> gen = h5f.attr_gen('elangle')
>>> pair = next(gen)
>>> print(pair.path)
'/dataset1/where'
>>> print(pair.value)
0.5
[ "Returns", "attribute", "generator", "that", "yields", "namedtuples", "containing", "path", "value", "pairs", "Parameters", "----------", "attr", ":", "str", "Name", "of", "the", "search", "attribute" ]
python
train
30.774194
bsolomon1124/pyfinance
pyfinance/ols.py
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/ols.py#L304-L306
def ss_reg(self):
    """Sum of squares of the regression."""
    return np.sum(np.square(self.predicted - self.ybar), axis=0)
[ "def", "ss_reg", "(", "self", ")", ":", "return", "np", ".", "sum", "(", "np", ".", "square", "(", "self", ".", "predicted", "-", "self", ".", "ybar", ")", ",", "axis", "=", "0", ")" ]
Sum of squares of the regression.
[ "Sum", "of", "squares", "of", "the", "regression", "." ]
python
train
44.666667
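For reference, this is the SSR term of the usual decomposition SST = SSR + SSE; a standalone numeric check of the expression above:

import numpy as np

predicted = np.array([1.0, 2.0, 3.0])
ybar = 2.0
print(np.sum(np.square(predicted - ybar), axis=0))  # 2.0 = (-1)^2 + 0^2 + 1^2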
bwohlberg/sporco
sporco/admm/bpdn.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/bpdn.py#L1077-L1088
def uinit(self, ushape):
    """Return initialiser for working variable U."""
    if self.opt['Y0'] is None:
        return np.zeros(ushape, dtype=self.dtype)
    else:
        # If initial Y is non-zero, initial U is chosen so that
        # the relevant dual optimality criterion (see (3.10) in
        # boyd-2010-distributed) is satisfied.
        U0 = np.sign(self.block_sep0(self.Y)) / self.rho
        U1 = self.block_sep1(self.Y) - self.S
        return self.block_cat(U0, U1)
[ "def", "uinit", "(", "self", ",", "ushape", ")", ":", "if", "self", ".", "opt", "[", "'Y0'", "]", "is", "None", ":", "return", "np", ".", "zeros", "(", "ushape", ",", "dtype", "=", "self", ".", "dtype", ")", "else", ":", "# If initial Y is non-zero, initial U is chosen so that", "# the relevant dual optimality criterion (see (3.10) in", "# boyd-2010-distributed) is satisfied.", "U0", "=", "np", ".", "sign", "(", "self", ".", "block_sep0", "(", "self", ".", "Y", ")", ")", "/", "self", ".", "rho", "U1", "=", "self", ".", "block_sep1", "(", "self", ".", "Y", ")", "-", "self", ".", "S", "return", "self", ".", "block_cat", "(", "U0", ",", "U1", ")" ]
Return initialiser for working variable U.
[ "Return", "initialiser", "for", "working", "variable", "U", "." ]
python
train
42.833333
materialsproject/pymatgen
pymatgen/core/ion.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/ion.py#L121-L131
def alphabetical_formula(self):
    """
    Returns a reduced formula string with appended charge
    """
    alph_formula = super().alphabetical_formula
    chg_str = ""
    if self.charge > 0:
        chg_str = " +" + formula_double_format(self.charge, False)
    elif self.charge < 0:
        chg_str = " " + formula_double_format(self.charge, False)
    return alph_formula + chg_str
[ "def", "alphabetical_formula", "(", "self", ")", ":", "alph_formula", "=", "super", "(", ")", ".", "alphabetical_formula", "chg_str", "=", "\"\"", "if", "self", ".", "charge", ">", "0", ":", "chg_str", "=", "\" +\"", "+", "formula_double_format", "(", "self", ".", "charge", ",", "False", ")", "elif", "self", ".", "charge", "<", "0", ":", "chg_str", "=", "\" \"", "+", "formula_double_format", "(", "self", ".", "charge", ",", "False", ")", "return", "alph_formula", "+", "chg_str" ]
Returns a reduced formula string with appended charge
[ "Returns", "a", "reduced", "formula", "string", "with", "appended", "charge" ]
python
train
37.909091
noahbenson/neuropythy
neuropythy/io/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/io/core.py#L392-L465
def to_nifti(obj, like=None, header=None, affine=None, extensions=Ellipsis, version=1):
    '''
    to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given
      object obj. If obj is a Nifti2Image already, then it is returned unmolested; other
      deduction rules are described below.

    The following options are accepted:
      * like (default: None) may be provided to give a guide for the various header- and
        meta-data that is included in the image. If this is a nifti image object, its meta-data
        are used; if this is a subject, then the meta-data are deduced from the subject's voxel
        and native orientation matrices. All other specific options below override anything
        deduced from the like argument.
      * header (default: None) may be a Nifti1 or Niti2 image header to be used as the nifti
        header or to replace the header in a new image.
      * affine (default: None) may specify the affine transform to be given to the image object.
      * extensions (default: Ellipsis) may specify a nifti extensions object that should be
        included in the header. The default value, Ellipsis, indicates that the extensions
        should not be changed, and that None should be used if extensions are not implied in
        obj (if, for example, obj is a data array rather than an image object with a header
        already.
      * version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image
        object, respectively.
    '''
    from neuropythy.mri import Subject
    obj0 = obj
    # First go from like to explicit versions of affine and header:
    if like is not None:
        if isinstance(like, nib.analyze.AnalyzeHeader) or \
           isinstance(like, nib.freesurfer.mghformat.MGHHeader):
            if header is None: header = like
        elif isinstance(like, nib.analyze.SpatialImage):
            if header is None: header = like.header
            if affine is None: affine = like.affine
        elif isinstance(like, Subject):
            if affine is None: affine = like.voxel_to_native_matrix
        else:
            raise ValueError('Could not interpret like argument with type %s' % type(like))
    # check to make sure that we have to change something:
    elif ((version == 1 and isinstance(obj, nib.nifti1.Nifti1Image)) or
          (version == 2 and isinstance(obj, nib.nifti2.Nifti2Image))):
        if ((header is None or obj.header is header) and
            (extensions is Ellipsis or extensions is obj.header.extensions or
             (extensions is None and len(obj.header.extensions) == 0))):
            return obj
    # okay, now look at the header and affine etc.
    if header is None:
        if isinstance(obj, nib.analyze.SpatialImage):
            header = obj.header
        else:
            header = nib.nifti1.Nifti1Header() if version == 1 else nib.nifti2.Nifti2Header()
    if affine is None:
        if isinstance(obj, nib.analyze.SpatialImage):
            affine = obj.affine
        else:
            affine = np.eye(4)
    if extensions is None:
        extensions = nib.nifti1.Nifti1Extensions()
    # Figure out what the data is
    if isinstance(obj, nib.analyze.SpatialImage):
        obj = obj.dataobj
    elif not pimms.is_nparray(obj):
        obj = np.asarray(obj)
    if len(obj.shape) < 3:
        obj = np.asarray([[obj]])
    # Okay, make a new object now...
    if version == 1:
        obj = nib.nifti1.Nifti1Image(obj, affine, header)
    elif version == 2:
        obj = nib.nifti2.Nifti2Image(obj, affine, header)
    else:
        raise ValueError('invalid version given (should be 1 or 2): %s' % version)
    # add the extensions if they're needed
    if extensions is not Ellipsis and (len(extensions) > 0 or len(obj.header.extensions) > 0):
        obj.header.extensions = extensions
    # Okay, that's it!
    return obj
[ "def", "to_nifti", "(", "obj", ",", "like", "=", "None", ",", "header", "=", "None", ",", "affine", "=", "None", ",", "extensions", "=", "Ellipsis", ",", "version", "=", "1", ")", ":", "from", "neuropythy", ".", "mri", "import", "Subject", "obj0", "=", "obj", "# First go from like to explicit versions of affine and header:", "if", "like", "is", "not", "None", ":", "if", "isinstance", "(", "like", ",", "nib", ".", "analyze", ".", "AnalyzeHeader", ")", "or", "isinstance", "(", "like", ",", "nib", ".", "freesurfer", ".", "mghformat", ".", "MGHHeader", ")", ":", "if", "header", "is", "None", ":", "header", "=", "like", "elif", "isinstance", "(", "like", ",", "nib", ".", "analyze", ".", "SpatialImage", ")", ":", "if", "header", "is", "None", ":", "header", "=", "like", ".", "header", "if", "affine", "is", "None", ":", "affine", "=", "like", ".", "affine", "elif", "isinstance", "(", "like", ",", "Subject", ")", ":", "if", "affine", "is", "None", ":", "affine", "=", "like", ".", "voxel_to_native_matrix", "else", ":", "raise", "ValueError", "(", "'Could not interpret like argument with type %s'", "%", "type", "(", "like", ")", ")", "# check to make sure that we have to change something:", "elif", "(", "(", "version", "==", "1", "and", "isinstance", "(", "obj", ",", "nib", ".", "nifti1", ".", "Nifti1Image", ")", ")", "or", "(", "version", "==", "2", "and", "isinstance", "(", "obj", ",", "nib", ".", "nifti2", ".", "Nifti2Image", ")", ")", ")", ":", "if", "(", "(", "header", "is", "None", "or", "obj", ".", "header", "is", "header", ")", "and", "(", "extensions", "is", "Ellipsis", "or", "extensions", "is", "obj", ".", "header", ".", "extensions", "or", "(", "extensions", "is", "None", "and", "len", "(", "obj", ".", "header", ".", "extensions", ")", "==", "0", ")", ")", ")", ":", "return", "obj", "# okay, now look at the header and affine etc.", "if", "header", "is", "None", ":", "if", "isinstance", "(", "obj", ",", "nib", ".", "analyze", ".", "SpatialImage", ")", ":", "header", "=", "obj", ".", "header", "else", ":", "header", "=", "nib", ".", "nifti1", ".", "Nifti1Header", "(", ")", "if", "version", "==", "1", "else", "nib", ".", "nifti2", ".", "Nifti2Header", "(", ")", "if", "affine", "is", "None", ":", "if", "isinstance", "(", "obj", ",", "nib", ".", "analyze", ".", "SpatialImage", ")", ":", "affine", "=", "obj", ".", "affine", "else", ":", "affine", "=", "np", ".", "eye", "(", "4", ")", "if", "extensions", "is", "None", ":", "extensions", "=", "nib", ".", "nifti1", ".", "Nifti1Extensions", "(", ")", "# Figure out what the data is", "if", "isinstance", "(", "obj", ",", "nib", ".", "analyze", ".", "SpatialImage", ")", ":", "obj", "=", "obj", ".", "dataobj", "elif", "not", "pimms", ".", "is_nparray", "(", "obj", ")", ":", "obj", "=", "np", ".", "asarray", "(", "obj", ")", "if", "len", "(", "obj", ".", "shape", ")", "<", "3", ":", "obj", "=", "np", ".", "asarray", "(", "[", "[", "obj", "]", "]", ")", "# Okay, make a new object now...", "if", "version", "==", "1", ":", "obj", "=", "nib", ".", "nifti1", ".", "Nifti1Image", "(", "obj", ",", "affine", ",", "header", ")", "elif", "version", "==", "2", ":", "obj", "=", "nib", ".", "nifti2", ".", "Nifti2Image", "(", "obj", ",", "affine", ",", "header", ")", "else", ":", "raise", "ValueError", "(", "'invalid version given (should be 1 or 2): %s'", "%", "version", ")", "# add the extensions if they're needed", "if", "extensions", "is", "not", "Ellipsis", "and", "(", "len", "(", "extensions", ")", ">", "0", "or", "len", "(", "obj", ".", "header", ".", 
"extensions", ")", ">", "0", ")", ":", "obj", ".", "header", ".", "extensions", "=", "extensions", "# Okay, that's it!", "return", "obj" ]
to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules are described below. The following options are accepted: * like (default: None) may be provided to give a guide for the various header- and meta-data that is included in the image. If this is a nifti image object, its meta-data are used; if this is a subject, then the meta-data are deduced from the subject's voxel and native orientation matrices. All other specific options below override anything deduced from the like argument. * header (default: None) may be a Nifti1 or Nifti2 image header to be used as the nifti header or to replace the header in a new image. * affine (default: None) may specify the affine transform to be given to the image object. * extensions (default: Ellipsis) may specify a nifti extensions object that should be included in the header. The default value, Ellipsis, indicates that the extensions should not be changed, and that None should be used if extensions are not implied in obj (if, for example, obj is a data array rather than an image object with a header already). * version (default: 1) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object, respectively.
[ "to_nifti", "(", "obj", ")", "yields", "a", "Nifti2Image", "object", "that", "is", "as", "equivalent", "as", "possible", "to", "the", "given", "object", "obj", ".", "If", "obj", "is", "a", "Nifti2Image", "already", "then", "it", "is", "returned", "unmolested", ";", "other", "deduction", "rules", "are", "described", "below", "." ]
python
train
51.540541
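A minimal usage sketch for the function above; the import path is an assumption (neuropythy has historically re-exported its I/O helpers at package level) and the array contents are arbitrary example data.

import numpy as np
import neuropythy as ny  # assumption: to_nifti is re-exported at package level

vol = np.zeros((4, 4, 4))            # a plain 3D array, no header or affine yet
img = ny.to_nifti(vol)               # Nifti1Image: fresh header, identity affine
img2 = ny.to_nifti(vol, version=2)   # Nifti2Image instead
print(img.affine)                    # 4x4 identity, per the default branch above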
softlayer/softlayer-python
SoftLayer/managers/hardware.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/hardware.py#L706-L733
def _get_bandwidth_price_id(items, hourly=True, no_public=False, location=None): """Choose a valid price id for bandwidth.""" # Prefer pay-for-use data transfer with hourly for item in items: capacity = float(item.get('capacity', 0)) # Hourly and private only do pay-as-you-go bandwidth if any([utils.lookup(item, 'itemCategory', 'categoryCode') != 'bandwidth', (hourly or no_public) and capacity != 0.0, not (hourly or no_public) and capacity == 0.0]): continue for price in item['prices']: if not _matches_billing(price, hourly): continue if not _matches_location(price, location): continue return price['id'] raise SoftLayer.SoftLayerError( "Could not find valid price for bandwidth option")
[ "def", "_get_bandwidth_price_id", "(", "items", ",", "hourly", "=", "True", ",", "no_public", "=", "False", ",", "location", "=", "None", ")", ":", "# Prefer pay-for-use data transfer with hourly", "for", "item", "in", "items", ":", "capacity", "=", "float", "(", "item", ".", "get", "(", "'capacity'", ",", "0", ")", ")", "# Hourly and private only do pay-as-you-go bandwidth", "if", "any", "(", "[", "utils", ".", "lookup", "(", "item", ",", "'itemCategory'", ",", "'categoryCode'", ")", "!=", "'bandwidth'", ",", "(", "hourly", "or", "no_public", ")", "and", "capacity", "!=", "0.0", ",", "not", "(", "hourly", "or", "no_public", ")", "and", "capacity", "==", "0.0", "]", ")", ":", "continue", "for", "price", "in", "item", "[", "'prices'", "]", ":", "if", "not", "_matches_billing", "(", "price", ",", "hourly", ")", ":", "continue", "if", "not", "_matches_location", "(", "price", ",", "location", ")", ":", "continue", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid price for bandwidth option\"", ")" ]
Choose a valid price id for bandwidth.
[ "Choose", "a", "valid", "price", "id", "for", "bandwidth", "." ]
python
train
35.071429
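The skip condition packed into any([...]) above is the subtle part: hourly (or private-only) orders must use pay-as-you-go bandwidth (capacity 0), while monthly public orders must not. A standalone re-statement with hypothetical item dicts (field names mimic the SoftLayer ordering-item shape; utils.lookup is replaced by plain dict access):

hourly, no_public = True, False
items = [
    {'capacity': '0',   'itemCategory': {'categoryCode': 'bandwidth'}},  # kept: pay-as-you-go
    {'capacity': '500', 'itemCategory': {'categoryCode': 'bandwidth'}},  # skipped: metered, but hourly
    {'capacity': '0',   'itemCategory': {'categoryCode': 'ram'}},        # skipped: wrong category
]
for item in items:
    capacity = float(item.get('capacity', 0))
    skip = any([item['itemCategory']['categoryCode'] != 'bandwidth',
                (hourly or no_public) and capacity != 0.0,
                not (hourly or no_public) and capacity == 0.0])
    print(item['itemCategory']['categoryCode'], capacity, 'skip' if skip else 'keep')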
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L147-L160
def to_gremlin(self): """Return a unicode object with the Gremlin representation of this block.""" self.validate() template = ( u'transform{{' u'it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([ {} ])' u'}}') field_representations = ( u'{name}: {expr}'.format(name=key, expr=self.fields[key].to_gremlin()) for key in sorted(self.fields.keys()) # Sort the keys for deterministic output order. ) return template.format(u', '.join(field_representations))
[ "def", "to_gremlin", "(", "self", ")", ":", "self", ".", "validate", "(", ")", "template", "=", "(", "u'transform{{'", "u'it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([ {} ])'", "u'}}'", ")", "field_representations", "=", "(", "u'{name}: {expr}'", ".", "format", "(", "name", "=", "key", ",", "expr", "=", "self", ".", "fields", "[", "key", "]", ".", "to_gremlin", "(", ")", ")", "for", "key", "in", "sorted", "(", "self", ".", "fields", ".", "keys", "(", ")", ")", "# Sort the keys for deterministic output order.", ")", "return", "template", ".", "format", "(", "u', '", ".", "join", "(", "field_representations", ")", ")" ]
Return a unicode object with the Gremlin representation of this block.
[ "Return", "a", "unicode", "object", "with", "the", "Gremlin", "representation", "of", "this", "block", "." ]
python
train
40.714286
andychase/reparse
reparse/expression.py
https://github.com/andychase/reparse/blob/5f46cdd0fc4e239c0ddeca4b542e48a5ae95c508/reparse/expression.py#L47-L55
def findall(self, string): """ Parse string, returning all outputs as parsed by functions """ output = [] for match in self.pattern.findall(string): if hasattr(match, 'strip'): match = [match] self._list_add(output, self.run(match)) return output
[ "def", "findall", "(", "self", ",", "string", ")", ":", "output", "=", "[", "]", "for", "match", "in", "self", ".", "pattern", ".", "findall", "(", "string", ")", ":", "if", "hasattr", "(", "match", ",", "'strip'", ")", ":", "match", "=", "[", "match", "]", "self", ".", "_list_add", "(", "output", ",", "self", ".", "run", "(", "match", ")", ")", "return", "output" ]
Parse string, returning all outputs as parsed by functions
[ "Parse", "string", "returning", "all", "outputs", "as", "parsed", "by", "functions" ]
python
train
35.333333
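The hasattr(match, 'strip') check above compensates for re.findall returning plain strings when the pattern has at most one group but tuples when it has several; wrapping the string in a list gives run() a uniform sequence either way. A quick standalone illustration of that stdlib behaviour:

import re

print(re.findall(r'\d+', 'a1 b22'))        # ['1', '22']               -> strings
print(re.findall(r'(\d)(\d)', 'a12 b34'))  # [('1', '2'), ('3', '4')]  -> tuples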
xolox/python-coloredlogs
coloredlogs/syslog.py
https://github.com/xolox/python-coloredlogs/blob/1cbf0c6bbee400c6ddbc43008143809934ec3e79/coloredlogs/syslog.py#L155-L204
def connect_to_syslog(address=None, facility=None, level=None): """ Create a :class:`~logging.handlers.SysLogHandler`. :param address: The device file or network address of the system logging daemon (a string or tuple, defaults to the result of :func:`find_syslog_address()`). :param facility: Refer to :class:`~logging.handlers.SysLogHandler`. Defaults to ``LOG_USER``. :param level: The logging level for the :class:`~logging.handlers.SysLogHandler` (defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced using :func:`~coloredlogs.level_to_number()`. :returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the system logging daemon is unavailable). The process of connecting to the system logging daemon goes as follows: - If :class:`~logging.handlers.SysLogHandler` supports the `socktype` option (it does since Python 2.7) the following two socket types are tried (in decreasing preference): 1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may not be supported. 2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the default (which is UDP). - If socket types are not supported Python's (2.6) defaults are used to connect to the selected `address`. """ if not address: address = find_syslog_address() if facility is None: facility = logging.handlers.SysLogHandler.LOG_USER if level is None: level = DEFAULT_LOG_LEVEL for socktype in socket.SOCK_RAW, socket.SOCK_STREAM, None: kw = dict(facility=facility, address=address) if socktype is not None: kw['socktype'] = socktype try: handler = logging.handlers.SysLogHandler(**kw) except (IOError, TypeError): # The socktype argument was added in Python 2.7 and its use will raise a # TypeError exception on Python 2.6. IOError is a superclass of socket.error # (since Python 2.6) which can be raised if the system logging daemon is # unavailable. pass else: handler.setLevel(level_to_number(level)) return handler
[ "def", "connect_to_syslog", "(", "address", "=", "None", ",", "facility", "=", "None", ",", "level", "=", "None", ")", ":", "if", "not", "address", ":", "address", "=", "find_syslog_address", "(", ")", "if", "facility", "is", "None", ":", "facility", "=", "logging", ".", "handlers", ".", "SysLogHandler", ".", "LOG_USER", "if", "level", "is", "None", ":", "level", "=", "DEFAULT_LOG_LEVEL", "for", "socktype", "in", "socket", ".", "SOCK_RAW", ",", "socket", ".", "SOCK_STREAM", ",", "None", ":", "kw", "=", "dict", "(", "facility", "=", "facility", ",", "address", "=", "address", ")", "if", "socktype", "is", "not", "None", ":", "kw", "[", "'socktype'", "]", "=", "socktype", "try", ":", "handler", "=", "logging", ".", "handlers", ".", "SysLogHandler", "(", "*", "*", "kw", ")", "except", "(", "IOError", ",", "TypeError", ")", ":", "# The socktype argument was added in Python 2.7 and its use will raise a", "# TypeError exception on Python 2.6. IOError is a superclass of socket.error", "# (since Python 2.6) which can be raised if the system logging daemon is", "# unavailable.", "pass", "else", ":", "handler", ".", "setLevel", "(", "level_to_number", "(", "level", ")", ")", "return", "handler" ]
Create a :class:`~logging.handlers.SysLogHandler`. :param address: The device file or network address of the system logging daemon (a string or tuple, defaults to the result of :func:`find_syslog_address()`). :param facility: Refer to :class:`~logging.handlers.SysLogHandler`. Defaults to ``LOG_USER``. :param level: The logging level for the :class:`~logging.handlers.SysLogHandler` (defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced using :func:`~coloredlogs.level_to_number()`. :returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the system logging daemon is unavailable). The process of connecting to the system logging daemon goes as follows: - If :class:`~logging.handlers.SysLogHandler` supports the `socktype` option (it does since Python 2.7) the following two socket types are tried (in decreasing preference): 1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may not be supported. 2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the default (which is UDP). - If socket types are not supported Python's (2.6) defaults are used to connect to the selected `address`.
[ "Create", "a", ":", "class", ":", "~logging", ".", "handlers", ".", "SysLogHandler", "." ]
python
train
45.52
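A minimal sketch of wiring the returned handler into the root logger; everything here is ordinary stdlib logging except the import of connect_to_syslog itself.

import logging
import logging.handlers
from coloredlogs.syslog import connect_to_syslog

handler = connect_to_syslog(facility=logging.handlers.SysLogHandler.LOG_DAEMON)
if handler is not None:  # None means the system logging daemon was unreachable
    logging.getLogger().addHandler(handler)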
frostming/marko
marko/parser.py
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/parser.py#L105-L112
def _build_block_element_list(self): """Return a list of block elements, ordered from highest priority to lowest. """ return sorted( [e for e in self.block_elements.values() if not e.virtual], key=lambda e: e.priority, reverse=True )
[ "def", "_build_block_element_list", "(", "self", ")", ":", "return", "sorted", "(", "[", "e", "for", "e", "in", "self", ".", "block_elements", ".", "values", "(", ")", "if", "not", "e", ".", "virtual", "]", ",", "key", "=", "lambda", "e", ":", "e", ".", "priority", ",", "reverse", "=", "True", ")" ]
Return a list of block elements, ordered from highest priority to lowest.
[ "Return", "a", "list", "of", "block", "elements", "ordered", "from", "highest", "priority", "to", "lowest", "." ]
python
train
36.75
hangyan/shaw
shaw/decorator/retries.py
https://github.com/hangyan/shaw/blob/63d01d35e225ba4edb9c61edaf351e1bc0e8fd15/shaw/decorator/retries.py#L28-L35
def example_exc_handler(tries_remaining, exception, delay): """Example exception handler; prints a warning to stderr. tries_remaining: The number of tries remaining. exception: The exception instance which was raised. """ print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % ( exception, tries_remaining, delay)
[ "def", "example_exc_handler", "(", "tries_remaining", ",", "exception", ",", "delay", ")", ":", "print", ">>", "sys", ".", "stderr", ",", "\"Caught '%s', %d tries remaining, sleeping for %s seconds\"", "%", "(", "exception", ",", "tries_remaining", ",", "delay", ")" ]
Example exception handler; prints a warning to stderr.

tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
delay: The number of seconds to sleep before the next attempt.
[ "Example", "exception", "handler", ";", "prints", "a", "warning", "to", "stderr", "." ]
python
train
45.25
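The print >> sys.stderr statement above is Python 2 syntax and a SyntaxError on Python 3; a sketch of the same handler rendered with the print() function:

import sys

def example_exc_handler_py3(tries_remaining, exception, delay):
    """Python 3 rendering of the handler above; behaviour is unchanged."""
    print("Caught '%s', %d tries remaining, sleeping for %s seconds"
          % (exception, tries_remaining, delay), file=sys.stderr)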
pr-omethe-us/PyKED
pyked/validation.py
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/validation.py#L289-L315
def _validate_isvalid_uncertainty(self, isvalid_uncertainty, field, value): """Checks for valid given value and appropriate units with uncertainty. Args: isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked field (`str`): property associated with the quantity in question. value (`list`): list with the string of the value of the quantity and a dictionary of the uncertainty The rule's arguments are validated against this schema: {'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'}, 'value': {'type': 'list'}} """ self._validate_isvalid_quantity(True, field, value) # This len check is necessary for reasons that aren't quite clear to me # Cerberus calls this validation method even when lists have only one element # and should therefore be validated only by isvalid_quantity if len(value) > 1 and value[1]['uncertainty-type'] != 'relative': if value[1].get('uncertainty') is not None: self._validate_isvalid_quantity(True, field, [value[1]['uncertainty']]) if value[1].get('upper-uncertainty') is not None: self._validate_isvalid_quantity(True, field, [value[1]['upper-uncertainty']]) if value[1].get('lower-uncertainty') is not None: self._validate_isvalid_quantity(True, field, [value[1]['lower-uncertainty']])
[ "def", "_validate_isvalid_uncertainty", "(", "self", ",", "isvalid_uncertainty", ",", "field", ",", "value", ")", ":", "self", ".", "_validate_isvalid_quantity", "(", "True", ",", "field", ",", "value", ")", "# This len check is necessary for reasons that aren't quite clear to me", "# Cerberus calls this validation method even when lists have only one element", "# and should therefore be validated only by isvalid_quantity", "if", "len", "(", "value", ")", ">", "1", "and", "value", "[", "1", "]", "[", "'uncertainty-type'", "]", "!=", "'relative'", ":", "if", "value", "[", "1", "]", ".", "get", "(", "'uncertainty'", ")", "is", "not", "None", ":", "self", ".", "_validate_isvalid_quantity", "(", "True", ",", "field", ",", "[", "value", "[", "1", "]", "[", "'uncertainty'", "]", "]", ")", "if", "value", "[", "1", "]", ".", "get", "(", "'upper-uncertainty'", ")", "is", "not", "None", ":", "self", ".", "_validate_isvalid_quantity", "(", "True", ",", "field", ",", "[", "value", "[", "1", "]", "[", "'upper-uncertainty'", "]", "]", ")", "if", "value", "[", "1", "]", ".", "get", "(", "'lower-uncertainty'", ")", "is", "not", "None", ":", "self", ".", "_validate_isvalid_quantity", "(", "True", ",", "field", ",", "[", "value", "[", "1", "]", "[", "'lower-uncertainty'", "]", "]", ")" ]
Checks for valid given value and appropriate units with uncertainty. Args: isvalid_uncertainty (`bool`): flag from schema indicating uncertainty to be checked field (`str`): property associated with the quantity in question. value (`list`): list with the string of the value of the quantity and a dictionary of the uncertainty The rule's arguments are validated against this schema: {'isvalid_uncertainty': {'type': 'bool'}, 'field': {'type': 'str'}, 'value': {'type': 'list'}}
[ "Checks", "for", "valid", "given", "value", "and", "appropriate", "units", "with", "uncertainty", "." ]
python
train
54.555556
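For orientation, the value argument the validator walks is a two-element list: the quantity string and an uncertainty mapping. Hypothetical examples of the branches above (the key names follow those read in the code; the numbers are made up):

value_absolute = ['1500 K', {'uncertainty-type': 'absolute',
                             'uncertainty': '10 K'}]          # re-validated as a quantity
value_asymmetric = ['1500 K', {'uncertainty-type': 'absolute',
                               'upper-uncertainty': '15 K',
                               'lower-uncertainty': '5 K'}]   # each bound re-validated
value_relative = ['1500 K', {'uncertainty-type': 'relative',
                             'uncertainty': 0.01}]            # skips the extra quantity checks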
PyCQA/pylint
pylint/checkers/variables.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/variables.py#L1672-L1704
def _check_self_cls_assign(self, node): """Check that self/cls don't get assigned""" assign_names = { target.name for target in node.targets if isinstance(target, astroid.AssignName) } scope = node.scope() nonlocals_with_same_name = any( child for child in scope.body if isinstance(child, astroid.Nonlocal) and assign_names & set(child.names) ) if nonlocals_with_same_name: scope = node.scope().parent.scope() if not ( isinstance(scope, astroid.scoped_nodes.FunctionDef) and scope.is_method() and "builtins.staticmethod" not in scope.decoratornames() ): return argument_names = scope.argnames() if not argument_names: return self_cls_name = argument_names[0] target_assign_names = ( target.name for target in node.targets if isinstance(target, astroid.node_classes.AssignName) ) if self_cls_name in target_assign_names: self.add_message("self-cls-assignment", node=node, args=(self_cls_name))
[ "def", "_check_self_cls_assign", "(", "self", ",", "node", ")", ":", "assign_names", "=", "{", "target", ".", "name", "for", "target", "in", "node", ".", "targets", "if", "isinstance", "(", "target", ",", "astroid", ".", "AssignName", ")", "}", "scope", "=", "node", ".", "scope", "(", ")", "nonlocals_with_same_name", "=", "any", "(", "child", "for", "child", "in", "scope", ".", "body", "if", "isinstance", "(", "child", ",", "astroid", ".", "Nonlocal", ")", "and", "assign_names", "&", "set", "(", "child", ".", "names", ")", ")", "if", "nonlocals_with_same_name", ":", "scope", "=", "node", ".", "scope", "(", ")", ".", "parent", ".", "scope", "(", ")", "if", "not", "(", "isinstance", "(", "scope", ",", "astroid", ".", "scoped_nodes", ".", "FunctionDef", ")", "and", "scope", ".", "is_method", "(", ")", "and", "\"builtins.staticmethod\"", "not", "in", "scope", ".", "decoratornames", "(", ")", ")", ":", "return", "argument_names", "=", "scope", ".", "argnames", "(", ")", "if", "not", "argument_names", ":", "return", "self_cls_name", "=", "argument_names", "[", "0", "]", "target_assign_names", "=", "(", "target", ".", "name", "for", "target", "in", "node", ".", "targets", "if", "isinstance", "(", "target", ",", "astroid", ".", "node_classes", ".", "AssignName", ")", ")", "if", "self_cls_name", "in", "target_assign_names", ":", "self", ".", "add_message", "(", "\"self-cls-assignment\"", ",", "node", "=", "node", ",", "args", "=", "(", "self_cls_name", ")", ")" ]
Check that self/cls don't get assigned
[ "Check", "that", "self", "/", "cls", "don", "t", "get", "assigned" ]
python
test
35.545455
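Examples of code this checker flags; each reassignment below would emit self-cls-assignment, while the staticmethod is excluded by the decorator check above:

class Example:
    def method(self):
        self = object()        # flagged: `self` reassigned

    @classmethod
    def build(cls):
        cls = dict             # flagged: `cls` reassigned

    @staticmethod
    def helper(arg):
        arg = None             # not flagged: staticmethods are skipped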
confirm/ansibleci
ansibleci/runner.py
https://github.com/confirm/ansibleci/blob/6a53ae8c4a4653624977e146092422857f661b8f/ansibleci/runner.py#L71-L97
def run(self): ''' Runs all enabled tests. ''' # Run all tests. for cls in self.get_test_classes(): # Print informational message. self.logger.info('Running {cls.__name__} test...'.format(cls=cls)) # Create new test instance. test = cls(runner=self) # Run test and evaluate result. if test._run(): self.logger.passed('Test {cls.__name__} succeeded!'.format(cls=cls)) else: self.logger.failed('Test {cls.__name__} failed!'.format(cls=cls)) self.has_passed = False # Print summary. if self.has_passed: self.logger.passed('Summary: All tests passed!') else: self.logger.failed('Summary: One or more tests failed!') return self.has_passed
[ "def", "run", "(", "self", ")", ":", "# Run all tests.", "for", "cls", "in", "self", ".", "get_test_classes", "(", ")", ":", "# Print informational message.", "self", ".", "logger", ".", "info", "(", "'Running {cls.__name__} test...'", ".", "format", "(", "cls", "=", "cls", ")", ")", "# Create new test instance.", "test", "=", "cls", "(", "runner", "=", "self", ")", "# Run test and evaluate result.", "if", "test", ".", "_run", "(", ")", ":", "self", ".", "logger", ".", "passed", "(", "'Test {cls.__name__} succeeded!'", ".", "format", "(", "cls", "=", "cls", ")", ")", "else", ":", "self", ".", "logger", ".", "failed", "(", "'Test {cls.__name__} failed!'", ".", "format", "(", "cls", "=", "cls", ")", ")", "self", ".", "has_passed", "=", "False", "# Print summary.", "if", "self", ".", "has_passed", ":", "self", ".", "logger", ".", "passed", "(", "'Summary: All tests passed!'", ")", "else", ":", "self", ".", "logger", ".", "failed", "(", "'Summary: One or more tests failed!'", ")", "return", "self", ".", "has_passed" ]
Runs all enabled tests.
[ "Runs", "all", "enabled", "tests", "." ]
python
train
31.148148
IdentityPython/fedoidcmsg
src/fedoidcmsg/file_system.py
https://github.com/IdentityPython/fedoidcmsg/blob/d30107be02521fa6cdfe285da3b6b0cdd153c8cc/src/fedoidcmsg/file_system.py#L213-L218
def update(self, ava): """ Implements the dict.update() method """ for key, val in ava.items(): self[key] = val
[ "def", "update", "(", "self", ",", "ava", ")", ":", "for", "key", ",", "val", "in", "ava", ".", "items", "(", ")", ":", "self", "[", "key", "]", "=", "val" ]
Implements the dict.update() method
[ "Implements", "the", "dict", ".", "update", "()", "method" ]
python
test
25
andreikop/qutepart
qutepart/indenter/cstyle.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/indenter/cstyle.py#L47-L64
def findTextBackward(self, block, column, needle): """Search for a needle and return (block, column) Raise ValueError, if not found """ if column is not None: index = block.text()[:column].rfind(needle) else: index = block.text().rfind(needle) if index != -1: return block, index for block in self.iterateBlocksBackFrom(block.previous()): column = block.text().rfind(needle) if column != -1: return block, column raise ValueError('Not found')
[ "def", "findTextBackward", "(", "self", ",", "block", ",", "column", ",", "needle", ")", ":", "if", "column", "is", "not", "None", ":", "index", "=", "block", ".", "text", "(", ")", "[", ":", "column", "]", ".", "rfind", "(", "needle", ")", "else", ":", "index", "=", "block", ".", "text", "(", ")", ".", "rfind", "(", "needle", ")", "if", "index", "!=", "-", "1", ":", "return", "block", ",", "index", "for", "block", "in", "self", ".", "iterateBlocksBackFrom", "(", "block", ".", "previous", "(", ")", ")", ":", "column", "=", "block", ".", "text", "(", ")", ".", "rfind", "(", "needle", ")", "if", "column", "!=", "-", "1", ":", "return", "block", ",", "column", "raise", "ValueError", "(", "'Not found'", ")" ]
Search for a needle and return (block, column)
Raise ValueError if not found
[ "Search", "for", "a", "needle", "and", "return", "(", "block", "column", ")", "Raise", "ValueError", "if", "not", "found" ]
python
train
31.555556
spyder-ide/spyder
spyder/widgets/status.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/status.py#L102-L109
def setVisible(self, value): """Override Qt method to stop timers if widget is not visible.""" if self.timer is not None: if value: self.timer.start(self._interval) else: self.timer.stop() super(BaseTimerStatus, self).setVisible(value)
[ "def", "setVisible", "(", "self", ",", "value", ")", ":", "if", "self", ".", "timer", "is", "not", "None", ":", "if", "value", ":", "self", ".", "timer", ".", "start", "(", "self", ".", "_interval", ")", "else", ":", "self", ".", "timer", ".", "stop", "(", ")", "super", "(", "BaseTimerStatus", ",", "self", ")", ".", "setVisible", "(", "value", ")" ]
Override Qt method to stop timers if widget is not visible.
[ "Override", "Qt", "method", "to", "stops", "timers", "if", "widget", "is", "not", "visible", "." ]
python
train
38.625
QuantEcon/QuantEcon.py
quantecon/inequality.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/inequality.py#L83-L119
def shorrocks_index(A): r""" Implements Shorrocks mobility index Parameters ----------- A : array_like(float) Square matrix with transition probabilities (mobility matrix) of dimension m Returns -------- Shorrocks index: float The Shorrocks mobility index calculated as .. math:: s(A) = \frac{m - \sum_j a_{jj} }{m - 1} \in (0, 1) An index equal to 0 indicates complete immobility. References ----------- .. [1] Wealth distribution and social mobility in the US: A quantitative approach (Benhabib, Bisin, Luo, 2017). https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf """ A = np.asarray(A) # Convert to array if not already m, n = A.shape if m != n: raise ValueError('A must be a square matrix') diag_sum = np.diag(A).sum() return (m - diag_sum) / (m - 1)
[ "def", "shorrocks_index", "(", "A", ")", ":", "A", "=", "np", ".", "asarray", "(", "A", ")", "# Convert to array if not already", "m", ",", "n", "=", "A", ".", "shape", "if", "m", "!=", "n", ":", "raise", "ValueError", "(", "'A must be a square matrix'", ")", "diag_sum", "=", "np", ".", "diag", "(", "A", ")", ".", "sum", "(", ")", "return", "(", "m", "-", "diag_sum", ")", "/", "(", "m", "-", "1", ")" ]
r""" Implements Shorrocks mobility index Parameters ----------- A : array_like(float) Square matrix with transition probabilities (mobility matrix) of dimension m Returns -------- Shorrocks index: float The Shorrocks mobility index calculated as .. math:: s(A) = \frac{m - \sum_j a_{jj} }{m - 1} \in (0, 1) An index equal to 0 indicates complete immobility. References ----------- .. [1] Wealth distribution and social mobility in the US: A quantitative approach (Benhabib, Bisin, Luo, 2017). https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf
[ "r", "Implements", "Shorrocks", "mobility", "index" ]
python
train
24.027027
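A worked example; the import path mirrors the module path above (quantecon/inequality.py).

import numpy as np
from quantecon.inequality import shorrocks_index

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
# m = 2, diagonal sum = 1.7  ->  s = (2 - 1.7) / (2 - 1) = 0.3
print(shorrocks_index(P))          # 0.3, low mobility
print(shorrocks_index(np.eye(2)))  # 0.0, complete immobility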
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_mac_address_table.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_mac_address_table.py#L390-L403
def get_mac_address_table_input_request_type_get_next_request_mac_address_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_mac_address_table = ET.Element("get_mac_address_table") config = get_mac_address_table input = ET.SubElement(get_mac_address_table, "input") request_type = ET.SubElement(input, "request-type") get_next_request = ET.SubElement(request_type, "get-next-request") mac_address_type = ET.SubElement(get_next_request, "mac-address-type") mac_address_type.text = kwargs.pop('mac_address_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_mac_address_table_input_request_type_get_next_request_mac_address_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_mac_address_table", "=", "ET", ".", "Element", "(", "\"get_mac_address_table\"", ")", "config", "=", "get_mac_address_table", "input", "=", "ET", ".", "SubElement", "(", "get_mac_address_table", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_next_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-next-request\"", ")", "mac_address_type", "=", "ET", ".", "SubElement", "(", "get_next_request", ",", "\"mac-address-type\"", ")", "mac_address_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac_address_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
50
ryanjdillon/pylleo
pylleo/calapp/main.py
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L82-L109
def callback_parent(attr, old, new): '''Update data directories drop down with new parent directory''' import os # Remove accidental white space if copy/pasted new = new.strip() parent_input.value = new # Verify new parent path exists and update `datadirs_select` widget if os.path.exists(new): # Create sorted list of data directories, ignore files joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d)) options = sorted([d for d in os.listdir(new) if joinisdir(new, d)]) # Update dropdown list of available data directories and select first datadirs_select.options = options datadirs_select.value = options[0] callback_datadirs('value', options[0], options[0]) else: msg = ''' The parent path `{}` does not exist. Check that you have entered the absolute path. '''.format(new) output_window.text = output_template.format(msg) return None
[ "def", "callback_parent", "(", "attr", ",", "old", ",", "new", ")", ":", "import", "os", "# Remove accidental white space if copy/pasted", "new", "=", "new", ".", "strip", "(", ")", "parent_input", ".", "value", "=", "new", "# Verify new parent path exists and update `datadirs_select` widget", "if", "os", ".", "path", ".", "exists", "(", "new", ")", ":", "# Create sorted list of data directories, ignore files", "joinisdir", "=", "lambda", "parent", ",", "d", ":", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "parent", ",", "d", ")", ")", "options", "=", "sorted", "(", "[", "d", "for", "d", "in", "os", ".", "listdir", "(", "new", ")", "if", "joinisdir", "(", "new", ",", "d", ")", "]", ")", "# Update dropdown list of available data directories and select first", "datadirs_select", ".", "options", "=", "options", "datadirs_select", ".", "value", "=", "options", "[", "0", "]", "callback_datadirs", "(", "'value'", ",", "options", "[", "0", "]", ",", "options", "[", "0", "]", ")", "else", ":", "msg", "=", "'''\n The parent path `{}` does not exist.\n\n Check that you have entered the absolute path.\n '''", ".", "format", "(", "new", ")", "output_window", ".", "text", "=", "output_template", ".", "format", "(", "msg", ")", "return", "None" ]
Update data directories drop down with new parent directory
[ "Update", "data", "directories", "drop", "down", "with", "new", "parent", "directory" ]
python
train
35.071429
Clinical-Genomics/scout
scout/server/blueprints/variants/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/variants/controllers.py#L123-L202
def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True, get_overlapping=True): """Pre-process an SV variant entry for detail page. Adds information to display variant Args: store(scout.adapter.MongoAdapter) institute_id(str) case_name(str) variant_id(str) variant_obj(dcit) add_case(bool): If information about case files should be added Returns: detailed_information(dict): { 'institute': <institute_obj>, 'case': <case_obj>, 'variant': <variant_obj>, 'overlapping_snvs': <overlapping_snvs>, 'manual_rank_options': MANUAL_RANK_OPTIONS, 'dismiss_variant_options': DISMISS_VARIANT_OPTIONS } """ institute_obj, case_obj = institute_and_case(store, institute_id, case_name) if not variant_obj: variant_obj = store.variant(variant_id) if add_case: # fill in information for pilup view variant_case(store, case_obj, variant_obj) # frequencies variant_obj['frequencies'] = [ ('1000G', variant_obj.get('thousand_genomes_frequency')), ('1000G (left)', variant_obj.get('thousand_genomes_frequency_left')), ('1000G (right)', variant_obj.get('thousand_genomes_frequency_right')), ('ClinGen CGH (benign)', variant_obj.get('clingen_cgh_benign')), ('ClinGen CGH (pathogenic)', variant_obj.get('clingen_cgh_pathogenic')), ('ClinGen NGI', variant_obj.get('clingen_ngi')), ('SweGen', variant_obj.get('swegen')), ('Decipher', variant_obj.get('decipher')), ] variant_obj['callers'] = callers(variant_obj, category='sv') overlapping_snvs = [] if get_overlapping: overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant) for variant in store.overlapping(variant_obj)) # parse_gene function is not called for SVs, but a link to ensembl gene is required for gene_obj in variant_obj['genes']: if gene_obj.get('common'): ensembl_id = gene_obj['common']['ensembl_id'] try: build = int(gene_obj['common'].get('build','37')) except Exception: build = 37 gene_obj['ensembl_link'] = ensembl(ensembl_id, build=build) variant_obj['comments'] = store.events(institute_obj, case=case_obj, variant_id=variant_obj['variant_id'], comments=True) case_clinvars = store.case_to_clinVars(case_obj.get('display_name')) if variant_id in case_clinvars: variant_obj['clinvar_clinsig'] = case_clinvars.get(variant_id)['clinsig'] if not 'end_chrom' in variant_obj: variant_obj['end_chrom'] = variant_obj['chromosome'] return { 'institute': institute_obj, 'case': case_obj, 'variant': variant_obj, 'overlapping_snvs': overlapping_snvs, 'manual_rank_options': MANUAL_RANK_OPTIONS, 'dismiss_variant_options': DISMISS_VARIANT_OPTIONS }
[ "def", "sv_variant", "(", "store", ",", "institute_id", ",", "case_name", ",", "variant_id", "=", "None", ",", "variant_obj", "=", "None", ",", "add_case", "=", "True", ",", "get_overlapping", "=", "True", ")", ":", "institute_obj", ",", "case_obj", "=", "institute_and_case", "(", "store", ",", "institute_id", ",", "case_name", ")", "if", "not", "variant_obj", ":", "variant_obj", "=", "store", ".", "variant", "(", "variant_id", ")", "if", "add_case", ":", "# fill in information for pilup view", "variant_case", "(", "store", ",", "case_obj", ",", "variant_obj", ")", "# frequencies", "variant_obj", "[", "'frequencies'", "]", "=", "[", "(", "'1000G'", ",", "variant_obj", ".", "get", "(", "'thousand_genomes_frequency'", ")", ")", ",", "(", "'1000G (left)'", ",", "variant_obj", ".", "get", "(", "'thousand_genomes_frequency_left'", ")", ")", ",", "(", "'1000G (right)'", ",", "variant_obj", ".", "get", "(", "'thousand_genomes_frequency_right'", ")", ")", ",", "(", "'ClinGen CGH (benign)'", ",", "variant_obj", ".", "get", "(", "'clingen_cgh_benign'", ")", ")", ",", "(", "'ClinGen CGH (pathogenic)'", ",", "variant_obj", ".", "get", "(", "'clingen_cgh_pathogenic'", ")", ")", ",", "(", "'ClinGen NGI'", ",", "variant_obj", ".", "get", "(", "'clingen_ngi'", ")", ")", ",", "(", "'SweGen'", ",", "variant_obj", ".", "get", "(", "'swegen'", ")", ")", ",", "(", "'Decipher'", ",", "variant_obj", ".", "get", "(", "'decipher'", ")", ")", ",", "]", "variant_obj", "[", "'callers'", "]", "=", "callers", "(", "variant_obj", ",", "category", "=", "'sv'", ")", "overlapping_snvs", "=", "[", "]", "if", "get_overlapping", ":", "overlapping_snvs", "=", "(", "parse_variant", "(", "store", ",", "institute_obj", ",", "case_obj", ",", "variant", ")", "for", "variant", "in", "store", ".", "overlapping", "(", "variant_obj", ")", ")", "# parse_gene function is not called for SVs, but a link to ensembl gene is required", "for", "gene_obj", "in", "variant_obj", "[", "'genes'", "]", ":", "if", "gene_obj", ".", "get", "(", "'common'", ")", ":", "ensembl_id", "=", "gene_obj", "[", "'common'", "]", "[", "'ensembl_id'", "]", "try", ":", "build", "=", "int", "(", "gene_obj", "[", "'common'", "]", ".", "get", "(", "'build'", ",", "'37'", ")", ")", "except", "Exception", ":", "build", "=", "37", "gene_obj", "[", "'ensembl_link'", "]", "=", "ensembl", "(", "ensembl_id", ",", "build", "=", "build", ")", "variant_obj", "[", "'comments'", "]", "=", "store", ".", "events", "(", "institute_obj", ",", "case", "=", "case_obj", ",", "variant_id", "=", "variant_obj", "[", "'variant_id'", "]", ",", "comments", "=", "True", ")", "case_clinvars", "=", "store", ".", "case_to_clinVars", "(", "case_obj", ".", "get", "(", "'display_name'", ")", ")", "if", "variant_id", "in", "case_clinvars", ":", "variant_obj", "[", "'clinvar_clinsig'", "]", "=", "case_clinvars", ".", "get", "(", "variant_id", ")", "[", "'clinsig'", "]", "if", "not", "'end_chrom'", "in", "variant_obj", ":", "variant_obj", "[", "'end_chrom'", "]", "=", "variant_obj", "[", "'chromosome'", "]", "return", "{", "'institute'", ":", "institute_obj", ",", "'case'", ":", "case_obj", ",", "'variant'", ":", "variant_obj", ",", "'overlapping_snvs'", ":", "overlapping_snvs", ",", "'manual_rank_options'", ":", "MANUAL_RANK_OPTIONS", ",", "'dismiss_variant_options'", ":", "DISMISS_VARIANT_OPTIONS", "}" ]
Pre-process an SV variant entry for detail page. Adds information to display variant Args: store(scout.adapter.MongoAdapter) institute_id(str) case_name(str) variant_id(str) variant_obj(dict) add_case(bool): If information about case files should be added Returns: detailed_information(dict): { 'institute': <institute_obj>, 'case': <case_obj>, 'variant': <variant_obj>, 'overlapping_snvs': <overlapping_snvs>, 'manual_rank_options': MANUAL_RANK_OPTIONS, 'dismiss_variant_options': DISMISS_VARIANT_OPTIONS }
[ "Pre", "-", "process", "an", "SV", "variant", "entry", "for", "detail", "page", "." ]
python
test
37.9375
awesto/djangoshop-stripe
shop_stripe/payment.py
https://github.com/awesto/djangoshop-stripe/blob/010d4642f971961cfeb415520ad819b3751281cb/shop_stripe/payment.py#L40-L74
def charge(self, cart, request): """ Use the Stripe token from the request and charge immediately. This view is invoked by the Javascript function `scope.charge()` delivered by `get_payment_request`. """ token_id = cart.extra['payment_extra_data']['token_id'] if LooseVersion(SHOP_VERSION) < LooseVersion('0.11'): charge = stripe.Charge.create( amount=cart.total.as_integer(), currency=cart.total.currency, source=token_id, description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION'] ) if charge['status'] == 'succeeded': order = OrderModel.objects.create_from_cart(cart, request) order.add_stripe_payment(charge) order.save() else: order = OrderModel.objects.create_from_cart(cart, request) charge = stripe.Charge.create( amount=cart.total.as_integer(), currency=cart.total.currency, source=token_id, transfer_group=order.get_number(), description=settings.SHOP_STRIPE['PURCHASE_DESCRIPTION'], ) if charge['status'] == 'succeeded': order.populate_from_cart(cart, request) order.add_stripe_payment(charge) order.save() if charge['status'] != 'succeeded': msg = "Stripe returned status '{status}' for id: {id}" raise stripe.error.InvalidRequestError(msg.format(**charge))
[ "def", "charge", "(", "self", ",", "cart", ",", "request", ")", ":", "token_id", "=", "cart", ".", "extra", "[", "'payment_extra_data'", "]", "[", "'token_id'", "]", "if", "LooseVersion", "(", "SHOP_VERSION", ")", "<", "LooseVersion", "(", "'0.11'", ")", ":", "charge", "=", "stripe", ".", "Charge", ".", "create", "(", "amount", "=", "cart", ".", "total", ".", "as_integer", "(", ")", ",", "currency", "=", "cart", ".", "total", ".", "currency", ",", "source", "=", "token_id", ",", "description", "=", "settings", ".", "SHOP_STRIPE", "[", "'PURCHASE_DESCRIPTION'", "]", ")", "if", "charge", "[", "'status'", "]", "==", "'succeeded'", ":", "order", "=", "OrderModel", ".", "objects", ".", "create_from_cart", "(", "cart", ",", "request", ")", "order", ".", "add_stripe_payment", "(", "charge", ")", "order", ".", "save", "(", ")", "else", ":", "order", "=", "OrderModel", ".", "objects", ".", "create_from_cart", "(", "cart", ",", "request", ")", "charge", "=", "stripe", ".", "Charge", ".", "create", "(", "amount", "=", "cart", ".", "total", ".", "as_integer", "(", ")", ",", "currency", "=", "cart", ".", "total", ".", "currency", ",", "source", "=", "token_id", ",", "transfer_group", "=", "order", ".", "get_number", "(", ")", ",", "description", "=", "settings", ".", "SHOP_STRIPE", "[", "'PURCHASE_DESCRIPTION'", "]", ",", ")", "if", "charge", "[", "'status'", "]", "==", "'succeeded'", ":", "order", ".", "populate_from_cart", "(", "cart", ",", "request", ")", "order", ".", "add_stripe_payment", "(", "charge", ")", "order", ".", "save", "(", ")", "if", "charge", "[", "'status'", "]", "!=", "'succeeded'", ":", "msg", "=", "\"Stripe returned status '{status}' for id: {id}\"", "raise", "stripe", ".", "error", ".", "InvalidRequestError", "(", "msg", ".", "format", "(", "*", "*", "charge", ")", ")" ]
Use the Stripe token from the request and charge immediately. This view is invoked by the Javascript function `scope.charge()` delivered by `get_payment_request`.
[ "Use", "the", "Stripe", "token", "from", "the", "request", "and", "charge", "immediately", ".", "This", "view", "is", "invoked", "by", "the", "Javascript", "function", "scope", ".", "charge", "()", "delivered", "by", "get_payment_request", "." ]
python
train
44.4
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1355-L1380
def _ShouldPrintError(category, confidence, linenum): """If confidence >= verbose, category passes filter and is not suppressed.""" # There are three ways we might decide not to print an error message: # a "NOLINT(category)" comment appears in the source, # the verbosity level isn't high enough, or the filters filter it out. if IsErrorSuppressedByNolint(category, linenum): return False if confidence < _cpplint_state.verbose_level: return False is_filtered = False for one_filter in _Filters(): if one_filter.startswith('-'): if category.startswith(one_filter[1:]): is_filtered = True elif one_filter.startswith('+'): if category.startswith(one_filter[1:]): is_filtered = False else: assert False # should have been checked for in SetFilter. if is_filtered: return False return True
[ "def", "_ShouldPrintError", "(", "category", ",", "confidence", ",", "linenum", ")", ":", "# There are three ways we might decide not to print an error message:", "# a \"NOLINT(category)\" comment appears in the source,", "# the verbosity level isn't high enough, or the filters filter it out.", "if", "IsErrorSuppressedByNolint", "(", "category", ",", "linenum", ")", ":", "return", "False", "if", "confidence", "<", "_cpplint_state", ".", "verbose_level", ":", "return", "False", "is_filtered", "=", "False", "for", "one_filter", "in", "_Filters", "(", ")", ":", "if", "one_filter", ".", "startswith", "(", "'-'", ")", ":", "if", "category", ".", "startswith", "(", "one_filter", "[", "1", ":", "]", ")", ":", "is_filtered", "=", "True", "elif", "one_filter", ".", "startswith", "(", "'+'", ")", ":", "if", "category", ".", "startswith", "(", "one_filter", "[", "1", ":", "]", ")", ":", "is_filtered", "=", "False", "else", ":", "assert", "False", "# should have been checked for in SetFilter.", "if", "is_filtered", ":", "return", "False", "return", "True" ]
If confidence >= verbose, category passes filter and is not suppressed.
[ "If", "confidence", ">", "=", "verbose", "category", "passes", "filter", "and", "is", "not", "suppressed", "." ]
python
valid
32.423077
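The +/- filter loop above applies prefixes in order, so a later +filter can re-enable a category that an earlier -filter suppressed. A standalone re-statement (dropping the assert on malformed entries) with two probes:

def is_filtered(category, filters):
    """Re-statement of the prefix-filter loop above."""
    filtered = False
    for f in filters:
        if f.startswith('-') and category.startswith(f[1:]):
            filtered = True
        elif f.startswith('+') and category.startswith(f[1:]):
            filtered = False
    return filtered

filters = ['-whitespace', '+whitespace/braces']
print(is_filtered('whitespace/braces', filters))  # False: re-enabled by the +filter
print(is_filtered('whitespace/tab', filters))     # True: suppressed by the -filter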
pazz/alot
alot/completion.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/completion.py#L538-L551
def get_context(line, pos): """ computes start and end position of substring of line that is the command string under given position """ commands = split_commandline(line) + [''] i = 0 start = 0 end = len(commands[i]) while pos > end: i += 1 start = end + 1 end += 1 + len(commands[i]) return start, end
[ "def", "get_context", "(", "line", ",", "pos", ")", ":", "commands", "=", "split_commandline", "(", "line", ")", "+", "[", "''", "]", "i", "=", "0", "start", "=", "0", "end", "=", "len", "(", "commands", "[", "i", "]", ")", "while", "pos", ">", "end", ":", "i", "+=", "1", "start", "=", "end", "+", "1", "end", "+=", "1", "+", "len", "(", "commands", "[", "i", "]", ")", "return", "start", ",", "end" ]
computes start and end position of substring of line that is the command string under given position
[ "computes", "start", "and", "end", "position", "of", "substring", "of", "line", "that", "is", "the", "command", "string", "under", "given", "position" ]
python
train
29
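A worked trace of the index arithmetic, with str.split(';') standing in for alot's split_commandline (the real splitter honours quoting, which the stand-in ignores):

def get_context_demo(line, pos):
    commands = line.split(';') + ['']
    i, start, end = 0, 0, len(commands[0])
    while pos > end:
        i += 1
        start = end + 1
        end += 1 + len(commands[i])
    return start, end

# pos 12 sits inside the second command, whose text spans columns 10..21:
print(get_context_demo('sort date; move inbox', 12))  # (10, 21)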
ioos/compliance-checker
compliance_checker/cf/cf.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L2474-L2571
def check_geographic_region(self, ds): """ 6.1.1 When data is representative of geographic regions which can be identified by names but which have complex boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled axis should be used to identify the regions. Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate that the label values are standardized the variable that contains the labels must be given the standard_name attribute with the value region. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results """ ret_val = [] region_list = [ # TODO maybe move this (and other info like it) into a config file? 'africa', 'antarctica', 'arabian_sea', 'aral_sea', 'arctic_ocean', 'asia', 'atlantic_ocean', 'australia', 'baltic_sea', 'barents_opening', 'barents_sea', 'beaufort_sea', 'bellingshausen_sea', 'bering_sea', 'bering_strait', 'black_sea', 'canadian_archipelago', 'caribbean_sea', 'caspian_sea', 'central_america', 'chukchi_sea', 'contiguous_united_states', 'denmark_strait', 'drake_passage', 'east_china_sea', 'english_channel', 'eurasia', 'europe', 'faroe_scotland_channel', 'florida_bahamas_strait', 'fram_strait', 'global', 'global_land', 'global_ocean', 'great_lakes', 'greenland', 'gulf_of_alaska', 'gulf_of_mexico', 'hudson_bay', 'iceland_faroe_channel', 'indian_ocean', 'indonesian_throughflow', 'indo_pacific_ocean', 'irish_sea', 'lake_baykal', 'lake_chad', 'lake_malawi', 'lake_tanganyika', 'lake_victoria', 'mediterranean_sea', 'mozambique_channel', 'north_america', 'north_sea', 'norwegian_sea', 'pacific_equatorial_undercurrent', 'pacific_ocean', 'persian_gulf', 'red_sea', 'ross_sea', 'sea_of_japan', 'sea_of_okhotsk', 'south_america', 'south_china_sea', 'southern_ocean', 'taiwan_luzon_straits', 'weddell_sea', 'windward_passage', 'yellow_sea' ] for var in ds.get_variables_by_attributes(standard_name='region'): valid_region = TestCtx(BaseCheck.MEDIUM, self.section_titles["6.1"]) region = var[:] if np.ma.isMA(region): region = region.data valid_region.assert_true(''.join(region.astype(str)).lower() in region_list, "6.1.1 '{}' specified by '{}' is not a valid region".format( ''.join(region.astype(str)), var.name ) ) ret_val.append(valid_region.to_result()) return ret_val
[ "def", "check_geographic_region", "(", "self", ",", "ds", ")", ":", "ret_val", "=", "[", "]", "region_list", "=", "[", "# TODO maybe move this (and other info like it) into a config file?", "'africa'", ",", "'antarctica'", ",", "'arabian_sea'", ",", "'aral_sea'", ",", "'arctic_ocean'", ",", "'asia'", ",", "'atlantic_ocean'", ",", "'australia'", ",", "'baltic_sea'", ",", "'barents_opening'", ",", "'barents_sea'", ",", "'beaufort_sea'", ",", "'bellingshausen_sea'", ",", "'bering_sea'", ",", "'bering_strait'", ",", "'black_sea'", ",", "'canadian_archipelago'", ",", "'caribbean_sea'", ",", "'caspian_sea'", ",", "'central_america'", ",", "'chukchi_sea'", ",", "'contiguous_united_states'", ",", "'denmark_strait'", ",", "'drake_passage'", ",", "'east_china_sea'", ",", "'english_channel'", ",", "'eurasia'", ",", "'europe'", ",", "'faroe_scotland_channel'", ",", "'florida_bahamas_strait'", ",", "'fram_strait'", ",", "'global'", ",", "'global_land'", ",", "'global_ocean'", ",", "'great_lakes'", ",", "'greenland'", ",", "'gulf_of_alaska'", ",", "'gulf_of_mexico'", ",", "'hudson_bay'", ",", "'iceland_faroe_channel'", ",", "'indian_ocean'", ",", "'indonesian_throughflow'", ",", "'indo_pacific_ocean'", ",", "'irish_sea'", ",", "'lake_baykal'", ",", "'lake_chad'", ",", "'lake_malawi'", ",", "'lake_tanganyika'", ",", "'lake_victoria'", ",", "'mediterranean_sea'", ",", "'mozambique_channel'", ",", "'north_america'", ",", "'north_sea'", ",", "'norwegian_sea'", ",", "'pacific_equatorial_undercurrent'", ",", "'pacific_ocean'", ",", "'persian_gulf'", ",", "'red_sea'", ",", "'ross_sea'", ",", "'sea_of_japan'", ",", "'sea_of_okhotsk'", ",", "'south_america'", ",", "'south_china_sea'", ",", "'southern_ocean'", ",", "'taiwan_luzon_straits'", ",", "'weddell_sea'", ",", "'windward_passage'", ",", "'yellow_sea'", "]", "for", "var", "in", "ds", ".", "get_variables_by_attributes", "(", "standard_name", "=", "'region'", ")", ":", "valid_region", "=", "TestCtx", "(", "BaseCheck", ".", "MEDIUM", ",", "self", ".", "section_titles", "[", "\"6.1\"", "]", ")", "region", "=", "var", "[", ":", "]", "if", "np", ".", "ma", ".", "isMA", "(", "region", ")", ":", "region", "=", "region", ".", "data", "valid_region", ".", "assert_true", "(", "''", ".", "join", "(", "region", ".", "astype", "(", "str", ")", ")", ".", "lower", "(", ")", "in", "region_list", ",", "\"6.1.1 '{}' specified by '{}' is not a valid region\"", ".", "format", "(", "''", ".", "join", "(", "region", ".", "astype", "(", "str", ")", ")", ",", "var", ".", "name", ")", ")", "ret_val", ".", "append", "(", "valid_region", ".", "to_result", "(", ")", ")", "return", "ret_val" ]
6.1.1 When data is representative of geographic regions which can be identified by names but which have complex boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled axis should be used to identify the regions. Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate that the label values are standardized the variable that contains the labels must be given the standard_name attribute with the value region. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
[ "6", ".", "1", ".", "1", "When", "data", "is", "representative", "of", "geographic", "regions", "which", "can", "be", "identified", "by", "names", "but", "which", "have", "complex", "boundaries", "that", "cannot", "practically", "be", "specified", "using", "longitude", "and", "latitude", "boundary", "coordinates", "a", "labeled", "axis", "should", "be", "used", "to", "identify", "the", "regions", "." ]
python
train
34.877551
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L3965-L3972
def save_sequence_rule(self, sequence_rule_form, *args, **kwargs): """Pass through to provider SequenceRuleAdminSession.update_sequence_rule""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.update_resource if sequence_rule_form.is_for_update(): return self.update_sequence_rule(sequence_rule_form, *args, **kwargs) else: return self.create_sequence_rule(sequence_rule_form, *args, **kwargs)
[ "def", "save_sequence_rule", "(", "self", ",", "sequence_rule_form", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.ResourceAdminSession.update_resource", "if", "sequence_rule_form", ".", "is_for_update", "(", ")", ":", "return", "self", ".", "update_sequence_rule", "(", "sequence_rule_form", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "create_sequence_rule", "(", "sequence_rule_form", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Pass through to provider SequenceRuleAdminSession.update_sequence_rule
[ "Pass", "through", "to", "provider", "SequenceRuleAdminSession", ".", "update_sequence_rule" ]
python
train
60
thunder-project/thunder
thunder/base.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L510-L549
def element_wise(self, other, op): """ Apply an elementwise operation to data. Both self and other data must have the same mode. If self is in local mode, other can also be a numpy array. Self and other must have the same shape, or other must be a scalar. Parameters ---------- other : Data or numpy array Data to apply elementwise operation to op : function Binary operator to use for elementwise operations, e.g. add, subtract """ if not isscalar(other) and not self.shape == other.shape: raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape)) if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode: raise NotImplementedError if isscalar(other): return self.map(lambda x: op(x, other)) if self.mode == 'local' and isinstance(other, ndarray): return self._constructor(op(self.values, other)).__finalize__(self) if self.mode == 'local' and isinstance(other, Data): return self._constructor(op(self.values, other.values)).__finalize__(self) if self.mode == 'spark' and isinstance(other, Data): def func(record): (k1, x), (k2, y) = record return k1, op(x, y) rdd = self.tordd().zip(other.tordd()).map(func) barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split) return self._constructor(barray).__finalize__(self)
[ "def", "element_wise", "(", "self", ",", "other", ",", "op", ")", ":", "if", "not", "isscalar", "(", "other", ")", "and", "not", "self", ".", "shape", "==", "other", ".", "shape", ":", "raise", "ValueError", "(", "\"shapes %s and %s must be equal\"", "%", "(", "self", ".", "shape", ",", "other", ".", "shape", ")", ")", "if", "not", "isscalar", "(", "other", ")", "and", "isinstance", "(", "other", ",", "Data", ")", "and", "not", "self", ".", "mode", "==", "other", ".", "mode", ":", "raise", "NotImplementedError", "if", "isscalar", "(", "other", ")", ":", "return", "self", ".", "map", "(", "lambda", "x", ":", "op", "(", "x", ",", "other", ")", ")", "if", "self", ".", "mode", "==", "'local'", "and", "isinstance", "(", "other", ",", "ndarray", ")", ":", "return", "self", ".", "_constructor", "(", "op", "(", "self", ".", "values", ",", "other", ")", ")", ".", "__finalize__", "(", "self", ")", "if", "self", ".", "mode", "==", "'local'", "and", "isinstance", "(", "other", ",", "Data", ")", ":", "return", "self", ".", "_constructor", "(", "op", "(", "self", ".", "values", ",", "other", ".", "values", ")", ")", ".", "__finalize__", "(", "self", ")", "if", "self", ".", "mode", "==", "'spark'", "and", "isinstance", "(", "other", ",", "Data", ")", ":", "def", "func", "(", "record", ")", ":", "(", "k1", ",", "x", ")", ",", "(", "k2", ",", "y", ")", "=", "record", "return", "k1", ",", "op", "(", "x", ",", "y", ")", "rdd", "=", "self", ".", "tordd", "(", ")", ".", "zip", "(", "other", ".", "tordd", "(", ")", ")", ".", "map", "(", "func", ")", "barray", "=", "BoltArraySpark", "(", "rdd", ",", "shape", "=", "self", ".", "shape", ",", "dtype", "=", "self", ".", "dtype", ",", "split", "=", "self", ".", "values", ".", "split", ")", "return", "self", ".", "_constructor", "(", "barray", ")", ".", "__finalize__", "(", "self", ")" ]
Apply an elementwise operation to data. Both self and other data must have the same mode. If self is in local mode, other can also be a numpy array. Self and other must have the same shape, or other must be a scalar. Parameters ---------- other : Data or numpy array Data to apply elementwise operation to op : function Binary operator to use for elementwise operations, e.g. add, subtract
[ "Apply", "an", "elementwise", "operation", "to", "data", "." ]
python
train
39.025
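A minimal local-mode sketch; the construction call is an assumption about thunder's loading API, and spark mode would follow the zip branch above instead.

import operator
import numpy as np
import thunder as td

series = td.series.fromarray(np.arange(6).reshape(2, 3))      # assumed loader
shifted = series.element_wise(np.ones((2, 3)), operator.add)  # same-shape array operand
doubled = series.element_wise(2, operator.mul)                # scalar broadcasts via map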
sebp/scikit-survival
sksurv/ensemble/survival_loss.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/survival_loss.py#L55-L63
def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Least squares does not need to update terminal regions. But it has to update the predictions. """ # update predictions y_pred[:, k] += learning_rate * tree.predict(X).ravel()
[ "def", "update_terminal_regions", "(", "self", ",", "tree", ",", "X", ",", "y", ",", "residual", ",", "y_pred", ",", "sample_weight", ",", "sample_mask", ",", "learning_rate", "=", "1.0", ",", "k", "=", "0", ")", ":", "# update predictions", "y_pred", "[", ":", ",", "k", "]", "+=", "learning_rate", "*", "tree", ".", "predict", "(", "X", ")", ".", "ravel", "(", ")" ]
Least squares does not need to update terminal regions. But it has to update the predictions.
[ "Least", "squares", "does", "not", "need", "to", "update", "terminal", "regions", "." ]
python
train
43.444444
fermiPy/fermipy
fermipy/gtanalysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L5527-L5551
def _update_srcmap_file(self, sources, overwrite=True): """Check the contents of the source map file and generate source maps for any components that are not present.""" if not os.path.isfile(self.files['srcmap']): return hdulist = fits.open(self.files['srcmap']) hdunames = [hdu.name.upper() for hdu in hdulist] srcmaps = {} for src in sources: if src.name.upper() in hdunames and not overwrite: continue self.logger.debug('Creating source map for %s', src.name) srcmaps[src.name] = self._create_srcmap(src.name, src) if srcmaps: self.logger.debug( 'Updating source map file for component %s.', self.name) srcmap_utils.update_source_maps(self.files['srcmap'], srcmaps, logger=self.logger) hdulist.close()
[ "def", "_update_srcmap_file", "(", "self", ",", "sources", ",", "overwrite", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "files", "[", "'srcmap'", "]", ")", ":", "return", "hdulist", "=", "fits", ".", "open", "(", "self", ".", "files", "[", "'srcmap'", "]", ")", "hdunames", "=", "[", "hdu", ".", "name", ".", "upper", "(", ")", "for", "hdu", "in", "hdulist", "]", "srcmaps", "=", "{", "}", "for", "src", "in", "sources", ":", "if", "src", ".", "name", ".", "upper", "(", ")", "in", "hdunames", "and", "not", "overwrite", ":", "continue", "self", ".", "logger", ".", "debug", "(", "'Creating source map for %s'", ",", "src", ".", "name", ")", "srcmaps", "[", "src", ".", "name", "]", "=", "self", ".", "_create_srcmap", "(", "src", ".", "name", ",", "src", ")", "if", "srcmaps", ":", "self", ".", "logger", ".", "debug", "(", "'Updating source map file for component %s.'", ",", "self", ".", "name", ")", "srcmap_utils", ".", "update_source_maps", "(", "self", ".", "files", "[", "'srcmap'", "]", ",", "srcmaps", ",", "logger", "=", "self", ".", "logger", ")", "hdulist", ".", "close", "(", ")" ]
Check the contents of the source map file and generate source maps for any components that are not present.
[ "Check", "the", "contents", "of", "the", "source", "map", "file", "and", "generate", "source", "maps", "for", "any", "components", "that", "are", "not", "present", "." ]
python
train
36.28
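_update_srcmap_file is internal to fermipy's analysis component, so a direct call is not shown; the sketch below only illustrates its skip-if-present idiom with astropy, and the file name and HDU names are hypothetical.

from astropy.io import fits

with fits.open("srcmap.fits") as hdulist:   # hypothetical file
    hdunames = [hdu.name.upper() for hdu in hdulist]
missing = [s for s in ("SRC_A", "SRC_B") if s not in hdunames]
# only the sources in `missing` would get a freshly computed source map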
RudolfCardinal/pythonlib
cardinal_pythonlib/lists.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/lists.py#L48-L84
def index_list_for_sort_order(x: List[Any], key: Callable[[Any], Any] = None, reverse: bool = False) -> List[int]: """ Returns a list of indexes of ``x``, IF ``x`` WERE TO BE SORTED. Args: x: data key: function to be applied to the data to generate a sort key; this function is passed as the ``key=`` parameter to :func:`sorted`; the default is ``itemgetter(1)`` reverse: reverse the sort order? Returns: list of integer index values Example: .. code-block:: python z = ["a", "c", "b"] index_list_for_sort_order(z) # [0, 2, 1] index_list_for_sort_order(z, reverse=True) # [1, 2, 0] q = [("a", 9), ("b", 8), ("c", 7)] index_list_for_sort_order(q, key=itemgetter(1)) """ def key_with_user_func(idx_val: Tuple[int, Any]): return key(idx_val[1]) if key: sort_key = key_with_user_func # see the simpler version below else: sort_key = itemgetter(1) # enumerate, below, will return tuples of (index, value), so # itemgetter(1) means sort by the value index_value_list = sorted(enumerate(x), key=sort_key, reverse=reverse) return [i for i, _ in index_value_list]
[ "def", "index_list_for_sort_order", "(", "x", ":", "List", "[", "Any", "]", ",", "key", ":", "Callable", "[", "[", "Any", "]", ",", "Any", "]", "=", "None", ",", "reverse", ":", "bool", "=", "False", ")", "->", "List", "[", "int", "]", ":", "def", "key_with_user_func", "(", "idx_val", ":", "Tuple", "[", "int", ",", "Any", "]", ")", ":", "return", "key", "(", "idx_val", "[", "1", "]", ")", "if", "key", ":", "sort_key", "=", "key_with_user_func", "# see the simpler version below", "else", ":", "sort_key", "=", "itemgetter", "(", "1", ")", "# enumerate, below, will return tuples of (index, value), so", "# itemgetter(1) means sort by the value", "index_value_list", "=", "sorted", "(", "enumerate", "(", "x", ")", ",", "key", "=", "sort_key", ",", "reverse", "=", "reverse", ")", "return", "[", "i", "for", "i", ",", "_", "in", "index_value_list", "]" ]
Returns a list of indexes of ``x``, IF ``x`` WERE TO BE SORTED. Args: x: data key: function to be applied to the data to generate a sort key; this function is passed as the ``key=`` parameter to :func:`sorted`; the default is ``itemgetter(1)`` reverse: reverse the sort order? Returns: list of integer index values Example: .. code-block:: python z = ["a", "c", "b"] index_list_for_sort_order(z) # [0, 2, 1] index_list_for_sort_order(z, reverse=True) # [1, 2, 0] q = [("a", 9), ("b", 8), ("c", 7)] index_list_for_sort_order(q, key=itemgetter(1))
[ "Returns", "a", "list", "of", "indexes", "of", "x", "IF", "x", "WERE", "TO", "BE", "SORTED", "." ]
python
train
33.72973
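To complement the docstring examples, a short runnable demonstration of applying the returned index list; it assumes the function is imported from cardinal_pythonlib.lists as in the record above.

from cardinal_pythonlib.lists import index_list_for_sort_order

z = ["a", "c", "b"]
order = index_list_for_sort_order(z)   # [0, 2, 1]
print([z[i] for i in order])           # ['a', 'b', 'c']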
DLR-RM/RAFCON
source/rafcon/gui/models/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/container_state.py#L88-L92
def _load_scoped_variable_models(self): """ Adds models for each scoped variable of the state """ self.scoped_variables = [] for scoped_variable in self.state.scoped_variables.values(): self._add_model(self.scoped_variables, scoped_variable, ScopedVariableModel)
[ "def", "_load_scoped_variable_models", "(", "self", ")", ":", "self", ".", "scoped_variables", "=", "[", "]", "for", "scoped_variable", "in", "self", ".", "state", ".", "scoped_variables", ".", "values", "(", ")", ":", "self", ".", "_add_model", "(", "self", ".", "scoped_variables", ",", "scoped_variable", ",", "ScopedVariableModel", ")" ]
Adds models for each scoped variable of the state
[ "Adds", "models", "for", "each", "scoped", "variable", "of", "the", "state" ]
python
train
58.8
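The method above is RAFCON-internal, but the pattern it follows (wrap each core object of a state in a model object) can be sketched without the framework; the class and data below are stand-ins.

class ScopedVariableModelSketch:            # stand-in for ScopedVariableModel
    def __init__(self, core):
        self.core = core

scoped_variables = {"v1": 1, "v2": 2}       # stand-in for state.scoped_variables
models = [ScopedVariableModelSketch(v) for v in scoped_variables.values()]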
mbr/flask-nav
flask_nav/__init__.py
https://github.com/mbr/flask-nav/blob/06f3b5b2addad29c2fc531a7e8e74958e9e4b793/flask_nav/__init__.py#L24-L42
def get_renderer(app, id): """Retrieve a renderer. :param app: :class:`~flask.Flask` application to look ``id`` up on :param id: Internal renderer id-string to look up """ renderer = app.extensions.get('nav_renderers', {})[id] if isinstance(renderer, tuple): mod_name, cls_name = renderer mod = import_module(mod_name) cls = mod for name in cls_name.split('.'): cls = getattr(cls, name) return cls return renderer
[ "def", "get_renderer", "(", "app", ",", "id", ")", ":", "renderer", "=", "app", ".", "extensions", ".", "get", "(", "'nav_renderers'", ",", "{", "}", ")", "[", "id", "]", "if", "isinstance", "(", "renderer", ",", "tuple", ")", ":", "mod_name", ",", "cls_name", "=", "renderer", "mod", "=", "import_module", "(", "mod_name", ")", "cls", "=", "mod", "for", "name", "in", "cls_name", ".", "split", "(", "'.'", ")", ":", "cls", "=", "getattr", "(", "cls", ",", "name", ")", "return", "cls", "return", "renderer" ]
Retrieve a renderer. :param app: :class:`~flask.Flask` application to look ``id`` up on :param id: Internal renderer id-string to look up
[ "Retrieve", "a", "renderer", "." ]
python
train
25.263158
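A hedged usage sketch for get_renderer; registering the renderer tuple by hand is an assumption made to keep the example self-contained (flask-nav normally populates app.extensions['nav_renderers'] via register_renderer), but it shows how the lazy dotted-path lookup resolves.

from flask import Flask
from flask_nav import get_renderer

app = Flask(__name__)
app.extensions["nav_renderers"] = {
    # (module name, class name) tuples are imported lazily on lookup
    "simple": ("flask_nav.renderers", "SimpleRenderer"),
}
renderer_cls = get_renderer(app, "simple")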
ladybug-tools/ladybug
ladybug/skymodel.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L585-L617
def get_extra_radiation(doy, solar_constant=1366.1): """ Determine extraterrestrial radiation from day of year (using the spencer method). Note: [1] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012. [2] <http://solardat.uoregon.edu/SolarRadiationBasics.html>, Eqs. SR1 and SR2 Args: doy : array of integers representing the days of the year. solar_constant : float, default 1366.1 The solar constant. Returns: dni_extra : float, array, or Series The extraterrestrial radiation present in watts per square meter on a surface which is normal to the sun. Pandas Timestamp and DatetimeIndex inputs will yield a Pandas TimeSeries. All other inputs will yield a float or an array of floats. """ # Calculates the day angle for the Earth's orbit around the Sun. B = (2. * math.pi / 365.) * (doy - 1) # Calculate R over R squared from the angle RoverR0sqrd = (1.00011 + 0.034221 * math.cos(B) + 0.00128 * math.sin(B) + 0.000719 * math.cos(2 * B) + 7.7e-05 * math.sin(2 * B)) Ea = solar_constant * RoverR0sqrd return Ea
[ "def", "get_extra_radiation", "(", "doy", ",", "solar_constant", "=", "1366.1", ")", ":", "# Calculates the day angle for the Earth's orbit around the Sun.", "B", "=", "(", "2.", "*", "math", ".", "pi", "/", "365.", ")", "*", "(", "doy", "-", "1", ")", "# Calculate R over R squared from the angle", "RoverR0sqrd", "=", "(", "1.00011", "+", "0.034221", "*", "math", ".", "cos", "(", "B", ")", "+", "0.00128", "*", "math", ".", "sin", "(", "B", ")", "+", "0.000719", "*", "math", ".", "cos", "(", "2", "*", "B", ")", "+", "7.7e-05", "*", "math", ".", "sin", "(", "2", "*", "B", ")", ")", "Ea", "=", "solar_constant", "*", "RoverR0sqrd", "return", "Ea" ]
Determine extraterrestrial radiation from day of year (using the spencer method). Note: [1] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012. [2] <http://solardat.uoregon.edu/SolarRadiationBasics.html>, Eqs. SR1 and SR2 Args: doy : array of integers representing the days of the year. solar_constant : float, default 1366.1 The solar constant. Returns: dni_extra : float, array, or Series The extraterrestrial radiation present in watts per square meter on a surface which is normal to the sun. Pandas Timestamp and DatetimeIndex inputs will yield a Pandas TimeSeries. All other inputs will yield a float or an array of floats.
[ "Determine", "extraterrestrial", "radiation", "from", "day", "of", "year", "(", "using", "the", "spencer", "method", ")", "." ]
python
train
39
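A worked instance of the Spencer formula above for day 80 (near the March equinox); the specific day is only an illustration, and the printed value is rounded.

import math

doy = 80
B = (2. * math.pi / 365.) * (doy - 1)
RoverR0sqrd = (1.00011 + 0.034221 * math.cos(B) + 0.00128 * math.sin(B) +
               0.000719 * math.cos(2 * B) + 7.7e-05 * math.sin(2 * B))
print(1366.1 * RoverR0sqrd)   # ~1377 W/m2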
senaite/senaite.jsonapi
src/senaite/jsonapi/api.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/api.py#L1563-L1578
def get_batch(sequence, size, start=0, endpoint=None, complete=False): """ create a batched result record out of a sequence (catalog brains) """ batch = make_batch(sequence, size, start) return { "pagesize": batch.get_pagesize(), "next": batch.make_next_url(), "previous": batch.make_prev_url(), "page": batch.get_pagenumber(), "pages": batch.get_numpages(), "count": batch.get_sequence_length(), "items": make_items_for([b for b in batch.get_batch()], endpoint, complete=complete), }
[ "def", "get_batch", "(", "sequence", ",", "size", ",", "start", "=", "0", ",", "endpoint", "=", "None", ",", "complete", "=", "False", ")", ":", "batch", "=", "make_batch", "(", "sequence", ",", "size", ",", "start", ")", "return", "{", "\"pagesize\"", ":", "batch", ".", "get_pagesize", "(", ")", ",", "\"next\"", ":", "batch", ".", "make_next_url", "(", ")", ",", "\"previous\"", ":", "batch", ".", "make_prev_url", "(", ")", ",", "\"page\"", ":", "batch", ".", "get_pagenumber", "(", ")", ",", "\"pages\"", ":", "batch", ".", "get_numpages", "(", ")", ",", "\"count\"", ":", "batch", ".", "get_sequence_length", "(", ")", ",", "\"items\"", ":", "make_items_for", "(", "[", "b", "for", "b", "in", "batch", ".", "get_batch", "(", ")", "]", ",", "endpoint", ",", "complete", "=", "complete", ")", ",", "}" ]
create a batched result record out of a sequence (catalog brains)
[ "create", "a", "batched", "result", "record", "out", "of", "a", "sequence", "(", "catalog", "brains", ")" ]
python
train
36.1875
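get_batch depends on senaite.jsonapi internals (make_batch, make_items_for) and on a Plone catalog, so the call below is a hedged sketch of the returned record's shape; `brains` is assumed to come from a portal_catalog query, and the endpoint string is likewise an assumption.

# hypothetical: `brains` is a catalog query result inside a route handler
result = get_batch(brains, size=25, start=0,
                   endpoint="senaite.jsonapi.get", complete=False)
print(result["count"], result["pages"])   # total hits and page count
first_item = result["items"][0]           # serialized catalog brain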
chrisrink10/basilisp
src/basilisp/lang/compiler/parser.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/parser.py#L1514-L1554
def _assert_recur_is_tail(node: Node) -> None: # pylint: disable=too-many-branches """Assert that `recur` forms only appear in the tail position of this or child AST nodes. `recur` forms may only appear in `do` nodes (both literal and synthetic `do` nodes) and in either the :then or :else expression of an `if` node.""" if node.op == NodeOp.DO: assert isinstance(node, Do) for child in node.statements: _assert_no_recur(child) _assert_recur_is_tail(node.ret) elif node.op in {NodeOp.FN, NodeOp.FN_METHOD, NodeOp.METHOD}: assert isinstance(node, (Fn, FnMethod, Method)) node.visit(_assert_recur_is_tail) elif node.op == NodeOp.IF: assert isinstance(node, If) _assert_no_recur(node.test) _assert_recur_is_tail(node.then) _assert_recur_is_tail(node.else_) elif node.op in {NodeOp.LET, NodeOp.LETFN}: assert isinstance(node, (Let, LetFn)) for binding in node.bindings: assert binding.init is not None _assert_no_recur(binding.init) _assert_recur_is_tail(node.body) elif node.op == NodeOp.LOOP: assert isinstance(node, Loop) for binding in node.bindings: assert binding.init is not None _assert_no_recur(binding.init) elif node.op == NodeOp.RECUR: pass elif node.op == NodeOp.TRY: assert isinstance(node, Try) _assert_recur_is_tail(node.body) for catch in node.catches: _assert_recur_is_tail(catch) if node.finally_: _assert_no_recur(node.finally_) else: node.visit(_assert_no_recur)
[ "def", "_assert_recur_is_tail", "(", "node", ":", "Node", ")", "->", "None", ":", "# pylint: disable=too-many-branches", "if", "node", ".", "op", "==", "NodeOp", ".", "DO", ":", "assert", "isinstance", "(", "node", ",", "Do", ")", "for", "child", "in", "node", ".", "statements", ":", "_assert_no_recur", "(", "child", ")", "_assert_recur_is_tail", "(", "node", ".", "ret", ")", "elif", "node", ".", "op", "in", "{", "NodeOp", ".", "FN", ",", "NodeOp", ".", "FN_METHOD", ",", "NodeOp", ".", "METHOD", "}", ":", "assert", "isinstance", "(", "node", ",", "(", "Fn", ",", "FnMethod", ",", "Method", ")", ")", "node", ".", "visit", "(", "_assert_recur_is_tail", ")", "elif", "node", ".", "op", "==", "NodeOp", ".", "IF", ":", "assert", "isinstance", "(", "node", ",", "If", ")", "_assert_no_recur", "(", "node", ".", "test", ")", "_assert_recur_is_tail", "(", "node", ".", "then", ")", "_assert_recur_is_tail", "(", "node", ".", "else_", ")", "elif", "node", ".", "op", "in", "{", "NodeOp", ".", "LET", ",", "NodeOp", ".", "LETFN", "}", ":", "assert", "isinstance", "(", "node", ",", "(", "Let", ",", "LetFn", ")", ")", "for", "binding", "in", "node", ".", "bindings", ":", "assert", "binding", ".", "init", "is", "not", "None", "_assert_no_recur", "(", "binding", ".", "init", ")", "_assert_recur_is_tail", "(", "node", ".", "body", ")", "elif", "node", ".", "op", "==", "NodeOp", ".", "LOOP", ":", "assert", "isinstance", "(", "node", ",", "Loop", ")", "for", "binding", "in", "node", ".", "bindings", ":", "assert", "binding", ".", "init", "is", "not", "None", "_assert_no_recur", "(", "binding", ".", "init", ")", "elif", "node", ".", "op", "==", "NodeOp", ".", "RECUR", ":", "pass", "elif", "node", ".", "op", "==", "NodeOp", ".", "TRY", ":", "assert", "isinstance", "(", "node", ",", "Try", ")", "_assert_recur_is_tail", "(", "node", ".", "body", ")", "for", "catch", "in", "node", ".", "catches", ":", "_assert_recur_is_tail", "(", "catch", ")", "if", "node", ".", "finally_", ":", "_assert_no_recur", "(", "node", ".", "finally_", ")", "else", ":", "node", ".", "visit", "(", "_assert_no_recur", ")" ]
Assert that `recur` forms only appear in the tail position of this or child AST nodes. `recur` forms may only appear in `do` nodes (both literal and synthetic `do` nodes) and in either the :then or :else expression of an `if` node.
[ "Assert", "that", "recur", "forms", "only", "appear", "in", "the", "tail", "position", "of", "this", "or", "child", "AST", "nodes", "." ]
python
test
39.829268
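To make the tail-position rule enforced above concrete, here are two loop forms it would accept and reject; they are illustrative and not taken from the basilisp test suite.

legal = "(loop [i 0] (if (< i 10) (recur (inc i)) i))"    # recur in tail of :then
illegal = "(loop [i 0] (do (recur (inc i)) i))"           # recur as a non-tail statement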