Dataset columns:
repo: string (length 7-54)
path: string (length 4-192)
url: string (length 87-284)
code: string (length 78-104k)
code_tokens: list
docstring: string (length 1-46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
kyuupichan/aiorpcX
aiorpcx/socks.py
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L380-L393
async def auto_detect_at_host(cls, host, ports, auth): '''Try to detect a SOCKS proxy on a host on one of the ports. Calls auto_detect_at_address for the ports in order. Returning a SOCKSProxy does not mean it is functioning - for example, it may have no network connectivity. If no proxy is detected return None. ''' for port in ports: proxy = await cls.auto_detect_at_address(NetAddress(host, port), auth) if proxy: return proxy return None
[ "async", "def", "auto_detect_at_host", "(", "cls", ",", "host", ",", "ports", ",", "auth", ")", ":", "for", "port", "in", "ports", ":", "proxy", "=", "await", "cls", ".", "auto_detect_at_address", "(", "NetAddress", "(", "host", ",", "port", ")", ",", "auth", ")", "if", "proxy", ":", "return", "proxy", "return", "None" ]
Try to detect a SOCKS proxy on a host on one of the ports. Calls auto_detect_at_address for the ports in order. Returning a SOCKSProxy does not mean it is functioning - for example, it may have no network connectivity. If no proxy is detected return None.
[ "Try", "to", "detect", "a", "SOCKS", "proxy", "on", "a", "host", "on", "one", "of", "the", "ports", "." ]
python
train
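A minimal usage sketch for the snippet above (not part of the dataset): it probes localhost on common SOCKS ports; the port list and auth=None (no proxy authentication) are assumptions made for illustration.

    import asyncio
    from aiorpcx.socks import SOCKSProxy

    async def main():
        # Returns a SOCKSProxy for the first port that answers, else None.
        proxy = await SOCKSProxy.auto_detect_at_host('localhost', [9050, 1080], auth=None)
        print(proxy or 'no SOCKS proxy detected')

    asyncio.run(main())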
jamesturk/django-honeypot
honeypot/decorators.py
https://github.com/jamesturk/django-honeypot/blob/4b149bfca81828eaf418b5a6854e670d156f5e35/honeypot/decorators.py#L36-L60
def check_honeypot(func=None, field_name=None): """ Check request.POST for valid honeypot field. Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if not specified. """ # hack to reverse arguments if called with str param if isinstance(func, six.string_types): func, field_name = field_name, func def decorated(func): def inner(request, *args, **kwargs): response = verify_honeypot_value(request, field_name) if response: return response else: return func(request, *args, **kwargs) return wraps(func, assigned=available_attrs(func))(inner) if func is None: def decorator(func): return decorated(func) return decorator return decorated(func)
[ "def", "check_honeypot", "(", "func", "=", "None", ",", "field_name", "=", "None", ")", ":", "# hack to reverse arguments if called with str param", "if", "isinstance", "(", "func", ",", "six", ".", "string_types", ")", ":", "func", ",", "field_name", "=", "field_name", ",", "func", "def", "decorated", "(", "func", ")", ":", "def", "inner", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "verify_honeypot_value", "(", "request", ",", "field_name", ")", "if", "response", ":", "return", "response", "else", ":", "return", "func", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wraps", "(", "func", ",", "assigned", "=", "available_attrs", "(", "func", ")", ")", "(", "inner", ")", "if", "func", "is", "None", ":", "def", "decorator", "(", "func", ")", ":", "return", "decorated", "(", "func", ")", "return", "decorator", "return", "decorated", "(", "func", ")" ]
Check request.POST for valid honeypot field. Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if not specified.
[ "Check", "request", ".", "POST", "for", "valid", "honeypot", "field", "." ]
python
train
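A hedged usage sketch for check_honeypot (not part of the dataset): it shows the two calling conventions that the argument-swapping hack supports; the view names and the 'phone2' field name are invented for the example.

    from django.http import HttpResponse
    from honeypot.decorators import check_honeypot

    @check_honeypot                    # field name falls back to HONEYPOT_FIELD_NAME from settings
    def contact(request):
        return HttpResponse('ok')

    @check_honeypot('phone2')          # a str argument is re-routed into field_name
    def signup(request):
        return HttpResponse('ok')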
mardix/Mocha
mocha/contrib/auth/models.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/models.py#L180-L189
def search_by_name(cls, query, name): """ Make a search :param query: :param name: :return: """ query = query.filter(db.or_(cls.first_name.contains(name), cls.last_name.contains(name))) return query
[ "def", "search_by_name", "(", "cls", ",", "query", ",", "name", ")", ":", "query", "=", "query", ".", "filter", "(", "db", ".", "or_", "(", "cls", ".", "first_name", ".", "contains", "(", "name", ")", ",", "cls", ".", "last_name", ".", "contains", "(", "name", ")", ")", ")", "return", "query" ]
Make a search :param query: :param name: :return:
[ "Make", "a", "search", ":", "param", "query", ":", ":", "param", "name", ":", ":", "return", ":" ]
python
train
tijme/not-your-average-web-crawler
nyawc/helpers/RandomInputHelper.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/helpers/RandomInputHelper.py#L142-L161
def get_random_email(ltd="com"): """Get a random email address with the given ltd. Args: ltd (str): The ltd to use (e.g. com). Returns: str: The random email. """ email = [ RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), "@", RandomInputHelper.get_random_value(6, [string.ascii_lowercase]), ".", ltd ] return "".join(email)
[ "def", "get_random_email", "(", "ltd", "=", "\"com\"", ")", ":", "email", "=", "[", "RandomInputHelper", ".", "get_random_value", "(", "6", ",", "[", "string", ".", "ascii_lowercase", "]", ")", ",", "\"@\"", ",", "RandomInputHelper", ".", "get_random_value", "(", "6", ",", "[", "string", ".", "ascii_lowercase", "]", ")", ",", "\".\"", ",", "ltd", "]", "return", "\"\"", ".", "join", "(", "email", ")" ]
Get a random email address with the given ltd. Args: ltd (str): The ltd to use (e.g. com). Returns: str: The random email.
[ "Get", "a", "random", "email", "address", "with", "the", "given", "ltd", "." ]
python
train
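A small usage sketch (not part of the dataset): both the local part and the domain are six random lowercase letters, so concrete output differs per run; the addresses in the comments are only illustrative.

    from nyawc.helpers.RandomInputHelper import RandomInputHelper

    print(RandomInputHelper.get_random_email())       # e.g. qwjzkd@plmnbv.com
    print(RandomInputHelper.get_random_email("org"))  # e.g. aqswde@zxcvbn.org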
pvlib/pvlib-python
pvlib/forecast.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/forecast.py#L504-L537
def cloud_cover_to_irradiance_liujordan(self, cloud_cover, **kwargs): """ Estimates irradiance from cloud cover in the following steps: 1. Determine transmittance using a function of cloud cover e.g. :py:meth:`~ForecastModel.cloud_cover_to_transmittance_linear` 2. Calculate GHI, DNI, DHI using the :py:func:`pvlib.irradiance.liujordan` model Parameters ---------- cloud_cover : Series Returns ------- irradiance : DataFrame Columns include ghi, dni, dhi """ # in principle, get_solarposition could use the forecast # pressure, temp, etc., but the cloud cover forecast is not # accurate enough to justify using these minor corrections solar_position = self.location.get_solarposition(cloud_cover.index) dni_extra = get_extra_radiation(cloud_cover.index) airmass = self.location.get_airmass(cloud_cover.index) transmittance = self.cloud_cover_to_transmittance_linear(cloud_cover, **kwargs) irrads = liujordan(solar_position['apparent_zenith'], transmittance, airmass['airmass_absolute'], dni_extra=dni_extra) irrads = irrads.fillna(0) return irrads
[ "def", "cloud_cover_to_irradiance_liujordan", "(", "self", ",", "cloud_cover", ",", "*", "*", "kwargs", ")", ":", "# in principle, get_solarposition could use the forecast", "# pressure, temp, etc., but the cloud cover forecast is not", "# accurate enough to justify using these minor corrections", "solar_position", "=", "self", ".", "location", ".", "get_solarposition", "(", "cloud_cover", ".", "index", ")", "dni_extra", "=", "get_extra_radiation", "(", "cloud_cover", ".", "index", ")", "airmass", "=", "self", ".", "location", ".", "get_airmass", "(", "cloud_cover", ".", "index", ")", "transmittance", "=", "self", ".", "cloud_cover_to_transmittance_linear", "(", "cloud_cover", ",", "*", "*", "kwargs", ")", "irrads", "=", "liujordan", "(", "solar_position", "[", "'apparent_zenith'", "]", ",", "transmittance", ",", "airmass", "[", "'airmass_absolute'", "]", ",", "dni_extra", "=", "dni_extra", ")", "irrads", "=", "irrads", ".", "fillna", "(", "0", ")", "return", "irrads" ]
Estimates irradiance from cloud cover in the following steps: 1. Determine transmittance using a function of cloud cover e.g. :py:meth:`~ForecastModel.cloud_cover_to_transmittance_linear` 2. Calculate GHI, DNI, DHI using the :py:func:`pvlib.irradiance.liujordan` model Parameters ---------- cloud_cover : Series Returns ------- irradiance : DataFrame Columns include ghi, dni, dhi
[ "Estimates", "irradiance", "from", "cloud", "cover", "in", "the", "following", "steps", ":" ]
python
train
LIVVkit/LIVVkit
livvkit/bundles/CISM_glissade/verification.py
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/bundles/CISM_glissade/verification.py#L43-L102
def parse_log(file_path): """ Parse a CISM output log and extract some information. Args: file_path: absolute path to the log file Return: A dictionary created by the elements object corresponding to the results of the bit for bit testing """ if not os.path.isfile(file_path): return elements.error("Output Log", "Could not open file: " + file_path.split(os.sep)[-1]) headers = ["Converged Iterations", "Avg. Iterations to Converge", "Processor Count", "Dycore Type"] with open(file_path, 'r') as f: dycore_types = {"0": "Glide", "1": "Glam", "2": "Glissade", "3": "Albany_felix", "4": "BISICLES"} curr_step = 0 proc_count = 0 iter_number = 0 converged_iters = [] iters_to_converge = [] for line in f: split = line.split() if ('CISM dycore type' in line): if line.split()[-1] == '=': dycore_type = dycore_types[next(f).strip()] else: dycore_type = dycore_types[line.split()[-1]] elif ('total procs' in line): proc_count += int(line.split()[-1]) elif ('Nonlinear Solver Step' in line): curr_step = int(line.split()[4]) elif ('Compute ice velocities, time = ' in line): converged_iters.append(curr_step) curr_step = float(line.split()[-1]) elif ('"SOLVE_STATUS_CONVERGED"' in line): split = line.split() iters_to_converge.append(int(split[split.index('"SOLVE_STATUS_CONVERGED"') + 2])) elif ("Compute dH/dt" in line): iters_to_converge.append(int(iter_number)) elif len(split) > 0 and split[0].isdigit(): iter_number = split[0] if iters_to_converge == []: iters_to_converge.append(int(iter_number)) data = { "Dycore Type": dycore_type, "Processor Count": proc_count, "Converged Iterations": len(converged_iters), "Avg. Iterations to Converge": np.mean(iters_to_converge) } return elements.table("Output Log", headers, data)
[ "def", "parse_log", "(", "file_path", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "return", "elements", ".", "error", "(", "\"Output Log\"", ",", "\"Could not open file: \"", "+", "file_path", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", ")", "headers", "=", "[", "\"Converged Iterations\"", ",", "\"Avg. Iterations to Converge\"", ",", "\"Processor Count\"", ",", "\"Dycore Type\"", "]", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "f", ":", "dycore_types", "=", "{", "\"0\"", ":", "\"Glide\"", ",", "\"1\"", ":", "\"Glam\"", ",", "\"2\"", ":", "\"Glissade\"", ",", "\"3\"", ":", "\"Albany_felix\"", ",", "\"4\"", ":", "\"BISICLES\"", "}", "curr_step", "=", "0", "proc_count", "=", "0", "iter_number", "=", "0", "converged_iters", "=", "[", "]", "iters_to_converge", "=", "[", "]", "for", "line", "in", "f", ":", "split", "=", "line", ".", "split", "(", ")", "if", "(", "'CISM dycore type'", "in", "line", ")", ":", "if", "line", ".", "split", "(", ")", "[", "-", "1", "]", "==", "'='", ":", "dycore_type", "=", "dycore_types", "[", "next", "(", "f", ")", ".", "strip", "(", ")", "]", "else", ":", "dycore_type", "=", "dycore_types", "[", "line", ".", "split", "(", ")", "[", "-", "1", "]", "]", "elif", "(", "'total procs'", "in", "line", ")", ":", "proc_count", "+=", "int", "(", "line", ".", "split", "(", ")", "[", "-", "1", "]", ")", "elif", "(", "'Nonlinear Solver Step'", "in", "line", ")", ":", "curr_step", "=", "int", "(", "line", ".", "split", "(", ")", "[", "4", "]", ")", "elif", "(", "'Compute ice velocities, time = '", "in", "line", ")", ":", "converged_iters", ".", "append", "(", "curr_step", ")", "curr_step", "=", "float", "(", "line", ".", "split", "(", ")", "[", "-", "1", "]", ")", "elif", "(", "'\"SOLVE_STATUS_CONVERGED\"'", "in", "line", ")", ":", "split", "=", "line", ".", "split", "(", ")", "iters_to_converge", ".", "append", "(", "int", "(", "split", "[", "split", ".", "index", "(", "'\"SOLVE_STATUS_CONVERGED\"'", ")", "+", "2", "]", ")", ")", "elif", "(", "\"Compute dH/dt\"", "in", "line", ")", ":", "iters_to_converge", ".", "append", "(", "int", "(", "iter_number", ")", ")", "elif", "len", "(", "split", ")", ">", "0", "and", "split", "[", "0", "]", ".", "isdigit", "(", ")", ":", "iter_number", "=", "split", "[", "0", "]", "if", "iters_to_converge", "==", "[", "]", ":", "iters_to_converge", ".", "append", "(", "int", "(", "iter_number", ")", ")", "data", "=", "{", "\"Dycore Type\"", ":", "dycore_type", ",", "\"Processor Count\"", ":", "proc_count", ",", "\"Converged Iterations\"", ":", "len", "(", "converged_iters", ")", ",", "\"Avg. Iterations to Converge\"", ":", "np", ".", "mean", "(", "iters_to_converge", ")", "}", "return", "elements", ".", "table", "(", "\"Output Log\"", ",", "headers", ",", "data", ")" ]
Parse a CISM output log and extract some information. Args: file_path: absolute path to the log file Return: A dictionary created by the elements object corresponding to the results of the bit for bit testing
[ "Parse", "a", "CISM", "output", "log", "and", "extract", "some", "information", "." ]
python
train
umich-brcf-bioinf/Jacquard
jacquard/variant_caller_transforms/mutect.py
https://github.com/umich-brcf-bioinf/Jacquard/blob/83dd61dd2b5e4110468493beec7bc121e6cb3cd1/jacquard/variant_caller_transforms/mutect.py#L253-L278
def _get_new_column_header(self, vcf_reader): """Returns a standardized column header. MuTect sample headers include the name of input alignment, which is nice, but doesn't match up with the sample names reported in Strelka or VarScan. To fix this, we replace with NORMAL and TUMOR using the MuTect metadata command line to replace them correctly.""" mutect_dict = self._build_mutect_dict(vcf_reader.metaheaders) new_header_list = [] required_keys = set([self._NORMAL_SAMPLE_KEY, self._TUMOR_SAMPLE_KEY]) mutect_keys = set(mutect_dict.keys()) if not required_keys.issubset(mutect_keys): raise utils.JQException("Unable to determine normal " "and tumor sample ordering " "based on MuTect metaheader.") for field_name in vcf_reader.column_header.split("\t"): if field_name == mutect_dict[self._NORMAL_SAMPLE_KEY]: field_name = "NORMAL" elif field_name == mutect_dict[self._TUMOR_SAMPLE_KEY]: field_name = "TUMOR" new_header_list.append(field_name) return "\t".join(new_header_list)
[ "def", "_get_new_column_header", "(", "self", ",", "vcf_reader", ")", ":", "mutect_dict", "=", "self", ".", "_build_mutect_dict", "(", "vcf_reader", ".", "metaheaders", ")", "new_header_list", "=", "[", "]", "required_keys", "=", "set", "(", "[", "self", ".", "_NORMAL_SAMPLE_KEY", ",", "self", ".", "_TUMOR_SAMPLE_KEY", "]", ")", "mutect_keys", "=", "set", "(", "mutect_dict", ".", "keys", "(", ")", ")", "if", "not", "required_keys", ".", "issubset", "(", "mutect_keys", ")", ":", "raise", "utils", ".", "JQException", "(", "\"Unable to determine normal \"", "\"and tumor sample ordering \"", "\"based on MuTect metaheader.\"", ")", "for", "field_name", "in", "vcf_reader", ".", "column_header", ".", "split", "(", "\"\\t\"", ")", ":", "if", "field_name", "==", "mutect_dict", "[", "self", ".", "_NORMAL_SAMPLE_KEY", "]", ":", "field_name", "=", "\"NORMAL\"", "elif", "field_name", "==", "mutect_dict", "[", "self", ".", "_TUMOR_SAMPLE_KEY", "]", ":", "field_name", "=", "\"TUMOR\"", "new_header_list", ".", "append", "(", "field_name", ")", "return", "\"\\t\"", ".", "join", "(", "new_header_list", ")" ]
Returns a standardized column header. MuTect sample headers include the name of input alignment, which is nice, but doesn't match up with the sample names reported in Strelka or VarScan. To fix this, we replace with NORMAL and TUMOR using the MuTect metadata command line to replace them correctly.
[ "Returns", "a", "standardized", "column", "header", "." ]
python
test
dw/mitogen
mitogen/core.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L381-L388
def has_parent_authority(msg, _stream=None): """Policy function for use with :class:`Receiver` and :meth:`Router.add_handler` that requires incoming messages to originate from a parent context, or on a :class:`Stream` whose :attr:`auth_id <Stream.auth_id>` has been set to that of a parent context or the current context.""" return (msg.auth_id == mitogen.context_id or msg.auth_id in mitogen.parent_ids)
[ "def", "has_parent_authority", "(", "msg", ",", "_stream", "=", "None", ")", ":", "return", "(", "msg", ".", "auth_id", "==", "mitogen", ".", "context_id", "or", "msg", ".", "auth_id", "in", "mitogen", ".", "parent_ids", ")" ]
Policy function for use with :class:`Receiver` and :meth:`Router.add_handler` that requires incoming messages to originate from a parent context, or on a :class:`Stream` whose :attr:`auth_id <Stream.auth_id>` has been set to that of a parent context or the current context.
[ "Policy", "function", "for", "use", "with", ":", "class", ":", "Receiver", "and", ":", "meth", ":", "Router", ".", "add_handler", "that", "requires", "incoming", "messages", "to", "originate", "from", "a", "parent", "context", "or", "on", "a", ":", "class", ":", "Stream", "whose", ":", "attr", ":", "auth_id", "<Stream", ".", "auth_id", ">", "has", "been", "set", "to", "that", "of", "a", "parent", "context", "or", "the", "current", "context", "." ]
python
train
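A hedged sketch (not part of the dataset) of how a policy function like this is typically wired in: the handle number and handler body are made up, and it assumes a router obtained via the @mitogen.main() entry point.

    import mitogen
    import mitogen.core

    def on_message(msg):
        print('accepted message from a parent context:', msg)

    @mitogen.main()
    def main(router):
        # Only messages whose auth_id is the current context or a parent are delivered.
        router.add_handler(fn=on_message, handle=1234,
                           policy=mitogen.core.has_parent_authority)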
profitbricks/profitbricks-sdk-python
examples/pb_snapshotDatacenter.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/examples/pb_snapshotDatacenter.py#L393-L588
def main(argv=None): '''Parse command line options and dump a datacenter to snapshots and file.''' if argv is None: argv = sys.argv else: sys.argv.extend(argv) program_name = os.path.basename(sys.argv[0]) program_version = "v%s" % __version__ program_build_date = str(__updated__) program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date) program_shortdesc = __import__('__main__').__doc__.split("\n")[1] program_license = '''%s Created by J. Buchhammer on %s. Copyright 2016 ProfitBricks GmbH. All rights reserved. Licensed under the Apache License 2.0 http://www.apache.org/licenses/LICENSE-2.0 Distributed on an "AS IS" basis without warranties or conditions of any kind, either express or implied. USAGE ''' % (program_shortdesc, str(__date__)) try: # Setup argument parser parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter) parser.add_argument('-u', '--user', dest='user', help='the login name') parser.add_argument('-p', '--password', dest='password', help='the login password') parser.add_argument('-L', '--Login', dest='loginfile', default=None, help='the login file to use') parser.add_argument('-d', '--datacenterid', dest='dc_id', required=True, default=None, help='datacenter ID of the server') parser.add_argument('-o', '--outfile', dest='outfile', default='dc-def_'+datetime.now().strftime('%Y-%m-%d_%H%M%S'), help='the output file name') parser.add_argument('-S', '--Stopalways', dest='stopalways', action='store_true', help='power off even when VM is running') parser.add_argument('-v', '--verbose', dest="verbose", action="count", default=0, help="set verbosity level [default: %(default)s]") parser.add_argument('-V', '--version', action='version', version=program_version_message) # Process arguments args = parser.parse_args() global verbose verbose = args.verbose if verbose > 0: print("Verbose mode on") print("start {} with args {}".format(program_name, str(args))) outfile = args.outfile if outfile.endswith(".json"): outfile = os.path.splitext(outfile) print("Using output file base name '{}'".format(outfile)) (user, password) = getLogin(args.loginfile, args.user, args.password) if user is None or password is None: raise ValueError("user or password resolved to None") pbclient = ProfitBricksService(user, password) dc_id = args.dc_id # first get all server's VM and OS state to see if we can start srv_info = getServerInfo(pbclient, dc_id) srvon = 0 for server in srv_info: if server['vmstate'] != 'SHUTOFF': print("VM {} is in state {}, but should be SHUTOFF" .format(server['name'], server['vmstate'])) srvon += 1 # end for(srv_info) if srvon > 0 and not args.stopalways: print("shutdown running OS before trying again") return 1 # now power off all VMs before starting the snapshots for server in srv_info: controlServerState(pbclient, dc_id, server['id'], action='POWEROFF') # now let's go dcdef = pbclient.get_datacenter(dc_id, 5) print("starting dump of datacenter {}".format(dcdef['properties']['name'])) dcdef_file = outfile+'_source.json' print("write source dc to {}".format(dcdef_file)) write_dc_definition(dcdef, dcdef_file) print("get existing Snapshots") # first get existing snapshots known_snapshots = dict() snapshots = pbclient.list_snapshots() for snap in snapshots['items']: print("SNAP : {}".format(json.dumps(snap))) known_snapshots[snap['properties']['name']] = snap['id'] print("create Snapshots, this may take a while ..") # we do NOT consider dangling volumes, only server-attached ones vol_snapshots = dict() # map volume 
id==snapshot name snapshot id for server in dcdef['entities']['servers']['items']: print("- server {}".format(server['properties']['name'])) if 'volumes' not in server['entities']: print(" server {} has no volumes" .format(server['properties']['name'])) continue # The volumes are attached by order of creation # Thus we must sort them to keep the order in the clone print("setting volume order by deviceNumber") volumes = server['entities']['volumes']['items'] new_order = sorted(volumes, key=lambda vol: vol['properties']['deviceNumber']) server['entities']['volumes']['items'] = new_order for volume in server['entities']['volumes']['items']: vol_id = volume['id'] # this will be the name too if vol_id in known_snapshots: print("use existing snapshot {} of volume {}" .format(vol_id, volume['properties']['name'])) vol_snapshots[vol_id] = known_snapshots[vol_id] else: print("taking snapshot {} of volume {}" .format(vol_id, volume['properties']['name'])) response = pbclient.create_snapshot(dc_id, vol_id, vol_id, "auto-created by pb_snapshotDatacenter") # response has no request id, need to check metadata state (BUSY, AVAILABLE..) vol_snapshots[vol_id] = response['id'] print("snapshot in progress: {}".format(str(response))) # end for(volume) # end for(server) print("Waiting for snapshots to complete") snapdone = dict() while len(snapdone) != len(vol_snapshots): sleep(10) for snap_id in vol_snapshots.values(): print("looking for {}".format(snap_id)) if snap_id in snapdone: continue snapshot = pbclient.get_snapshot(snap_id) print("snapshot {} is in state {}" .format(snap_id, snapshot['metadata']['state'])) if snapshot['metadata']['state'] == 'AVAILABLE': snapdone[snap_id] = snapshot['metadata']['state'] # end for(vol_snapshots) # end while(snapdone) # now replace the volumes image IDs print("setting snapshot id to volumes") for server in dcdef['entities']['servers']['items']: print("- server {}".format(server['properties']['name'])) if 'volumes' not in server['entities']: print(" server {} has no volumes" .format(server['properties']['name'])) continue for volume in server['entities']['volumes']['items']: vol_id = volume['id'] # this will be the name too volume['properties']['image'] = vol_snapshots[vol_id] # end for(volume) # end for(server) # As it came out, the LAN id is rearranged by order of creation # Thus we must sort the LANs to keep the order in the clone print("setting LAN order by id") lans = dcdef['entities']['lans']['items'] new_order = sorted(lans, key=lambda lan: lan['id']) dcdef['entities']['lans']['items'] = new_order # now sort unordered NICs by MAC and save the dcdef # reason is, that NICs seem to be ordered by MAC, but API response # doesn't guarantee the order, which we need for re-creation print("setting NIC order by MAC") for server in dcdef['entities']['servers']['items']: print("- server {}".format(server['properties']['name'])) if 'nics' not in server['entities']: print(" server {} has no nics" .format(server['properties']['name'])) continue nics = server['entities']['nics']['items'] # print("NICs before {}".format(json.dumps(nics))) new_order = sorted(nics, key=lambda nic: nic['properties']['mac']) # print("NICs after {}".format(json.dumps(new_order))) server['entities']['nics']['items'] = new_order # end for(server) dcdef_file = outfile+'.json' print("write snapshot dc to {}".format(dcdef_file)) write_dc_definition(dcdef, dcdef_file) return 0 except KeyboardInterrupt: # handle keyboard interrupt return 0 except Exception: traceback.print_exc() sys.stderr.write("\n" + 
program_name + ": for help use --help\n") return 2
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "else", ":", "sys", ".", "argv", ".", "extend", "(", "argv", ")", "program_name", "=", "os", ".", "path", ".", "basename", "(", "sys", ".", "argv", "[", "0", "]", ")", "program_version", "=", "\"v%s\"", "%", "__version__", "program_build_date", "=", "str", "(", "__updated__", ")", "program_version_message", "=", "'%%(prog)s %s (%s)'", "%", "(", "program_version", ",", "program_build_date", ")", "program_shortdesc", "=", "__import__", "(", "'__main__'", ")", ".", "__doc__", ".", "split", "(", "\"\\n\"", ")", "[", "1", "]", "program_license", "=", "'''%s\n\n Created by J. Buchhammer on %s.\n Copyright 2016 ProfitBricks GmbH. All rights reserved.\n\n Licensed under the Apache License 2.0\n http://www.apache.org/licenses/LICENSE-2.0\n\n Distributed on an \"AS IS\" basis without warranties\n or conditions of any kind, either express or implied.\n\nUSAGE\n'''", "%", "(", "program_shortdesc", ",", "str", "(", "__date__", ")", ")", "try", ":", "# Setup argument parser", "parser", "=", "ArgumentParser", "(", "description", "=", "program_license", ",", "formatter_class", "=", "RawDescriptionHelpFormatter", ")", "parser", ".", "add_argument", "(", "'-u'", ",", "'--user'", ",", "dest", "=", "'user'", ",", "help", "=", "'the login name'", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--password'", ",", "dest", "=", "'password'", ",", "help", "=", "'the login password'", ")", "parser", ".", "add_argument", "(", "'-L'", ",", "'--Login'", ",", "dest", "=", "'loginfile'", ",", "default", "=", "None", ",", "help", "=", "'the login file to use'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--datacenterid'", ",", "dest", "=", "'dc_id'", ",", "required", "=", "True", ",", "default", "=", "None", ",", "help", "=", "'datacenter ID of the server'", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--outfile'", ",", "dest", "=", "'outfile'", ",", "default", "=", "'dc-def_'", "+", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d_%H%M%S'", ")", ",", "help", "=", "'the output file name'", ")", "parser", ".", "add_argument", "(", "'-S'", ",", "'--Stopalways'", ",", "dest", "=", "'stopalways'", ",", "action", "=", "'store_true'", ",", "help", "=", "'power off even when VM is running'", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "dest", "=", "\"verbose\"", ",", "action", "=", "\"count\"", ",", "default", "=", "0", ",", "help", "=", "\"set verbosity level [default: %(default)s]\"", ")", "parser", ".", "add_argument", "(", "'-V'", ",", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "program_version_message", ")", "# Process arguments", "args", "=", "parser", ".", "parse_args", "(", ")", "global", "verbose", "verbose", "=", "args", ".", "verbose", "if", "verbose", ">", "0", ":", "print", "(", "\"Verbose mode on\"", ")", "print", "(", "\"start {} with args {}\"", ".", "format", "(", "program_name", ",", "str", "(", "args", ")", ")", ")", "outfile", "=", "args", ".", "outfile", "if", "outfile", ".", "endswith", "(", "\".json\"", ")", ":", "outfile", "=", "os", ".", "path", ".", "splitext", "(", "outfile", ")", "print", "(", "\"Using output file base name '{}'\"", ".", "format", "(", "outfile", ")", ")", "(", "user", ",", "password", ")", "=", "getLogin", "(", "args", ".", "loginfile", ",", "args", ".", "user", ",", "args", ".", "password", ")", "if", "user", "is", "None", "or", "password", "is", "None", ":", 
"raise", "ValueError", "(", "\"user or password resolved to None\"", ")", "pbclient", "=", "ProfitBricksService", "(", "user", ",", "password", ")", "dc_id", "=", "args", ".", "dc_id", "# first get all server's VM and OS state to see if we can start", "srv_info", "=", "getServerInfo", "(", "pbclient", ",", "dc_id", ")", "srvon", "=", "0", "for", "server", "in", "srv_info", ":", "if", "server", "[", "'vmstate'", "]", "!=", "'SHUTOFF'", ":", "print", "(", "\"VM {} is in state {}, but should be SHUTOFF\"", ".", "format", "(", "server", "[", "'name'", "]", ",", "server", "[", "'vmstate'", "]", ")", ")", "srvon", "+=", "1", "# end for(srv_info)", "if", "srvon", ">", "0", "and", "not", "args", ".", "stopalways", ":", "print", "(", "\"shutdown running OS before trying again\"", ")", "return", "1", "# now power off all VMs before starting the snapshots", "for", "server", "in", "srv_info", ":", "controlServerState", "(", "pbclient", ",", "dc_id", ",", "server", "[", "'id'", "]", ",", "action", "=", "'POWEROFF'", ")", "# now let's go", "dcdef", "=", "pbclient", ".", "get_datacenter", "(", "dc_id", ",", "5", ")", "print", "(", "\"starting dump of datacenter {}\"", ".", "format", "(", "dcdef", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "dcdef_file", "=", "outfile", "+", "'_source.json'", "print", "(", "\"write source dc to {}\"", ".", "format", "(", "dcdef_file", ")", ")", "write_dc_definition", "(", "dcdef", ",", "dcdef_file", ")", "print", "(", "\"get existing Snapshots\"", ")", "# first get existing snapshots", "known_snapshots", "=", "dict", "(", ")", "snapshots", "=", "pbclient", ".", "list_snapshots", "(", ")", "for", "snap", "in", "snapshots", "[", "'items'", "]", ":", "print", "(", "\"SNAP : {}\"", ".", "format", "(", "json", ".", "dumps", "(", "snap", ")", ")", ")", "known_snapshots", "[", "snap", "[", "'properties'", "]", "[", "'name'", "]", "]", "=", "snap", "[", "'id'", "]", "print", "(", "\"create Snapshots, this may take a while ..\"", ")", "# we do NOT consider dangling volumes, only server-attached ones", "vol_snapshots", "=", "dict", "(", ")", "# map volume id==snapshot name snapshot id", "for", "server", "in", "dcdef", "[", "'entities'", "]", "[", "'servers'", "]", "[", "'items'", "]", ":", "print", "(", "\"- server {}\"", ".", "format", "(", "server", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "if", "'volumes'", "not", "in", "server", "[", "'entities'", "]", ":", "print", "(", "\" server {} has no volumes\"", ".", "format", "(", "server", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "continue", "# The volumes are attached by order of creation", "# Thus we must sort them to keep the order in the clone", "print", "(", "\"setting volume order by deviceNumber\"", ")", "volumes", "=", "server", "[", "'entities'", "]", "[", "'volumes'", "]", "[", "'items'", "]", "new_order", "=", "sorted", "(", "volumes", ",", "key", "=", "lambda", "vol", ":", "vol", "[", "'properties'", "]", "[", "'deviceNumber'", "]", ")", "server", "[", "'entities'", "]", "[", "'volumes'", "]", "[", "'items'", "]", "=", "new_order", "for", "volume", "in", "server", "[", "'entities'", "]", "[", "'volumes'", "]", "[", "'items'", "]", ":", "vol_id", "=", "volume", "[", "'id'", "]", "# this will be the name too", "if", "vol_id", "in", "known_snapshots", ":", "print", "(", "\"use existing snapshot {} of volume {}\"", ".", "format", "(", "vol_id", ",", "volume", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "vol_snapshots", "[", "vol_id", "]", "=", "known_snapshots", "[", 
"vol_id", "]", "else", ":", "print", "(", "\"taking snapshot {} of volume {}\"", ".", "format", "(", "vol_id", ",", "volume", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "response", "=", "pbclient", ".", "create_snapshot", "(", "dc_id", ",", "vol_id", ",", "vol_id", ",", "\"auto-created by pb_snapshotDatacenter\"", ")", "# response has no request id, need to check metadata state (BUSY, AVAILABLE..)", "vol_snapshots", "[", "vol_id", "]", "=", "response", "[", "'id'", "]", "print", "(", "\"snapshot in progress: {}\"", ".", "format", "(", "str", "(", "response", ")", ")", ")", "# end for(volume)", "# end for(server)", "print", "(", "\"Waiting for snapshots to complete\"", ")", "snapdone", "=", "dict", "(", ")", "while", "len", "(", "snapdone", ")", "!=", "len", "(", "vol_snapshots", ")", ":", "sleep", "(", "10", ")", "for", "snap_id", "in", "vol_snapshots", ".", "values", "(", ")", ":", "print", "(", "\"looking for {}\"", ".", "format", "(", "snap_id", ")", ")", "if", "snap_id", "in", "snapdone", ":", "continue", "snapshot", "=", "pbclient", ".", "get_snapshot", "(", "snap_id", ")", "print", "(", "\"snapshot {} is in state {}\"", ".", "format", "(", "snap_id", ",", "snapshot", "[", "'metadata'", "]", "[", "'state'", "]", ")", ")", "if", "snapshot", "[", "'metadata'", "]", "[", "'state'", "]", "==", "'AVAILABLE'", ":", "snapdone", "[", "snap_id", "]", "=", "snapshot", "[", "'metadata'", "]", "[", "'state'", "]", "# end for(vol_snapshots)", "# end while(snapdone)", "# now replace the volumes image IDs", "print", "(", "\"setting snapshot id to volumes\"", ")", "for", "server", "in", "dcdef", "[", "'entities'", "]", "[", "'servers'", "]", "[", "'items'", "]", ":", "print", "(", "\"- server {}\"", ".", "format", "(", "server", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "if", "'volumes'", "not", "in", "server", "[", "'entities'", "]", ":", "print", "(", "\" server {} has no volumes\"", ".", "format", "(", "server", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "continue", "for", "volume", "in", "server", "[", "'entities'", "]", "[", "'volumes'", "]", "[", "'items'", "]", ":", "vol_id", "=", "volume", "[", "'id'", "]", "# this will be the name too", "volume", "[", "'properties'", "]", "[", "'image'", "]", "=", "vol_snapshots", "[", "vol_id", "]", "# end for(volume)", "# end for(server)", "# As it came out, the LAN id is rearranged by order of creation", "# Thus we must sort the LANs to keep the order in the clone", "print", "(", "\"setting LAN order by id\"", ")", "lans", "=", "dcdef", "[", "'entities'", "]", "[", "'lans'", "]", "[", "'items'", "]", "new_order", "=", "sorted", "(", "lans", ",", "key", "=", "lambda", "lan", ":", "lan", "[", "'id'", "]", ")", "dcdef", "[", "'entities'", "]", "[", "'lans'", "]", "[", "'items'", "]", "=", "new_order", "# now sort unordered NICs by MAC and save the dcdef", "# reason is, that NICs seem to be ordered by MAC, but API response", "# doesn't guarantee the order, which we need for re-creation", "print", "(", "\"setting NIC order by MAC\"", ")", "for", "server", "in", "dcdef", "[", "'entities'", "]", "[", "'servers'", "]", "[", "'items'", "]", ":", "print", "(", "\"- server {}\"", ".", "format", "(", "server", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "if", "'nics'", "not", "in", "server", "[", "'entities'", "]", ":", "print", "(", "\" server {} has no nics\"", ".", "format", "(", "server", "[", "'properties'", "]", "[", "'name'", "]", ")", ")", "continue", "nics", "=", "server", "[", "'entities'", "]", "[", 
"'nics'", "]", "[", "'items'", "]", "# print(\"NICs before {}\".format(json.dumps(nics)))", "new_order", "=", "sorted", "(", "nics", ",", "key", "=", "lambda", "nic", ":", "nic", "[", "'properties'", "]", "[", "'mac'", "]", ")", "# print(\"NICs after {}\".format(json.dumps(new_order)))", "server", "[", "'entities'", "]", "[", "'nics'", "]", "[", "'items'", "]", "=", "new_order", "# end for(server)", "dcdef_file", "=", "outfile", "+", "'.json'", "print", "(", "\"write snapshot dc to {}\"", ".", "format", "(", "dcdef_file", ")", ")", "write_dc_definition", "(", "dcdef", ",", "dcdef_file", ")", "return", "0", "except", "KeyboardInterrupt", ":", "# handle keyboard interrupt", "return", "0", "except", "Exception", ":", "traceback", ".", "print_exc", "(", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\n\"", "+", "program_name", "+", "\": for help use --help\\n\"", ")", "return", "2" ]
Parse command line options and dump a datacenter to snapshots and file.
[ "Parse", "command", "line", "options", "and", "dump", "a", "datacenter", "to", "snapshots", "and", "file", "." ]
python
valid
gunthercox/ChatterBot
chatterbot/trainers.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/trainers.py#L66-L74
def export_for_training(self, file_path='./export.json'): """ Create a file from the database that can be used to train other chat bots. """ import json export = {'conversations': self._generate_export_data()} with open(file_path, 'w+') as jsonfile: json.dump(export, jsonfile, ensure_ascii=False)
[ "def", "export_for_training", "(", "self", ",", "file_path", "=", "'./export.json'", ")", ":", "import", "json", "export", "=", "{", "'conversations'", ":", "self", ".", "_generate_export_data", "(", ")", "}", "with", "open", "(", "file_path", ",", "'w+'", ")", "as", "jsonfile", ":", "json", ".", "dump", "(", "export", ",", "jsonfile", ",", "ensure_ascii", "=", "False", ")" ]
Create a file from the database that can be used to train other chat bots.
[ "Create", "a", "file", "from", "the", "database", "that", "can", "be", "used", "to", "train", "other", "chat", "bots", "." ]
python
train
ligyxy/DictMySQL
dictmysql.py
https://github.com/ligyxy/DictMySQL/blob/f40d649193ccf58d1c7933189be1042b37afbe31/dictmysql.py#L99-L117
def _value_parser(self, value, columnname=False, placeholder='%s'): """ Input: {'c1': 'v', 'c2': None, '#c3': 'uuid()'} Output: ('%s, %s, uuid()', [None, 'v']) # insert; columnname=False ('`c2` = %s, `c1` = %s, `c3` = uuid()', [None, 'v']) # update; columnname=True No need to transform NULL value since it's supported in execute() """ if not isinstance(value, dict): raise TypeError('Input value should be a dictionary') q = [] a = [] for k, v in value.items(): if k[0] == '#': # if is sql function q.append(' = '.join([self._backtick(k[1:]), str(v)]) if columnname else v) else: q.append(' = '.join([self._backtick(k), placeholder]) if columnname else placeholder) a.append(v) return ', '.join(q), tuple(a)
[ "def", "_value_parser", "(", "self", ",", "value", ",", "columnname", "=", "False", ",", "placeholder", "=", "'%s'", ")", ":", "if", "not", "isinstance", "(", "value", ",", "dict", ")", ":", "raise", "TypeError", "(", "'Input value should be a dictionary'", ")", "q", "=", "[", "]", "a", "=", "[", "]", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", ":", "if", "k", "[", "0", "]", "==", "'#'", ":", "# if is sql function", "q", ".", "append", "(", "' = '", ".", "join", "(", "[", "self", ".", "_backtick", "(", "k", "[", "1", ":", "]", ")", ",", "str", "(", "v", ")", "]", ")", "if", "columnname", "else", "v", ")", "else", ":", "q", ".", "append", "(", "' = '", ".", "join", "(", "[", "self", ".", "_backtick", "(", "k", ")", ",", "placeholder", "]", ")", "if", "columnname", "else", "placeholder", ")", "a", ".", "append", "(", "v", ")", "return", "', '", ".", "join", "(", "q", ")", ",", "tuple", "(", "a", ")" ]
Input: {'c1': 'v', 'c2': None, '#c3': 'uuid()'} Output: ('%s, %s, uuid()', [None, 'v']) # insert; columnname=False ('`c2` = %s, `c1` = %s, `c3` = uuid()', [None, 'v']) # update; columnname=True No need to transform NULL value since it's supported in execute()
[ "Input", ":", "{", "c1", ":", "v", "c2", ":", "None", "#c3", ":", "uuid", "()", "}", "Output", ":", "(", "%s", "%s", "uuid", "()", "[", "None", "v", "]", ")", "#", "insert", ";", "columnname", "=", "False", "(", "c2", "=", "%s", "c1", "=", "%s", "c3", "=", "uuid", "()", "[", "None", "v", "]", ")", "#", "update", ";", "columnname", "=", "True", "No", "need", "to", "transform", "NULL", "value", "since", "it", "s", "supported", "in", "execute", "()" ]
python
train
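A hedged sketch (not part of the dataset) of how a '#'-prefixed key reaches _value_parser through the public insert() call; the connection parameters and the table/column names are placeholders, and a reachable MySQL server is needed for it to actually run.

    from dictmysql import DictMySQL

    db = DictMySQL(db='test', host='127.0.0.1', user='root', passwd='')
    # '#created' is emitted as the raw SQL function NOW() instead of a %s placeholder.
    db.insert(table='users',
              value={'first_name': 'Ada', 'last_name': None, '#created': 'NOW()'})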
zyga/python-glibc
pyglibc/_signalfd.py
https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/pyglibc/_signalfd.py#L182-L200
def update(self, signals): """ Update the mask of signals this signalfd reacts to :param signals: A replacement set of signal numbers to monitor :raises ValueError: If :meth:`closed()` is True """ if self._sfd < 0: _err_closed() mask = sigset_t() sigemptyset(mask) if signals is not None: for signal in signals: sigaddset(mask, signal) # flags are ignored when sfd is not -1 _signalfd(self._sfd, mask, 0) self._signals = frozenset(signals)
[ "def", "update", "(", "self", ",", "signals", ")", ":", "if", "self", ".", "_sfd", "<", "0", ":", "_err_closed", "(", ")", "mask", "=", "sigset_t", "(", ")", "sigemptyset", "(", "mask", ")", "if", "signals", "is", "not", "None", ":", "for", "signal", "in", "signals", ":", "sigaddset", "(", "mask", ",", "signal", ")", "# flags are ignored when sfd is not -1", "_signalfd", "(", "self", ".", "_sfd", ",", "mask", ",", "0", ")", "self", ".", "_signals", "=", "frozenset", "(", "signals", ")" ]
Update the mask of signals this signalfd reacts to :param signals: A replacement set of signal numbers to monitor :raises ValueError: If :meth:`closed()` is True
[ "Update", "the", "mask", "of", "signals", "this", "signalfd", "reacts", "to" ]
python
train
saeschdivara/ArangoPy
arangodb/api.py
https://github.com/saeschdivara/ArangoPy/blob/b924cc57bed71520fc2ef528b917daeb98e10eca/arangodb/api.py#L145-L164
def remove(cls, name): """ Destroys the database. """ client = Client.instance() new_current_database = None if client.database != name: new_current_database = name # Deletions are only possible from the system database client.set_database(name=SYSTEM_DATABASE) api = client.api api.database(name).delete() if new_current_database: client.set_database(name=new_current_database)
[ "def", "remove", "(", "cls", ",", "name", ")", ":", "client", "=", "Client", ".", "instance", "(", ")", "new_current_database", "=", "None", "if", "client", ".", "database", "!=", "name", ":", "new_current_database", "=", "name", "# Deletions are only possible from the system database", "client", ".", "set_database", "(", "name", "=", "SYSTEM_DATABASE", ")", "api", "=", "client", ".", "api", "api", ".", "database", "(", "name", ")", ".", "delete", "(", ")", "if", "new_current_database", ":", "client", ".", "set_database", "(", "name", "=", "new_current_database", ")" ]
Destroys the database.
[ "Destroys", "the", "database", "." ]
python
train
coleifer/irc
irc.py
https://github.com/coleifer/irc/blob/f9d2bd6369aafe6cb0916c9406270ca8ecea2080/irc.py#L176-L185
def new_nick(self): """\ Generates a new nickname based on original nickname followed by a random number """ old = self.nick self.nick = '%s_%s' % (self.base_nick, random.randint(1, 1000)) self.logger.warn('Nick %s already taken, trying %s' % (old, self.nick)) self.register_nick() self.handle_nick_change(old, self.nick)
[ "def", "new_nick", "(", "self", ")", ":", "old", "=", "self", ".", "nick", "self", ".", "nick", "=", "'%s_%s'", "%", "(", "self", ".", "base_nick", ",", "random", ".", "randint", "(", "1", ",", "1000", ")", ")", "self", ".", "logger", ".", "warn", "(", "'Nick %s already taken, trying %s'", "%", "(", "old", ",", "self", ".", "nick", ")", ")", "self", ".", "register_nick", "(", ")", "self", ".", "handle_nick_change", "(", "old", ",", "self", ".", "nick", ")" ]
\ Generates a new nickname based on original nickname followed by a random number
[ "\\", "Generates", "a", "new", "nickname", "based", "on", "original", "nickname", "followed", "by", "a", "random", "number" ]
python
test
saltstack/salt
salt/modules/git.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L192-L211
def _find_ssh_exe(): ''' Windows only: search for Git's bundled ssh.exe in known locations ''' # Known locations for Git's ssh.exe in Windows globmasks = [os.path.join(os.getenv('SystemDrive'), os.sep, 'Program Files*', 'Git', 'usr', 'bin', 'ssh.exe'), os.path.join(os.getenv('SystemDrive'), os.sep, 'Program Files*', 'Git', 'bin', 'ssh.exe')] for globmask in globmasks: ssh_exe = glob.glob(globmask) if ssh_exe and os.path.isfile(ssh_exe[0]): ret = ssh_exe[0] break else: ret = None return ret
[ "def", "_find_ssh_exe", "(", ")", ":", "# Known locations for Git's ssh.exe in Windows", "globmasks", "=", "[", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'SystemDrive'", ")", ",", "os", ".", "sep", ",", "'Program Files*'", ",", "'Git'", ",", "'usr'", ",", "'bin'", ",", "'ssh.exe'", ")", ",", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'SystemDrive'", ")", ",", "os", ".", "sep", ",", "'Program Files*'", ",", "'Git'", ",", "'bin'", ",", "'ssh.exe'", ")", "]", "for", "globmask", "in", "globmasks", ":", "ssh_exe", "=", "glob", ".", "glob", "(", "globmask", ")", "if", "ssh_exe", "and", "os", ".", "path", ".", "isfile", "(", "ssh_exe", "[", "0", "]", ")", ":", "ret", "=", "ssh_exe", "[", "0", "]", "break", "else", ":", "ret", "=", "None", "return", "ret" ]
Windows only: search for Git's bundled ssh.exe in known locations
[ "Windows", "only", ":", "search", "for", "Git", "s", "bundled", "ssh", ".", "exe", "in", "known", "locations" ]
python
train
cloud-custodian/cloud-custodian
tools/sandbox/c7n_sphere11/c7n_sphere11/cli.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/sandbox/c7n_sphere11/c7n_sphere11/cli.py#L67-L71
def lock_status(account_id, resource_id, parent_id): """Show extant locks' status """ return output( Client(BASE_URL, account_id).lock_status(resource_id, parent_id))
[ "def", "lock_status", "(", "account_id", ",", "resource_id", ",", "parent_id", ")", ":", "return", "output", "(", "Client", "(", "BASE_URL", ",", "account_id", ")", ".", "lock_status", "(", "resource_id", ",", "parent_id", ")", ")" ]
Show extant locks' status
[ "Show", "extant", "locks", "status" ]
python
train
projectshift/shift-boiler
boiler/user/models.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L224-L232
def increment_failed_logins(self): """ Increment failed logins counter""" if not self.failed_logins: self.failed_logins = 1 elif not self.failed_login_limit_reached(): self.failed_logins += 1 else: self.reset_login_counter() self.lock_account(30)
[ "def", "increment_failed_logins", "(", "self", ")", ":", "if", "not", "self", ".", "failed_logins", ":", "self", ".", "failed_logins", "=", "1", "elif", "not", "self", ".", "failed_login_limit_reached", "(", ")", ":", "self", ".", "failed_logins", "+=", "1", "else", ":", "self", ".", "reset_login_counter", "(", ")", "self", ".", "lock_account", "(", "30", ")" ]
Increment failed logins counter
[ "Increment", "failed", "logins", "counter" ]
python
train
hydpy-dev/hydpy
hydpy/models/lland/lland_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_model.py#L972-L1049
def calc_qib2_v1(self): """Calculate the first inflow component released from the soil. Required control parameters: |NHRU| |Lnk| |NFk| |DMin| |DMax| Required derived parameter: |WZ| Required state sequence: |BoWa| Calculated flux sequence: |QIB2| Basic equation: :math:`QIB2 = (DMax-DMin) \\cdot (\\frac{BoWa-WZ}{NFk-WZ})^\\frac{3}{2}` Examples: For water and sealed areas, no interflow is calculated (the first three HRUs are of type |FLUSS|, |SEE|, and |VERS|, respectively). No principal distinction is made between the remaining land use classes (arable land |ACKER| has been selected for the last five HRUs arbitrarily): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(8) >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER, ACKER) >>> dmax(10.0) >>> dmin(4.0) >>> nfk(100.0, 100.0, 100.0, 50.0, 100.0, 100.0, 100.0, 200.0) >>> derived.wz(50.0) >>> states.bowa = 100.0, 100.0, 100.0, 50.1, 50.0, 75.0, 100.0, 100.0 Note the time dependence of parameters |DMin| (see the example above) and |DMax|: >>> dmax dmax(10.0) >>> dmax.values array([ 5., 5., 5., 5., 5., 5., 5., 5.]) The following results show that he calculation of |QIB2| both resembles those of |QBB| and |QIB1| in some regards: >>> model.calc_qib2_v1() >>> fluxes.qib2 qib2(0.0, 0.0, 0.0, 0.0, 0.0, 1.06066, 3.0, 0.57735) In the given example, the maximum rate of total interflow generation is 5 mm/12h (parameter |DMax|). For the seventh zone, which contains a saturated soil, the value calculated for the second interflow component (|QIB2|) is 3 mm/h. The "missing" value of 2 mm/12h is be calculated by method |calc_qib1_v1|. (The fourth zone, which is slightly oversaturated, is only intended to demonstrate that zero division due to |NFk| = |WZ| is circumvented.) """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nhru): if ((con.lnk[k] in (VERS, WASSER, FLUSS, SEE)) or (sta.bowa[k] <= der.wz[k]) or (con.nfk[k] <= der.wz[k])): flu.qib2[k] = 0. else: flu.qib2[k] = ((con.dmax[k]-con.dmin[k]) * ((sta.bowa[k]-der.wz[k]) / (con.nfk[k]-der.wz[k]))**1.5)
[ "def", "calc_qib2_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "sta", "=", "self", ".", "sequences", ".", "states", ".", "fastaccess", "for", "k", "in", "range", "(", "con", ".", "nhru", ")", ":", "if", "(", "(", "con", ".", "lnk", "[", "k", "]", "in", "(", "VERS", ",", "WASSER", ",", "FLUSS", ",", "SEE", ")", ")", "or", "(", "sta", ".", "bowa", "[", "k", "]", "<=", "der", ".", "wz", "[", "k", "]", ")", "or", "(", "con", ".", "nfk", "[", "k", "]", "<=", "der", ".", "wz", "[", "k", "]", ")", ")", ":", "flu", ".", "qib2", "[", "k", "]", "=", "0.", "else", ":", "flu", ".", "qib2", "[", "k", "]", "=", "(", "(", "con", ".", "dmax", "[", "k", "]", "-", "con", ".", "dmin", "[", "k", "]", ")", "*", "(", "(", "sta", ".", "bowa", "[", "k", "]", "-", "der", ".", "wz", "[", "k", "]", ")", "/", "(", "con", ".", "nfk", "[", "k", "]", "-", "der", ".", "wz", "[", "k", "]", ")", ")", "**", "1.5", ")" ]
Calculate the first inflow component released from the soil. Required control parameters: |NHRU| |Lnk| |NFk| |DMin| |DMax| Required derived parameter: |WZ| Required state sequence: |BoWa| Calculated flux sequence: |QIB2| Basic equation: :math:`QIB2 = (DMax-DMin) \\cdot (\\frac{BoWa-WZ}{NFk-WZ})^\\frac{3}{2}` Examples: For water and sealed areas, no interflow is calculated (the first three HRUs are of type |FLUSS|, |SEE|, and |VERS|, respectively). No principal distinction is made between the remaining land use classes (arable land |ACKER| has been selected for the last five HRUs arbitrarily): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nhru(8) >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER, ACKER) >>> dmax(10.0) >>> dmin(4.0) >>> nfk(100.0, 100.0, 100.0, 50.0, 100.0, 100.0, 100.0, 200.0) >>> derived.wz(50.0) >>> states.bowa = 100.0, 100.0, 100.0, 50.1, 50.0, 75.0, 100.0, 100.0 Note the time dependence of parameters |DMin| (see the example above) and |DMax|: >>> dmax dmax(10.0) >>> dmax.values array([ 5., 5., 5., 5., 5., 5., 5., 5.]) The following results show that he calculation of |QIB2| both resembles those of |QBB| and |QIB1| in some regards: >>> model.calc_qib2_v1() >>> fluxes.qib2 qib2(0.0, 0.0, 0.0, 0.0, 0.0, 1.06066, 3.0, 0.57735) In the given example, the maximum rate of total interflow generation is 5 mm/12h (parameter |DMax|). For the seventh zone, which contains a saturated soil, the value calculated for the second interflow component (|QIB2|) is 3 mm/h. The "missing" value of 2 mm/12h is be calculated by method |calc_qib1_v1|. (The fourth zone, which is slightly oversaturated, is only intended to demonstrate that zero division due to |NFk| = |WZ| is circumvented.)
[ "Calculate", "the", "first", "inflow", "component", "released", "from", "the", "soil", "." ]
python
train
jasonrbriggs/stomp.py
stomp/connect.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/connect.py#L176-L185
def disconnect(self, receipt=None, headers=None, **keyword_headers): """ Call the protocol disconnection, and then stop the transport itself. :param str receipt: the receipt to use with the disconnect :param dict headers: a map of any additional headers to send with the disconnection :param keyword_headers: any additional headers to send with the disconnection """ Protocol11.disconnect(self, receipt, headers, **keyword_headers) self.transport.stop()
[ "def", "disconnect", "(", "self", ",", "receipt", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "keyword_headers", ")", ":", "Protocol11", ".", "disconnect", "(", "self", ",", "receipt", ",", "headers", ",", "*", "*", "keyword_headers", ")", "self", ".", "transport", ".", "stop", "(", ")" ]
Call the protocol disconnection, and then stop the transport itself. :param str receipt: the receipt to use with the disconnect :param dict headers: a map of any additional headers to send with the disconnection :param keyword_headers: any additional headers to send with the disconnection
[ "Call", "the", "protocol", "disconnection", "and", "then", "stop", "the", "transport", "itself", "." ]
python
train
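A hedged round-trip sketch (not part of the dataset): broker address, credentials, queue name, and receipt id are placeholders, and a recent stomp.py 4.x/5.x release is assumed (older releases also required conn.start() before connect()).

    import stomp

    conn = stomp.Connection([('localhost', 61613)])
    conn.connect('admin', 'password', wait=True)
    conn.send(destination='/queue/test', body='hello')
    conn.disconnect(receipt='session-done')  # sends DISCONNECT, then stops the transport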
linkedin/Zopkio
zopkio/adhoc_deployer.py
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L396-L406
def get_host(self, unique_id): """Gets the host of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of SOME_SENTINAL_VALUE :Parameter unique_id: the name of the process :raises NameError if the name is not valid process """ if unique_id in self.processes: return self.processes[unique_id].hostname logger.error("{0} not a known process".format(unique_id)) raise NameError("{0} not a known process".format(unique_id))
[ "def", "get_host", "(", "self", ",", "unique_id", ")", ":", "if", "unique_id", "in", "self", ".", "processes", ":", "return", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "logger", ".", "error", "(", "\"{0} not a known process\"", ".", "format", "(", "unique_id", ")", ")", "raise", "NameError", "(", "\"{0} not a known process\"", ".", "format", "(", "unique_id", ")", ")" ]
Gets the host of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of SOME_SENTINAL_VALUE :Parameter unique_id: the name of the process :raises NameError if the name is not valid process
[ "Gets", "the", "host", "of", "the", "process", "with", "unique_id", ".", "If", "the", "deployer", "does", "not", "know", "of", "a", "process", "with", "unique_id", "then", "it", "should", "return", "a", "value", "of", "SOME_SENTINAL_VALUE" ]
python
train
adaptive-learning/proso-apps
proso_user/views_classes.py
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views_classes.py#L181-L222
def login_student(request): """ Log in student POST parameters (JSON): student: profile id of the student """ if not get_config('proso_user', 'allow_login_students', default=False): return render_json(request, { 'error': _('Log in as student is not allowed.'), 'error_type': 'login_student_not_allowed' }, template='class_create_student.html', help_text=login_student.__doc__, status=403) if request.method == 'GET': return render(request, 'class_login_student.html', {}, help_text=login_student.__doc__) elif request.method == 'POST': if not request.user.is_authenticated() or not hasattr(request.user, "userprofile"): return render_json(request, { 'error': _('User is not logged in.'), 'error_type': 'user_unauthorized' }, template='class_create_student.html', status=401) data = json_body(request.body.decode("utf-8")) try: student = User.objects.get(userprofile=data.get('student'), userprofile__classes__owner=request.user.userprofile) except User.DoesNotExist: return render_json(request, { 'error': _('Student not found'), 'error_type': 'student_not_found' }, template='class_login_student.html', status=401) if not student.is_active: return render_json(request, { 'error': _('The account has not been activated.'), 'error_type': 'account_not_activated' }, template='class_login_student.html', status=401) student.backend = 'django.contrib.auth.backends.ModelBackend' login(request, student) request.method = "GET" return profile(request) else: return HttpResponseBadRequest("method %s is not allowed".format(request.method))
[ "def", "login_student", "(", "request", ")", ":", "if", "not", "get_config", "(", "'proso_user'", ",", "'allow_login_students'", ",", "default", "=", "False", ")", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'Log in as student is not allowed.'", ")", ",", "'error_type'", ":", "'login_student_not_allowed'", "}", ",", "template", "=", "'class_create_student.html'", ",", "help_text", "=", "login_student", ".", "__doc__", ",", "status", "=", "403", ")", "if", "request", ".", "method", "==", "'GET'", ":", "return", "render", "(", "request", ",", "'class_login_student.html'", ",", "{", "}", ",", "help_text", "=", "login_student", ".", "__doc__", ")", "elif", "request", ".", "method", "==", "'POST'", ":", "if", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", "or", "not", "hasattr", "(", "request", ".", "user", ",", "\"userprofile\"", ")", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'User is not logged in.'", ")", ",", "'error_type'", ":", "'user_unauthorized'", "}", ",", "template", "=", "'class_create_student.html'", ",", "status", "=", "401", ")", "data", "=", "json_body", "(", "request", ".", "body", ".", "decode", "(", "\"utf-8\"", ")", ")", "try", ":", "student", "=", "User", ".", "objects", ".", "get", "(", "userprofile", "=", "data", ".", "get", "(", "'student'", ")", ",", "userprofile__classes__owner", "=", "request", ".", "user", ".", "userprofile", ")", "except", "User", ".", "DoesNotExist", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'Student not found'", ")", ",", "'error_type'", ":", "'student_not_found'", "}", ",", "template", "=", "'class_login_student.html'", ",", "status", "=", "401", ")", "if", "not", "student", ".", "is_active", ":", "return", "render_json", "(", "request", ",", "{", "'error'", ":", "_", "(", "'The account has not been activated.'", ")", ",", "'error_type'", ":", "'account_not_activated'", "}", ",", "template", "=", "'class_login_student.html'", ",", "status", "=", "401", ")", "student", ".", "backend", "=", "'django.contrib.auth.backends.ModelBackend'", "login", "(", "request", ",", "student", ")", "request", ".", "method", "=", "\"GET\"", "return", "profile", "(", "request", ")", "else", ":", "return", "HttpResponseBadRequest", "(", "\"method %s is not allowed\"", ".", "format", "(", "request", ".", "method", ")", ")" ]
Log in student POST parameters (JSON): student: profile id of the student
[ "Log", "in", "student" ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/pip/req/req_set.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/req/req_set.py#L205-L253
def add_requirement(self, install_req, parent_req_name=None): """Add install_req as a requirement to install. :param parent_req_name: The name of the requirement that needed this added. The name is used because when multiple unnamed requirements resolve to the same name, we could otherwise end up with dependency links that point outside the Requirements set. parent_req must already be added. Note that None implies that this is a user supplied requirement, vs an inferred one. :return: Additional requirements to scan. That is either [] if the requirement is not applicable, or [install_req] if the requirement is applicable and has just been added. """ name = install_req.name if not install_req.match_markers(): logger.warning("Ignoring %s: markers %r don't match your " "environment", install_req.name, install_req.markers) return [] install_req.as_egg = self.as_egg install_req.use_user_site = self.use_user_site install_req.target_dir = self.target_dir install_req.pycompile = self.pycompile if not name: # url or path requirement w/o an egg fragment self.unnamed_requirements.append(install_req) return [install_req] else: if parent_req_name is None and self.has_requirement(name): raise InstallationError( 'Double requirement given: %s (already in %s, name=%r)' % (install_req, self.get_requirement(name), name)) if not self.has_requirement(name): # Add requirement self.requirements[name] = install_req # FIXME: what about other normalizations? E.g., _ vs. -? if name.lower() != name: self.requirement_aliases[name.lower()] = name result = [install_req] else: # Canonicalise to the already-added object install_req = self.get_requirement(name) # No need to scan, this is a duplicate requirement. result = [] if parent_req_name: parent_req = self.get_requirement(parent_req_name) self._dependencies[parent_req].append(install_req) return result
[ "def", "add_requirement", "(", "self", ",", "install_req", ",", "parent_req_name", "=", "None", ")", ":", "name", "=", "install_req", ".", "name", "if", "not", "install_req", ".", "match_markers", "(", ")", ":", "logger", ".", "warning", "(", "\"Ignoring %s: markers %r don't match your \"", "\"environment\"", ",", "install_req", ".", "name", ",", "install_req", ".", "markers", ")", "return", "[", "]", "install_req", ".", "as_egg", "=", "self", ".", "as_egg", "install_req", ".", "use_user_site", "=", "self", ".", "use_user_site", "install_req", ".", "target_dir", "=", "self", ".", "target_dir", "install_req", ".", "pycompile", "=", "self", ".", "pycompile", "if", "not", "name", ":", "# url or path requirement w/o an egg fragment", "self", ".", "unnamed_requirements", ".", "append", "(", "install_req", ")", "return", "[", "install_req", "]", "else", ":", "if", "parent_req_name", "is", "None", "and", "self", ".", "has_requirement", "(", "name", ")", ":", "raise", "InstallationError", "(", "'Double requirement given: %s (already in %s, name=%r)'", "%", "(", "install_req", ",", "self", ".", "get_requirement", "(", "name", ")", ",", "name", ")", ")", "if", "not", "self", ".", "has_requirement", "(", "name", ")", ":", "# Add requirement", "self", ".", "requirements", "[", "name", "]", "=", "install_req", "# FIXME: what about other normalizations? E.g., _ vs. -?", "if", "name", ".", "lower", "(", ")", "!=", "name", ":", "self", ".", "requirement_aliases", "[", "name", ".", "lower", "(", ")", "]", "=", "name", "result", "=", "[", "install_req", "]", "else", ":", "# Canonicalise to the already-added object", "install_req", "=", "self", ".", "get_requirement", "(", "name", ")", "# No need to scan, this is a duplicate requirement.", "result", "=", "[", "]", "if", "parent_req_name", ":", "parent_req", "=", "self", ".", "get_requirement", "(", "parent_req_name", ")", "self", ".", "_dependencies", "[", "parent_req", "]", ".", "append", "(", "install_req", ")", "return", "result" ]
Add install_req as a requirement to install. :param parent_req_name: The name of the requirement that needed this added. The name is used because when multiple unnamed requirements resolve to the same name, we could otherwise end up with dependency links that point outside the Requirements set. parent_req must already be added. Note that None implies that this is a user supplied requirement, vs an inferred one. :return: Additional requirements to scan. That is either [] if the requirement is not applicable, or [install_req] if the requirement is applicable and has just been added.
[ "Add", "install_req", "as", "a", "requirement", "to", "install", "." ]
python
test
clalancette/pycdlib
pycdlib/eltorito.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/eltorito.py#L306-L339
def parse(self, valstr): # type: (bytes) -> None ''' A method to parse an El Torito Entry out of a string. Parameters: valstr - The string to parse the El Torito Entry out of. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('El Torito Entry already initialized') (self.boot_indicator, self.boot_media_type, self.load_segment, self.system_type, unused1, self.sector_count, self.load_rba, self.selection_criteria_type, self.selection_criteria) = struct.unpack_from(self.FMT, valstr, 0) if self.boot_indicator not in (0x88, 0x00): raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito initial entry boot indicator') if self.boot_media_type > 4: raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito boot media type') # FIXME: check that the system type matches the partition table if unused1 != 0: raise pycdlibexception.PyCdlibInvalidISO('El Torito unused field must be 0') # According to the specification, the El Torito unused end field (bytes # 0xc - 0x1f, unused2 field) should be all zero. However, we have found # ISOs in the wild where that is not the case, so skip that particular # check here. self._initialized = True
[ "def", "parse", "(", "self", ",", "valstr", ")", ":", "# type: (bytes) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'El Torito Entry already initialized'", ")", "(", "self", ".", "boot_indicator", ",", "self", ".", "boot_media_type", ",", "self", ".", "load_segment", ",", "self", ".", "system_type", ",", "unused1", ",", "self", ".", "sector_count", ",", "self", ".", "load_rba", ",", "self", ".", "selection_criteria_type", ",", "self", ".", "selection_criteria", ")", "=", "struct", ".", "unpack_from", "(", "self", ".", "FMT", ",", "valstr", ",", "0", ")", "if", "self", ".", "boot_indicator", "not", "in", "(", "0x88", ",", "0x00", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Invalid El Torito initial entry boot indicator'", ")", "if", "self", ".", "boot_media_type", ">", "4", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Invalid El Torito boot media type'", ")", "# FIXME: check that the system type matches the partition table", "if", "unused1", "!=", "0", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'El Torito unused field must be 0'", ")", "# According to the specification, the El Torito unused end field (bytes", "# 0xc - 0x1f, unused2 field) should be all zero. However, we have found", "# ISOs in the wild where that is not the case, so skip that particular", "# check here.", "self", ".", "_initialized", "=", "True" ]
A method to parse an El Torito Entry out of a string. Parameters: valstr - The string to parse the El Torito Entry out of. Returns: Nothing.
[ "A", "method", "to", "parse", "an", "El", "Torito", "Entry", "out", "of", "a", "string", "." ]
python
train
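The El Torito entry above is decoded with struct.unpack_from against the class's FMT string, which is not reproduced in this record; the pattern itself looks like this minimal sketch, where the format and payload are made up for illustration only.

import struct

FMT = '<BBH'                                   # hypothetical format: two bytes, one uint16
payload = struct.pack(FMT, 0x88, 0x00, 4)      # build a small buffer to parse back
boot_indicator, media_type, load_segment = struct.unpack_from(FMT, payload, 0)
print(hex(boot_indicator), media_type, load_segment)   # 0x88 0 4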
CodersOfTheNight/oshino
oshino/agents/__init__.py
https://github.com/CodersOfTheNight/oshino/blob/00f7e151e3ce1f3a7f43b353b695c4dba83c7f28/oshino/agents/__init__.py#L66-L78
def ready(self): """ Function used when agent is `lazy`. It is being processed only when `ready` condition is satisfied """ logger = self.get_logger() now = current_ts() logger.trace("Current time: {0}".format(now)) logger.trace("Last Run: {0}".format(self._last_run)) delta = (now - self._last_run) logger.trace("Delta: {0}, Interval: {1}" .format(delta, self.interval * 1000)) return delta > self.interval * 1000
[ "def", "ready", "(", "self", ")", ":", "logger", "=", "self", ".", "get_logger", "(", ")", "now", "=", "current_ts", "(", ")", "logger", ".", "trace", "(", "\"Current time: {0}\"", ".", "format", "(", "now", ")", ")", "logger", ".", "trace", "(", "\"Last Run: {0}\"", ".", "format", "(", "self", ".", "_last_run", ")", ")", "delta", "=", "(", "now", "-", "self", ".", "_last_run", ")", "logger", ".", "trace", "(", "\"Delta: {0}, Interval: {1}\"", ".", "format", "(", "delta", ",", "self", ".", "interval", "*", "1000", ")", ")", "return", "delta", ">", "self", ".", "interval", "*", "1000" ]
Function used when agent is `lazy`. It is being processed only when `ready` condition is satisfied
[ "Function", "used", "when", "agent", "is", "lazy", ".", "It", "is", "being", "processed", "only", "when", "ready", "condition", "is", "satisfied" ]
python
train
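The ready() check above is a plain elapsed-time gate; a self-contained sketch of the same idea follows (Agent, get_logger and current_ts are the project's own helpers, so time.time() in milliseconds stands in for them here).

import time

class LazyAgent:
    def __init__(self, interval_s):
        self.interval = interval_s            # configured interval in seconds
        self._last_run = 0                    # epoch milliseconds of the last run

    def ready(self):
        now = int(time.time() * 1000)         # current epoch milliseconds
        return (now - self._last_run) > self.interval * 1000

    def mark_run(self):
        self._last_run = int(time.time() * 1000)

agent = LazyAgent(interval_s=5)
print(agent.ready())    # True: nothing has run yet, so the delta is huge
agent.mark_run()
print(agent.ready())    # False: the 5 s interval has not elapsed yet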
bjodah/pycompilation
pycompilation/compilation.py
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/compilation.py#L617-L673
def compile_link_import_py_ext( srcs, extname=None, build_dir=None, compile_kwargs=None, link_kwargs=None, **kwargs): """ Compiles sources in `srcs` to a shared object (python extension) which is imported. If shared object is newer than the sources, they are not recompiled but instead it is imported. Parameters ---------- srcs: string list of paths to sources extname: string name of extension (default: None) (taken from the last file in `srcs` - without extension) build_dir: string path to directory in which objects files etc. are generated compile_kwargs: dict keyword arguments passed to compile_sources link_kwargs: dict keyword arguments passed to link_py_so **kwargs: additional keyword arguments overwrites to both compile_kwargs and link_kwargs useful for convenience e.g. when passing logger Returns ------- the imported module Examples -------- >>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\ 'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP >>> Aprim = mod.fft(A) # doctest: +SKIP """ build_dir = build_dir or '.' if extname is None: extname = os.path.splitext(os.path.basename(srcs[-1]))[0] compile_kwargs = compile_kwargs or {} compile_kwargs.update(kwargs) link_kwargs = link_kwargs or {} link_kwargs.update(kwargs) try: mod = import_module_from_file(os.path.join(build_dir, extname), srcs) except ImportError: objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir, cwd=build_dir, **compile_kwargs) so = link_py_so( objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs), **link_kwargs) mod = import_module_from_file(so) return mod
[ "def", "compile_link_import_py_ext", "(", "srcs", ",", "extname", "=", "None", ",", "build_dir", "=", "None", ",", "compile_kwargs", "=", "None", ",", "link_kwargs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "build_dir", "=", "build_dir", "or", "'.'", "if", "extname", "is", "None", ":", "extname", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "srcs", "[", "-", "1", "]", ")", ")", "[", "0", "]", "compile_kwargs", "=", "compile_kwargs", "or", "{", "}", "compile_kwargs", ".", "update", "(", "kwargs", ")", "link_kwargs", "=", "link_kwargs", "or", "{", "}", "link_kwargs", ".", "update", "(", "kwargs", ")", "try", ":", "mod", "=", "import_module_from_file", "(", "os", ".", "path", ".", "join", "(", "build_dir", ",", "extname", ")", ",", "srcs", ")", "except", "ImportError", ":", "objs", "=", "compile_sources", "(", "list", "(", "map", "(", "get_abspath", ",", "srcs", ")", ")", ",", "destdir", "=", "build_dir", ",", "cwd", "=", "build_dir", ",", "*", "*", "compile_kwargs", ")", "so", "=", "link_py_so", "(", "objs", ",", "cwd", "=", "build_dir", ",", "fort", "=", "any_fort", "(", "srcs", ")", ",", "cplus", "=", "any_cplus", "(", "srcs", ")", ",", "*", "*", "link_kwargs", ")", "mod", "=", "import_module_from_file", "(", "so", ")", "return", "mod" ]
Compiles sources in `srcs` to a shared object (python extension) which is imported. If shared object is newer than the sources, they are not recompiled but instead it is imported. Parameters ---------- srcs: string list of paths to sources extname: string name of extension (default: None) (taken from the last file in `srcs` - without extension) build_dir: string path to directory in which objects files etc. are generated compile_kwargs: dict keyword arguments passed to compile_sources link_kwargs: dict keyword arguments passed to link_py_so **kwargs: additional keyword arguments overwrites to both compile_kwargs and link_kwargs useful for convenience e.g. when passing logger Returns ------- the imported module Examples -------- >>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\ 'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP >>> Aprim = mod.fft(A) # doctest: +SKIP
[ "Compiles", "sources", "in", "srcs", "to", "a", "shared", "object", "(", "python", "extension", ")", "which", "is", "imported", ".", "If", "shared", "object", "is", "newer", "than", "the", "sources", "they", "are", "not", "recompiled", "but", "instead", "it", "is", "imported", "." ]
python
train
swharden/SWHLab
swhlab/core.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/core.py#L445-L449
def output_touch(self): """ensure the ./swhlab/ folder exists.""" if not os.path.exists(self.outFolder): self.log.debug("creating %s",self.outFolder) os.mkdir(self.outFolder)
[ "def", "output_touch", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "outFolder", ")", ":", "self", ".", "log", ".", "debug", "(", "\"creating %s\"", ",", "self", ".", "outFolder", ")", "os", ".", "mkdir", "(", "self", ".", "outFolder", ")" ]
ensure the ./swhlab/ folder exists.
[ "ensure", "the", ".", "/", "swhlab", "/", "folder", "exists", "." ]
python
valid
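The same ensure-the-folder-exists idiom can be written without the explicit existence check by using os.makedirs with exist_ok; the folder name below is hypothetical.

import os

out_folder = "./swhlab"                   # hypothetical output folder
os.makedirs(out_folder, exist_ok=True)    # creates it, or silently does nothing if present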
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_dai.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_dai.py#L68-L88
def arp_access_list_permit_permit_list_mac_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") arp = ET.SubElement(config, "arp", xmlns="urn:brocade.com:mgmt:brocade-dai") access_list = ET.SubElement(arp, "access-list") acl_name_key = ET.SubElement(access_list, "acl-name") acl_name_key.text = kwargs.pop('acl_name') permit = ET.SubElement(access_list, "permit") permit_list = ET.SubElement(permit, "permit-list") ip_type_key = ET.SubElement(permit_list, "ip-type") ip_type_key.text = kwargs.pop('ip_type') host_ip_key = ET.SubElement(permit_list, "host-ip") host_ip_key.text = kwargs.pop('host_ip') host_mac_key = ET.SubElement(permit_list, "host-mac") host_mac_key.text = kwargs.pop('host_mac') mac_type = ET.SubElement(permit_list, "mac-type") mac_type.text = kwargs.pop('mac_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "arp_access_list_permit_permit_list_mac_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "arp", "=", "ET", ".", "SubElement", "(", "config", ",", "\"arp\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-dai\"", ")", "access_list", "=", "ET", ".", "SubElement", "(", "arp", ",", "\"access-list\"", ")", "acl_name_key", "=", "ET", ".", "SubElement", "(", "access_list", ",", "\"acl-name\"", ")", "acl_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'acl_name'", ")", "permit", "=", "ET", ".", "SubElement", "(", "access_list", ",", "\"permit\"", ")", "permit_list", "=", "ET", ".", "SubElement", "(", "permit", ",", "\"permit-list\"", ")", "ip_type_key", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"ip-type\"", ")", "ip_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'ip_type'", ")", "host_ip_key", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"host-ip\"", ")", "host_ip_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'host_ip'", ")", "host_mac_key", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"host-mac\"", ")", "host_mac_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'host_mac'", ")", "mac_type", "=", "ET", ".", "SubElement", "(", "permit_list", ",", "\"mac-type\"", ")", "mac_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
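The generated method is a straight xml.etree.ElementTree build-up; a trimmed standalone sketch of the same pattern follows, with element names copied from the code above and invented values.

import xml.etree.ElementTree as ET

config = ET.Element("config")
arp = ET.SubElement(config, "arp", xmlns="urn:brocade.com:mgmt:brocade-dai")
access_list = ET.SubElement(arp, "access-list")
ET.SubElement(access_list, "acl-name").text = "ACL1"                 # invented value
permit_list = ET.SubElement(ET.SubElement(access_list, "permit"), "permit-list")
ET.SubElement(permit_list, "mac-type").text = "0000.1111.2222"       # invented value
print(ET.tostring(config).decode())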
bukun/TorCMS
torcms/model/post_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_model.py#L384-L397
def query_by_tag(cat_id, kind='1'): ''' Query recent posts of catalog. ''' return TabPost.select().join( TabPost2Tag, on=(TabPost.uid == TabPost2Tag.post_id) ).where( (TabPost.kind == kind) & (TabPost2Tag.tag_id == cat_id) ).order_by( TabPost.time_create.desc() )
[ "def", "query_by_tag", "(", "cat_id", ",", "kind", "=", "'1'", ")", ":", "return", "TabPost", ".", "select", "(", ")", ".", "join", "(", "TabPost2Tag", ",", "on", "=", "(", "TabPost", ".", "uid", "==", "TabPost2Tag", ".", "post_id", ")", ")", ".", "where", "(", "(", "TabPost", ".", "kind", "==", "kind", ")", "&", "(", "TabPost2Tag", ".", "tag_id", "==", "cat_id", ")", ")", ".", "order_by", "(", "TabPost", ".", "time_create", ".", "desc", "(", ")", ")" ]
Query recent posts of catalog.
[ "Query", "recent", "posts", "of", "catalog", "." ]
python
train
tensorflow/cleverhans
cleverhans/attacks/max_confidence.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/max_confidence.py#L112-L121
def attack_class(self, x, target_y): """ Run the attack on a specific target class. :param x: tf Tensor. The input example. :param target_y: tf Tensor. The attacker's desired target class. Returns: A targeted adversarial example, intended to be classified as the target class. """ adv = self.base_attacker.generate(x, y_target=target_y, **self.params) return adv
[ "def", "attack_class", "(", "self", ",", "x", ",", "target_y", ")", ":", "adv", "=", "self", ".", "base_attacker", ".", "generate", "(", "x", ",", "y_target", "=", "target_y", ",", "*", "*", "self", ".", "params", ")", "return", "adv" ]
Run the attack on a specific target class. :param x: tf Tensor. The input example. :param target_y: tf Tensor. The attacker's desired target class. Returns: A targeted adversarial example, intended to be classified as the target class.
[ "Run", "the", "attack", "on", "a", "specific", "target", "class", ".", ":", "param", "x", ":", "tf", "Tensor", ".", "The", "input", "example", ".", ":", "param", "target_y", ":", "tf", "Tensor", ".", "The", "attacker", "s", "desired", "target", "class", ".", "Returns", ":", "A", "targeted", "adversarial", "example", "intended", "to", "be", "classified", "as", "the", "target", "class", "." ]
python
train
PyAr/fades
fades/helpers.py
https://github.com/PyAr/fades/blob/e5ea457b09b105f321d4f81772f25e8695159604/fades/helpers.py#L245-L258
def check_pypi_exists(dependencies): """Check if the indicated dependencies actually exists in pypi.""" for dependency in dependencies.get('pypi', []): logger.debug("Checking if %r exists in PyPI", dependency) try: exists = _pypi_head_package(dependency) except Exception as error: logger.error("Error checking %s in PyPI: %r", dependency, error) raise FadesError("Could not check if dependency exists in PyPI") else: if not exists: logger.error("%s doesn't exists in PyPI.", dependency) return False return True
[ "def", "check_pypi_exists", "(", "dependencies", ")", ":", "for", "dependency", "in", "dependencies", ".", "get", "(", "'pypi'", ",", "[", "]", ")", ":", "logger", ".", "debug", "(", "\"Checking if %r exists in PyPI\"", ",", "dependency", ")", "try", ":", "exists", "=", "_pypi_head_package", "(", "dependency", ")", "except", "Exception", "as", "error", ":", "logger", ".", "error", "(", "\"Error checking %s in PyPI: %r\"", ",", "dependency", ",", "error", ")", "raise", "FadesError", "(", "\"Could not check if dependency exists in PyPI\"", ")", "else", ":", "if", "not", "exists", ":", "logger", ".", "error", "(", "\"%s doesn't exists in PyPI.\"", ",", "dependency", ")", "return", "False", "return", "True" ]
Check if the indicated dependencies actually exists in pypi.
[ "Check", "if", "the", "indicated", "dependencies", "actually", "exists", "in", "pypi", "." ]
python
train
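_pypi_head_package is fades' internal helper and is not shown in this record; conceptually it asks PyPI whether the project exists. A rough stdlib-only equivalent of that check is sketched below; the /simple/ URL layout is an assumption here, not taken from fades.

from urllib.request import Request, urlopen
from urllib.error import HTTPError

def pypi_exists(name):
    url = "https://pypi.org/simple/{}/".format(name)      # assumed endpoint
    try:
        with urlopen(Request(url, method="HEAD")) as resp:
            return resp.status == 200
    except HTTPError as err:
        if err.code == 404:
            return False
        raise

print(pypi_exists("requests"))    # True (needs network access)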
ccubed/PyMoe
Pymoe/Kitsu/library.py
https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/library.py#L122-L133
def __format_filters(filters): """ Format filters for the api query (to filter[<filter-name>]) :param filters: dict: can be None, filters for the query :return: the formatted filters, or None """ if filters is not None: for k in filters: if 'filter[' not in k: filters['filter[{}]'.format(k)] = filters.pop(k) return filters
[ "def", "__format_filters", "(", "filters", ")", ":", "if", "filters", "is", "not", "None", ":", "for", "k", "in", "filters", ":", "if", "'filter['", "not", "in", "k", ":", "filters", "[", "'filter[{}]'", ".", "format", "(", "k", ")", "]", "=", "filters", ".", "pop", "(", "k", ")", "return", "filters" ]
Format filters for the api query (to filter[<filter-name>]) :param filters: dict: can be None, filters for the query :return: the formatted filters, or None
[ "Format", "filters", "for", "the", "api", "query", "(", "to", "filter", "[", "<filter", "-", "name", ">", "]", ")" ]
python
train
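One caveat worth illustrating: the helper pops keys from the dict it is iterating, which recent Python 3 versions can reject with RuntimeError ("dictionary keys changed during iteration"). A safe standalone variant iterates over a snapshot of the keys instead.

def format_filters(filters):
    """Rewrite plain keys to the filter[<name>] form the API expects; None passes through."""
    if filters is not None:
        for k in list(filters):               # snapshot the keys before mutating the dict
            if 'filter[' not in k:
                filters['filter[{}]'.format(k)] = filters.pop(k)
    return filters

print(format_filters({'status': 'current'}))   # {'filter[status]': 'current'}
print(format_filters(None))                    # None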
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L6733-L6759
def gnpool(name, start, room, lenout=_default_len_out): """ Return names of kernel variables matching a specified template. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gnpool_c.html :param name: Template that names should match. :type name: str :param start: Index of first matching name to retrieve. :type start: int :param room: The largest number of values to return. :type room: int :param lenout: Length of strings in output array kvars. :type lenout: int :return: Kernel pool variables whose names match name. :rtype: list of str """ name = stypes.stringToCharP(name) start = ctypes.c_int(start) kvars = stypes.emptyCharArray(yLen=room, xLen=lenout) room = ctypes.c_int(room) lenout = ctypes.c_int(lenout) n = ctypes.c_int() found = ctypes.c_int() libspice.gnpool_c(name, start, room, lenout, ctypes.byref(n), kvars, ctypes.byref(found)) return stypes.cVectorToPython(kvars)[0:n.value], bool(found.value)
[ "def", "gnpool", "(", "name", ",", "start", ",", "room", ",", "lenout", "=", "_default_len_out", ")", ":", "name", "=", "stypes", ".", "stringToCharP", "(", "name", ")", "start", "=", "ctypes", ".", "c_int", "(", "start", ")", "kvars", "=", "stypes", ".", "emptyCharArray", "(", "yLen", "=", "room", ",", "xLen", "=", "lenout", ")", "room", "=", "ctypes", ".", "c_int", "(", "room", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "lenout", ")", "n", "=", "ctypes", ".", "c_int", "(", ")", "found", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "gnpool_c", "(", "name", ",", "start", ",", "room", ",", "lenout", ",", "ctypes", ".", "byref", "(", "n", ")", ",", "kvars", ",", "ctypes", ".", "byref", "(", "found", ")", ")", "return", "stypes", ".", "cVectorToPython", "(", "kvars", ")", "[", "0", ":", "n", ".", "value", "]", ",", "bool", "(", "found", ".", "value", ")" ]
Return names of kernel variables matching a specified template. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gnpool_c.html :param name: Template that names should match. :type name: str :param start: Index of first matching name to retrieve. :type start: int :param room: The largest number of values to return. :type room: int :param lenout: Length of strings in output array kvars. :type lenout: int :return: Kernel pool variables whose names match name. :rtype: list of str
[ "Return", "names", "of", "kernel", "variables", "matching", "a", "specified", "template", "." ]
python
train
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/prettygraph.py
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/prettygraph.py#L98-L102
def strip_prefixes(g: Graph): """ Remove the prefixes from the graph for aesthetics """ return re.sub(r'^@prefix .* .\n', '', g.serialize(format="turtle").decode(), flags=re.MULTILINE).strip()
[ "def", "strip_prefixes", "(", "g", ":", "Graph", ")", ":", "return", "re", ".", "sub", "(", "r'^@prefix .* .\\n'", ",", "''", ",", "g", ".", "serialize", "(", "format", "=", "\"turtle\"", ")", ".", "decode", "(", ")", ",", "flags", "=", "re", ".", "MULTILINE", ")", ".", "strip", "(", ")" ]
Remove the prefixes from the graph for aesthetics
[ "Remove", "the", "prefixes", "from", "the", "graph", "for", "aesthetics" ]
python
train
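The same prefix-stripping substitution applied to a plain Turtle string, without needing rdflib for the illustration; the sample Turtle below is made up.

import re

turtle = "@prefix ex: <http://example.org/> .\nex:a ex:b ex:c .\n"
stripped = re.sub(r'^@prefix .* .\n', '', turtle, flags=re.MULTILINE).strip()
print(stripped)    # ex:a ex:b ex:c .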
hyperledger/sawtooth-core
validator/sawtooth_validator/execution/execution_context.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/execution_context.py#L230-L242
def create_prefetch(self, addresses): """Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain). """ with self._lock: for add in addresses: self._state[add] = _ContextFuture(address=add, wait_for_tree=True)
[ "def", "create_prefetch", "(", "self", ",", "addresses", ")", ":", "with", "self", ".", "_lock", ":", "for", "add", "in", "addresses", ":", "self", ".", "_state", "[", "add", "]", "=", "_ContextFuture", "(", "address", "=", "add", ",", "wait_for_tree", "=", "True", ")" ]
Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain).
[ "Create", "futures", "needed", "before", "starting", "the", "process", "of", "reading", "the", "address", "s", "value", "from", "the", "merkle", "tree", "." ]
python
train
devassistant/devassistant
devassistant/dapi/dapicli.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/dapi/dapicli.py#L337-L346
def format_search(q, **kwargs): '''Formats the results of a search''' m = search(q, **kwargs) count = m['count'] if not count: raise DapiCommError('Could not find any DAP packages for your query.') return for mdap in m['results']: mdap = mdap['content_object'] return _format_dap_with_description(mdap)
[ "def", "format_search", "(", "q", ",", "*", "*", "kwargs", ")", ":", "m", "=", "search", "(", "q", ",", "*", "*", "kwargs", ")", "count", "=", "m", "[", "'count'", "]", "if", "not", "count", ":", "raise", "DapiCommError", "(", "'Could not find any DAP packages for your query.'", ")", "return", "for", "mdap", "in", "m", "[", "'results'", "]", ":", "mdap", "=", "mdap", "[", "'content_object'", "]", "return", "_format_dap_with_description", "(", "mdap", ")" ]
Formats the results of a search
[ "Formats", "the", "results", "of", "a", "search" ]
python
train
dshean/pygeotools
pygeotools/lib/geolib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/geolib.py#L1940-L1949
def get_xy_1D(ds, stride=1, getval=False): """Return 1D arrays of x and y map coordinates for input GDAL Dataset """ gt = ds.GetGeoTransform() #stride = stride_m/gt[1] pX = np.arange(0, ds.RasterXSize, stride) pY = np.arange(0, ds.RasterYSize, stride) mX, dummy = pixelToMap(pX, pY[0], gt) dummy, mY = pixelToMap(pX[0], pY, gt) return mX, mY
[ "def", "get_xy_1D", "(", "ds", ",", "stride", "=", "1", ",", "getval", "=", "False", ")", ":", "gt", "=", "ds", ".", "GetGeoTransform", "(", ")", "#stride = stride_m/gt[1]", "pX", "=", "np", ".", "arange", "(", "0", ",", "ds", ".", "RasterXSize", ",", "stride", ")", "pY", "=", "np", ".", "arange", "(", "0", ",", "ds", ".", "RasterYSize", ",", "stride", ")", "mX", ",", "dummy", "=", "pixelToMap", "(", "pX", ",", "pY", "[", "0", "]", ",", "gt", ")", "dummy", ",", "mY", "=", "pixelToMap", "(", "pX", "[", "0", "]", ",", "pY", ",", "gt", ")", "return", "mX", ",", "mY" ]
Return 1D arrays of x and y map coordinates for input GDAL Dataset
[ "Return", "1D", "arrays", "of", "x", "and", "y", "map", "coordinates", "for", "input", "GDAL", "Dataset" ]
python
train
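pixelToMap applies the GDAL geotransform; the core affine step (Xgeo = GT0 + Xpix*GT1 + Yline*GT2, and likewise for Y) can be sketched standalone as below, with a made-up 30 m geotransform. The library's own pixelToMap may additionally offset to pixel centers.

import numpy as np

def pixel_to_map(pX, pY, gt):
    # gt = (originX, pixelWidth, rowRotation, originY, colRotation, pixelHeight)
    mX = gt[0] + pX * gt[1] + pY * gt[2]
    mY = gt[3] + pX * gt[4] + pY * gt[5]
    return mX, mY

gt = (500000.0, 30.0, 0.0, 4600000.0, 0.0, -30.0)    # hypothetical UTM grid
pX = np.arange(0, 5)
mX, _ = pixel_to_map(pX, 0, gt)
print(mX)    # [500000. 500030. 500060. 500090. 500120.]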
AoiKuiyuyou/AoikLiveReload
src/aoiklivereload/aoiklivereload.py
https://github.com/AoiKuiyuyou/AoikLiveReload/blob/0d5adb12118a33749e6690a8165fdb769cff7d5c/src/aoiklivereload/aoiklivereload.py#L158-L223
def run_watcher(self): """ Watcher thread's function. :return: None. """ # Create observer observer = Observer() # Start observer observer.start() # Dict that maps file path to `watch object` watche_obj_map = {} # Run change check in a loop while not self._watcher_to_stop: # Get current watch paths old_watch_path_s = set(watche_obj_map) # Get new watch paths new_watch_path_s = self._find_watch_paths() # For each new watch path for new_watch_path in new_watch_path_s: # Remove from the old watch paths if exists old_watch_path_s.discard(new_watch_path) # If the new watch path was not watched if new_watch_path not in watche_obj_map: try: # Schedule a watch watch_obj = observer.schedule( # 2KGRW # `FileSystemEventHandler` instance self, # File path to watch new_watch_path, # Whether recursive recursive=True, ) # Store the watch obj watche_obj_map[new_watch_path] = watch_obj # If have error except OSError: # Set the watch object be None watche_obj_map[new_watch_path] = None # For each old watch path that is not in the new watch paths for old_watch_path in old_watch_path_s: # Get watch object watch_obj = watche_obj_map.pop(old_watch_path, None) # If have watch object if watch_obj is not None: # Unschedule the watch observer.unschedule(watch_obj) # Store new watch paths self._watch_paths = new_watch_path_s # Sleep before next check time.sleep(self._interval)
[ "def", "run_watcher", "(", "self", ")", ":", "# Create observer", "observer", "=", "Observer", "(", ")", "# Start observer", "observer", ".", "start", "(", ")", "# Dict that maps file path to `watch object`", "watche_obj_map", "=", "{", "}", "# Run change check in a loop", "while", "not", "self", ".", "_watcher_to_stop", ":", "# Get current watch paths", "old_watch_path_s", "=", "set", "(", "watche_obj_map", ")", "# Get new watch paths", "new_watch_path_s", "=", "self", ".", "_find_watch_paths", "(", ")", "# For each new watch path", "for", "new_watch_path", "in", "new_watch_path_s", ":", "# Remove from the old watch paths if exists", "old_watch_path_s", ".", "discard", "(", "new_watch_path", ")", "# If the new watch path was not watched", "if", "new_watch_path", "not", "in", "watche_obj_map", ":", "try", ":", "# Schedule a watch", "watch_obj", "=", "observer", ".", "schedule", "(", "# 2KGRW", "# `FileSystemEventHandler` instance", "self", ",", "# File path to watch", "new_watch_path", ",", "# Whether recursive", "recursive", "=", "True", ",", ")", "# Store the watch obj", "watche_obj_map", "[", "new_watch_path", "]", "=", "watch_obj", "# If have error", "except", "OSError", ":", "# Set the watch object be None", "watche_obj_map", "[", "new_watch_path", "]", "=", "None", "# For each old watch path that is not in the new watch paths", "for", "old_watch_path", "in", "old_watch_path_s", ":", "# Get watch object", "watch_obj", "=", "watche_obj_map", ".", "pop", "(", "old_watch_path", ",", "None", ")", "# If have watch object", "if", "watch_obj", "is", "not", "None", ":", "# Unschedule the watch", "observer", ".", "unschedule", "(", "watch_obj", ")", "# Store new watch paths", "self", ".", "_watch_paths", "=", "new_watch_path_s", "# Sleep before next check", "time", ".", "sleep", "(", "self", ".", "_interval", ")" ]
Watcher thread's function. :return: None.
[ "Watcher", "thread", "s", "function", "." ]
python
train
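The loop above leans on three watchdog calls: Observer.schedule, Observer.unschedule and an event handler. A minimal standalone sketch of just those calls follows; the watch path and sleep duration are arbitrary.

import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class PrintHandler(FileSystemEventHandler):
    def on_any_event(self, event):                 # fires for every filesystem event
        print(event.event_type, event.src_path)

observer = Observer()
observer.start()
watch = observer.schedule(PrintHandler(), ".", recursive=True)   # returns a watch object
time.sleep(5)                                      # let some events arrive
observer.unschedule(watch)                         # stop watching that path
observer.stop()
observer.join()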
nadirizr/json-logic-py
json_logic/__init__.py
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L47-L56
def less(a, b, *args): """Implements the '<' operator with JS-style type coertion.""" types = set([type(a), type(b)]) if float in types or int in types: try: a, b = float(a), float(b) except TypeError: # NaN return False return a < b and (not args or less(b, *args))
[ "def", "less", "(", "a", ",", "b", ",", "*", "args", ")", ":", "types", "=", "set", "(", "[", "type", "(", "a", ")", ",", "type", "(", "b", ")", "]", ")", "if", "float", "in", "types", "or", "int", "in", "types", ":", "try", ":", "a", ",", "b", "=", "float", "(", "a", ")", ",", "float", "(", "b", ")", "except", "TypeError", ":", "# NaN", "return", "False", "return", "a", "<", "b", "and", "(", "not", "args", "or", "less", "(", "b", ",", "*", "args", ")", ")" ]
Implements the '<' operator with JS-style type coertion.
[ "Implements", "the", "<", "operator", "with", "JS", "-", "style", "type", "coertion", "." ]
python
valid
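Assuming the json-logic-py package shown above is importable, the coercing comparison behaves like this; a usage sketch, not library documentation.

from json_logic import less

print(less(1, "2"))       # True  - "2" is coerced to 2.0 before comparing
print(less(1, 2, 3))      # True  - extra args chain the comparison: 1 < 2 < 3
print(less(3, 2))         # False
print(less(1, None))      # False - float(None) raises TypeError, treated like NaN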
tensorflow/tensor2tensor
tensor2tensor/trax/jaxboard.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L212-L249
def audio(self, tag, audiodata, step=None, sample_rate=44100): """Saves audio. NB: single channel only right now. Args: tag: str: label for this data audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave step: int: training step sample_rate: sample rate of passed in audio buffer """ audiodata = onp.array(audiodata) if step is None: step = self._step else: self._step = step audiodata = onp.clip(onp.squeeze(audiodata), -1, 1) if audiodata.ndim != 1: raise ValueError('Audio data must be 1D.') sample_list = (32767.0 * audiodata).astype(int).tolist() wio = io.BytesIO() wav_buf = wave.open(wio, 'wb') wav_buf.setnchannels(1) wav_buf.setsampwidth(2) wav_buf.setframerate(sample_rate) enc = b''.join([struct.pack('<h', v) for v in sample_list]) wav_buf.writeframes(enc) wav_buf.close() encoded_audio_bytes = wio.getvalue() wio.close() audio = Summary.Audio( sample_rate=sample_rate, num_channels=1, length_frames=len(sample_list), encoded_audio_string=encoded_audio_bytes, content_type='audio/wav') summary = Summary(value=[Summary.Value(tag=tag, audio=audio)]) self.add_summary(summary, step)
[ "def", "audio", "(", "self", ",", "tag", ",", "audiodata", ",", "step", "=", "None", ",", "sample_rate", "=", "44100", ")", ":", "audiodata", "=", "onp", ".", "array", "(", "audiodata", ")", "if", "step", "is", "None", ":", "step", "=", "self", ".", "_step", "else", ":", "self", ".", "_step", "=", "step", "audiodata", "=", "onp", ".", "clip", "(", "onp", ".", "squeeze", "(", "audiodata", ")", ",", "-", "1", ",", "1", ")", "if", "audiodata", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "'Audio data must be 1D.'", ")", "sample_list", "=", "(", "32767.0", "*", "audiodata", ")", ".", "astype", "(", "int", ")", ".", "tolist", "(", ")", "wio", "=", "io", ".", "BytesIO", "(", ")", "wav_buf", "=", "wave", ".", "open", "(", "wio", ",", "'wb'", ")", "wav_buf", ".", "setnchannels", "(", "1", ")", "wav_buf", ".", "setsampwidth", "(", "2", ")", "wav_buf", ".", "setframerate", "(", "sample_rate", ")", "enc", "=", "b''", ".", "join", "(", "[", "struct", ".", "pack", "(", "'<h'", ",", "v", ")", "for", "v", "in", "sample_list", "]", ")", "wav_buf", ".", "writeframes", "(", "enc", ")", "wav_buf", ".", "close", "(", ")", "encoded_audio_bytes", "=", "wio", ".", "getvalue", "(", ")", "wio", ".", "close", "(", ")", "audio", "=", "Summary", ".", "Audio", "(", "sample_rate", "=", "sample_rate", ",", "num_channels", "=", "1", ",", "length_frames", "=", "len", "(", "sample_list", ")", ",", "encoded_audio_string", "=", "encoded_audio_bytes", ",", "content_type", "=", "'audio/wav'", ")", "summary", "=", "Summary", "(", "value", "=", "[", "Summary", ".", "Value", "(", "tag", "=", "tag", ",", "audio", "=", "audio", ")", "]", ")", "self", ".", "add_summary", "(", "summary", ",", "step", ")" ]
Saves audio. NB: single channel only right now. Args: tag: str: label for this data audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave step: int: training step sample_rate: sample rate of passed in audio buffer
[ "Saves", "audio", "." ]
python
train
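The WAV-packing portion of the method is plain standard library; here is that step on its own, fed with a made-up 440 Hz tone instead of summary data.

import io
import struct
import wave

import numpy as np

sample_rate = 44100
t = np.arange(sample_rate) / sample_rate
audiodata = np.clip(np.sin(2 * np.pi * 440 * t), -1, 1)       # one second of a 440 Hz tone
samples = (32767.0 * audiodata).astype(int).tolist()

wio = io.BytesIO()
with wave.open(wio, 'wb') as wav_buf:
    wav_buf.setnchannels(1)                                   # mono
    wav_buf.setsampwidth(2)                                   # 16-bit samples
    wav_buf.setframerate(sample_rate)
    wav_buf.writeframes(b''.join(struct.pack('<h', v) for v in samples))
print(len(wio.getvalue()), "bytes of encoded WAV")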
hydraplatform/hydra-base
hydra_base/lib/attributes.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/attributes.py#L108-L118
def get_attribute_by_name_and_dimension(name, dimension_id=None,**kwargs): """ Get a specific attribute by its name. dimension_id can be None, because in attribute the dimension_id is not anymore mandatory """ try: attr_i = db.DBSession.query(Attr).filter(and_(Attr.name==name, Attr.dimension_id==dimension_id)).one() log.debug("Attribute retrieved") return attr_i except NoResultFound: return None
[ "def", "get_attribute_by_name_and_dimension", "(", "name", ",", "dimension_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "attr_i", "=", "db", ".", "DBSession", ".", "query", "(", "Attr", ")", ".", "filter", "(", "and_", "(", "Attr", ".", "name", "==", "name", ",", "Attr", ".", "dimension_id", "==", "dimension_id", ")", ")", ".", "one", "(", ")", "log", ".", "debug", "(", "\"Attribute retrieved\"", ")", "return", "attr_i", "except", "NoResultFound", ":", "return", "None" ]
Get a specific attribute by its name. dimension_id can be None, because in attribute the dimension_id is not anymore mandatory
[ "Get", "a", "specific", "attribute", "by", "its", "name", ".", "dimension_id", "can", "be", "None", "because", "in", "attribute", "the", "dimension_id", "is", "not", "anymore", "mandatory" ]
python
train
Kortemme-Lab/klab
klab/bio/relatrix.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/relatrix.py#L347-L372
def _create_inverse_maps(self): '''Create the inverse mappings (UniParc -> SEQRES -> ATOM -> Rosetta).''' # We have already determined that the inverse maps are well-defined (the normal maps are injective). The inverse maps will be partial maps in general. self.atom_to_rosetta_sequence_maps = {} for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.iteritems(): s = SequenceMap() for k, v, substitution_match in sequence_map: s.add(v, k, substitution_match) self.atom_to_rosetta_sequence_maps[chain_id] = s self.seqres_to_atom_sequence_maps = {} for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.iteritems(): s = SequenceMap() for k, v, substitution_match in sequence_map: s.add(v, k, substitution_match) self.seqres_to_atom_sequence_maps[chain_id] = s # This map uses PDB chain IDs as PDB chains may map to zero or one UniParc IDs whereas UniParc IDs may map to many PDB chains self.uniparc_to_seqres_sequence_maps = {} for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.iteritems(): s = UniParcPDBSequenceMap() for k, v, substitution_match in sequence_map: s.add(v, k, substitution_match) self.uniparc_to_seqres_sequence_maps[chain_id] = s
[ "def", "_create_inverse_maps", "(", "self", ")", ":", "# We have already determined that the inverse maps are well-defined (the normal maps are injective). The inverse maps will be partial maps in general.", "self", ".", "atom_to_rosetta_sequence_maps", "=", "{", "}", "for", "chain_id", ",", "sequence_map", "in", "self", ".", "rosetta_to_atom_sequence_maps", ".", "iteritems", "(", ")", ":", "s", "=", "SequenceMap", "(", ")", "for", "k", ",", "v", ",", "substitution_match", "in", "sequence_map", ":", "s", ".", "add", "(", "v", ",", "k", ",", "substitution_match", ")", "self", ".", "atom_to_rosetta_sequence_maps", "[", "chain_id", "]", "=", "s", "self", ".", "seqres_to_atom_sequence_maps", "=", "{", "}", "for", "chain_id", ",", "sequence_map", "in", "self", ".", "atom_to_seqres_sequence_maps", ".", "iteritems", "(", ")", ":", "s", "=", "SequenceMap", "(", ")", "for", "k", ",", "v", ",", "substitution_match", "in", "sequence_map", ":", "s", ".", "add", "(", "v", ",", "k", ",", "substitution_match", ")", "self", ".", "seqres_to_atom_sequence_maps", "[", "chain_id", "]", "=", "s", "# This map uses PDB chain IDs as PDB chains may map to zero or one UniParc IDs whereas UniParc IDs may map to many PDB chains", "self", ".", "uniparc_to_seqres_sequence_maps", "=", "{", "}", "for", "chain_id", ",", "sequence_map", "in", "self", ".", "seqres_to_uniparc_sequence_maps", ".", "iteritems", "(", ")", ":", "s", "=", "UniParcPDBSequenceMap", "(", ")", "for", "k", ",", "v", ",", "substitution_match", "in", "sequence_map", ":", "s", ".", "add", "(", "v", ",", "k", ",", "substitution_match", ")", "self", ".", "uniparc_to_seqres_sequence_maps", "[", "chain_id", "]", "=", "s" ]
Create the inverse mappings (UniParc -> SEQRES -> ATOM -> Rosetta).
[ "Create", "the", "inverse", "mappings", "(", "UniParc", "-", ">", "SEQRES", "-", ">", "ATOM", "-", ">", "Rosetta", ")", "." ]
python
train
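SequenceMap is the project's own container, but the inversion itself is just swapping the key/value pairs of an injective mapping; a plain-dict sketch of the idea:

rosetta_to_atom = {1: 101, 2: 102, 3: 105}                  # injective, so safe to invert
atom_to_rosetta = {v: k for k, v in rosetta_to_atom.items()}
print(atom_to_rosetta)    # {101: 1, 102: 2, 105: 3}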
thespacedoctor/polyglot
polyglot/markdown/translate.py
https://github.com/thespacedoctor/polyglot/blob/98038d746aa67e343b73b3ccee1e02d31dab81ec/polyglot/markdown/translate.py#L639-L669
def ul( self, text): """*convert plain-text to MMD unordered list* **Key Arguments:** - ``text`` -- the text to convert to MMD unordered list **Return:** - ``ul`` -- the MMD unordered list **Usage:** To convert text to a MMD unordered list: .. code-block:: python ul = md.ul(" This is a list item ") print ul # OUTPUT: # * This is a list item # """ m = self.reWS.match(text) ul = [] for l in m.group(2).split("\n"): prefix, text, suffix = self._snip_whitespace(l) ul.append("%(prefix)s* %(text)s " % locals()) return ("\n").join(ul) + "\n\n"
[ "def", "ul", "(", "self", ",", "text", ")", ":", "m", "=", "self", ".", "reWS", ".", "match", "(", "text", ")", "ul", "=", "[", "]", "for", "l", "in", "m", ".", "group", "(", "2", ")", ".", "split", "(", "\"\\n\"", ")", ":", "prefix", ",", "text", ",", "suffix", "=", "self", ".", "_snip_whitespace", "(", "l", ")", "ul", ".", "append", "(", "\"%(prefix)s* %(text)s \"", "%", "locals", "(", ")", ")", "return", "(", "\"\\n\"", ")", ".", "join", "(", "ul", ")", "+", "\"\\n\\n\"" ]
*convert plain-text to MMD unordered list* **Key Arguments:** - ``text`` -- the text to convert to MMD unordered list **Return:** - ``ul`` -- the MMD unordered list **Usage:** To convert text to a MMD unordered list: .. code-block:: python ul = md.ul(" This is a list item ") print ul # OUTPUT: # * This is a list item #
[ "*", "convert", "plain", "-", "text", "to", "MMD", "unordered", "list", "*" ]
python
train
Dentosal/python-sc2
sc2/bot_ai.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/bot_ai.py#L474-L480
def in_placement_grid(self, pos: Union[Point2, Point3, Unit]) -> bool: """ Returns True if you can place something at a position. Remember, buildings usually use 2x2, 3x3 or 5x5 of these grid points. Caution: some x and y offset might be required, see ramp code: https://github.com/Dentosal/python-sc2/blob/master/sc2/game_info.py#L17-L18 """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self._game_info.placement_grid[pos] != 0
[ "def", "in_placement_grid", "(", "self", ",", "pos", ":", "Union", "[", "Point2", ",", "Point3", ",", "Unit", "]", ")", "->", "bool", ":", "assert", "isinstance", "(", "pos", ",", "(", "Point2", ",", "Point3", ",", "Unit", ")", ")", "pos", "=", "pos", ".", "position", ".", "to2", ".", "rounded", "return", "self", ".", "_game_info", ".", "placement_grid", "[", "pos", "]", "!=", "0" ]
Returns True if you can place something at a position. Remember, buildings usually use 2x2, 3x3 or 5x5 of these grid points. Caution: some x and y offset might be required, see ramp code: https://github.com/Dentosal/python-sc2/blob/master/sc2/game_info.py#L17-L18
[ "Returns", "True", "if", "you", "can", "place", "something", "at", "a", "position", ".", "Remember", "buildings", "usually", "use", "2x2", "3x3", "or", "5x5", "of", "these", "grid", "points", ".", "Caution", ":", "some", "x", "and", "y", "offset", "might", "be", "required", "see", "ramp", "code", ":", "https", ":", "//", "github", ".", "com", "/", "Dentosal", "/", "python", "-", "sc2", "/", "blob", "/", "master", "/", "sc2", "/", "game_info", ".", "py#L17", "-", "L18" ]
python
train
zimeon/iiif
iiif/auth.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L82-L99
def login_service_description(self): """Login service description. The login service description _MUST_ include the token service description. The authentication pattern is indicated via the profile URI which is built using self.auth_pattern. """ label = 'Login to ' + self.name if (self.auth_type): label = label + ' (' + self.auth_type + ')' desc = {"@id": self.login_uri, "profile": self.profile_base + self.auth_pattern, "label": label} if (self.header): desc['header'] = self.header if (self.description): desc['description'] = self.description return desc
[ "def", "login_service_description", "(", "self", ")", ":", "label", "=", "'Login to '", "+", "self", ".", "name", "if", "(", "self", ".", "auth_type", ")", ":", "label", "=", "label", "+", "' ('", "+", "self", ".", "auth_type", "+", "')'", "desc", "=", "{", "\"@id\"", ":", "self", ".", "login_uri", ",", "\"profile\"", ":", "self", ".", "profile_base", "+", "self", ".", "auth_pattern", ",", "\"label\"", ":", "label", "}", "if", "(", "self", ".", "header", ")", ":", "desc", "[", "'header'", "]", "=", "self", ".", "header", "if", "(", "self", ".", "description", ")", ":", "desc", "[", "'description'", "]", "=", "self", ".", "description", "return", "desc" ]
Login service description. The login service description _MUST_ include the token service description. The authentication pattern is indicated via the profile URI which is built using self.auth_pattern.
[ "Login", "service", "description", "." ]
python
train
PmagPy/PmagPy
pmagpy/convert_2_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/convert_2_magic.py#L4830-L5092
def jr6_jr6(mag_file, dir_path=".", input_dir_path="", meas_file="measurements.txt", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", specnum=1, samp_con='1', location='unknown', lat='', lon='', noave=False, meth_code="LP-NO", volume=12, JR=False, user=""): """ Convert JR6 .jr6 files to MagIC file(s) Parameters ---------- mag_file : str input file name dir_path : str working directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" spec_file : str output specimen file name, default "specimens.txt" samp_file: str output sample file name, default "samples.txt" site_file : str output site file name, default "sites.txt" loc_file : str output location file name, default "locations.txt" specnum : int number of characters to designate a specimen, default 0 samp_con : str sample/site naming convention, default '1', see info below location : str location name, default "unknown" lat : float latitude, default "" lon : float longitude, default "" noave : bool do not average duplicate measurements, default False (so by default, DO average) meth_code : str colon-delimited method codes, default "LP-NO" volume : float volume in ccs, default 12 JR : bool IODP samples were measured on the JOIDES RESOLUTION, default False user : str user name, default "" Returns --------- Tuple : (True or False indicating if conversion was sucessful, meas_file name written) Info -------- Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name same as sample [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY """ version_num = pmag.get_version() input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) specnum = - int(specnum) samp_con = str(samp_con) volume = float(volume) * 1e-6 # need to add these meas_file = pmag.resolve_file_name(meas_file, output_dir_path) spec_file = pmag.resolve_file_name(spec_file, output_dir_path) samp_file = pmag.resolve_file_name(samp_file, output_dir_path) site_file = pmag.resolve_file_name(site_file, output_dir_path) loc_file = pmag.resolve_file_name(loc_file, output_dir_path) mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if JR: if meth_code == "LP-NO": meth_code = "" meth_code = meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V" meth_code = meth_code.strip(":") samp_con = '5' # format variables tmp_file = mag_file.split(os.extsep)[0]+os.extsep+'tmp' mag_file = pmag.resolve_file_name(mag_file, input_dir_path) if samp_con.startswith("4"): if "-" not in samp_con: print("option [4] must be in form 4-Z where Z is an integer") return False, "naming convention option [4] must be in form 4-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "4" elif samp_con.startswith("7"): if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "naming convention option [7] must be in form 7-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "7" else: Z = 1 # parse data # fix .jr6 
file so that there are spaces between all the columns. pre_data = open(mag_file, 'r') tmp_data = open(tmp_file, 'w') if samp_con != '2': fixed_data = pre_data.read().replace('-', ' -') else: fixed_data = "" for line in pre_data.readlines(): entries = line.split() if len(entries) < 2: continue fixed_line = entries[0] + ' ' + reduce( lambda x, y: x+' '+y, [x.replace('-', ' -') for x in entries[1:]]) fixed_data += fixed_line+os.linesep tmp_data.write(fixed_data) tmp_data.close() pre_data.close() if not JR: column_names = ['specimen', 'step', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] else: # measured on the Joides Resolution JR6 column_names = ['specimen', 'step', 'negz', 'y', 'x', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if isinstance(data['x'][0], str): column_names = ['specimen', 'step', 'step_unit', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction', 'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd'] data = pd.read_csv(tmp_file, delim_whitespace=True, names=column_names, index_col=False) if JR: data['z'] = -data['negz'] cart = np.array([data['x'], data['y'], data['z']]).transpose() dir_dat = pmag.cart2dir(cart).transpose() data['dir_dec'] = dir_dat[0] data['dir_inc'] = dir_dat[1] # the data are in A/m - this converts to Am^2 data['magn_moment'] = dir_dat[2]*(10.0**data['expon'])*volume data['magn_volume'] = dir_dat[2] * \ (10.0**data['expon']) # A/m - data in A/m data['dip'] = -data['dip'] data['specimen'] # put data into magic tables MagRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], [] for rowNum, row in data.iterrows(): MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {} specimen = row['specimen'] if specnum != 0: sample = specimen[:specnum] else: sample = specimen site = pmag.parse_site(sample, samp_con, Z) if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]: SpecRec['specimen'] = specimen SpecRec['sample'] = sample SpecRec["citations"] = "This study" SpecRec["analysts"] = user SpecRec['volume'] = volume SpecRecs.append(SpecRec) if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]: SampRec['sample'] = sample SampRec['site'] = site SampRec["citations"] = "This study" SampRec["analysts"] = user SampRec['azimuth'] = row['azimuth'] SampRec['dip'] = row['dip'] SampRec['bed_dip_direction'] = row['bed_dip_direction'] SampRec['bed_dip'] = row['bed_dip'] SampRec['method_codes'] = meth_code SampRecs.append(SampRec) if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]: SiteRec['site'] = site SiteRec['location'] = location SiteRec["citations"] = "This study" SiteRec["analysts"] = user SiteRec['lat'] = lat SiteRec['lon'] = lon SiteRecs.append(SiteRec) if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]: LocRec['location'] = location LocRec["citations"] = "This study" LocRec["analysts"] = user LocRec['lat_n'] = lat LocRec['lon_e'] = lon LocRec['lat_s'] = lat LocRec['lon_w'] = lon LocRecs.append(LocRec) MeasRec["citations"] = "This study" MeasRec["analysts"] = user MeasRec["specimen"] = specimen MeasRec['software_packages'] = 
version_num MeasRec["treat_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin MeasRec["quality"] = 'g' MeasRec["standard"] = 'u' MeasRec["treat_step_num"] = 0 MeasRec["treat_ac_field"] = '0' if row['step'] == 'NRM': meas_type = "LT-NO" elif 'step_unit' in row and row['step_unit'] == 'C': meas_type = "LT-T-Z" treat = float(row['step']) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin elif row['step'][0:2] == 'AD': meas_type = "LT-AF-Z" treat = float(row['step'][2:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'A': meas_type = "LT-AF-Z" treat = float(row['step'][1:]) MeasRec["treat_ac_field"] = '%8.3e' % ( treat*1e-3) # convert from mT to tesla elif row['step'][0] == 'TD': meas_type = "LT-T-Z" treat = float(row['step'][2:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin elif row['step'][0] == 'T': meas_type = "LT-T-Z" treat = float(row['step'][1:]) MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin else: # need to add IRM, and ARM options print("measurement type unknown", row['step']) return False, "measurement type unknown" MeasRec["magn_moment"] = str(row['magn_moment']) MeasRec["magn_volume"] = str(row['magn_volume']) MeasRec["dir_dec"] = str(row['dir_dec']) MeasRec["dir_inc"] = str(row['dir_inc']) MeasRec['method_codes'] = meas_type MagRecs.append(MeasRec) con = cb.Contribution(output_dir_path, read_tables=[]) con.add_magic_table_from_data(dtype='specimens', data=SpecRecs) con.add_magic_table_from_data(dtype='samples', data=SampRecs) con.add_magic_table_from_data(dtype='sites', data=SiteRecs) con.add_magic_table_from_data(dtype='locations', data=LocRecs) MeasOuts = pmag.measurements_methods3(MagRecs, noave) con.add_magic_table_from_data(dtype='measurements', data=MeasOuts) con.tables['specimens'].write_magic_file(custom_name=spec_file) con.tables['samples'].write_magic_file(custom_name=samp_file) con.tables['sites'].write_magic_file(custom_name=site_file) con.tables['locations'].write_magic_file(custom_name=loc_file) con.tables['measurements'].write_magic_file(custom_name=meas_file) try: os.remove(tmp_file) except (OSError, IOError) as e: print("couldn't remove temperary fixed JR6 file %s" % tmp_file) return True, meas_file
[ "def", "jr6_jr6", "(", "mag_file", ",", "dir_path", "=", "\".\"", ",", "input_dir_path", "=", "\"\"", ",", "meas_file", "=", "\"measurements.txt\"", ",", "spec_file", "=", "\"specimens.txt\"", ",", "samp_file", "=", "\"samples.txt\"", ",", "site_file", "=", "\"sites.txt\"", ",", "loc_file", "=", "\"locations.txt\"", ",", "specnum", "=", "1", ",", "samp_con", "=", "'1'", ",", "location", "=", "'unknown'", ",", "lat", "=", "''", ",", "lon", "=", "''", ",", "noave", "=", "False", ",", "meth_code", "=", "\"LP-NO\"", ",", "volume", "=", "12", ",", "JR", "=", "False", ",", "user", "=", "\"\"", ")", ":", "version_num", "=", "pmag", ".", "get_version", "(", ")", "input_dir_path", ",", "output_dir_path", "=", "pmag", ".", "fix_directories", "(", "input_dir_path", ",", "dir_path", ")", "specnum", "=", "-", "int", "(", "specnum", ")", "samp_con", "=", "str", "(", "samp_con", ")", "volume", "=", "float", "(", "volume", ")", "*", "1e-6", "# need to add these", "meas_file", "=", "pmag", ".", "resolve_file_name", "(", "meas_file", ",", "output_dir_path", ")", "spec_file", "=", "pmag", ".", "resolve_file_name", "(", "spec_file", ",", "output_dir_path", ")", "samp_file", "=", "pmag", ".", "resolve_file_name", "(", "samp_file", ",", "output_dir_path", ")", "site_file", "=", "pmag", ".", "resolve_file_name", "(", "site_file", ",", "output_dir_path", ")", "loc_file", "=", "pmag", ".", "resolve_file_name", "(", "loc_file", ",", "output_dir_path", ")", "mag_file", "=", "pmag", ".", "resolve_file_name", "(", "mag_file", ",", "input_dir_path", ")", "if", "JR", ":", "if", "meth_code", "==", "\"LP-NO\"", ":", "meth_code", "=", "\"\"", "meth_code", "=", "meth_code", "+", "\":FS-C-DRILL-IODP:SP-SS-C:SO-V\"", "meth_code", "=", "meth_code", ".", "strip", "(", "\":\"", ")", "samp_con", "=", "'5'", "# format variables", "tmp_file", "=", "mag_file", ".", "split", "(", "os", ".", "extsep", ")", "[", "0", "]", "+", "os", ".", "extsep", "+", "'tmp'", "mag_file", "=", "pmag", ".", "resolve_file_name", "(", "mag_file", ",", "input_dir_path", ")", "if", "samp_con", ".", "startswith", "(", "\"4\"", ")", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"option [4] must be in form 4-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [4] must be in form 4-Z where Z is an integer\"", "else", ":", "Z", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"4\"", "elif", "samp_con", ".", "startswith", "(", "\"7\"", ")", ":", "if", "\"-\"", "not", "in", "samp_con", ":", "print", "(", "\"option [7] must be in form 7-Z where Z is an integer\"", ")", "return", "False", ",", "\"naming convention option [7] must be in form 7-Z where Z is an integer\"", "else", ":", "Z", "=", "samp_con", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "samp_con", "=", "\"7\"", "else", ":", "Z", "=", "1", "# parse data", "# fix .jr6 file so that there are spaces between all the columns.", "pre_data", "=", "open", "(", "mag_file", ",", "'r'", ")", "tmp_data", "=", "open", "(", "tmp_file", ",", "'w'", ")", "if", "samp_con", "!=", "'2'", ":", "fixed_data", "=", "pre_data", ".", "read", "(", ")", ".", "replace", "(", "'-'", ",", "' -'", ")", "else", ":", "fixed_data", "=", "\"\"", "for", "line", "in", "pre_data", ".", "readlines", "(", ")", ":", "entries", "=", "line", ".", "split", "(", ")", "if", "len", "(", "entries", ")", "<", "2", ":", "continue", "fixed_line", "=", "entries", "[", "0", "]", "+", "' '", "+", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "' '", "+", 
"y", ",", "[", "x", ".", "replace", "(", "'-'", ",", "' -'", ")", "for", "x", "in", "entries", "[", "1", ":", "]", "]", ")", "fixed_data", "+=", "fixed_line", "+", "os", ".", "linesep", "tmp_data", ".", "write", "(", "fixed_data", ")", "tmp_data", ".", "close", "(", ")", "pre_data", ".", "close", "(", ")", "if", "not", "JR", ":", "column_names", "=", "[", "'specimen'", ",", "'step'", ",", "'x'", ",", "'y'", ",", "'z'", ",", "'expon'", ",", "'azimuth'", ",", "'dip'", ",", "'bed_dip_direction'", ",", "'bed_dip'", ",", "'bed_dip_dir2'", ",", "'bed_dip2'", ",", "'param1'", ",", "'param2'", ",", "'param3'", ",", "'param4'", ",", "'dir_csd'", "]", "else", ":", "# measured on the Joides Resolution JR6", "column_names", "=", "[", "'specimen'", ",", "'step'", ",", "'negz'", ",", "'y'", ",", "'x'", ",", "'expon'", ",", "'azimuth'", ",", "'dip'", ",", "'bed_dip_direction'", ",", "'bed_dip'", ",", "'bed_dip_dir2'", ",", "'bed_dip2'", ",", "'param1'", ",", "'param2'", ",", "'param3'", ",", "'param4'", ",", "'dir_csd'", "]", "data", "=", "pd", ".", "read_csv", "(", "tmp_file", ",", "delim_whitespace", "=", "True", ",", "names", "=", "column_names", ",", "index_col", "=", "False", ")", "if", "isinstance", "(", "data", "[", "'x'", "]", "[", "0", "]", ",", "str", ")", ":", "column_names", "=", "[", "'specimen'", ",", "'step'", ",", "'step_unit'", ",", "'x'", ",", "'y'", ",", "'z'", ",", "'expon'", ",", "'azimuth'", ",", "'dip'", ",", "'bed_dip_direction'", ",", "'bed_dip'", ",", "'bed_dip_dir2'", ",", "'bed_dip2'", ",", "'param1'", ",", "'param2'", ",", "'param3'", ",", "'param4'", ",", "'dir_csd'", "]", "data", "=", "pd", ".", "read_csv", "(", "tmp_file", ",", "delim_whitespace", "=", "True", ",", "names", "=", "column_names", ",", "index_col", "=", "False", ")", "if", "JR", ":", "data", "[", "'z'", "]", "=", "-", "data", "[", "'negz'", "]", "cart", "=", "np", ".", "array", "(", "[", "data", "[", "'x'", "]", ",", "data", "[", "'y'", "]", ",", "data", "[", "'z'", "]", "]", ")", ".", "transpose", "(", ")", "dir_dat", "=", "pmag", ".", "cart2dir", "(", "cart", ")", ".", "transpose", "(", ")", "data", "[", "'dir_dec'", "]", "=", "dir_dat", "[", "0", "]", "data", "[", "'dir_inc'", "]", "=", "dir_dat", "[", "1", "]", "# the data are in A/m - this converts to Am^2", "data", "[", "'magn_moment'", "]", "=", "dir_dat", "[", "2", "]", "*", "(", "10.0", "**", "data", "[", "'expon'", "]", ")", "*", "volume", "data", "[", "'magn_volume'", "]", "=", "dir_dat", "[", "2", "]", "*", "(", "10.0", "**", "data", "[", "'expon'", "]", ")", "# A/m - data in A/m", "data", "[", "'dip'", "]", "=", "-", "data", "[", "'dip'", "]", "data", "[", "'specimen'", "]", "# put data into magic tables", "MagRecs", ",", "SpecRecs", ",", "SampRecs", ",", "SiteRecs", ",", "LocRecs", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "for", "rowNum", ",", "row", "in", "data", ".", "iterrows", "(", ")", ":", "MeasRec", ",", "SpecRec", ",", "SampRec", ",", "SiteRec", ",", "LocRec", "=", "{", "}", ",", "{", "}", ",", "{", "}", ",", "{", "}", ",", "{", "}", "specimen", "=", "row", "[", "'specimen'", "]", "if", "specnum", "!=", "0", ":", "sample", "=", "specimen", "[", ":", "specnum", "]", "else", ":", "sample", "=", "specimen", "site", "=", "pmag", ".", "parse_site", "(", "sample", ",", "samp_con", ",", "Z", ")", "if", "specimen", "!=", "\"\"", "and", "specimen", "not", "in", "[", "x", "[", "'specimen'", "]", "if", "'specimen'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SpecRecs", 
"]", ":", "SpecRec", "[", "'specimen'", "]", "=", "specimen", "SpecRec", "[", "'sample'", "]", "=", "sample", "SpecRec", "[", "\"citations\"", "]", "=", "\"This study\"", "SpecRec", "[", "\"analysts\"", "]", "=", "user", "SpecRec", "[", "'volume'", "]", "=", "volume", "SpecRecs", ".", "append", "(", "SpecRec", ")", "if", "sample", "!=", "\"\"", "and", "sample", "not", "in", "[", "x", "[", "'sample'", "]", "if", "'sample'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SampRecs", "]", ":", "SampRec", "[", "'sample'", "]", "=", "sample", "SampRec", "[", "'site'", "]", "=", "site", "SampRec", "[", "\"citations\"", "]", "=", "\"This study\"", "SampRec", "[", "\"analysts\"", "]", "=", "user", "SampRec", "[", "'azimuth'", "]", "=", "row", "[", "'azimuth'", "]", "SampRec", "[", "'dip'", "]", "=", "row", "[", "'dip'", "]", "SampRec", "[", "'bed_dip_direction'", "]", "=", "row", "[", "'bed_dip_direction'", "]", "SampRec", "[", "'bed_dip'", "]", "=", "row", "[", "'bed_dip'", "]", "SampRec", "[", "'method_codes'", "]", "=", "meth_code", "SampRecs", ".", "append", "(", "SampRec", ")", "if", "site", "!=", "\"\"", "and", "site", "not", "in", "[", "x", "[", "'site'", "]", "if", "'site'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "SiteRecs", "]", ":", "SiteRec", "[", "'site'", "]", "=", "site", "SiteRec", "[", "'location'", "]", "=", "location", "SiteRec", "[", "\"citations\"", "]", "=", "\"This study\"", "SiteRec", "[", "\"analysts\"", "]", "=", "user", "SiteRec", "[", "'lat'", "]", "=", "lat", "SiteRec", "[", "'lon'", "]", "=", "lon", "SiteRecs", ".", "append", "(", "SiteRec", ")", "if", "location", "!=", "\"\"", "and", "location", "not", "in", "[", "x", "[", "'location'", "]", "if", "'location'", "in", "list", "(", "x", ".", "keys", "(", ")", ")", "else", "\"\"", "for", "x", "in", "LocRecs", "]", ":", "LocRec", "[", "'location'", "]", "=", "location", "LocRec", "[", "\"citations\"", "]", "=", "\"This study\"", "LocRec", "[", "\"analysts\"", "]", "=", "user", "LocRec", "[", "'lat_n'", "]", "=", "lat", "LocRec", "[", "'lon_e'", "]", "=", "lon", "LocRec", "[", "'lat_s'", "]", "=", "lat", "LocRec", "[", "'lon_w'", "]", "=", "lon", "LocRecs", ".", "append", "(", "LocRec", ")", "MeasRec", "[", "\"citations\"", "]", "=", "\"This study\"", "MeasRec", "[", "\"analysts\"", "]", "=", "user", "MeasRec", "[", "\"specimen\"", "]", "=", "specimen", "MeasRec", "[", "'software_packages'", "]", "=", "version_num", "MeasRec", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "MeasRec", "[", "\"meas_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "MeasRec", "[", "\"quality\"", "]", "=", "'g'", "MeasRec", "[", "\"standard\"", "]", "=", "'u'", "MeasRec", "[", "\"treat_step_num\"", "]", "=", "0", "MeasRec", "[", "\"treat_ac_field\"", "]", "=", "'0'", "if", "row", "[", "'step'", "]", "==", "'NRM'", ":", "meas_type", "=", "\"LT-NO\"", "elif", "'step_unit'", "in", "row", "and", "row", "[", "'step_unit'", "]", "==", "'C'", ":", "meas_type", "=", "\"LT-T-Z\"", "treat", "=", "float", "(", "row", "[", "'step'", "]", ")", "MeasRec", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "treat", "+", "273.", ")", "# temp in kelvin", "elif", "row", "[", "'step'", "]", "[", "0", ":", "2", "]", "==", "'AD'", ":", "meas_type", "=", "\"LT-AF-Z\"", "treat", "=", "float", "(", "row", "[", "'step'", "]", "[", "2", ":", "]", ")", "MeasRec", "[", "\"treat_ac_field\"", "]", "=", "'%8.3e'", 
"%", "(", "treat", "*", "1e-3", ")", "# convert from mT to tesla", "elif", "row", "[", "'step'", "]", "[", "0", "]", "==", "'A'", ":", "meas_type", "=", "\"LT-AF-Z\"", "treat", "=", "float", "(", "row", "[", "'step'", "]", "[", "1", ":", "]", ")", "MeasRec", "[", "\"treat_ac_field\"", "]", "=", "'%8.3e'", "%", "(", "treat", "*", "1e-3", ")", "# convert from mT to tesla", "elif", "row", "[", "'step'", "]", "[", "0", "]", "==", "'TD'", ":", "meas_type", "=", "\"LT-T-Z\"", "treat", "=", "float", "(", "row", "[", "'step'", "]", "[", "2", ":", "]", ")", "MeasRec", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "treat", "+", "273.", ")", "# temp in kelvin", "elif", "row", "[", "'step'", "]", "[", "0", "]", "==", "'T'", ":", "meas_type", "=", "\"LT-T-Z\"", "treat", "=", "float", "(", "row", "[", "'step'", "]", "[", "1", ":", "]", ")", "MeasRec", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "treat", "+", "273.", ")", "# temp in kelvin", "else", ":", "# need to add IRM, and ARM options", "print", "(", "\"measurement type unknown\"", ",", "row", "[", "'step'", "]", ")", "return", "False", ",", "\"measurement type unknown\"", "MeasRec", "[", "\"magn_moment\"", "]", "=", "str", "(", "row", "[", "'magn_moment'", "]", ")", "MeasRec", "[", "\"magn_volume\"", "]", "=", "str", "(", "row", "[", "'magn_volume'", "]", ")", "MeasRec", "[", "\"dir_dec\"", "]", "=", "str", "(", "row", "[", "'dir_dec'", "]", ")", "MeasRec", "[", "\"dir_inc\"", "]", "=", "str", "(", "row", "[", "'dir_inc'", "]", ")", "MeasRec", "[", "'method_codes'", "]", "=", "meas_type", "MagRecs", ".", "append", "(", "MeasRec", ")", "con", "=", "cb", ".", "Contribution", "(", "output_dir_path", ",", "read_tables", "=", "[", "]", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'specimens'", ",", "data", "=", "SpecRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'samples'", ",", "data", "=", "SampRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'sites'", ",", "data", "=", "SiteRecs", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'locations'", ",", "data", "=", "LocRecs", ")", "MeasOuts", "=", "pmag", ".", "measurements_methods3", "(", "MagRecs", ",", "noave", ")", "con", ".", "add_magic_table_from_data", "(", "dtype", "=", "'measurements'", ",", "data", "=", "MeasOuts", ")", "con", ".", "tables", "[", "'specimens'", "]", ".", "write_magic_file", "(", "custom_name", "=", "spec_file", ")", "con", ".", "tables", "[", "'samples'", "]", ".", "write_magic_file", "(", "custom_name", "=", "samp_file", ")", "con", ".", "tables", "[", "'sites'", "]", ".", "write_magic_file", "(", "custom_name", "=", "site_file", ")", "con", ".", "tables", "[", "'locations'", "]", ".", "write_magic_file", "(", "custom_name", "=", "loc_file", ")", "con", ".", "tables", "[", "'measurements'", "]", ".", "write_magic_file", "(", "custom_name", "=", "meas_file", ")", "try", ":", "os", ".", "remove", "(", "tmp_file", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "e", ":", "print", "(", "\"couldn't remove temperary fixed JR6 file %s\"", "%", "tmp_file", ")", "return", "True", ",", "meas_file" ]
Convert JR6 .jr6 files to MagIC file(s)

    Parameters
    ----------
    mag_file : str
        input file name
    dir_path : str
        working directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    meas_file : str
        output measurement file name, default "measurements.txt"
    spec_file : str
        output specimen file name, default "specimens.txt"
    samp_file: str
        output sample file name, default "samples.txt"
    site_file : str
        output site file name, default "sites.txt"
    loc_file : str
        output location file name, default "locations.txt"
    specnum : int
        number of characters to designate a specimen, default 0
    samp_con : str
        sample/site naming convention, default '1', see info below
    location : str
        location name, default "unknown"
    lat : float
        latitude, default ""
    lon : float
        longitude, default ""
    noave : bool
        do not average duplicate measurements, default False
        (so by default, DO average)
    meth_code : str
        colon-delimited method codes, default "LP-NO"
    volume : float
        volume in ccs, default 12
    JR : bool
        IODP samples were measured on the JOIDES RESOLUTION, default False
    user : str
        user name, default ""

    Returns
    ---------
    Tuple : (True or False indicating if conversion was successful, meas_file name written)

    Info
    --------
    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
[ "Convert", "JR6", ".", "jr6", "files", "to", "MagIC", "file", "(", "s", ")" ]
python
train
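A minimal calling sketch for the converter documented above. This record does not show the containing repository or module, so the import below is an assumption; the keyword arguments mirror the signature in the token list, and the file name is a placeholder.

from pmagpy import convert_2_magic  # assumed location -- the module is not named in this record

# Convert a JR6 spinner file into MagIC tables in the current directory.
ok, meas_path = convert_2_magic.jr6_jr6(
    "AF.jr6",          # hypothetical .jr6 input file
    dir_path=".",      # write measurements.txt, specimens.txt, ... here
    specnum=1,         # one trailing character separates specimen from sample
    samp_con="1",      # naming convention [1]: XXXXY
    location="unknown",
    volume=12,         # sample volume in ccs
)
if ok:
    print("wrote measurements to", meas_path)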
raamana/mrivis
mrivis/base.py
https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/base.py#L876-L886
def _check_image(self, image_nD):
    """Sanity checks on the image data"""

    self.input_image = load_image_from_disk(image_nD)

    if len(self.input_image.shape) < 3:
        raise ValueError('Input image must be at least 3D')

    if np.count_nonzero(self.input_image) == 0:
        raise ValueError('Input image is completely filled with zeros! '
                         'Must be non-empty')
[ "def", "_check_image", "(", "self", ",", "image_nD", ")", ":", "self", ".", "input_image", "=", "load_image_from_disk", "(", "image_nD", ")", "if", "len", "(", "self", ".", "input_image", ".", "shape", ")", "<", "3", ":", "raise", "ValueError", "(", "'Input image must be atleast 3D'", ")", "if", "np", ".", "count_nonzero", "(", "self", ".", "input_image", ")", "==", "0", ":", "raise", "ValueError", "(", "'Input image is completely filled with zeros! '", "'Must be non-empty'", ")" ]
Sanity checks on the image data
[ "Sanity", "checks", "on", "the", "image", "data" ]
python
train
python-wink/python-wink
src/pywink/devices/powerstrip.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/powerstrip.py#L79-L91
def set_state(self, state):
    """
    :param state: a boolean of true (on) or false ('off')
    :return: nothing
    """
    if self.index() == 0:
        values = {"outlets": [{"desired_state": {"powered": state}}, {}]}
    else:
        values = {"outlets": [{}, {"desired_state": {"powered": state}}]}
    response = self.api_interface.set_device_state(self, values,
                                                   id_override=self.parent_id(),
                                                   type_override="powerstrip")
    self._update_state_from_response(response)
[ "def", "set_state", "(", "self", ",", "state", ")", ":", "if", "self", ".", "index", "(", ")", "==", "0", ":", "values", "=", "{", "\"outlets\"", ":", "[", "{", "\"desired_state\"", ":", "{", "\"powered\"", ":", "state", "}", "}", ",", "{", "}", "]", "}", "else", ":", "values", "=", "{", "\"outlets\"", ":", "[", "{", "}", ",", "{", "\"desired_state\"", ":", "{", "\"powered\"", ":", "state", "}", "}", "]", "}", "response", "=", "self", ".", "api_interface", ".", "set_device_state", "(", "self", ",", "values", ",", "id_override", "=", "self", ".", "parent_id", "(", ")", ",", "type_override", "=", "\"powerstrip\"", ")", "self", ".", "_update_state_from_response", "(", "response", ")" ]
:param state: a boolean of true (on) or false ('off')
:return: nothing
[ ":", "param", "state", ":", "a", "boolean", "of", "true", "(", "on", ")", "or", "false", "(", "off", ")", ":", "return", ":", "nothing" ]
python
train
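A short sketch of driving set_state on a power strip outlet. Obtaining the outlet object depends on pywink's device discovery, which is not part of this record, so that step is only assumed here.

# Assumes `outlet` is a power strip outlet object exposing the set_state()
# method shown above (e.g. obtained from pywink's device discovery).
def toggle(outlet, on=True):
    # Only this outlet's slot in the payload carries a desired state; the
    # sibling outlet's slot is left empty, as in the method above.
    outlet.set_state(on)

# toggle(outlet, on=False)  # turn the outlet off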
Azure/azure-sdk-for-python
azure-mgmt-resource/azure/mgmt/resource/links/management_link_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-resource/azure/mgmt/resource/links/management_link_client.py#L96-L104
def models(cls, api_version=DEFAULT_API_VERSION):
    """Module depends on the API version:

       * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.links.v2016_09_01.models>`
    """
    if api_version == '2016-09-01':
        from .v2016_09_01 import models
        return models
    raise NotImplementedError("APIVersion {} is not available".format(api_version))
[ "def", "models", "(", "cls", ",", "api_version", "=", "DEFAULT_API_VERSION", ")", ":", "if", "api_version", "==", "'2016-09-01'", ":", "from", ".", "v2016_09_01", "import", "models", "return", "models", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")" ]
Module depends on the API version:

* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.links.v2016_09_01.models>`
[ "Module", "depends", "on", "the", "API", "version", ":" ]
python
test
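A usage sketch for the version-dispatching models() classmethod above. The import path is inferred from the file path in this record and should be treated as an assumption.

# Import path inferred from azure-mgmt-resource/azure/mgmt/resource/links/...
from azure.mgmt.resource.links.management_link_client import ManagementLinkClient

# Resolve the models module for the only supported API version.
models = ManagementLinkClient.models(api_version='2016-09-01')

# Any other version raises NotImplementedError, per the method above.
try:
    ManagementLinkClient.models(api_version='2099-01-01')
except NotImplementedError as exc:
    print(exc)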
Robin8Put/pmes
storage/rpc_methods.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/storage/rpc_methods.py#L110-L148
async def find_recent_news(self, **params):
    """Looking up recent news for account.

    Accepts:
        - public_key
    Returns:
        - list with dicts or empty
    """
    # Check if params is not empty
    if params.get("message"):
        params = json.loads(params.get("message", "{}"))
    if not params:
        return {"error":400, "reason":"Missed required fields"}

    # Check if required parameter does exist
    public_key = params.get("public_key", None)
    if not public_key:
        return {"error":400, "reason":"Missed required fields"}

    # Check if current public_key does exist in database
    account = await self.collection.find_one({"public_key": public_key})
    if not account:
        return {"error":404, "reason":"Current user not found"}

    # Connect to news collection
    news_db = client[settings.DBNAME]
    news_collection = news_db[settings.NEWS]
    news = [{i: new[i] for i in new if i != "_id"}
            async for new in news_collection.find(
                {"account_id": account["id"]}).sort([("$natural", -1)])]

    # Set news amount to zero.
    accounts_collection = news_db[settings.ACCOUNTS]
    await accounts_collection.find_one_and_update(
        {"public_key": params["public_key"]},
        {"$set": {"news_count": 0}})

    return news
[ "async", "def", "find_recent_news", "(", "self", ",", "*", "*", "params", ")", ":", "# Check if params is not empty", "if", "params", ".", "get", "(", "\"message\"", ")", ":", "params", "=", "json", ".", "loads", "(", "params", ".", "get", "(", "\"message\"", ",", "\"{}\"", ")", ")", "if", "not", "params", ":", "return", "{", "\"error\"", ":", "400", ",", "\"reason\"", ":", "\"Missed required fields\"", "}", "# Check if required parameter does exist", "public_key", "=", "params", ".", "get", "(", "\"public_key\"", ",", "None", ")", "if", "not", "public_key", ":", "return", "{", "\"error\"", ":", "400", ",", "\"reason\"", ":", "\"Missed required fields\"", "}", "# Check if current public_key does exist in database", "account", "=", "await", "self", ".", "collection", ".", "find_one", "(", "{", "\"public_key\"", ":", "public_key", "}", ")", "if", "not", "account", ":", "return", "{", "\"error\"", ":", "404", ",", "\"reason\"", ":", "\"Current user not found\"", "}", "# Connect to news collection", "news_db", "=", "client", "[", "settings", ".", "DBNAME", "]", "news_collection", "=", "news_db", "[", "settings", ".", "NEWS", "]", "news", "=", "[", "{", "i", ":", "new", "[", "i", "]", "for", "i", "in", "new", "if", "i", "!=", "\"_id\"", "}", "async", "for", "new", "in", "news_collection", ".", "find", "(", "{", "\"account_id\"", ":", "account", "[", "\"id\"", "]", "}", ")", ".", "sort", "(", "[", "(", "\"$natural\"", ",", "-", "1", ")", "]", ")", "]", "# Set news amount to zero.", "accounts_collection", "=", "news_db", "[", "settings", ".", "ACCOUNTS", "]", "await", "accounts_collection", ".", "find_one_and_update", "(", "{", "\"public_key\"", ":", "params", "[", "\"public_key\"", "]", "}", ",", "{", "\"$set\"", ":", "{", "\"news_count\"", ":", "0", "}", "}", ")", "return", "news" ]
Looking up recent news for account.

    Accepts:
        - public_key
    Returns:
        - list with dicts or empty
[ "Looking", "up", "recent", "news", "for", "account", ".", "Accepts", ":", "-", "public_key", "Returns", ":", "-", "list", "with", "dicts", "or", "empty" ]
python
train
StanfordVL/robosuite
robosuite/environments/base.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/base.py#L142-L149
def reset(self):
    """Resets simulation."""
    # TODO(yukez): investigate black screen of death
    # if there is an active viewer window, destroy it
    self._destroy_viewer()
    self._reset_internal()
    self.sim.forward()
    return self._get_observation()
[ "def", "reset", "(", "self", ")", ":", "# TODO(yukez): investigate black screen of death", "# if there is an active viewer window, destroy it", "self", ".", "_destroy_viewer", "(", ")", "self", ".", "_reset_internal", "(", ")", "self", ".", "sim", ".", "forward", "(", ")", "return", "self", ".", "_get_observation", "(", ")" ]
Resets simulation.
[ "Resets", "simulation", "." ]
python
train
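A small interaction-loop sketch around reset(). The environment name, constructor arguments, and the env.dof attribute follow robosuite's README of that era and should be treated as assumptions, since they vary between versions; only the reset/step pattern is the point here.

import numpy as np
import robosuite as suite  # assumes robosuite is installed

# Environment name and constructor arguments are assumptions for illustration.
env = suite.make("SawyerLift", has_renderer=False)

obs = env.reset()  # calls the reset() shown above
for _ in range(10):
    action = np.random.randn(env.dof)  # env.dof is assumed per the era's API
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()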
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_antenna.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_antenna.py#L32-L52
def mavlink_packet(self, m):
    '''handle an incoming mavlink packet'''
    if self.gcs_location is None and self.module('wp').wploader.count() > 0:
        home = self.module('wp').get_home()
        self.gcs_location = (home.x, home.y)
        print("Antenna home set")
    if self.gcs_location is None:
        return
    if m.get_type() == 'GPS_RAW' and self.gcs_location is not None:
        (gcs_lat, gcs_lon) = self.gcs_location
        bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat, m.lon)
    elif m.get_type() == 'GPS_RAW_INT' and self.gcs_location is not None:
        (gcs_lat, gcs_lon) = self.gcs_location
        bearing = cuav_util.gps_bearing(gcs_lat, gcs_lon, m.lat / 1.0e7, m.lon / 1.0e7)
    else:
        return
    self.console.set_status('Antenna', 'Antenna %.0f' % bearing, row=0)
    if abs(bearing - self.last_bearing) > 5 and (time.time() - self.last_announce) > 15:
        self.last_bearing = bearing
        self.last_announce = time.time()
        self.say("Antenna %u" % int(bearing + 0.5))
[ "def", "mavlink_packet", "(", "self", ",", "m", ")", ":", "if", "self", ".", "gcs_location", "is", "None", "and", "self", ".", "module", "(", "'wp'", ")", ".", "wploader", ".", "count", "(", ")", ">", "0", ":", "home", "=", "self", ".", "module", "(", "'wp'", ")", ".", "get_home", "(", ")", "self", ".", "gcs_location", "=", "(", "home", ".", "x", ",", "home", ".", "y", ")", "print", "(", "\"Antenna home set\"", ")", "if", "self", ".", "gcs_location", "is", "None", ":", "return", "if", "m", ".", "get_type", "(", ")", "==", "'GPS_RAW'", "and", "self", ".", "gcs_location", "is", "not", "None", ":", "(", "gcs_lat", ",", "gcs_lon", ")", "=", "self", ".", "gcs_location", "bearing", "=", "cuav_util", ".", "gps_bearing", "(", "gcs_lat", ",", "gcs_lon", ",", "m", ".", "lat", ",", "m", ".", "lon", ")", "elif", "m", ".", "get_type", "(", ")", "==", "'GPS_RAW_INT'", "and", "self", ".", "gcs_location", "is", "not", "None", ":", "(", "gcs_lat", ",", "gcs_lon", ")", "=", "self", ".", "gcs_location", "bearing", "=", "cuav_util", ".", "gps_bearing", "(", "gcs_lat", ",", "gcs_lon", ",", "m", ".", "lat", "/", "1.0e7", ",", "m", ".", "lon", "/", "1.0e7", ")", "else", ":", "return", "self", ".", "console", ".", "set_status", "(", "'Antenna'", ",", "'Antenna %.0f'", "%", "bearing", ",", "row", "=", "0", ")", "if", "abs", "(", "bearing", "-", "self", ".", "last_bearing", ")", ">", "5", "and", "(", "time", ".", "time", "(", ")", "-", "self", ".", "last_announce", ")", ">", "15", ":", "self", ".", "last_bearing", "=", "bearing", "self", ".", "last_announce", "=", "time", ".", "time", "(", ")", "self", ".", "say", "(", "\"Antenna %u\"", "%", "int", "(", "bearing", "+", "0.5", ")", ")" ]
handle an incoming mavlink packet
[ "handle", "an", "incoming", "mavlink", "packet" ]
python
train
jalanb/pysyte
pysyte/bash/shell.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/bash/shell.py#L69-L71
def full_path(path):
    """Get the real path, expanding links and bashisms"""
    return os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
[ "def", "full_path", "(", "path", ")", ":", "return", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "expandvars", "(", "path", ")", ")", ")" ]
Get the real path, expanding links and bashisms
[ "Get", "the", "real", "path", "expanding", "links", "and", "bashisms" ]
python
train
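A quick sketch of what full_path resolves, importing it from the module path given in this record. The environment variable and file names are placeholders, and the results in the comments are indicative.

import os
from pysyte.bash.shell import full_path  # module path taken from this record

os.environ.setdefault("PROJECT_ROOT", "/tmp/project")  # placeholder variable

# Environment variables, "~" and symlinks are all resolved to a real path.
print(full_path("$PROJECT_ROOT/./src/../README.md"))  # typically /tmp/project/README.md
print(full_path("~/notes.txt"))                        # typically /home/<user>/notes.txt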
bootphon/h5features
h5features/features.py
https://github.com/bootphon/h5features/blob/d5f95db0f1cee58ac1ba4575d1212e796c39e1f9/h5features/features.py#L215-L256
def create_dataset(self, group, chunk_size):
    """Initializes sparse specific datasets"""
    group.attrs['format'] = self.dformat
    group.attrs['dim'] = self.dim

    if chunk_size == 'auto':
        group.create_dataset(
            'coordinates', (0, 2), dtype=np.float64,
            chunks=True, maxshape=(None, 2))
        group.create_dataset(
            self.name, (0,), dtype=self.dtype,
            chunks=True, maxshape=(None,))
    else:
        # for storing sparse data we don't use the self.nb_per_chunk,
        # which is only used by the Writer to determine times chunking.
        per_chunk = nb_per_chunk(self.dtype.itemsize, 1, chunk_size)
        group.create_dataset(
            'coordinates', (0, 2), dtype=np.float64,
            chunks=(per_chunk, 2), maxshape=(None, 2))
        group.create_dataset(
            self.name, (0,), dtype=self.dtype,
            chunks=(per_chunk,), maxshape=(None,))

    dtype = np.int64
    if chunk_size == 'auto':
        chunks = True
        self.nb_per_chunk = 'auto'
    else:
        chunks = (nb_per_chunk(np.dtype(dtype).itemsize, 1, chunk_size),)
        # Needed by Times.create_dataset
        self.nb_per_chunk = nb_per_chunk(
            self.dtype.itemsize,
            int(round(self.sparsity*self.dim)),
            chunk_size)

    group.create_dataset(
        'frames', (0,), dtype=dtype, chunks=chunks, maxshape=(None,))
[ "def", "create_dataset", "(", "self", ",", "group", ",", "chunk_size", ")", ":", "group", ".", "attrs", "[", "'format'", "]", "=", "self", ".", "dformat", "group", ".", "attrs", "[", "'dim'", "]", "=", "self", ".", "dim", "if", "chunk_size", "==", "'auto'", ":", "group", ".", "create_dataset", "(", "'coordinates'", ",", "(", "0", ",", "2", ")", ",", "dtype", "=", "np", ".", "float64", ",", "chunks", "=", "True", ",", "maxshape", "=", "(", "None", ",", "2", ")", ")", "group", ".", "create_dataset", "(", "self", ".", "name", ",", "(", "0", ",", ")", ",", "dtype", "=", "self", ".", "dtype", ",", "chunks", "=", "True", ",", "maxshape", "=", "(", "None", ",", ")", ")", "else", ":", "# for storing sparse data we don't use the self.nb_per_chunk,", "# which is only used by the Writer to determine times chunking.", "per_chunk", "=", "nb_per_chunk", "(", "self", ".", "dtype", ".", "itemsize", ",", "1", ",", "chunk_size", ")", "group", ".", "create_dataset", "(", "'coordinates'", ",", "(", "0", ",", "2", ")", ",", "dtype", "=", "np", ".", "float64", ",", "chunks", "=", "(", "per_chunk", ",", "2", ")", ",", "maxshape", "=", "(", "None", ",", "2", ")", ")", "group", ".", "create_dataset", "(", "self", ".", "name", ",", "(", "0", ",", ")", ",", "dtype", "=", "self", ".", "dtype", ",", "chunks", "=", "(", "per_chunk", ",", ")", ",", "maxshape", "=", "(", "None", ",", ")", ")", "dtype", "=", "np", ".", "int64", "if", "chunk_size", "==", "'auto'", ":", "chunks", "=", "True", "self", ".", "nb_per_chunk", "=", "'auto'", "else", ":", "chunks", "=", "(", "nb_per_chunk", "(", "np", ".", "dtype", "(", "dtype", ")", ".", "itemsize", ",", "1", ",", "chunk_size", ")", ",", ")", "# Needed by Times.create_dataset", "self", ".", "nb_per_chunk", "=", "nb_per_chunk", "(", "self", ".", "dtype", ".", "itemsize", ",", "int", "(", "round", "(", "self", ".", "sparsity", "*", "self", ".", "dim", ")", ")", ",", "chunk_size", ")", "group", ".", "create_dataset", "(", "'frames'", ",", "(", "0", ",", ")", ",", "dtype", "=", "dtype", ",", "chunks", "=", "chunks", ",", "maxshape", "=", "(", "None", ",", ")", ")" ]
Initializes sparse specific datasets
[ "Initializes", "sparse", "specific", "datasets" ]
python
train
sailthru/sailthru-python-client
sailthru/sailthru_client.py
https://github.com/sailthru/sailthru-python-client/blob/22aa39ba0c5bddd7b8743e24ada331128c0f4f54/sailthru/sailthru_client.py#L88-L108
def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None):
    """
    Remotely send an email template to multiple email addresses.
    http://docs.sailthru.com/api/send
    @param template: template string
    @param emails: List with email values or comma separated email string
    @param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
    @param options: optional dictionary to include replyto and/or test keys
    @param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
    """
    _vars = _vars or {}
    evars = evars or {}
    options = options or {}
    data = {'template': template,
            'email': ','.join(emails) if isinstance(emails, list) else emails,
            'vars': _vars.copy(),
            'evars': evars.copy(),
            'options': options.copy()}
    if schedule_time is not None:
        data['schedule_time'] = schedule_time
    return self.api_post('send', data)
[ "def", "multi_send", "(", "self", ",", "template", ",", "emails", ",", "_vars", "=", "None", ",", "evars", "=", "None", ",", "schedule_time", "=", "None", ",", "options", "=", "None", ")", ":", "_vars", "=", "_vars", "or", "{", "}", "evars", "=", "evars", "or", "{", "}", "options", "=", "options", "or", "{", "}", "data", "=", "{", "'template'", ":", "template", ",", "'email'", ":", "','", ".", "join", "(", "emails", ")", "if", "isinstance", "(", "emails", ",", "list", ")", "else", "emails", ",", "'vars'", ":", "_vars", ".", "copy", "(", ")", ",", "'evars'", ":", "evars", ".", "copy", "(", ")", ",", "'options'", ":", "options", ".", "copy", "(", ")", "}", "if", "schedule_time", "is", "not", "None", ":", "data", "[", "'schedule_time'", "]", "=", "schedule_time", "return", "self", ".", "api_post", "(", "'send'", ",", "data", ")" ]
Remotely send an email template to multiple email addresses.
        http://docs.sailthru.com/api/send
        @param template: template string
        @param emails: List with email values or comma separated email string
        @param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
        @param options: optional dictionary to include replyto and/or test keys
        @param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
[ "Remotely", "send", "an", "email", "template", "to", "multiple", "email", "addresses", ".", "http", ":", "//", "docs", ".", "sailthru", ".", "com", "/", "api", "/", "send" ]
python
train
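A usage sketch for multi_send, assuming a SailthruClient constructed with an API key and secret; the credentials, template name, and addresses below are placeholders.

from sailthru.sailthru_client import SailthruClient

client = SailthruClient("api-key", "api-secret")  # placeholder credentials

# Send one template to several recipients, with replacement vars and a test flag.
response = client.multi_send(
    "welcome-template",                        # hypothetical template name
    ["alice@example.com", "bob@example.com"],  # joined into a comma-separated string internally
    _vars={"greeting": "Hello"},
    options={"test": 1},
)
print(response.is_ok())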
catherinedevlin/ddl-generator
ddlgenerator/reshape.py
https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L18-L34
def clean_key_name(key):
    """
    Makes ``key`` a valid and appropriate SQL column name:

    1. Replaces illegal characters in column names with ``_``

    2. Prevents name from beginning with a digit (prepends ``_``)

    3. Lowercases name.  If you want case-sensitive table
       or column names, you are a bad person and you should feel bad.
    """
    result = _illegal_in_column_name.sub("_", key.strip())
    if result[0].isdigit():
        result = '_%s' % result
    if result.upper() in sql_reserved_words:
        result = '_%s' % key
    return result.lower()
[ "def", "clean_key_name", "(", "key", ")", ":", "result", "=", "_illegal_in_column_name", ".", "sub", "(", "\"_\"", ",", "key", ".", "strip", "(", ")", ")", "if", "result", "[", "0", "]", ".", "isdigit", "(", ")", ":", "result", "=", "'_%s'", "%", "result", "if", "result", ".", "upper", "(", ")", "in", "sql_reserved_words", ":", "result", "=", "'_%s'", "%", "key", "return", "result", ".", "lower", "(", ")" ]
Makes ``key`` a valid and appropriate SQL column name:

    1. Replaces illegal characters in column names with ``_``

    2. Prevents name from beginning with a digit (prepends ``_``)

    3. Lowercases name.  If you want case-sensitive table
       or column names, you are a bad person and you should feel bad.
[ "Makes", "key", "a", "valid", "and", "appropriate", "SQL", "column", "name", ":" ]
python
train
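A few illustrative calls to clean_key_name, imported from the module path in this record. The exact outputs depend on the module's illegal-character regex and reserved-word list, so the values in the comments are indicative rather than authoritative.

from ddlgenerator.reshape import clean_key_name  # path taken from this record

# Illegal characters become "_", and the name is lowercased.
print(clean_key_name("Unit Price ($)"))  # e.g. a lowercased, underscore-heavy name
# A leading digit gets a "_" prefix so the result stays a valid identifier.
print(clean_key_name("2019 totals"))     # e.g. "_2019_totals"
# SQL reserved words are also prefixed with "_".
print(clean_key_name("select"))          # e.g. "_select"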
jrderuiter/pybiomart
src/pybiomart/server.py
https://github.com/jrderuiter/pybiomart/blob/7802d45fe88549ab0512d6f37f815fc43b172b39/src/pybiomart/server.py#L58-L62
def marts(self):
    """List of available marts."""
    if self._marts is None:
        self._marts = self._fetch_marts()
    return self._marts
[ "def", "marts", "(", "self", ")", ":", "if", "self", ".", "_marts", "is", "None", ":", "self", ".", "_marts", "=", "self", ".", "_fetch_marts", "(", ")", "return", "self", ".", "_marts" ]
List of available marts.
[ "List", "of", "available", "marts", "." ]
python
train
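A usage sketch for the lazily cached marts property above, assuming the public Server class from pybiomart; the host URL is Ensembl's public BioMart endpoint.

from pybiomart import Server

server = Server(host="http://www.ensembl.org")

# First access triggers _fetch_marts(); later accesses reuse the cached value.
marts = server.marts
print(marts)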
jpablo128/simplystatic
bin/addrandompages.py
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/bin/addrandompages.py#L17-L25
def setup_parser():
    '''Set up the command-line options.'''
    parser = argparse.ArgumentParser(description='Add random pages to existing site.')
    parser.add_argument('-d', '--directory', action='store', default=os.getcwd(),
                        help='Site directory (must be a valid s2 structure).')
    parser.add_argument('-n', '--number', action='store', type=int, default=20,
                        help='Number of pages to generate.')
    return parser
[ "def", "setup_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Add random pages to existing site.'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--directory'", ",", "action", "=", "'store'", ",", "default", "=", "os", ".", "getcwd", "(", ")", ",", "help", "=", "'Site directory (must be a valid s2 structure).'", ")", "parser", ".", "add_argument", "(", "'-n'", ",", "'--number'", ",", "action", "=", "'store'", ",", "type", "=", "int", ",", "default", "=", "20", ",", "help", "=", "'Number of pages to generate.'", ")", "return", "parser" ]
Set up the command-line options.
[ "Set", "up", "the", "command", "-", "line", "options", "." ]
python
train
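A short sketch of exercising the parser above, passing an explicit argv list instead of relying on sys.argv; the directory value is a placeholder.

# Assumes setup_parser() from the script above is importable or defined locally.
parser = setup_parser()

args = parser.parse_args(["-d", "/tmp/site", "-n", "5"])
print(args.directory)  # "/tmp/site"
print(args.number)     # 5 (parsed as int because of type=int)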
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L1851-L1876
def set_value(self, selector, new_value, by=By.CSS_SELECTOR,
              timeout=settings.LARGE_TIMEOUT):
    """ This method uses JavaScript to update a text field. """
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    if page_utils.is_xpath_selector(selector):
        by = By.XPATH
    orginal_selector = selector
    css_selector = self.convert_to_css_selector(selector, by=by)
    self.__demo_mode_highlight_if_active(orginal_selector, by)
    if not self.demo_mode:
        self.scroll_to(orginal_selector, by=by, timeout=timeout)
    value = re.escape(new_value)
    value = self.__escape_quotes_if_needed(value)
    css_selector = re.escape(css_selector)
    css_selector = self.__escape_quotes_if_needed(css_selector)
    script = ("""document.querySelector('%s').value='%s';"""
              % (css_selector, value))
    self.execute_script(script)
    if new_value.endswith('\n'):
        element = self.wait_for_element_present(
            orginal_selector, by=by, timeout=timeout)
        element.send_keys(Keys.RETURN)
        if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
            self.wait_for_ready_state_complete()
    self.__demo_mode_pause_if_active()
[ "def", "set_value", "(", "self", ",", "selector", ",", "new_value", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "settings", ".", "LARGE_TIMEOUT", ")", ":", "if", "self", ".", "timeout_multiplier", "and", "timeout", "==", "settings", ".", "LARGE_TIMEOUT", ":", "timeout", "=", "self", ".", "__get_new_timeout", "(", "timeout", ")", "if", "page_utils", ".", "is_xpath_selector", "(", "selector", ")", ":", "by", "=", "By", ".", "XPATH", "orginal_selector", "=", "selector", "css_selector", "=", "self", ".", "convert_to_css_selector", "(", "selector", ",", "by", "=", "by", ")", "self", ".", "__demo_mode_highlight_if_active", "(", "orginal_selector", ",", "by", ")", "if", "not", "self", ".", "demo_mode", ":", "self", ".", "scroll_to", "(", "orginal_selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")", "value", "=", "re", ".", "escape", "(", "new_value", ")", "value", "=", "self", ".", "__escape_quotes_if_needed", "(", "value", ")", "css_selector", "=", "re", ".", "escape", "(", "css_selector", ")", "css_selector", "=", "self", ".", "__escape_quotes_if_needed", "(", "css_selector", ")", "script", "=", "(", "\"\"\"document.querySelector('%s').value='%s';\"\"\"", "%", "(", "css_selector", ",", "value", ")", ")", "self", ".", "execute_script", "(", "script", ")", "if", "new_value", ".", "endswith", "(", "'\\n'", ")", ":", "element", "=", "self", ".", "wait_for_element_present", "(", "orginal_selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")", "element", ".", "send_keys", "(", "Keys", ".", "RETURN", ")", "if", "settings", ".", "WAIT_FOR_RSC_ON_PAGE_LOADS", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "self", ".", "__demo_mode_pause_if_active", "(", ")" ]
This method uses JavaScript to update a text field.
[ "This", "method", "uses", "JavaScript", "to", "update", "a", "text", "field", "." ]
python
train
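A test-style sketch using set_value from a SeleniumBase test class. The page URL and selector are placeholders; the trailing newline exercises the RETURN-key branch shown above.

from seleniumbase import BaseCase

class SetValueExample(BaseCase):
    def test_fill_search_box(self):
        self.open("https://example.com")              # placeholder URL
        # JavaScript writes the value; "\n" makes the method press RETURN afterwards.
        self.set_value("input#search", "selenium\n")  # placeholder selector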
scanny/python-pptx
spec/gen_spec/gen_spec.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/spec/gen_spec/gen_spec.py#L241-L251
def render_desc(desc):
    """calculate desc string, wrapped if too long"""
    desc = desc + '.'
    desc_lines = split_len(desc, 54)
    if len(desc_lines) > 1:
        join_str = "'\n%s'" % (' '*21)
        lines_str = join_str.join(desc_lines)
        out = "('%s')" % lines_str
    else:
        out = "'%s'" % desc_lines[0]
    return out
[ "def", "render_desc", "(", "desc", ")", ":", "desc", "=", "desc", "+", "'.'", "desc_lines", "=", "split_len", "(", "desc", ",", "54", ")", "if", "len", "(", "desc_lines", ")", ">", "1", ":", "join_str", "=", "\"'\\n%s'\"", "%", "(", "' '", "*", "21", ")", "lines_str", "=", "join_str", ".", "join", "(", "desc_lines", ")", "out", "=", "\"('%s')\"", "%", "lines_str", "else", ":", "out", "=", "\"'%s'\"", "%", "desc_lines", "[", "0", "]", "return", "out" ]
calculate desc string, wrapped if too long
[ "calculate", "desc", "string", "wrapped", "if", "too", "long" ]
python
train
genialis/resolwe
resolwe/flow/executors/run.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/run.py#L134-L279
async def _run(self, data_id, script): """Execute the script and save results.""" self.data_id = data_id # Fetch data instance to get any executor requirements. self.process = PROCESS requirements = self.process['requirements'] self.requirements = requirements.get('executor', {}).get(self.name, {}) # pylint: disable=no-member self.resources = requirements.get('resources', {}) logger.debug("Preparing output files for Data with id {}".format(data_id)) os.chdir(EXECUTOR_SETTINGS['DATA_DIR']) try: log_file = self._create_file('stdout.txt') json_file = self._create_file('jsonout.txt') except FileExistsError: logger.error("Stdout or jsonout out file already exists.") # Looks like executor was already ran for this Data object, # so don't raise the error to prevent setting status to error. await self._send_manager_command(ExecutorProtocol.ABORT, expect_reply=False) return proc_pid = await self.start() await self.update_data_status( status=DATA_META['STATUS_PROCESSING'], process_pid=proc_pid ) # Run process and handle intermediate results logger.info("Running program for Data with id {}".format(data_id)) logger.debug("The program for Data with id {} is: \n{}".format(data_id, script)) await self.run_script(script) spawn_processes = [] output = {} process_error, process_warning, process_info = [], [], [] process_progress, process_rc = 0, 0 # read process output try: stdout = self.get_stdout() while True: line = await stdout.readline() logger.debug("Process's output: {}".format(line.strip())) if not line: break line = line.decode('utf-8') try: if line.strip().startswith('run'): # Save process and spawn if no errors log_file.write(line) log_file.flush() for obj in iterjson(line[3:].strip()): spawn_processes.append(obj) elif line.strip().startswith('export'): file_name = line[6:].strip() export_folder = SETTINGS['FLOW_EXECUTOR']['UPLOAD_DIR'] unique_name = 'export_{}'.format(uuid.uuid4().hex) export_path = os.path.join(export_folder, unique_name) self.exported_files_mapper[self.data_id][file_name] = unique_name shutil.move(file_name, export_path) else: # If JSON, save to MongoDB updates = {} for obj in iterjson(line): for key, val in obj.items(): if key.startswith('proc.'): if key == 'proc.error': process_error.append(val) if not process_rc: process_rc = 1 updates['process_rc'] = process_rc updates['process_error'] = process_error updates['status'] = DATA_META['STATUS_ERROR'] elif key == 'proc.warning': process_warning.append(val) updates['process_warning'] = process_warning elif key == 'proc.info': process_info.append(val) updates['process_info'] = process_info elif key == 'proc.rc': process_rc = int(val) updates['process_rc'] = process_rc if process_rc != 0: updates['status'] = DATA_META['STATUS_ERROR'] elif key == 'proc.progress': process_progress = int(float(val) * 100) updates['process_progress'] = process_progress else: output[key] = val updates['output'] = output if updates: await self.update_data_status(**updates) # Process meta fields are collected in listener, so we can clear them. 
process_error, process_warning, process_info = [], [], [] if process_rc > 0: log_file.close() json_file.close() await self._send_manager_command(ExecutorProtocol.FINISH, extra_fields={ ExecutorProtocol.FINISH_PROCESS_RC: process_rc }) return # Debug output # Not referenced in Data object json_file.write(line) json_file.flush() except ValueError as ex: # Ignore if not JSON log_file.write(line) log_file.flush() except MemoryError as ex: logger.error("Out of memory:\n\n{}".format(ex)) except IOError as ex: # TODO: if ex.errno == 28: no more free space raise ex finally: # Store results log_file.close() json_file.close() return_code = await self.end() if process_rc < return_code: process_rc = return_code # send a notification to the executor listener that we're done finish_fields = { ExecutorProtocol.FINISH_PROCESS_RC: process_rc } if spawn_processes and process_rc == 0: finish_fields[ExecutorProtocol.FINISH_SPAWN_PROCESSES] = spawn_processes finish_fields[ExecutorProtocol.FINISH_EXPORTED_FILES] = self.exported_files_mapper return finish_fields
[ "async", "def", "_run", "(", "self", ",", "data_id", ",", "script", ")", ":", "self", ".", "data_id", "=", "data_id", "# Fetch data instance to get any executor requirements.", "self", ".", "process", "=", "PROCESS", "requirements", "=", "self", ".", "process", "[", "'requirements'", "]", "self", ".", "requirements", "=", "requirements", ".", "get", "(", "'executor'", ",", "{", "}", ")", ".", "get", "(", "self", ".", "name", ",", "{", "}", ")", "# pylint: disable=no-member", "self", ".", "resources", "=", "requirements", ".", "get", "(", "'resources'", ",", "{", "}", ")", "logger", ".", "debug", "(", "\"Preparing output files for Data with id {}\"", ".", "format", "(", "data_id", ")", ")", "os", ".", "chdir", "(", "EXECUTOR_SETTINGS", "[", "'DATA_DIR'", "]", ")", "try", ":", "log_file", "=", "self", ".", "_create_file", "(", "'stdout.txt'", ")", "json_file", "=", "self", ".", "_create_file", "(", "'jsonout.txt'", ")", "except", "FileExistsError", ":", "logger", ".", "error", "(", "\"Stdout or jsonout out file already exists.\"", ")", "# Looks like executor was already ran for this Data object,", "# so don't raise the error to prevent setting status to error.", "await", "self", ".", "_send_manager_command", "(", "ExecutorProtocol", ".", "ABORT", ",", "expect_reply", "=", "False", ")", "return", "proc_pid", "=", "await", "self", ".", "start", "(", ")", "await", "self", ".", "update_data_status", "(", "status", "=", "DATA_META", "[", "'STATUS_PROCESSING'", "]", ",", "process_pid", "=", "proc_pid", ")", "# Run process and handle intermediate results", "logger", ".", "info", "(", "\"Running program for Data with id {}\"", ".", "format", "(", "data_id", ")", ")", "logger", ".", "debug", "(", "\"The program for Data with id {} is: \\n{}\"", ".", "format", "(", "data_id", ",", "script", ")", ")", "await", "self", ".", "run_script", "(", "script", ")", "spawn_processes", "=", "[", "]", "output", "=", "{", "}", "process_error", ",", "process_warning", ",", "process_info", "=", "[", "]", ",", "[", "]", ",", "[", "]", "process_progress", ",", "process_rc", "=", "0", ",", "0", "# read process output", "try", ":", "stdout", "=", "self", ".", "get_stdout", "(", ")", "while", "True", ":", "line", "=", "await", "stdout", ".", "readline", "(", ")", "logger", ".", "debug", "(", "\"Process's output: {}\"", ".", "format", "(", "line", ".", "strip", "(", ")", ")", ")", "if", "not", "line", ":", "break", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", "try", ":", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "'run'", ")", ":", "# Save process and spawn if no errors", "log_file", ".", "write", "(", "line", ")", "log_file", ".", "flush", "(", ")", "for", "obj", "in", "iterjson", "(", "line", "[", "3", ":", "]", ".", "strip", "(", ")", ")", ":", "spawn_processes", ".", "append", "(", "obj", ")", "elif", "line", ".", "strip", "(", ")", ".", "startswith", "(", "'export'", ")", ":", "file_name", "=", "line", "[", "6", ":", "]", ".", "strip", "(", ")", "export_folder", "=", "SETTINGS", "[", "'FLOW_EXECUTOR'", "]", "[", "'UPLOAD_DIR'", "]", "unique_name", "=", "'export_{}'", ".", "format", "(", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", "export_path", "=", "os", ".", "path", ".", "join", "(", "export_folder", ",", "unique_name", ")", "self", ".", "exported_files_mapper", "[", "self", ".", "data_id", "]", "[", "file_name", "]", "=", "unique_name", "shutil", ".", "move", "(", "file_name", ",", "export_path", ")", "else", ":", "# If JSON, save to MongoDB", "updates", "=", "{", "}", "for", 
"obj", "in", "iterjson", "(", "line", ")", ":", "for", "key", ",", "val", "in", "obj", ".", "items", "(", ")", ":", "if", "key", ".", "startswith", "(", "'proc.'", ")", ":", "if", "key", "==", "'proc.error'", ":", "process_error", ".", "append", "(", "val", ")", "if", "not", "process_rc", ":", "process_rc", "=", "1", "updates", "[", "'process_rc'", "]", "=", "process_rc", "updates", "[", "'process_error'", "]", "=", "process_error", "updates", "[", "'status'", "]", "=", "DATA_META", "[", "'STATUS_ERROR'", "]", "elif", "key", "==", "'proc.warning'", ":", "process_warning", ".", "append", "(", "val", ")", "updates", "[", "'process_warning'", "]", "=", "process_warning", "elif", "key", "==", "'proc.info'", ":", "process_info", ".", "append", "(", "val", ")", "updates", "[", "'process_info'", "]", "=", "process_info", "elif", "key", "==", "'proc.rc'", ":", "process_rc", "=", "int", "(", "val", ")", "updates", "[", "'process_rc'", "]", "=", "process_rc", "if", "process_rc", "!=", "0", ":", "updates", "[", "'status'", "]", "=", "DATA_META", "[", "'STATUS_ERROR'", "]", "elif", "key", "==", "'proc.progress'", ":", "process_progress", "=", "int", "(", "float", "(", "val", ")", "*", "100", ")", "updates", "[", "'process_progress'", "]", "=", "process_progress", "else", ":", "output", "[", "key", "]", "=", "val", "updates", "[", "'output'", "]", "=", "output", "if", "updates", ":", "await", "self", ".", "update_data_status", "(", "*", "*", "updates", ")", "# Process meta fields are collected in listener, so we can clear them.", "process_error", ",", "process_warning", ",", "process_info", "=", "[", "]", ",", "[", "]", ",", "[", "]", "if", "process_rc", ">", "0", ":", "log_file", ".", "close", "(", ")", "json_file", ".", "close", "(", ")", "await", "self", ".", "_send_manager_command", "(", "ExecutorProtocol", ".", "FINISH", ",", "extra_fields", "=", "{", "ExecutorProtocol", ".", "FINISH_PROCESS_RC", ":", "process_rc", "}", ")", "return", "# Debug output", "# Not referenced in Data object", "json_file", ".", "write", "(", "line", ")", "json_file", ".", "flush", "(", ")", "except", "ValueError", "as", "ex", ":", "# Ignore if not JSON", "log_file", ".", "write", "(", "line", ")", "log_file", ".", "flush", "(", ")", "except", "MemoryError", "as", "ex", ":", "logger", ".", "error", "(", "\"Out of memory:\\n\\n{}\"", ".", "format", "(", "ex", ")", ")", "except", "IOError", "as", "ex", ":", "# TODO: if ex.errno == 28: no more free space", "raise", "ex", "finally", ":", "# Store results", "log_file", ".", "close", "(", ")", "json_file", ".", "close", "(", ")", "return_code", "=", "await", "self", ".", "end", "(", ")", "if", "process_rc", "<", "return_code", ":", "process_rc", "=", "return_code", "# send a notification to the executor listener that we're done", "finish_fields", "=", "{", "ExecutorProtocol", ".", "FINISH_PROCESS_RC", ":", "process_rc", "}", "if", "spawn_processes", "and", "process_rc", "==", "0", ":", "finish_fields", "[", "ExecutorProtocol", ".", "FINISH_SPAWN_PROCESSES", "]", "=", "spawn_processes", "finish_fields", "[", "ExecutorProtocol", ".", "FINISH_EXPORTED_FILES", "]", "=", "self", ".", "exported_files_mapper", "return", "finish_fields" ]
Execute the script and save results.
[ "Execute", "the", "script", "and", "save", "results", "." ]
python
train
SeleniumHQ/selenium
py/selenium/webdriver/remote/webdriver.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webdriver.py#L1122-L1143
def set_window_size(self, width, height, windowHandle='current'):
    """
    Sets the width and height of the current window. (window.resizeTo)

    :Args:
     - width: the width in pixels to set the window to
     - height: the height in pixels to set the window to

    :Usage:
        ::

            driver.set_window_size(800,600)
    """
    if self.w3c:
        if windowHandle != 'current':
            warnings.warn("Only 'current' window is supported for W3C compatible browsers.")
        self.set_window_rect(width=int(width), height=int(height))
    else:
        self.execute(Command.SET_WINDOW_SIZE, {
            'width': int(width),
            'height': int(height),
            'windowHandle': windowHandle})
[ "def", "set_window_size", "(", "self", ",", "width", ",", "height", ",", "windowHandle", "=", "'current'", ")", ":", "if", "self", ".", "w3c", ":", "if", "windowHandle", "!=", "'current'", ":", "warnings", ".", "warn", "(", "\"Only 'current' window is supported for W3C compatibile browsers.\"", ")", "self", ".", "set_window_rect", "(", "width", "=", "int", "(", "width", ")", ",", "height", "=", "int", "(", "height", ")", ")", "else", ":", "self", ".", "execute", "(", "Command", ".", "SET_WINDOW_SIZE", ",", "{", "'width'", ":", "int", "(", "width", ")", ",", "'height'", ":", "int", "(", "height", ")", ",", "'windowHandle'", ":", "windowHandle", "}", ")" ]
Sets the width and height of the current window. (window.resizeTo)

        :Args:
         - width: the width in pixels to set the window to
         - height: the height in pixels to set the window to

        :Usage:
            ::

                driver.set_window_size(800,600)
[ "Sets", "the", "width", "and", "height", "of", "the", "current", "window", ".", "(", "window", ".", "resizeTo", ")" ]
python
train
limodou/uliweb
uliweb/lib/werkzeug/serving.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/serving.py#L469-L486
def make_server(host, port, app=None, threaded=False, processes=1,
                request_handler=None, passthrough_errors=False,
                ssl_context=None):
    """Create a new server instance that is either threaded, or forks
    or just processes one request after another.
    """
    if threaded and processes > 1:
        raise ValueError("cannot have a multithreaded and "
                         "multi process server.")
    elif threaded:
        return ThreadedWSGIServer(host, port, app, request_handler,
                                  passthrough_errors, ssl_context)
    elif processes > 1:
        return ForkingWSGIServer(host, port, app, processes, request_handler,
                                 passthrough_errors, ssl_context)
    else:
        return BaseWSGIServer(host, port, app, request_handler,
                              passthrough_errors, ssl_context)
[ "def", "make_server", "(", "host", ",", "port", ",", "app", "=", "None", ",", "threaded", "=", "False", ",", "processes", "=", "1", ",", "request_handler", "=", "None", ",", "passthrough_errors", "=", "False", ",", "ssl_context", "=", "None", ")", ":", "if", "threaded", "and", "processes", ">", "1", ":", "raise", "ValueError", "(", "\"cannot have a multithreaded and \"", "\"multi process server.\"", ")", "elif", "threaded", ":", "return", "ThreadedWSGIServer", "(", "host", ",", "port", ",", "app", ",", "request_handler", ",", "passthrough_errors", ",", "ssl_context", ")", "elif", "processes", ">", "1", ":", "return", "ForkingWSGIServer", "(", "host", ",", "port", ",", "app", ",", "processes", ",", "request_handler", ",", "passthrough_errors", ",", "ssl_context", ")", "else", ":", "return", "BaseWSGIServer", "(", "host", ",", "port", ",", "app", ",", "request_handler", ",", "passthrough_errors", ",", "ssl_context", ")" ]
Create a new server instance that is either threaded, or forks or just processes one request after another.
[ "Create", "a", "new", "server", "instance", "that", "is", "either", "threaded", "or", "forks", "or", "just", "processes", "one", "request", "after", "another", "." ]
python
train
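A minimal sketch of serving a WSGI app with make_server. The import below targets the vendored module named in this record and should be treated as an assumption about the package layout; the canonical werkzeug.serving import works the same way.

from uliweb.lib.werkzeug.serving import make_server  # assumed import path, per this record

def app(environ, start_response):
    # Trivial WSGI application for illustration.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello\n"]

# One threaded server; threaded=True combined with processes > 1 raises ValueError.
server = make_server("127.0.0.1", 5000, app, threaded=True)
server.serve_forever()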
tjcsl/cslbot
cslbot/hooks/url.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/hooks/url.py#L37-L77
def handle(send, msg, args):
    """Get titles for urls.

    Generate a short url.
    Get the page title.
    """
    worker = args["handler"].workers
    result = worker.run_pool(get_urls, [msg])
    try:
        urls = result.get(5)
    except multiprocessing.TimeoutError:
        worker.restart_pool()
        send("Url regex timed out.", target=args["config"]["core"]["ctrlchan"])
        return
    for url in urls:
        # Prevent botloops
        if (args["db"].query(Urls).filter(Urls.url == url,
                                          Urls.time > datetime.now() - timedelta(seconds=10)).count() > 1):
            return
        if url.startswith("https://twitter.com"):
            tid = url.split("/")[-1]
            twitter_api = get_api(args["config"])
            status = twitter_api.GetStatus(tid)
            text = status.text.replace("\n", " / ")
            send("** {} (@{}) on Twitter: {}".format(status.user.name, status.user.screen_name, text))
            return
        imgkey = args["config"]["api"]["googleapikey"]
        title = urlutils.get_title(url, imgkey)
        shortkey = args["config"]["api"]["bitlykey"]
        short = urlutils.get_short(url, shortkey)
        last = args["db"].query(Urls).filter(Urls.url == url).order_by(Urls.time.desc()).first()
        if args["config"]["feature"].getboolean("linkread"):
            if last is not None:
                lasttime = last.time.strftime("%H:%M:%S on %Y-%m-%d")
                send("Url %s previously posted at %s by %s -- %s" % (short, lasttime, last.nick, title))
            else:
                send("** %s - %s" % (title, short))
        args["db"].add(Urls(url=url, title=title, nick=args["nick"], time=datetime.now()))
[ "def", "handle", "(", "send", ",", "msg", ",", "args", ")", ":", "worker", "=", "args", "[", "\"handler\"", "]", ".", "workers", "result", "=", "worker", ".", "run_pool", "(", "get_urls", ",", "[", "msg", "]", ")", "try", ":", "urls", "=", "result", ".", "get", "(", "5", ")", "except", "multiprocessing", ".", "TimeoutError", ":", "worker", ".", "restart_pool", "(", ")", "send", "(", "\"Url regex timed out.\"", ",", "target", "=", "args", "[", "\"config\"", "]", "[", "\"core\"", "]", "[", "\"ctrlchan\"", "]", ")", "return", "for", "url", "in", "urls", ":", "# Prevent botloops", "if", "(", "args", "[", "\"db\"", "]", ".", "query", "(", "Urls", ")", ".", "filter", "(", "Urls", ".", "url", "==", "url", ",", "Urls", ".", "time", ">", "datetime", ".", "now", "(", ")", "-", "timedelta", "(", "seconds", "=", "10", ")", ")", ".", "count", "(", ")", ">", "1", ")", ":", "return", "if", "url", ".", "startswith", "(", "\"https://twitter.com\"", ")", ":", "tid", "=", "url", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "twitter_api", "=", "get_api", "(", "args", "[", "\"config\"", "]", ")", "status", "=", "twitter_api", ".", "GetStatus", "(", "tid", ")", "text", "=", "status", ".", "text", ".", "replace", "(", "\"\\n\"", ",", "\" / \"", ")", "send", "(", "\"** {} (@{}) on Twitter: {}\"", ".", "format", "(", "status", ".", "user", ".", "name", ",", "status", ".", "user", ".", "screen_name", ",", "text", ")", ")", "return", "imgkey", "=", "args", "[", "\"config\"", "]", "[", "\"api\"", "]", "[", "\"googleapikey\"", "]", "title", "=", "urlutils", ".", "get_title", "(", "url", ",", "imgkey", ")", "shortkey", "=", "args", "[", "\"config\"", "]", "[", "\"api\"", "]", "[", "\"bitlykey\"", "]", "short", "=", "urlutils", ".", "get_short", "(", "url", ",", "shortkey", ")", "last", "=", "args", "[", "\"db\"", "]", ".", "query", "(", "Urls", ")", ".", "filter", "(", "Urls", ".", "url", "==", "url", ")", ".", "order_by", "(", "Urls", ".", "time", ".", "desc", "(", ")", ")", ".", "first", "(", ")", "if", "args", "[", "\"config\"", "]", "[", "\"feature\"", "]", ".", "getboolean", "(", "\"linkread\"", ")", ":", "if", "last", "is", "not", "None", ":", "lasttime", "=", "last", ".", "time", ".", "strftime", "(", "\"%H:%M:%S on %Y-%m-%d\"", ")", "send", "(", "\"Url %s previously posted at %s by %s -- %s\"", "%", "(", "short", ",", "lasttime", ",", "last", ".", "nick", ",", "title", ")", ")", "else", ":", "send", "(", "\"** %s - %s\"", "%", "(", "title", ",", "short", ")", ")", "args", "[", "\"db\"", "]", ".", "add", "(", "Urls", "(", "url", "=", "url", ",", "title", "=", "title", ",", "nick", "=", "args", "[", "\"nick\"", "]", ",", "time", "=", "datetime", ".", "now", "(", ")", ")", ")" ]
Get titles for urls. Generate a short url. Get the page title.
[ "Get", "titles", "for", "urls", "." ]
python
train
PythonCharmers/python-future
src/future/backports/email/__init__.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/__init__.py#L72-L78
def message_from_binary_file(fp, *args, **kws): """Read a binary file and parse its contents into a Message object model. Optional _class and strict are passed to the Parser constructor. """ from future.backports.email.parser import BytesParser return BytesParser(*args, **kws).parse(fp)
[ "def", "message_from_binary_file", "(", "fp", ",", "*", "args", ",", "*", "*", "kws", ")", ":", "from", "future", ".", "backports", ".", "email", ".", "parser", "import", "BytesParser", "return", "BytesParser", "(", "*", "args", ",", "*", "*", "kws", ")", ".", "parse", "(", "fp", ")" ]
Read a binary file and parse its contents into a Message object model. Optional _class and strict are passed to the Parser constructor.
[ "Read", "a", "binary", "file", "and", "parse", "its", "contents", "into", "a", "Message", "object", "model", "." ]
python
train
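A hedged usage sketch for message_from_binary_file(): the file must be opened in binary mode, and 'message.eml' is a hypothetical path. Parsing is delegated to BytesParser, as the record shows.

from future.backports.email import message_from_binary_file

with open("message.eml", "rb") as fp:  # binary mode is required
    msg = message_from_binary_file(fp)
print(msg["Subject"])                  # the parsed Message exposes headers like a mapping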
pytroll/satpy
satpy/readers/hrit_jma.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/hrit_jma.py#L298-L301
def _mask_space(self, data): """Mask space pixels""" geomask = get_geostationary_mask(area=self.area) return data.where(geomask)
[ "def", "_mask_space", "(", "self", ",", "data", ")", ":", "geomask", "=", "get_geostationary_mask", "(", "area", "=", "self", ".", "area", ")", "return", "data", ".", "where", "(", "geomask", ")" ]
Mask space pixels
[ "Mask", "space", "pixels" ]
python
train
abingham/spor
src/spor/cli.py
https://github.com/abingham/spor/blob/673c8c36c99a4b9ea882f002bfb529f1eca89126/src/spor/cli.py#L227-L261
def details_handler(args): """usage: {program} details <anchor-id> [<path>] Get the details of a single anchor. """ repo = _open_repo(args) _, anchor = _get_anchor(repo, args['<anchor-id>']) print("""path: {file_path} encoding: {encoding} [before] {before} -------------- [topic] {topic} -------------- [after] {after} -------------- offset: {offset} width: {width}""".format( file_path=anchor.file_path, encoding=anchor.encoding, before=anchor.context.before, topic=anchor.context.topic, after=anchor.context.after, offset=anchor.context.offset, width=anchor.context.width)) return ExitCode.OK
[ "def", "details_handler", "(", "args", ")", ":", "repo", "=", "_open_repo", "(", "args", ")", "_", ",", "anchor", "=", "_get_anchor", "(", "repo", ",", "args", "[", "'<anchor-id>'", "]", ")", "print", "(", "\"\"\"path: {file_path}\nencoding: {encoding}\n\n[before]\n{before}\n--------------\n\n[topic]\n{topic}\n--------------\n\n[after]\n{after}\n--------------\n\noffset: {offset}\nwidth: {width}\"\"\"", ".", "format", "(", "file_path", "=", "anchor", ".", "file_path", ",", "encoding", "=", "anchor", ".", "encoding", ",", "before", "=", "anchor", ".", "context", ".", "before", ",", "topic", "=", "anchor", ".", "context", ".", "topic", ",", "after", "=", "anchor", ".", "context", ".", "after", ",", "offset", "=", "anchor", ".", "context", ".", "offset", ",", "width", "=", "anchor", ".", "context", ".", "width", ")", ")", "return", "ExitCode", ".", "OK" ]
usage: {program} details <anchor-id> [<path>] Get the details of a single anchor.
[ "usage", ":", "{", "program", "}", "details", "<anchor", "-", "id", ">", "[", "<path", ">", "]" ]
python
train
pkgw/pwkit
pwkit/bblocks.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/bblocks.py#L386-L444
def bs_tt_bblock (times, tstarts, tstops, p0=0.05, nbootstrap=512): """Bayesian Blocks for time-tagged events with bootstrapping uncertainty assessment. THE UNCERTAINTIES ARE NOT VERY GOOD! Arguments: tstarts - Array of input bin start times. tstops - Array of input bin stop times. times - Array of event arrival times. p0=0.05 - Probability of preferring solutions with additional bins. nbootstrap=512 - Number of bootstrap runs to perform. Returns a Holder with: blockstarts - Start times of output blocks. bsrates - Mean event rate in each bin from bootstrap analysis. bsrstds - ~Uncertainty: stddev of event rate in each bin from bootstrap analysis. counts - Number of events in each output block. finalp0 - Final value of p0, after iteration to minimize `nblocks`. ledges - Times of left edges of output blocks. midpoints - Times of midpoints of output blocks. nblocks - Number of output blocks. ncells - Number of input cells/bins. origp0 - Original value of p0. rates - Event rate associated with each block. redges - Times of right edges of output blocks. widths - Width of each output block. """ times = np.asarray (times) tstarts = np.asarray (tstarts) tstops = np.asarray (tstops) nevents = times.size if nevents < 1: raise ValueError ('must be given at least 1 event') info = tt_bblock (tstarts, tstops, times, p0) # Now bootstrap resample to assess uncertainties on the bin heights. This # is the approach recommended by Scargle+. bsrsums = np.zeros (info.nblocks) bsrsumsqs = np.zeros (info.nblocks) for _ in range (nbootstrap): bstimes = times[np.random.randint (0, times.size, times.size)] bstimes.sort () bsinfo = tt_bblock (tstarts, tstops, bstimes, p0) blocknums = np.minimum (np.searchsorted (bsinfo.redges, info.midpoints), bsinfo.nblocks - 1) samprates = bsinfo.rates[blocknums] bsrsums += samprates bsrsumsqs += samprates**2 bsrmeans = bsrsums / nbootstrap mask = bsrsumsqs / nbootstrap <= bsrmeans**2 bsrstds = np.sqrt (np.where (mask, 0, bsrsumsqs / nbootstrap - bsrmeans**2)) info.bsrates = bsrmeans info.bsrstds = bsrstds return info
[ "def", "bs_tt_bblock", "(", "times", ",", "tstarts", ",", "tstops", ",", "p0", "=", "0.05", ",", "nbootstrap", "=", "512", ")", ":", "times", "=", "np", ".", "asarray", "(", "times", ")", "tstarts", "=", "np", ".", "asarray", "(", "tstarts", ")", "tstops", "=", "np", ".", "asarray", "(", "tstops", ")", "nevents", "=", "times", ".", "size", "if", "nevents", "<", "1", ":", "raise", "ValueError", "(", "'must be given at least 1 event'", ")", "info", "=", "tt_bblock", "(", "tstarts", ",", "tstops", ",", "times", ",", "p0", ")", "# Now bootstrap resample to assess uncertainties on the bin heights. This", "# is the approach recommended by Scargle+.", "bsrsums", "=", "np", ".", "zeros", "(", "info", ".", "nblocks", ")", "bsrsumsqs", "=", "np", ".", "zeros", "(", "info", ".", "nblocks", ")", "for", "_", "in", "range", "(", "nbootstrap", ")", ":", "bstimes", "=", "times", "[", "np", ".", "random", ".", "randint", "(", "0", ",", "times", ".", "size", ",", "times", ".", "size", ")", "]", "bstimes", ".", "sort", "(", ")", "bsinfo", "=", "tt_bblock", "(", "tstarts", ",", "tstops", ",", "bstimes", ",", "p0", ")", "blocknums", "=", "np", ".", "minimum", "(", "np", ".", "searchsorted", "(", "bsinfo", ".", "redges", ",", "info", ".", "midpoints", ")", ",", "bsinfo", ".", "nblocks", "-", "1", ")", "samprates", "=", "bsinfo", ".", "rates", "[", "blocknums", "]", "bsrsums", "+=", "samprates", "bsrsumsqs", "+=", "samprates", "**", "2", "bsrmeans", "=", "bsrsums", "/", "nbootstrap", "mask", "=", "bsrsumsqs", "/", "nbootstrap", "<=", "bsrmeans", "**", "2", "bsrstds", "=", "np", ".", "sqrt", "(", "np", ".", "where", "(", "mask", ",", "0", ",", "bsrsumsqs", "/", "nbootstrap", "-", "bsrmeans", "**", "2", ")", ")", "info", ".", "bsrates", "=", "bsrmeans", "info", ".", "bsrstds", "=", "bsrstds", "return", "info" ]
Bayesian Blocks for time-tagged events with bootstrapping uncertainty assessment. THE UNCERTAINTIES ARE NOT VERY GOOD! Arguments: tstarts - Array of input bin start times. tstops - Array of input bin stop times. times - Array of event arrival times. p0=0.05 - Probability of preferring solutions with additional bins. nbootstrap=512 - Number of bootstrap runs to perform. Returns a Holder with: blockstarts - Start times of output blocks. bsrates - Mean event rate in each bin from bootstrap analysis. bsrstds - ~Uncertainty: stddev of event rate in each bin from bootstrap analysis. counts - Number of events in each output block. finalp0 - Final value of p0, after iteration to minimize `nblocks`. ledges - Times of left edges of output blocks. midpoints - Times of midpoints of output blocks. nblocks - Number of output blocks. ncells - Number of input cells/bins. origp0 - Original value of p0. rates - Event rate associated with each block. redges - Times of right edges of output blocks. widths - Width of each output block.
[ "Bayesian", "Blocks", "for", "time", "-", "tagged", "events", "with", "bootstrapping", "uncertainty", "assessment", ".", "THE", "UNCERTAINTIES", "ARE", "NOT", "VERY", "GOOD!", "Arguments", ":" ]
python
train
dhermes/bezier
src/bezier/_curve_helpers.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_curve_helpers.py#L871-L923
def maybe_reduce(nodes): r"""Reduce nodes in a curve if they are degree-elevated. .. note:: This is a helper for :func:`_full_reduce`. Hence there is no corresponding Fortran speedup. We check if the nodes are degree-elevated by projecting onto the space of degree-elevated curves of the same degree, then comparing to the projection. We form the projection by taking the corresponding (right) elevation matrix :math:`E` (from one degree lower) and forming :math:`E^T \left(E E^T\right)^{-1} E`. Args: nodes (numpy.ndarray): The nodes in the curve. Returns: Tuple[bool, numpy.ndarray]: Pair of values. The first indicates if the ``nodes`` were reduced. The second is the resulting nodes, either the reduced ones or the original passed in. Raises: .UnsupportedDegree: If the curve is degree 5 or higher. """ _, num_nodes = nodes.shape if num_nodes < 2: return False, nodes elif num_nodes == 2: projection = _PROJECTION0 denom = _PROJ_DENOM0 elif num_nodes == 3: projection = _PROJECTION1 denom = _PROJ_DENOM1 elif num_nodes == 4: projection = _PROJECTION2 denom = _PROJ_DENOM2 elif num_nodes == 5: projection = _PROJECTION3 denom = _PROJ_DENOM3 else: raise _helpers.UnsupportedDegree( num_nodes - 1, supported=(0, 1, 2, 3, 4) ) projected = _helpers.matrix_product(nodes, projection) / denom relative_err = projection_error(nodes, projected) if relative_err < _REDUCE_THRESHOLD: return True, reduce_pseudo_inverse(nodes) else: return False, nodes
[ "def", "maybe_reduce", "(", "nodes", ")", ":", "_", ",", "num_nodes", "=", "nodes", ".", "shape", "if", "num_nodes", "<", "2", ":", "return", "False", ",", "nodes", "elif", "num_nodes", "==", "2", ":", "projection", "=", "_PROJECTION0", "denom", "=", "_PROJ_DENOM0", "elif", "num_nodes", "==", "3", ":", "projection", "=", "_PROJECTION1", "denom", "=", "_PROJ_DENOM1", "elif", "num_nodes", "==", "4", ":", "projection", "=", "_PROJECTION2", "denom", "=", "_PROJ_DENOM2", "elif", "num_nodes", "==", "5", ":", "projection", "=", "_PROJECTION3", "denom", "=", "_PROJ_DENOM3", "else", ":", "raise", "_helpers", ".", "UnsupportedDegree", "(", "num_nodes", "-", "1", ",", "supported", "=", "(", "0", ",", "1", ",", "2", ",", "3", ",", "4", ")", ")", "projected", "=", "_helpers", ".", "matrix_product", "(", "nodes", ",", "projection", ")", "/", "denom", "relative_err", "=", "projection_error", "(", "nodes", ",", "projected", ")", "if", "relative_err", "<", "_REDUCE_THRESHOLD", ":", "return", "True", ",", "reduce_pseudo_inverse", "(", "nodes", ")", "else", ":", "return", "False", ",", "nodes" ]
r"""Reduce nodes in a curve if they are degree-elevated. .. note:: This is a helper for :func:`_full_reduce`. Hence there is no corresponding Fortran speedup. We check if the nodes are degree-elevated by projecting onto the space of degree-elevated curves of the same degree, then comparing to the projection. We form the projection by taking the corresponding (right) elevation matrix :math:`E` (from one degree lower) and forming :math:`E^T \left(E E^T\right)^{-1} E`. Args: nodes (numpy.ndarray): The nodes in the curve. Returns: Tuple[bool, numpy.ndarray]: Pair of values. The first indicates if the ``nodes`` were reduced. The second is the resulting nodes, either the reduced ones or the original passed in. Raises: .UnsupportedDegree: If the curve is degree 5 or higher.
[ "r", "Reduce", "nodes", "in", "a", "curve", "if", "they", "are", "degree", "-", "elevated", "." ]
python
train
Neurita/boyle
boyle/files/file_tree_map.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/file_tree_map.py#L326-L334
def create_folder(dirpath, overwrite=False): """ Will create dirpath folder. If dirpath already exists and overwrite is False, will append a '+' suffix to dirpath until dirpath does not exist.""" if not overwrite: while op.exists(dirpath): dirpath += '+' os.makedirs(dirpath, exist_ok=overwrite) return dirpath
[ "def", "create_folder", "(", "dirpath", ",", "overwrite", "=", "False", ")", ":", "if", "not", "overwrite", ":", "while", "op", ".", "exists", "(", "dirpath", ")", ":", "dirpath", "+=", "'+'", "os", ".", "makedirs", "(", "dirpath", ",", "exist_ok", "=", "overwrite", ")", "return", "dirpath" ]
Will create dirpath folder. If dirpath already exists and overwrite is False, will append a '+' suffix to dirpath until dirpath does not exist.
[ "Will", "create", "dirpath", "folder", ".", "If", "dirpath", "already", "exists", "and", "overwrite", "is", "False", "will", "append", "a", "+", "suffix", "to", "dirpath", "until", "dirpath", "does", "not", "exist", "." ]
python
valid
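A usage sketch for create_folder(), assuming the boyle package is installed and the function is importable from the path shown in the record; '/tmp/analysis' is a hypothetical directory.

from boyle.files.file_tree_map import create_folder

first = create_folder('/tmp/analysis')    # creates and returns '/tmp/analysis'
second = create_folder('/tmp/analysis')   # path already exists, so '+' is appended: '/tmp/analysis+'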
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1736-L1742
def transformer_ada_lmpackedbase_dialog(): """Set of hyperparameters.""" hparams = transformer_base_vq_ada_32ex_packed() hparams.max_length = 1024 hparams.ffn_layer = "dense_relu_dense" hparams.batch_size = 4096 return hparams
[ "def", "transformer_ada_lmpackedbase_dialog", "(", ")", ":", "hparams", "=", "transformer_base_vq_ada_32ex_packed", "(", ")", "hparams", ".", "max_length", "=", "1024", "hparams", ".", "ffn_layer", "=", "\"dense_relu_dense\"", "hparams", ".", "batch_size", "=", "4096", "return", "hparams" ]
Set of hyperparameters.
[ "Set", "of", "hyperparameters", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2419-L2422
def oauth_client_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/oauth_clients#create-client" api_path = "/api/v2/oauth/clients.json" return self.call(api_path, method="POST", data=data, **kwargs)
[ "def", "oauth_client_create", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/oauth/clients.json\"", "return", "self", ".", "call", "(", "api_path", ",", "method", "=", "\"POST\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/oauth_clients#create-client
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "oauth_clients#create", "-", "client" ]
python
train
IdentityPython/pysaml2
example/idp2/idp_uwsgi.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/example/idp2/idp_uwsgi.py#L342-L389
def redirect(self): """ This is the HTTP-redirect endpoint """ logger.info("--- In SSO Redirect ---") saml_msg = self.unpack_redirect() try: _key = saml_msg["key"] saml_msg = IDP.ticket[_key] self.req_info = saml_msg["req_info"] del IDP.ticket[_key] except KeyError: try: self.req_info = IDP.parse_authn_request(saml_msg["SAMLRequest"], BINDING_HTTP_REDIRECT) except KeyError: resp = BadRequest("Message signature verification failure") return resp(self.environ, self.start_response) _req = self.req_info.message if "SigAlg" in saml_msg and "Signature" in saml_msg: # Signed # request issuer = _req.issuer.text _certs = IDP.metadata.certs(issuer, "any", "signing") verified_ok = False for cert in _certs: if verify_redirect_signature(saml_msg, IDP.sec.sec_backend, cert): verified_ok = True break if not verified_ok: resp = BadRequest("Message signature verification failure") return resp(self.environ, self.start_response) if self.user: if _req.force_authn: saml_msg["req_info"] = self.req_info key = self._store_request(saml_msg) return self.not_authn(key, _req.requested_authn_context) else: return self.operation(saml_msg, BINDING_HTTP_REDIRECT) else: saml_msg["req_info"] = self.req_info key = self._store_request(saml_msg) return self.not_authn(key, _req.requested_authn_context) else: return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
[ "def", "redirect", "(", "self", ")", ":", "logger", ".", "info", "(", "\"--- In SSO Redirect ---\"", ")", "saml_msg", "=", "self", ".", "unpack_redirect", "(", ")", "try", ":", "_key", "=", "saml_msg", "[", "\"key\"", "]", "saml_msg", "=", "IDP", ".", "ticket", "[", "_key", "]", "self", ".", "req_info", "=", "saml_msg", "[", "\"req_info\"", "]", "del", "IDP", ".", "ticket", "[", "_key", "]", "except", "KeyError", ":", "try", ":", "self", ".", "req_info", "=", "IDP", ".", "parse_authn_request", "(", "saml_msg", "[", "\"SAMLRequest\"", "]", ",", "BINDING_HTTP_REDIRECT", ")", "except", "KeyError", ":", "resp", "=", "BadRequest", "(", "\"Message signature verification failure\"", ")", "return", "resp", "(", "self", ".", "environ", ",", "self", ".", "start_response", ")", "_req", "=", "self", ".", "req_info", ".", "message", "if", "\"SigAlg\"", "in", "saml_msg", "and", "\"Signature\"", "in", "saml_msg", ":", "# Signed", "# request", "issuer", "=", "_req", ".", "issuer", ".", "text", "_certs", "=", "IDP", ".", "metadata", ".", "certs", "(", "issuer", ",", "\"any\"", ",", "\"signing\"", ")", "verified_ok", "=", "False", "for", "cert", "in", "_certs", ":", "if", "verify_redirect_signature", "(", "saml_msg", ",", "IDP", ".", "sec", ".", "sec_backend", ",", "cert", ")", ":", "verified_ok", "=", "True", "break", "if", "not", "verified_ok", ":", "resp", "=", "BadRequest", "(", "\"Message signature verification failure\"", ")", "return", "resp", "(", "self", ".", "environ", ",", "self", ".", "start_response", ")", "if", "self", ".", "user", ":", "if", "_req", ".", "force_authn", ":", "saml_msg", "[", "\"req_info\"", "]", "=", "self", ".", "req_info", "key", "=", "self", ".", "_store_request", "(", "saml_msg", ")", "return", "self", ".", "not_authn", "(", "key", ",", "_req", ".", "requested_authn_context", ")", "else", ":", "return", "self", ".", "operation", "(", "saml_msg", ",", "BINDING_HTTP_REDIRECT", ")", "else", ":", "saml_msg", "[", "\"req_info\"", "]", "=", "self", ".", "req_info", "key", "=", "self", ".", "_store_request", "(", "saml_msg", ")", "return", "self", ".", "not_authn", "(", "key", ",", "_req", ".", "requested_authn_context", ")", "else", ":", "return", "self", ".", "operation", "(", "saml_msg", ",", "BINDING_HTTP_REDIRECT", ")" ]
This is the HTTP-redirect endpoint
[ "This", "is", "the", "HTTP", "-", "redirect", "endpoint" ]
python
train
Jammy2211/PyAutoLens
autolens/model/profiles/geometry_profiles.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/profiles/geometry_profiles.py#L75-L119
def move_grid_to_radial_minimum(func): """ Checks whether any coordinates in the grid are radially near (0.0, 0.0), which can lead to numerical faults in \ the evaluation of a light or mass profiles. If any coordinates are radially within the the radial minimum \ threshold, their (y,x) coordinates are shifted to that value to ensure they are evaluated correctly. By default this radial minimum is not used, and users should be certain they use a value that does not impact \ results. Parameters ---------- func : (profile, *args, **kwargs) -> Object A function that takes a grid of coordinates which may have a singularity as (0.0, 0.0) Returns ------- A function that can except cartesian or transformed coordinates """ @wraps(func) def wrapper(profile, grid, *args, **kwargs): """ Parameters ---------- profile : SphericalProfile The profiles that owns the function grid : ndarray PlaneCoordinates in either cartesian or profiles coordinate system args kwargs Returns ------- A value or coordinate in the same coordinate system as those passed in. """ radial_minimum_config = conf.NamedConfig(f"{conf.instance.config_path}/radial_minimum.ini") grid_radial_minimum = radial_minimum_config.get("radial_minimum", profile.__class__.__name__, float) with np.errstate(all='ignore'): # Division by zero fixed via isnan grid_radii = profile.grid_to_grid_radii(grid=grid) grid_radial_scale = np.where(grid_radii < grid_radial_minimum, grid_radial_minimum / grid_radii, 1.0) grid = np.multiply(grid, grid_radial_scale[:, None]) grid[np.isnan(grid)] = grid_radial_minimum return func(profile, grid, *args, **kwargs) return wrapper
[ "def", "move_grid_to_radial_minimum", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "profile", ",", "grid", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n\n Parameters\n ----------\n profile : SphericalProfile\n The profiles that owns the function\n grid : ndarray\n PlaneCoordinates in either cartesian or profiles coordinate system\n args\n kwargs\n\n Returns\n -------\n A value or coordinate in the same coordinate system as those passed in.\n \"\"\"", "radial_minimum_config", "=", "conf", ".", "NamedConfig", "(", "f\"{conf.instance.config_path}/radial_minimum.ini\"", ")", "grid_radial_minimum", "=", "radial_minimum_config", ".", "get", "(", "\"radial_minimum\"", ",", "profile", ".", "__class__", ".", "__name__", ",", "float", ")", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "# Division by zero fixed via isnan", "grid_radii", "=", "profile", ".", "grid_to_grid_radii", "(", "grid", "=", "grid", ")", "grid_radial_scale", "=", "np", ".", "where", "(", "grid_radii", "<", "grid_radial_minimum", ",", "grid_radial_minimum", "/", "grid_radii", ",", "1.0", ")", "grid", "=", "np", ".", "multiply", "(", "grid", ",", "grid_radial_scale", "[", ":", ",", "None", "]", ")", "grid", "[", "np", ".", "isnan", "(", "grid", ")", "]", "=", "grid_radial_minimum", "return", "func", "(", "profile", ",", "grid", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Checks whether any coordinates in the grid are radially near (0.0, 0.0), which can lead to numerical faults in \ the evaluation of a light or mass profiles. If any coordinates are radially within the the radial minimum \ threshold, their (y,x) coordinates are shifted to that value to ensure they are evaluated correctly. By default this radial minimum is not used, and users should be certain they use a value that does not impact \ results. Parameters ---------- func : (profile, *args, **kwargs) -> Object A function that takes a grid of coordinates which may have a singularity as (0.0, 0.0) Returns ------- A function that can except cartesian or transformed coordinates
[ "Checks", "whether", "any", "coordinates", "in", "the", "grid", "are", "radially", "near", "(", "0", ".", "0", "0", ".", "0", ")", "which", "can", "lead", "to", "numerical", "faults", "in", "\\", "the", "evaluation", "of", "a", "light", "or", "mass", "profiles", ".", "If", "any", "coordinates", "are", "radially", "within", "the", "the", "radial", "minimum", "\\", "threshold", "their", "(", "y", "x", ")", "coordinates", "are", "shifted", "to", "that", "value", "to", "ensure", "they", "are", "evaluated", "correctly", "." ]
python
valid
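The decorator above shields light and mass profile evaluation from coordinates that sit numerically too close to (0.0, 0.0). Below is a standalone NumPy sketch of the same clamping step, not the PyAutoLens decorator itself: grid_radial_minimum is a hypothetical threshold, and plain Euclidean radii stand in for grid_to_grid_radii().

import numpy as np

def clamp_grid_to_radial_minimum(grid, grid_radial_minimum=1e-8):
    # grid is an (N, 2) array of (y, x) coordinates centred on the profile.
    grid_radii = np.sqrt(np.sum(grid ** 2.0, axis=1))
    with np.errstate(all='ignore'):  # radii of exactly zero divide by zero here
        scale = np.where(grid_radii < grid_radial_minimum,
                         grid_radial_minimum / grid_radii, 1.0)
    grid = grid * scale[:, None]
    grid[np.isnan(grid)] = grid_radial_minimum  # points that sat exactly at the origin
    return grid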
ranaroussi/qtpylib
qtpylib/tools.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/tools.py#L459-L473
def get_timezone(as_timedelta=False): """ utility to get the machine's timezone """ try: offset_hour = -(time.altzone if time.daylight else time.timezone) except Exception as e: offset_hour = -(datetime.datetime.now() - datetime.datetime.utcnow()).seconds offset_hour = offset_hour // 3600 offset_hour = offset_hour if offset_hour < 10 else offset_hour // 10 if as_timedelta: return datetime.timedelta(hours=offset_hour) return 'Etc/GMT%+d' % offset_hour
[ "def", "get_timezone", "(", "as_timedelta", "=", "False", ")", ":", "try", ":", "offset_hour", "=", "-", "(", "time", ".", "altzone", "if", "time", ".", "daylight", "else", "time", ".", "timezone", ")", "except", "Exception", "as", "e", ":", "offset_hour", "=", "-", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ")", ".", "seconds", "offset_hour", "=", "offset_hour", "//", "3600", "offset_hour", "=", "offset_hour", "if", "offset_hour", "<", "10", "else", "offset_hour", "//", "10", "if", "as_timedelta", ":", "return", "datetime", ".", "timedelta", "(", "hours", "=", "offset_hour", ")", "return", "'Etc/GMT%+d'", "%", "offset_hour" ]
utility to get the machine's timezone
[ "utility", "to", "get", "the", "machine", "s", "timezone" ]
python
train
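A usage sketch for get_timezone(), assuming qtpylib is installed and tools refers to the module above. The string form follows the code's 'Etc/GMT%+d' pattern; with as_timedelta=True the local UTC offset is returned as a datetime.timedelta instead.

from qtpylib import tools

print(tools.get_timezone())                   # a string such as 'Etc/GMT+2'
print(tools.get_timezone(as_timedelta=True))  # the same offset as a datetime.timedelta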
pandas-dev/pandas
pandas/core/internals/blocks.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2508-L2519
def _try_coerce_result(self, result): """ reverse of try_coerce_args / try_operate """ if isinstance(result, np.ndarray): mask = isna(result) if result.dtype.kind in ['i', 'f']: result = result.astype('m8[ns]') result[mask] = tslibs.iNaT elif isinstance(result, (np.integer, np.float)): result = self._box_func(result) return result
[ "def", "_try_coerce_result", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "mask", "=", "isna", "(", "result", ")", "if", "result", ".", "dtype", ".", "kind", "in", "[", "'i'", ",", "'f'", "]", ":", "result", "=", "result", ".", "astype", "(", "'m8[ns]'", ")", "result", "[", "mask", "]", "=", "tslibs", ".", "iNaT", "elif", "isinstance", "(", "result", ",", "(", "np", ".", "integer", ",", "np", ".", "float", ")", ")", ":", "result", "=", "self", ".", "_box_func", "(", "result", ")", "return", "result" ]
reverse of try_coerce_args / try_operate
[ "reverse", "of", "try_coerce_args", "/", "try_operate" ]
python
train
bskinn/opan
opan/xyz.py
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L725-L770
def geom_iter(self, g_nums): """Iterator over a subset of geometries. The indices of the geometries to be returned are indicated by an iterable of |int|\\ s passed as `g_nums`. As with :meth:`geom_single`, each geometry is returned as a length-3N |npfloat_| with each atom's x/y/z coordinates grouped together:: [A1x, A1y, A1z, A2x, A2y, A2z, ...] In order to use NumPy `slicing or advanced indexing <http://docs.scipy.org/doc/numpy-1.10.0/reference/ arrays.indexing.html>`__, :data:`geoms` must first be explicitly converted to |nparray|, e.g.:: >>> x = opan.xyz.OpanXYZ(path='...') >>> np.array(x.geoms)[[2,6,9]] Parameters ---------- g_nums length-R iterable of |int| -- Indices of the desired geometries Yields ------ geom length-3N |npfloat_| -- Vectors of the atomic coordinates for each geometry indicated in `g_nums` Raises ------ ~exceptions.IndexError If an item in `g_nums` is invalid (out of range) """ # Using the custom coded pack_tups to not have to care whether the # input is iterable from .utils import pack_tups vals = pack_tups(g_nums) for val in vals: yield self.geom_single(val[0])
[ "def", "geom_iter", "(", "self", ",", "g_nums", ")", ":", "# Using the custom coded pack_tups to not have to care whether the", "# input is iterable", "from", ".", "utils", "import", "pack_tups", "vals", "=", "pack_tups", "(", "g_nums", ")", "for", "val", "in", "vals", ":", "yield", "self", ".", "geom_single", "(", "val", "[", "0", "]", ")" ]
Iterator over a subset of geometries. The indices of the geometries to be returned are indicated by an iterable of |int|\\ s passed as `g_nums`. As with :meth:`geom_single`, each geometry is returned as a length-3N |npfloat_| with each atom's x/y/z coordinates grouped together:: [A1x, A1y, A1z, A2x, A2y, A2z, ...] In order to use NumPy `slicing or advanced indexing <http://docs.scipy.org/doc/numpy-1.10.0/reference/ arrays.indexing.html>`__, :data:`geoms` must first be explicitly converted to |nparray|, e.g.:: >>> x = opan.xyz.OpanXYZ(path='...') >>> np.array(x.geoms)[[2,6,9]] Parameters ---------- g_nums length-R iterable of |int| -- Indices of the desired geometries Yields ------ geom length-3N |npfloat_| -- Vectors of the atomic coordinates for each geometry indicated in `g_nums` Raises ------ ~exceptions.IndexError If an item in `g_nums` is invalid (out of range)
[ "Iterator", "over", "a", "subset", "of", "geometries", "." ]
python
train
Contraz/demosys-py
demosys/project/base.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/project/base.py#L270-L276
def get_runnable_effects(self) -> List[Effect]: """ Returns all runnable effects in the project. :return: List of all runnable effects """ return [effect for name, effect in self._effects.items() if effect.runnable]
[ "def", "get_runnable_effects", "(", "self", ")", "->", "List", "[", "Effect", "]", ":", "return", "[", "effect", "for", "name", ",", "effect", "in", "self", ".", "_effects", ".", "items", "(", ")", "if", "effect", ".", "runnable", "]" ]
Returns all runnable effects in the project. :return: List of all runnable effects
[ "Returns", "all", "runnable", "effects", "in", "the", "project", ".", ":", "return", ":", "List", "of", "all", "runnable", "effects" ]
python
valid
captin411/ofxclient
ofxclient/institution.py
https://github.com/captin411/ofxclient/blob/4da2719f0ecbbf5eee62fb82c1b3b34ec955ee5e/ofxclient/institution.py#L98-L132
def authenticate(self, username=None, password=None): """Test the authentication credentials Raises a ``ValueError`` if there is a problem authenticating with the human readable reason given by the institution. :param username: optional username (use self.username by default) :type username: string or None :param password: optional password (use self.password by default) :type password: string or None """ u = self.username p = self.password if username and password: u = username p = password client = self.client() query = client.authenticated_query(username=u, password=p) res = client.post(query) ofx = BeautifulSoup(res, 'lxml') sonrs = ofx.find('sonrs') code = int(sonrs.find('code').contents[0].strip()) try: status = sonrs.find('message').contents[0].strip() except Exception: status = '' if code == 0: return 1 raise ValueError(status)
[ "def", "authenticate", "(", "self", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "u", "=", "self", ".", "username", "p", "=", "self", ".", "password", "if", "username", "and", "password", ":", "u", "=", "username", "p", "=", "password", "client", "=", "self", ".", "client", "(", ")", "query", "=", "client", ".", "authenticated_query", "(", "username", "=", "u", ",", "password", "=", "p", ")", "res", "=", "client", ".", "post", "(", "query", ")", "ofx", "=", "BeautifulSoup", "(", "res", ",", "'lxml'", ")", "sonrs", "=", "ofx", ".", "find", "(", "'sonrs'", ")", "code", "=", "int", "(", "sonrs", ".", "find", "(", "'code'", ")", ".", "contents", "[", "0", "]", ".", "strip", "(", ")", ")", "try", ":", "status", "=", "sonrs", ".", "find", "(", "'message'", ")", ".", "contents", "[", "0", "]", ".", "strip", "(", ")", "except", "Exception", ":", "status", "=", "''", "if", "code", "==", "0", ":", "return", "1", "raise", "ValueError", "(", "status", ")" ]
Test the authentication credentials Raises a ``ValueError`` if there is a problem authenticating with the human readable reason given by the institution. :param username: optional username (use self.username by default) :type username: string or None :param password: optional password (use self.password by default) :type password: string or None
[ "Test", "the", "authentication", "credentials" ]
python
train
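A hedged usage sketch for authenticate(): inst stands for an already-configured ofxclient Institution with saved credentials (construction details are omitted here). Per the record, success returns 1 and failure raises ValueError carrying the institution's human-readable message.

try:
    inst.authenticate()          # or inst.authenticate(username, password) to test other credentials
    print("credentials accepted")
except ValueError as err:
    print("login failed:", err)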
acutesoftware/virtual-AI-simulator
vais/envirosim.py
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/envirosim.py#L79-L88
def get_affects_for_param(self, nme): """ searches all affects and returns a list that affect the param named 'nme' """ res = [] for a in self.affects: if a.name == nme: res.append(a) return res
[ "def", "get_affects_for_param", "(", "self", ",", "nme", ")", ":", "res", "=", "[", "]", "for", "a", "in", "self", ".", "affects", ":", "if", "a", ".", "name", "==", "nme", ":", "res", ".", "append", "(", "a", ")", "return", "res" ]
searches all affects and returns a list that affect the param named 'nme'
[ "searches", "all", "affects", "and", "returns", "a", "list", "that", "affect", "the", "param", "named", "nme" ]
python
train
cldf/clts
src/pyclts/transcriptionsystem.py
https://github.com/cldf/clts/blob/2798554c9c4e668bce0e4f5b0d91cf03c2d7c13a/src/pyclts/transcriptionsystem.py#L179-L276
def _parse(self, string): """Parse a string and return its features. :param string: A one-symbol string in NFD Notes ----- Strategy is rather simple: we determine the base part of a string and then search left and right of this part for the additional features as expressed by the diacritics. Fails if a segment has more than one basic part. """ nstring = self._norm(string) # check whether sound is in self.sounds if nstring in self.sounds: sound = self.sounds[nstring] sound.normalized = nstring != string sound.source = string return sound match = list(self._regex.finditer(nstring)) # if the match has length 2, we assume that we have two sounds, so we split # the sound and pass it on for separate evaluation (recursive function) if len(match) == 2: sound1 = self._parse(nstring[:match[1].start()]) sound2 = self._parse(nstring[match[1].start():]) # if we have ANY unknown sound, we mark the whole sound as unknown, if # we have two known sounds of the same type (vowel or consonant), we # either construct a diphthong or a cluster if 'unknownsound' not in (sound1.type, sound2.type) and \ sound1.type == sound2.type: # diphthong creation if sound1.type == 'vowel': return Diphthong.from_sounds( # noqa: F405 string, sound1, sound2, self) elif sound1.type == 'consonant' and \ sound1.manner in ('stop', 'implosive', 'click', 'nasal') and \ sound2.manner in ('stop', 'implosive', 'affricate', 'fricative'): return Cluster.from_sounds( # noqa: F405 string, sound1, sound2, self) return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 if len(match) != 1: # Either no match or more than one; both is considered an error. return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 pre, mid, post = nstring.partition(nstring[match[0].start():match[0].end()]) base_sound = self.sounds[mid] if isinstance(base_sound, Marker): # noqa: F405 assert pre or post return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 # A base sound with diacritics or a custom symbol. features = attr.asdict(base_sound) features.update( source=string, generated=True, normalized=nstring != string, base=base_sound.grapheme) # we construct two versions: the "normal" version and the version where # we search for aliases and normalize them (as our features system for # diacritics may well define aliases grapheme, sound = '', '' for dia in [p + EMPTY for p in pre]: feature = self.diacritics[base_sound.type].get(dia, {}) if not feature: return UnknownSound( # noqa: F405 grapheme=nstring, source=string, ts=self) features[self._feature_values[feature]] = feature # we add the unaliased version to the grapheme grapheme += dia[0] # we add the corrected version (if this is needed) to the sound sound += self.features[base_sound.type][feature][0] # add the base sound grapheme += base_sound.grapheme sound += base_sound.s for dia in [EMPTY + p for p in post]: feature = self.diacritics[base_sound.type].get(dia, {}) # we are strict: if we don't know the feature, it's an unknown # sound if not feature: return UnknownSound( # noqa: F405 grapheme=nstring, source=string, ts=self) features[self._feature_values[feature]] = feature grapheme += dia[1] sound += self.features[base_sound.type][feature][1] features['grapheme'] = sound new_sound = self.sound_classes[base_sound.type](**features) # check whether grapheme differs from re-generated sound if text_type(new_sound) != sound: new_sound.alias = True if grapheme != sound: new_sound.alias = True new_sound.grapheme = grapheme return new_sound
[ "def", "_parse", "(", "self", ",", "string", ")", ":", "nstring", "=", "self", ".", "_norm", "(", "string", ")", "# check whether sound is in self.sounds", "if", "nstring", "in", "self", ".", "sounds", ":", "sound", "=", "self", ".", "sounds", "[", "nstring", "]", "sound", ".", "normalized", "=", "nstring", "!=", "string", "sound", ".", "source", "=", "string", "return", "sound", "match", "=", "list", "(", "self", ".", "_regex", ".", "finditer", "(", "nstring", ")", ")", "# if the match has length 2, we assume that we have two sounds, so we split", "# the sound and pass it on for separate evaluation (recursive function)", "if", "len", "(", "match", ")", "==", "2", ":", "sound1", "=", "self", ".", "_parse", "(", "nstring", "[", ":", "match", "[", "1", "]", ".", "start", "(", ")", "]", ")", "sound2", "=", "self", ".", "_parse", "(", "nstring", "[", "match", "[", "1", "]", ".", "start", "(", ")", ":", "]", ")", "# if we have ANY unknown sound, we mark the whole sound as unknown, if", "# we have two known sounds of the same type (vowel or consonant), we", "# either construct a diphthong or a cluster", "if", "'unknownsound'", "not", "in", "(", "sound1", ".", "type", ",", "sound2", ".", "type", ")", "and", "sound1", ".", "type", "==", "sound2", ".", "type", ":", "# diphthong creation", "if", "sound1", ".", "type", "==", "'vowel'", ":", "return", "Diphthong", ".", "from_sounds", "(", "# noqa: F405", "string", ",", "sound1", ",", "sound2", ",", "self", ")", "elif", "sound1", ".", "type", "==", "'consonant'", "and", "sound1", ".", "manner", "in", "(", "'stop'", ",", "'implosive'", ",", "'click'", ",", "'nasal'", ")", "and", "sound2", ".", "manner", "in", "(", "'stop'", ",", "'implosive'", ",", "'affricate'", ",", "'fricative'", ")", ":", "return", "Cluster", ".", "from_sounds", "(", "# noqa: F405", "string", ",", "sound1", ",", "sound2", ",", "self", ")", "return", "UnknownSound", "(", "grapheme", "=", "nstring", ",", "source", "=", "string", ",", "ts", "=", "self", ")", "# noqa: F405", "if", "len", "(", "match", ")", "!=", "1", ":", "# Either no match or more than one; both is considered an error.", "return", "UnknownSound", "(", "grapheme", "=", "nstring", ",", "source", "=", "string", ",", "ts", "=", "self", ")", "# noqa: F405", "pre", ",", "mid", ",", "post", "=", "nstring", ".", "partition", "(", "nstring", "[", "match", "[", "0", "]", ".", "start", "(", ")", ":", "match", "[", "0", "]", ".", "end", "(", ")", "]", ")", "base_sound", "=", "self", ".", "sounds", "[", "mid", "]", "if", "isinstance", "(", "base_sound", ",", "Marker", ")", ":", "# noqa: F405", "assert", "pre", "or", "post", "return", "UnknownSound", "(", "grapheme", "=", "nstring", ",", "source", "=", "string", ",", "ts", "=", "self", ")", "# noqa: F405", "# A base sound with diacritics or a custom symbol.", "features", "=", "attr", ".", "asdict", "(", "base_sound", ")", "features", ".", "update", "(", "source", "=", "string", ",", "generated", "=", "True", ",", "normalized", "=", "nstring", "!=", "string", ",", "base", "=", "base_sound", ".", "grapheme", ")", "# we construct two versions: the \"normal\" version and the version where", "# we search for aliases and normalize them (as our features system for", "# diacritics may well define aliases", "grapheme", ",", "sound", "=", "''", ",", "''", "for", "dia", "in", "[", "p", "+", "EMPTY", "for", "p", "in", "pre", "]", ":", "feature", "=", "self", ".", "diacritics", "[", "base_sound", ".", "type", "]", ".", "get", "(", "dia", ",", "{", "}", ")", "if", "not", "feature", ":", 
"return", "UnknownSound", "(", "# noqa: F405", "grapheme", "=", "nstring", ",", "source", "=", "string", ",", "ts", "=", "self", ")", "features", "[", "self", ".", "_feature_values", "[", "feature", "]", "]", "=", "feature", "# we add the unaliased version to the grapheme", "grapheme", "+=", "dia", "[", "0", "]", "# we add the corrected version (if this is needed) to the sound", "sound", "+=", "self", ".", "features", "[", "base_sound", ".", "type", "]", "[", "feature", "]", "[", "0", "]", "# add the base sound", "grapheme", "+=", "base_sound", ".", "grapheme", "sound", "+=", "base_sound", ".", "s", "for", "dia", "in", "[", "EMPTY", "+", "p", "for", "p", "in", "post", "]", ":", "feature", "=", "self", ".", "diacritics", "[", "base_sound", ".", "type", "]", ".", "get", "(", "dia", ",", "{", "}", ")", "# we are strict: if we don't know the feature, it's an unknown", "# sound", "if", "not", "feature", ":", "return", "UnknownSound", "(", "# noqa: F405", "grapheme", "=", "nstring", ",", "source", "=", "string", ",", "ts", "=", "self", ")", "features", "[", "self", ".", "_feature_values", "[", "feature", "]", "]", "=", "feature", "grapheme", "+=", "dia", "[", "1", "]", "sound", "+=", "self", ".", "features", "[", "base_sound", ".", "type", "]", "[", "feature", "]", "[", "1", "]", "features", "[", "'grapheme'", "]", "=", "sound", "new_sound", "=", "self", ".", "sound_classes", "[", "base_sound", ".", "type", "]", "(", "*", "*", "features", ")", "# check whether grapheme differs from re-generated sound", "if", "text_type", "(", "new_sound", ")", "!=", "sound", ":", "new_sound", ".", "alias", "=", "True", "if", "grapheme", "!=", "sound", ":", "new_sound", ".", "alias", "=", "True", "new_sound", ".", "grapheme", "=", "grapheme", "return", "new_sound" ]
Parse a string and return its features. :param string: A one-symbol string in NFD Notes ----- Strategy is rather simple: we determine the base part of a string and then search left and right of this part for the additional features as expressed by the diacritics. Fails if a segment has more than one basic part.
[ "Parse", "a", "string", "and", "return", "its", "features", "." ]
python
valid
mrstephenneal/mysql-toolkit
mysql/toolkit/components/operations/remove.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/operations/remove.py#L5-L11
def truncate(self, table): """Empty a table by deleting all of its rows.""" if isinstance(table, (list, set, tuple)): for t in table: self._truncate(t) else: self._truncate(table)
[ "def", "truncate", "(", "self", ",", "table", ")", ":", "if", "isinstance", "(", "table", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", ":", "for", "t", "in", "table", ":", "self", ".", "_truncate", "(", "t", ")", "else", ":", "self", ".", "_truncate", "(", "table", ")" ]
Empty a table by deleting all of its rows.
[ "Empty", "a", "table", "by", "deleting", "all", "of", "its", "rows", "." ]
python
train
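A usage sketch for truncate(): toolkit stands for a connected mysql-toolkit instance exposing the operation above, and the table names are hypothetical. A single name empties one table; an iterable of names is dispatched to _truncate() once per table.

toolkit.truncate('session_log')
toolkit.truncate(['staging_orders', 'staging_customers'])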
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1326-L1368
def _vertex_data_to_sframe(data, vid_field): """ Convert data into a vertex data sframe. Using vid_field to identify the id column. The returned sframe will have id column name '__id'. """ if isinstance(data, SFrame): # '__id' already in the sframe, and it is ok to not specify vid_field if vid_field is None and _VID_COLUMN in data.column_names(): return data if vid_field is None: raise ValueError("vid_field must be specified for SFrame input") data_copy = copy.copy(data) data_copy.rename({vid_field: _VID_COLUMN}, inplace=True) return data_copy if type(data) == Vertex or type(data) == list: return _vertex_list_to_sframe(data, '__id') elif HAS_PANDAS and type(data) == pd.DataFrame: if vid_field is None: # using the dataframe index as vertex id if data.index.is_unique: if not ("index" in data.columns): # pandas reset_index() will insert a new column of name "index". sf = SFrame(data.reset_index()) # "index" sf.rename({'index': _VID_COLUMN}, inplace=True) return sf else: # pandas reset_index() will insert a new column of name "level_0" if there exists a column named "index". sf = SFrame(data.reset_index()) # "level_0" sf.rename({'level_0': _VID_COLUMN}, inplace=True) return sf else: raise ValueError("Index of the vertices dataframe is not unique, \ try specifying vid_field name to use a column for vertex ids.") else: sf = SFrame(data) if _VID_COLUMN in sf.column_names(): raise ValueError('%s reserved vid column name already exists in the SFrame' % _VID_COLUMN) sf.rename({vid_field: _VID_COLUMN}, inplace=True) return sf else: raise TypeError('Vertices type %s is Not supported.' % str(type(data)))
[ "def", "_vertex_data_to_sframe", "(", "data", ",", "vid_field", ")", ":", "if", "isinstance", "(", "data", ",", "SFrame", ")", ":", "# '__id' already in the sframe, and it is ok to not specify vid_field", "if", "vid_field", "is", "None", "and", "_VID_COLUMN", "in", "data", ".", "column_names", "(", ")", ":", "return", "data", "if", "vid_field", "is", "None", ":", "raise", "ValueError", "(", "\"vid_field must be specified for SFrame input\"", ")", "data_copy", "=", "copy", ".", "copy", "(", "data", ")", "data_copy", ".", "rename", "(", "{", "vid_field", ":", "_VID_COLUMN", "}", ",", "inplace", "=", "True", ")", "return", "data_copy", "if", "type", "(", "data", ")", "==", "Vertex", "or", "type", "(", "data", ")", "==", "list", ":", "return", "_vertex_list_to_sframe", "(", "data", ",", "'__id'", ")", "elif", "HAS_PANDAS", "and", "type", "(", "data", ")", "==", "pd", ".", "DataFrame", ":", "if", "vid_field", "is", "None", ":", "# using the dataframe index as vertex id", "if", "data", ".", "index", ".", "is_unique", ":", "if", "not", "(", "\"index\"", "in", "data", ".", "columns", ")", ":", "# pandas reset_index() will insert a new column of name \"index\".", "sf", "=", "SFrame", "(", "data", ".", "reset_index", "(", ")", ")", "# \"index\"", "sf", ".", "rename", "(", "{", "'index'", ":", "_VID_COLUMN", "}", ",", "inplace", "=", "True", ")", "return", "sf", "else", ":", "# pandas reset_index() will insert a new column of name \"level_0\" if there exists a column named \"index\".", "sf", "=", "SFrame", "(", "data", ".", "reset_index", "(", ")", ")", "# \"level_0\"", "sf", ".", "rename", "(", "{", "'level_0'", ":", "_VID_COLUMN", "}", ",", "inplace", "=", "True", ")", "return", "sf", "else", ":", "raise", "ValueError", "(", "\"Index of the vertices dataframe is not unique, \\\n try specifying vid_field name to use a column for vertex ids.\"", ")", "else", ":", "sf", "=", "SFrame", "(", "data", ")", "if", "_VID_COLUMN", "in", "sf", ".", "column_names", "(", ")", ":", "raise", "ValueError", "(", "'%s reserved vid column name already exists in the SFrame'", "%", "_VID_COLUMN", ")", "sf", ".", "rename", "(", "{", "vid_field", ":", "_VID_COLUMN", "}", ",", "inplace", "=", "True", ")", "return", "sf", "else", ":", "raise", "TypeError", "(", "'Vertices type %s is Not supported.'", "%", "str", "(", "type", "(", "data", ")", ")", ")" ]
Convert data into a vertex data sframe. Using vid_field to identify the id column. The returned sframe will have id column name '__id'.
[ "Convert", "data", "into", "a", "vertex", "data", "sframe", ".", "Using", "vid_field", "to", "identify", "the", "id", "column", ".", "The", "returned", "sframe", "will", "have", "id", "column", "name", "__id", "." ]
python
train
bitesofcode/projexui
projexui/dialogs/xmessagebox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xmessagebox.py#L25-L36
def setVisible( self, state ): """ Updates the visible state for this message box. :param state | <bool> """ super(XMessageBox, self).setVisible(state) if ( state ): self.startTimer(100) self.layout().setSizeConstraint(QLayout.SetNoConstraint) self.resize( self.width() + 100, self.height() )
[ "def", "setVisible", "(", "self", ",", "state", ")", ":", "super", "(", "XMessageBox", ",", "self", ")", ".", "setVisible", "(", "state", ")", "if", "(", "state", ")", ":", "self", ".", "startTimer", "(", "100", ")", "self", ".", "layout", "(", ")", ".", "setSizeConstraint", "(", "QLayout", ".", "SetNoConstraint", ")", "self", ".", "resize", "(", "self", ".", "width", "(", ")", "+", "100", ",", "self", ".", "height", "(", ")", ")" ]
Updates the visible state for this message box. :param state | <bool>
[ "Updates", "the", "visible", "state", "for", "this", "message", "box", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
rwl/pylon
pyreto/rlopf.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/rlopf.py#L241-L253
def getActorLimits(self): """ Returns a list of 2-tuples, e.g. [(-3.14, 3.14), (-0.001, 0.001)], one tuple per parameter, giving min and max for that parameter. """ generators = [g for g in self.env.case.online_generators if g.bus.type != REFERENCE] limits = [] for g in generators: limits.append((g.p_min, g.p_max)) logger.info("Actor limits: %s" % limits) return limits
[ "def", "getActorLimits", "(", "self", ")", ":", "generators", "=", "[", "g", "for", "g", "in", "self", ".", "env", ".", "case", ".", "online_generators", "if", "g", ".", "bus", ".", "type", "!=", "REFERENCE", "]", "limits", "=", "[", "]", "for", "g", "in", "generators", ":", "limits", ".", "append", "(", "(", "g", ".", "p_min", ",", "g", ".", "p_max", ")", ")", "logger", ".", "info", "(", "\"Actor limits: %s\"", "%", "limits", ")", "return", "limits" ]
Returns a list of 2-tuples, e.g. [(-3.14, 3.14), (-0.001, 0.001)], one tuple per parameter, giving min and max for that parameter.
[ "Returns", "a", "list", "of", "2", "-", "tuples", "e", ".", "g", ".", "[", "(", "-", "3", ".", "14", "3", ".", "14", ")", "(", "-", "0", ".", "001", "0", ".", "001", ")", "]", "one", "tuple", "per", "parameter", "giving", "min", "and", "max", "for", "that", "parameter", "." ]
python
train
unbit/sftpclone
sftpclone/sftpclone.py
https://github.com/unbit/sftpclone/blob/1cc89478e680fc4e0d12b1a15b5bafd0390d05da/sftpclone/sftpclone.py#L415-L445
def check_for_deletion(self, relative_path=None): """Traverse the entire remote_path tree. Find files/directories that need to be deleted, not being present in the local folder. """ if not relative_path: relative_path = str() # root of shared directory tree remote_path = path_join(self.remote_path, relative_path) local_path = path_join(self.local_path, relative_path) for remote_st in self.sftp.listdir_attr(remote_path): r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename)) inner_remote_path = path_join(remote_path, remote_st.filename) inner_local_path = path_join(local_path, remote_st.filename) # check if remote_st is a symlink # otherwise could delete file outside shared directory if S_ISLNK(r_lstat.st_mode): if self._must_be_deleted(inner_local_path, r_lstat): self.remote_delete(inner_remote_path, r_lstat) continue if self._must_be_deleted(inner_local_path, remote_st): self.remote_delete(inner_remote_path, remote_st) elif S_ISDIR(remote_st.st_mode): self.check_for_deletion( path_join(relative_path, remote_st.filename) )
[ "def", "check_for_deletion", "(", "self", ",", "relative_path", "=", "None", ")", ":", "if", "not", "relative_path", ":", "relative_path", "=", "str", "(", ")", "# root of shared directory tree", "remote_path", "=", "path_join", "(", "self", ".", "remote_path", ",", "relative_path", ")", "local_path", "=", "path_join", "(", "self", ".", "local_path", ",", "relative_path", ")", "for", "remote_st", "in", "self", ".", "sftp", ".", "listdir_attr", "(", "remote_path", ")", ":", "r_lstat", "=", "self", ".", "sftp", ".", "lstat", "(", "path_join", "(", "remote_path", ",", "remote_st", ".", "filename", ")", ")", "inner_remote_path", "=", "path_join", "(", "remote_path", ",", "remote_st", ".", "filename", ")", "inner_local_path", "=", "path_join", "(", "local_path", ",", "remote_st", ".", "filename", ")", "# check if remote_st is a symlink", "# otherwise could delete file outside shared directory", "if", "S_ISLNK", "(", "r_lstat", ".", "st_mode", ")", ":", "if", "self", ".", "_must_be_deleted", "(", "inner_local_path", ",", "r_lstat", ")", ":", "self", ".", "remote_delete", "(", "inner_remote_path", ",", "r_lstat", ")", "continue", "if", "self", ".", "_must_be_deleted", "(", "inner_local_path", ",", "remote_st", ")", ":", "self", ".", "remote_delete", "(", "inner_remote_path", ",", "remote_st", ")", "elif", "S_ISDIR", "(", "remote_st", ".", "st_mode", ")", ":", "self", ".", "check_for_deletion", "(", "path_join", "(", "relative_path", ",", "remote_st", ".", "filename", ")", ")" ]
Traverse the entire remote_path tree. Find files/directories that need to be deleted, not being present in the local folder.
[ "Traverse", "the", "entire", "remote_path", "tree", "." ]
python
train
tradenity/python-sdk
tradenity/resources/table_rate_rule.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/table_rate_rule.py#L492-L512
def get_table_rate_rule_by_id(cls, table_rate_rule_id, **kwargs): """Find TableRateRule Return single instance of TableRateRule by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True) >>> result = thread.get() :param async bool :param str table_rate_rule_id: ID of tableRateRule to return (required) :return: TableRateRule If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs) else: (data) = cls._get_table_rate_rule_by_id_with_http_info(table_rate_rule_id, **kwargs) return data
[ "def", "get_table_rate_rule_by_id", "(", "cls", ",", "table_rate_rule_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_table_rate_rule_by_id_with_http_info", "(", "table_rate_rule_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_get_table_rate_rule_by_id_with_http_info", "(", "table_rate_rule_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Find TableRateRule. Returns a single instance of TableRateRule by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_table_rate_rule_by_id(table_rate_rule_id, async=True) >>> result = thread.get() :param async bool :param str table_rate_rule_id: ID of tableRateRule to return (required) :return: TableRateRule If the method is called asynchronously, returns the request thread.
[ "Find", "TableRateRule" ]
python
train
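Usage note for the record above: the call is synchronous unless async=True is passed, in which case a thread is returned and .get() yields the resource. A short sketch under the assumption that get_table_rate_rule_by_id is exposed as a classmethod (the import path mirrors the path field above, and the ID is a placeholder). Because async became a reserved word in Python 3.7+, the keyword is passed via dict unpacking here.

from tradenity.resources.table_rate_rule import TableRateRule

rule_id = "example-rule-id"  # placeholder, not a real resource id

# Synchronous call: returns the TableRateRule instance directly.
rule = TableRateRule.get_table_rate_rule_by_id(rule_id)

# Asynchronous call: returns a thread whose get() yields the instance.
thread = TableRateRule.get_table_rate_rule_by_id(rule_id, **{"async": True})
rule = thread.get()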
mgraffg/EvoDAG
EvoDAG/model.py
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/model.py#L438-L445
def graphviz(self, directory, **kwargs): "Directory to store the graphviz models" import os if not os.path.isdir(directory): os.mkdir(directory) output = os.path.join(directory, 'evodag-%s') for k, m in enumerate(self.models): m.graphviz(output % k, **kwargs)
[ "def", "graphviz", "(", "self", ",", "directory", ",", "*", "*", "kwargs", ")", ":", "import", "os", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "os", ".", "mkdir", "(", "directory", ")", "output", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'evodag-%s'", ")", "for", "k", ",", "m", "in", "enumerate", "(", "self", ".", "models", ")", ":", "m", ".", "graphviz", "(", "output", "%", "k", ",", "*", "*", "kwargs", ")" ]
Directory to store the graphviz models
[ "Directory", "to", "store", "the", "graphviz", "models" ]
python
train
stephenmcd/sphinx-me
sphinx_me.py
https://github.com/stephenmcd/sphinx-me/blob/9f51a04d58a90834a787246ce475a564b4f9e5ee/sphinx_me.py#L21-L57
def install(): """ Main entry point for running sphinx_me as a script. Creates a docs directory in the current directory and adds the required files for generating Sphinx docs from the project's README file - a conf module that calls setup_conf() from this module, and an index file that includes the project's README. """ for name in listdir(getcwd()): if splitext(name)[0].upper() == "README": readme = name break else: print() print("ABORT: No README file in the current directory.") return docs_path = join(getcwd(), "docs") if not isdir(docs_path): mkdir(docs_path) with open(join(docs_path, "index.rst"), "w") as f: f.write(".. include:: ../%s" % readme) with open(join(docs_path, "conf.py"), "w") as f: f.write("# This file is automatically generated via sphinx-me\n") f.write("from sphinx_me import setup_conf; setup_conf(globals())\n") print() print("SUCCESS: Sphinx docs layout created in %s" % docs_path) try: import sphinx except ImportError: print() print("Sphinx not installed. Not building docs.") else: build_path = join(docs_path, "build") Popen(["sphinx-build", docs_path, build_path]).wait() print() print("Docs built in %s" % build_path)
[ "def", "install", "(", ")", ":", "for", "name", "in", "listdir", "(", "getcwd", "(", ")", ")", ":", "if", "splitext", "(", "name", ")", "[", "0", "]", ".", "upper", "(", ")", "==", "\"README\"", ":", "readme", "=", "name", "break", "else", ":", "print", "(", ")", "print", "(", "\"ABORT: No README file in the current directory.\"", ")", "return", "docs_path", "=", "join", "(", "getcwd", "(", ")", ",", "\"docs\"", ")", "if", "not", "isdir", "(", "docs_path", ")", ":", "mkdir", "(", "docs_path", ")", "with", "open", "(", "join", "(", "docs_path", ",", "\"index.rst\"", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\".. include:: ../%s\"", "%", "readme", ")", "with", "open", "(", "join", "(", "docs_path", ",", "\"conf.py\"", ")", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"# This file is automatically generated via sphinx-me\\n\"", ")", "f", ".", "write", "(", "\"from sphinx_me import setup_conf; setup_conf(globals())\\n\"", ")", "print", "(", ")", "print", "(", "\"SUCCESS: Sphinx docs layout created in %s\"", "%", "docs_path", ")", "try", ":", "import", "sphinx", "except", "ImportError", ":", "print", "(", ")", "print", "(", "\"Sphinx not installed. Not building docs.\"", ")", "else", ":", "build_path", "=", "join", "(", "docs_path", ",", "\"build\"", ")", "Popen", "(", "[", "\"sphinx-build\"", ",", "docs_path", ",", "build_path", "]", ")", ".", "wait", "(", ")", "print", "(", ")", "print", "(", "\"Docs built in %s\"", "%", "build_path", ")" ]
Main entry point for running sphinx_me as a script. Creates a docs directory in the current directory and adds the required files for generating Sphinx docs from the project's README file - a conf module that calls setup_conf() from this module, and an index file that includes the project's README.
[ "Main", "entry", "point", "for", "running", "sphinx_me", "as", "a", "script", ".", "Creates", "a", "docs", "directory", "in", "the", "current", "directory", "and", "adds", "the", "required", "files", "for", "generating", "Sphinx", "docs", "from", "the", "project", "s", "README", "file", "-", "a", "conf", "module", "that", "calls", "setup_conf", "()", "from", "this", "module", "and", "an", "index", "file", "that", "includes", "the", "project", "s", "README", "." ]
python
train
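For reference, the files that install() writes are tiny, so the generated layout can be shown outright. The sketch below reproduces the same scaffold by hand (assuming it is run from a project root whose README is README.rst) without calling sphinx-build; the written strings are copied from the function above.

import os

# docs/index.rst simply includes the project README;
# docs/conf.py delegates everything to sphinx_me.setup_conf().
os.makedirs("docs", exist_ok=True)
with open(os.path.join("docs", "index.rst"), "w") as f:
    f.write(".. include:: ../README.rst")
with open(os.path.join("docs", "conf.py"), "w") as f:
    f.write("# This file is automatically generated via sphinx-me\n")
    f.write("from sphinx_me import setup_conf; setup_conf(globals())\n")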
hozn/coilmq
coilmq/store/sa/__init__.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/store/sa/__init__.py#L169-L187
def size(self, destination): """ Size of the queue for specified destination. @param destination: The queue destination (e.g. /queue/foo) @type destination: C{str} @return: The number of frames in specified queue. @rtype: C{int} """ session = meta.Session() sel = select([func.count(model.frames_table.c.message_id)]).where( model.frames_table.c.destination == destination) result = session.execute(sel) first = result.fetchone() if not first: return 0 else: return int(first[0])
[ "def", "size", "(", "self", ",", "destination", ")", ":", "session", "=", "meta", ".", "Session", "(", ")", "sel", "=", "select", "(", "[", "func", ".", "count", "(", "model", ".", "frames_table", ".", "c", ".", "message_id", ")", "]", ")", ".", "where", "(", "model", ".", "frames_table", ".", "c", ".", "destination", "==", "destination", ")", "result", "=", "session", ".", "execute", "(", "sel", ")", "first", "=", "result", ".", "fetchone", "(", ")", "if", "not", "first", ":", "return", "0", "else", ":", "return", "int", "(", "first", "[", "0", "]", ")" ]
Size of the queue for specified destination. @param destination: The queue destination (e.g. /queue/foo) @type destination: C{str} @return: The number of frames in specified queue. @rtype: C{int}
[ "Size", "of", "the", "queue", "for", "specified", "destination", "." ]
python
train
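The size() query above is a plain aggregate over the frames table filtered by destination. Here is a self-contained sketch of the same query shape against an in-memory SQLite table, assuming SQLAlchemy 1.4+; the table definition is a stand-in, not coilmq's actual model, and it uses the modern select(...) call style rather than the legacy select([...]) list form seen in the source.

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, func, select)

engine = create_engine("sqlite:///:memory:")
meta = MetaData()
frames = Table(  # stand-in for coilmq's frames table
    "frames", meta,
    Column("message_id", Integer, primary_key=True),
    Column("destination", String),
)
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(frames.insert(), [
        {"destination": "/queue/foo"},
        {"destination": "/queue/foo"},
        {"destination": "/queue/bar"},
    ])
    stmt = (select(func.count(frames.c.message_id))
            .where(frames.c.destination == "/queue/foo"))
    print(conn.execute(stmt).scalar())  # -> 2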
solvebio/solvebio-python
solvebio/cli/auth.py
https://github.com/solvebio/solvebio-python/blob/b29614643043afd19c1d8074e8f25c6700d51a73/solvebio/cli/auth.py#L48-L75
def login(*args, **kwargs): """ Prompt user for login information (domain/email/password). Domain, email and password are used to get the user's API key. Always updates the stored credentials file. """ if args and args[0].api_key: # Handle command-line arguments if provided. solvebio.login(api_key=args[0].api_key) elif kwargs: # Run the global login() if kwargs are provided # or local credentials are found. solvebio.login(**kwargs) else: interactive_login() # Print information about the current user user = client.whoami() if user: print_user(user) save_credentials(user['email'].lower(), solvebio.api_key) _print_msg('Updated local credentials.') return True else: _print_msg('Invalid credentials. You may not be logged-in.') return False
[ "def", "login", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "and", "args", "[", "0", "]", ".", "api_key", ":", "# Handle command-line arguments if provided.", "solvebio", ".", "login", "(", "api_key", "=", "args", "[", "0", "]", ".", "api_key", ")", "elif", "kwargs", ":", "# Run the global login() if kwargs are provided", "# or local credentials are found.", "solvebio", ".", "login", "(", "*", "*", "kwargs", ")", "else", ":", "interactive_login", "(", ")", "# Print information about the current user", "user", "=", "client", ".", "whoami", "(", ")", "if", "user", ":", "print_user", "(", "user", ")", "save_credentials", "(", "user", "[", "'email'", "]", ".", "lower", "(", ")", ",", "solvebio", ".", "api_key", ")", "_print_msg", "(", "'Updated local credentials.'", ")", "return", "True", "else", ":", "_print_msg", "(", "'Invalid credentials. You may not be logged-in.'", ")", "return", "False" ]
Prompt user for login information (domain/email/password). Domain, email and password are used to get the user's API key. Always updates the stored credentials file.
[ "Prompt", "user", "for", "login", "information", "(", "domain", "/", "email", "/", "password", ")", ".", "Domain", "email", "and", "password", "are", "used", "to", "get", "the", "user", "s", "API", "key", "." ]
python
test
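The login flow above prefers an explicit API key, then keyword credentials, then an interactive prompt, and finally verifies the session with whoami() before caching credentials. A minimal non-interactive sketch; the key is a placeholder and must come from your own SolveBio account.

import solvebio
from solvebio.cli.auth import login

# Direct, non-interactive login with a placeholder key.
solvebio.login(api_key="sb_test_xxx")

# Or via the CLI helper above, which also prints the user and caches credentials.
login(api_key="sb_test_xxx")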
Kortemme-Lab/pull_into_place
pull_into_place/structures.py
https://github.com/Kortemme-Lab/pull_into_place/blob/247f303100a612cc90cf31c86e4fe5052eb28c8d/pull_into_place/structures.py#L121-L449
def read_and_calculate(workspace, pdb_paths): """ Calculate a variety of score and distance metrics for the given structures. """ # Parse the given restraints file. The restraints definitions are used to # calculate the "restraint_dist" metric, which reflects how well each # structure achieves the desired geometry. Note that this is calculated # whether or not restraints were used to create the structures in question. # For example, the validation runs don't use restraints but the restraint # distance is a very important metric for deciding which designs worked. restraints = parse_restraints(workspace.restraints_path) # Calculate score and distance metrics for each structure. from klab.bio.basics import residue_type_3to1_map records = [] metadata = {} num_restraints = len(restraints) + 1 atom_xyzs = {} fragment_size = 0 # It's kinda hard to tell which lines are part of the score table. The # first column has some pretty heterogeneous strings (examples below) and # all the other columns are just numbers. My strategy here is to try to # make a regular expression that matches all of these examples, with the # exception of the ligand. I think the ligand will simply be too # heterogeneous to match, and the purpose of this is to get dunbrack # scores, which the ligand doesn't have. # # MET:NtermProteinFull_1 # ASN_2 # LYS:protein_cutpoint_lower_39 # ASP:protein_cutpoint_upper_40 # ALA:CtermProteinFull_124 # HIS_D_224 # pdb_EQU_250 score_table_pattern = re.compile( r'^[A-Z]{3}(?:_[A-Z])?' # Residue name with optional tautomer. r'(?::[A-Za-z_]+)?' # Optional patch type. r'_([0-9]+) ' # Residue number preceded by underscore. ) # The terminal space is important to match # the full residue number. for i, path in enumerate(sorted(pdb_paths)): record = {'path': os.path.basename(path)} sequence = "" sequence_map = {} last_residue_id = None dunbrack_index = None dunbrack_scores = {} # Update the user on our progress, because this is often slow. sys.stdout.write("\rReading '{}' [{}/{}]".format( os.path.relpath(os.path.dirname(path)), i+1, len(pdb_paths))) sys.stdout.flush() # Read the PDB file, which we are assuming is gzipped. try: with gzip.open(path) as file: lines = file.readlines() except IOError: print "\nFailed to read '{}'".format(path) continue if not lines: print "\n{} is empty".format(path) continue # Get different information from different lines in the PDB file. Some # of these lines are specific to different simulations. 
for line in lines: line = line.decode('utf8') score_table_match = \ dunbrack_index and score_table_pattern.match(line) if line.startswith('pose'): meta = ScoreMetadata( name='total_score', title='Total Score', unit='REU', order=1, ) record['total_score'] = float(line.split()[1]) metadata[meta.name] = meta elif line.startswith('label'): fields = line.split() dunbrack_index = fields.index('fa_dun') elif score_table_match: residue_id = int(score_table_match.group(1)) for restraint in restraints: if residue_id in restraint.residue_ids: dunbrack_score = float(line.split()[dunbrack_index]) dunbrack_scores[residue_id] = dunbrack_score break elif line.startswith('rmsd'): meta = ScoreMetadata( name='loop_rmsd', title='Loop RMSD (Backbone Heavy-Atom)', unit='Å', guide=1.0, lower=0.0, upper='95%', order=4, ) record[meta.name] = float(line.split()[1]) metadata[meta.name] = meta elif line.startswith(' all_heavy_atom_unsats'): meta = ScoreMetadata( name='buried_unsats', title='Buried Unsatsified H-Bonds', order=5, ) record[meta.name] = float(line.split()[2]) metadata[meta.name] = meta elif line.startswith(' sc_heavy_atom_unsats'): meta = ScoreMetadata( name='buried_unsats_sidechain', title='Buried Unsatisfied H-Bonds (Sidechain)', order=5, ) record[meta.name] = float(line.split()[2]) metadata[meta.name] = meta elif line.startswith(' bb_heavy_atom_unsats'): meta = ScoreMetadata( name='buried_unsats_backbone', title='Buried Unsatisfied H-Bonds (Backbone)', order=5, ) record[meta.name] = float(line.split()[2]) metadata[meta.name] = meta elif line.startswith('time'): meta = ScoreMetadata( name='simulation_time', title='Simulation Time', unit='sec', order=5, ) record[meta.name] = float(line.split()[1]) metadata[meta.name] = meta elif line.startswith('FragmentScoreFilter '): fragment_size = line.split()[2].split('-')[0] elif line.startswith('FSF') or line.startswith('FragmentScoreFilter_metric'): splitline = line.split() if splitline[1] == 'Max': max_res = 0 max_crmsd = 0 if splitline[3] == 'res:': max_res = splitline[3] meta = ScoreMetadata( name='max_fragment_crmsd_position', title = 'Max {}-Residue Fragment RMSD \ (C-Alpha) Position'.format(fragment_size), order=7) elif splitline[3] == 'score:': max_crmsd = splitline[3] meta = ScoreMetadata( name='max_fragment_crmsd_score', title = 'Max {}-Residue Fragment RMSD \ (C-Alpha)'.format(fragment_size), order=7) elif splitline[1] == 'Min': min_res = 0 min_crmsd = 0 if splitline[3] == 'res:': min_res = splitline[3] meta = ScoreMetadata( name='min_fragment_crmsd_position', title = 'Min {}-Residue Fragment RMSD \ (C-Alpha) Position'.format(fragment_size), order=8) elif splitline[3] == 'score:': min_crmsd = splitline[3] meta = ScoreMetadata( name='min_fragment_crmsd_score', title = 'Min {}-Residue Fragment RMSD \ (C-Alpha)'.format(fragment_size), order=8) elif splitline[1] == 'Avg': meta = ScoreMetadata( name='avg_fragment_crmsd', title='Avg {}-Residue Fragment RMSD \ (C-Alpha)'.format(fragment_size), order=9) else: position = splitline[2] crmsd = splitline[4] meta = ScoreMetadata( name='fragment_crmsd_pos_{}'.format(position), title='{}-Residue Fragment RMSD at Res {} \ (C-Alpha)'.format(fragment_size,position), order=6) record[meta.name] = float(splitline[4]) metadata[meta.name] = meta elif line.startswith('EXTRA_SCORE'): tokens = line[len('EXTRA_SCORE_'):].rsplit(None, 1) meta = parse_extra_metric(tokens[0], 5) record[meta.name] = float(tokens[1]) metadata[meta.name] = meta elif line.startswith('EXTRA_METRIC'): tokens = line[len('EXTRA_METRIC '):].rsplit(None, 1) 
# Ignore the BuriedUnsat filter. It just reports 911 every # time, and we extract the actual buried unsat information from # some other lines it adds to the PDB. if tokens[0] == 'IGNORE': continue if tokens[0] == 'Buried Unsatisfied H-Bonds [-|#]': continue meta = parse_extra_metric(tokens[0], 5) record[meta.name] = float(tokens[1]) metadata[meta.name] = meta elif (line.startswith('ATOM') or line.startswith('HETATM')): atom_name = line[12:16].strip() residue_id = int(line[22:26].strip()) residue_name = line[17:20].strip() # Keep track of this model's sequence. if residue_id != last_residue_id: if line.startswith('ATOM'): one_letter_code = residue_type_3to1_map.get(residue_name, 'X') sequence += one_letter_code sequence_map[residue_id] = one_letter_code last_residue_id = residue_id elif line.startswith('HETATM'): sequence_map[residue_id] = 'X' last_residue_id = residue_id # Save the coordinate for this atom. This will be used later # to calculate restraint distances. atom_xyzs[atom_name, residue_id] = xyz_to_array(( line[30:38], line[38:46], line[46:54])) # Calculate how well each restraint was satisfied. restraint_values = {} restraint_values_by_residue = {} is_sidechain_restraint = {} restraint_units = { 'dist': 'Å', 'angle': '°', } for restraint in restraints: d = restraint.distance_from_ideal(atom_xyzs) metric = restraint.metric backbone_atoms = set(['N', 'C', 'CA', 'O']) backbone_restraint = backbone_atoms.issuperset(restraint.atom_names) restraint_values.setdefault(metric, []).append(d) restraint_values_by_residue.setdefault(metric, {}) for i in restraint.residue_ids: restraint_values_by_residue[metric].setdefault(i, []).append(d) is_sidechain_restraint[i] = (not backbone_restraint) \ or is_sidechain_restraint.get(i, False) for metric, values in restraint_values.items(): meta = ScoreMetadata( name='restraint_{0}'.format(metric), title='Restraint Satisfaction', unit=restraint_units[metric], guide=1.0, lower=0.0, upper='95%', order=2, ) record[meta.name] = np.max(values) metadata[meta.name] = meta for metric, values_by_residue in restraint_values_by_residue.items(): if len(values_by_residue) <= 1: continue for i in values_by_residue: # I want to put the amino acid in these names, because I think # it looks nice, but it causes problems for positions that can # mutate. So I assume that if a position has a sidechain # restraint, it must not be allowed to mutate. aa = sequence_map[i] if is_sidechain_restraint[i] else 'X' res = '{0}{1}'.format(aa, i) meta = ScoreMetadata( name='restraint_{0}_{1}'.format(metric, res.lower()), title='Restraint Satisfaction for {0}'.format(res), unit=restraint_units[metric], guide=1.0, lower=0.0, upper='95%', order=3, ) record[meta.name] = np.max(values_by_residue[i]) metadata[meta.name] = meta # Finish calculating some records that depend on the whole structure. record['sequence'] = sequence for i, score in dunbrack_scores.items(): aa = sequence_map[i] if is_sidechain_restraint[i] else 'X' res = '{0}{1}'.format(aa, i) meta = ScoreMetadata( name='dunbrack_score_{0}'.format(res.lower()), title='Dunbrack Score for {0}'.format(res), unit='REU', order=5, ) record[meta.name] = score metadata[meta.name] = meta records.append(record) if pdb_paths: sys.stdout.write('\n') return records, metadata
[ "def", "read_and_calculate", "(", "workspace", ",", "pdb_paths", ")", ":", "# Parse the given restraints file. The restraints definitions are used to", "# calculate the \"restraint_dist\" metric, which reflects how well each", "# structure achieves the desired geometry. Note that this is calculated", "# whether or not restraints were used to create the structures in question.", "# For example, the validation runs don't use restraints but the restraint", "# distance is a very important metric for deciding which designs worked.", "restraints", "=", "parse_restraints", "(", "workspace", ".", "restraints_path", ")", "# Calculate score and distance metrics for each structure.", "from", "klab", ".", "bio", ".", "basics", "import", "residue_type_3to1_map", "records", "=", "[", "]", "metadata", "=", "{", "}", "num_restraints", "=", "len", "(", "restraints", ")", "+", "1", "atom_xyzs", "=", "{", "}", "fragment_size", "=", "0", "# It's kinda hard to tell which lines are part of the score table. The ", "# first column has some pretty heterogeneous strings (examples below) and ", "# all the other columns are just numbers. My strategy here is to try to ", "# make a regular expression that matches all of these examples, with the ", "# exception of the ligand. I think the ligand will simply be too ", "# heterogeneous to match, and the purpose of this is to get dunbrack ", "# scores, which the ligand doesn't have.", "#", "# MET:NtermProteinFull_1", "# ASN_2", "# LYS:protein_cutpoint_lower_39", "# ASP:protein_cutpoint_upper_40", "# ALA:CtermProteinFull_124", "# HIS_D_224", "# pdb_EQU_250", "score_table_pattern", "=", "re", ".", "compile", "(", "r'^[A-Z]{3}(?:_[A-Z])?'", "# Residue name with optional tautomer.", "r'(?::[A-Za-z_]+)?'", "# Optional patch type.", "r'_([0-9]+) '", "# Residue number preceded by underscore.", ")", "# The terminal space is important to match", "# the full residue number.", "for", "i", ",", "path", "in", "enumerate", "(", "sorted", "(", "pdb_paths", ")", ")", ":", "record", "=", "{", "'path'", ":", "os", ".", "path", ".", "basename", "(", "path", ")", "}", "sequence", "=", "\"\"", "sequence_map", "=", "{", "}", "last_residue_id", "=", "None", "dunbrack_index", "=", "None", "dunbrack_scores", "=", "{", "}", "# Update the user on our progress, because this is often slow.", "sys", ".", "stdout", ".", "write", "(", "\"\\rReading '{}' [{}/{}]\"", ".", "format", "(", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", ",", "i", "+", "1", ",", "len", "(", "pdb_paths", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Read the PDB file, which we are assuming is gzipped.", "try", ":", "with", "gzip", ".", "open", "(", "path", ")", "as", "file", ":", "lines", "=", "file", ".", "readlines", "(", ")", "except", "IOError", ":", "print", "\"\\nFailed to read '{}'\"", ".", "format", "(", "path", ")", "continue", "if", "not", "lines", ":", "print", "\"\\n{} is empty\"", ".", "format", "(", "path", ")", "continue", "# Get different information from different lines in the PDB file. 
Some", "# of these lines are specific to different simulations.", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "decode", "(", "'utf8'", ")", "score_table_match", "=", "dunbrack_index", "and", "score_table_pattern", ".", "match", "(", "line", ")", "if", "line", ".", "startswith", "(", "'pose'", ")", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'total_score'", ",", "title", "=", "'Total Score'", ",", "unit", "=", "'REU'", ",", "order", "=", "1", ",", ")", "record", "[", "'total_score'", "]", "=", "float", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "'label'", ")", ":", "fields", "=", "line", ".", "split", "(", ")", "dunbrack_index", "=", "fields", ".", "index", "(", "'fa_dun'", ")", "elif", "score_table_match", ":", "residue_id", "=", "int", "(", "score_table_match", ".", "group", "(", "1", ")", ")", "for", "restraint", "in", "restraints", ":", "if", "residue_id", "in", "restraint", ".", "residue_ids", ":", "dunbrack_score", "=", "float", "(", "line", ".", "split", "(", ")", "[", "dunbrack_index", "]", ")", "dunbrack_scores", "[", "residue_id", "]", "=", "dunbrack_score", "break", "elif", "line", ".", "startswith", "(", "'rmsd'", ")", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'loop_rmsd'", ",", "title", "=", "'Loop RMSD (Backbone Heavy-Atom)'", ",", "unit", "=", "'Å',", "", "guide", "=", "1.0", ",", "lower", "=", "0.0", ",", "upper", "=", "'95%'", ",", "order", "=", "4", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "' all_heavy_atom_unsats'", ")", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'buried_unsats'", ",", "title", "=", "'Buried Unsatsified H-Bonds'", ",", "order", "=", "5", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "line", ".", "split", "(", ")", "[", "2", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "' sc_heavy_atom_unsats'", ")", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'buried_unsats_sidechain'", ",", "title", "=", "'Buried Unsatisfied H-Bonds (Sidechain)'", ",", "order", "=", "5", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "line", ".", "split", "(", ")", "[", "2", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "' bb_heavy_atom_unsats'", ")", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'buried_unsats_backbone'", ",", "title", "=", "'Buried Unsatisfied H-Bonds (Backbone)'", ",", "order", "=", "5", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "line", ".", "split", "(", ")", "[", "2", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "'time'", ")", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'simulation_time'", ",", "title", "=", "'Simulation Time'", ",", "unit", "=", "'sec'", ",", "order", "=", "5", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "'FragmentScoreFilter '", ")", ":", "fragment_size", "=", "line", ".", "split", "(", ")", "[", "2", "]", ".", "split", "(", "'-'", ")", "[", "0", "]", "elif", "line", 
".", "startswith", "(", "'FSF'", ")", "or", "line", ".", "startswith", "(", "'FragmentScoreFilter_metric'", ")", ":", "splitline", "=", "line", ".", "split", "(", ")", "if", "splitline", "[", "1", "]", "==", "'Max'", ":", "max_res", "=", "0", "max_crmsd", "=", "0", "if", "splitline", "[", "3", "]", "==", "'res:'", ":", "max_res", "=", "splitline", "[", "3", "]", "meta", "=", "ScoreMetadata", "(", "name", "=", "'max_fragment_crmsd_position'", ",", "title", "=", "'Max {}-Residue Fragment RMSD \\\n(C-Alpha) Position'", ".", "format", "(", "fragment_size", ")", ",", "order", "=", "7", ")", "elif", "splitline", "[", "3", "]", "==", "'score:'", ":", "max_crmsd", "=", "splitline", "[", "3", "]", "meta", "=", "ScoreMetadata", "(", "name", "=", "'max_fragment_crmsd_score'", ",", "title", "=", "'Max {}-Residue Fragment RMSD \\\n(C-Alpha)'", ".", "format", "(", "fragment_size", ")", ",", "order", "=", "7", ")", "elif", "splitline", "[", "1", "]", "==", "'Min'", ":", "min_res", "=", "0", "min_crmsd", "=", "0", "if", "splitline", "[", "3", "]", "==", "'res:'", ":", "min_res", "=", "splitline", "[", "3", "]", "meta", "=", "ScoreMetadata", "(", "name", "=", "'min_fragment_crmsd_position'", ",", "title", "=", "'Min {}-Residue Fragment RMSD \\\n(C-Alpha) Position'", ".", "format", "(", "fragment_size", ")", ",", "order", "=", "8", ")", "elif", "splitline", "[", "3", "]", "==", "'score:'", ":", "min_crmsd", "=", "splitline", "[", "3", "]", "meta", "=", "ScoreMetadata", "(", "name", "=", "'min_fragment_crmsd_score'", ",", "title", "=", "'Min {}-Residue Fragment RMSD \\\n(C-Alpha)'", ".", "format", "(", "fragment_size", ")", ",", "order", "=", "8", ")", "elif", "splitline", "[", "1", "]", "==", "'Avg'", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'avg_fragment_crmsd'", ",", "title", "=", "'Avg {}-Residue Fragment RMSD \\\n(C-Alpha)'", ".", "format", "(", "fragment_size", ")", ",", "order", "=", "9", ")", "else", ":", "position", "=", "splitline", "[", "2", "]", "crmsd", "=", "splitline", "[", "4", "]", "meta", "=", "ScoreMetadata", "(", "name", "=", "'fragment_crmsd_pos_{}'", ".", "format", "(", "position", ")", ",", "title", "=", "'{}-Residue Fragment RMSD at Res {} \\\n(C-Alpha)'", ".", "format", "(", "fragment_size", ",", "position", ")", ",", "order", "=", "6", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "splitline", "[", "4", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "'EXTRA_SCORE'", ")", ":", "tokens", "=", "line", "[", "len", "(", "'EXTRA_SCORE_'", ")", ":", "]", ".", "rsplit", "(", "None", ",", "1", ")", "meta", "=", "parse_extra_metric", "(", "tokens", "[", "0", "]", ",", "5", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "tokens", "[", "1", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "line", ".", "startswith", "(", "'EXTRA_METRIC'", ")", ":", "tokens", "=", "line", "[", "len", "(", "'EXTRA_METRIC '", ")", ":", "]", ".", "rsplit", "(", "None", ",", "1", ")", "# Ignore the BuriedUnsat filter. 
It just reports 911 every ", "# time, and we extract the actual buried unsat information from ", "# some other lines it adds to the PDB.", "if", "tokens", "[", "0", "]", "==", "'IGNORE'", ":", "continue", "if", "tokens", "[", "0", "]", "==", "'Buried Unsatisfied H-Bonds [-|#]'", ":", "continue", "meta", "=", "parse_extra_metric", "(", "tokens", "[", "0", "]", ",", "5", ")", "record", "[", "meta", ".", "name", "]", "=", "float", "(", "tokens", "[", "1", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "elif", "(", "line", ".", "startswith", "(", "'ATOM'", ")", "or", "line", ".", "startswith", "(", "'HETATM'", ")", ")", ":", "atom_name", "=", "line", "[", "12", ":", "16", "]", ".", "strip", "(", ")", "residue_id", "=", "int", "(", "line", "[", "22", ":", "26", "]", ".", "strip", "(", ")", ")", "residue_name", "=", "line", "[", "17", ":", "20", "]", ".", "strip", "(", ")", "# Keep track of this model's sequence.", "if", "residue_id", "!=", "last_residue_id", ":", "if", "line", ".", "startswith", "(", "'ATOM'", ")", ":", "one_letter_code", "=", "residue_type_3to1_map", ".", "get", "(", "residue_name", ",", "'X'", ")", "sequence", "+=", "one_letter_code", "sequence_map", "[", "residue_id", "]", "=", "one_letter_code", "last_residue_id", "=", "residue_id", "elif", "line", ".", "startswith", "(", "'HETATM'", ")", ":", "sequence_map", "[", "residue_id", "]", "=", "'X'", "last_residue_id", "=", "residue_id", "# Save the coordinate for this atom. This will be used later ", "# to calculate restraint distances.", "atom_xyzs", "[", "atom_name", ",", "residue_id", "]", "=", "xyz_to_array", "(", "(", "line", "[", "30", ":", "38", "]", ",", "line", "[", "38", ":", "46", "]", ",", "line", "[", "46", ":", "54", "]", ")", ")", "# Calculate how well each restraint was satisfied.", "restraint_values", "=", "{", "}", "restraint_values_by_residue", "=", "{", "}", "is_sidechain_restraint", "=", "{", "}", "restraint_units", "=", "{", "'dist'", ":", "'Å',", "", "'angle'", ":", "'°',", "", "}", "for", "restraint", "in", "restraints", ":", "d", "=", "restraint", ".", "distance_from_ideal", "(", "atom_xyzs", ")", "metric", "=", "restraint", ".", "metric", "backbone_atoms", "=", "set", "(", "[", "'N'", ",", "'C'", ",", "'CA'", ",", "'O'", "]", ")", "backbone_restraint", "=", "backbone_atoms", ".", "issuperset", "(", "restraint", ".", "atom_names", ")", "restraint_values", ".", "setdefault", "(", "metric", ",", "[", "]", ")", ".", "append", "(", "d", ")", "restraint_values_by_residue", ".", "setdefault", "(", "metric", ",", "{", "}", ")", "for", "i", "in", "restraint", ".", "residue_ids", ":", "restraint_values_by_residue", "[", "metric", "]", ".", "setdefault", "(", "i", ",", "[", "]", ")", ".", "append", "(", "d", ")", "is_sidechain_restraint", "[", "i", "]", "=", "(", "not", "backbone_restraint", ")", "or", "is_sidechain_restraint", ".", "get", "(", "i", ",", "False", ")", "for", "metric", ",", "values", "in", "restraint_values", ".", "items", "(", ")", ":", "meta", "=", "ScoreMetadata", "(", "name", "=", "'restraint_{0}'", ".", "format", "(", "metric", ")", ",", "title", "=", "'Restraint Satisfaction'", ",", "unit", "=", "restraint_units", "[", "metric", "]", ",", "guide", "=", "1.0", ",", "lower", "=", "0.0", ",", "upper", "=", "'95%'", ",", "order", "=", "2", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "np", ".", "max", "(", "values", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "for", "metric", ",", "values_by_residue", "in", "restraint_values_by_residue", ".", 
"items", "(", ")", ":", "if", "len", "(", "values_by_residue", ")", "<=", "1", ":", "continue", "for", "i", "in", "values_by_residue", ":", "# I want to put the amino acid in these names, because I think ", "# it looks nice, but it causes problems for positions that can ", "# mutate. So I assume that if a position has a sidechain ", "# restraint, it must not be allowed to mutate.", "aa", "=", "sequence_map", "[", "i", "]", "if", "is_sidechain_restraint", "[", "i", "]", "else", "'X'", "res", "=", "'{0}{1}'", ".", "format", "(", "aa", ",", "i", ")", "meta", "=", "ScoreMetadata", "(", "name", "=", "'restraint_{0}_{1}'", ".", "format", "(", "metric", ",", "res", ".", "lower", "(", ")", ")", ",", "title", "=", "'Restraint Satisfaction for {0}'", ".", "format", "(", "res", ")", ",", "unit", "=", "restraint_units", "[", "metric", "]", ",", "guide", "=", "1.0", ",", "lower", "=", "0.0", ",", "upper", "=", "'95%'", ",", "order", "=", "3", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "np", ".", "max", "(", "values_by_residue", "[", "i", "]", ")", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "# Finish calculating some records that depend on the whole structure.", "record", "[", "'sequence'", "]", "=", "sequence", "for", "i", ",", "score", "in", "dunbrack_scores", ".", "items", "(", ")", ":", "aa", "=", "sequence_map", "[", "i", "]", "if", "is_sidechain_restraint", "[", "i", "]", "else", "'X'", "res", "=", "'{0}{1}'", ".", "format", "(", "aa", ",", "i", ")", "meta", "=", "ScoreMetadata", "(", "name", "=", "'dunbrack_score_{0}'", ".", "format", "(", "res", ".", "lower", "(", ")", ")", ",", "title", "=", "'Dunbrack Score for {0}'", ".", "format", "(", "res", ")", ",", "unit", "=", "'REU'", ",", "order", "=", "5", ",", ")", "record", "[", "meta", ".", "name", "]", "=", "score", "metadata", "[", "meta", ".", "name", "]", "=", "meta", "records", ".", "append", "(", "record", ")", "if", "pdb_paths", ":", "sys", ".", "stdout", ".", "write", "(", "'\\n'", ")", "return", "records", ",", "metadata" ]
Calculate a variety of score and distance metrics for the given structures.
[ "Calculate", "a", "variety", "of", "score", "and", "distance", "metrics", "for", "the", "given", "structures", "." ]
python
train
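The trickiest piece of read_and_calculate() is score_table_pattern, which has to pick residue rows out of the per-residue score table while leaving the ligand line alone. The sketch below checks the same pattern against the example labels listed in the source comment; note the trailing space in each test string, which the pattern requires after the residue number.

import re

# Same pattern as in read_and_calculate(); group 1 is the residue number.
score_table_pattern = re.compile(
    r'^[A-Z]{3}(?:_[A-Z])?'   # residue name with optional tautomer
    r'(?::[A-Za-z_]+)?'       # optional patch type
    r'_([0-9]+) '             # residue number followed by a space
)

examples = [
    'MET:NtermProteinFull_1 ',
    'ASN_2 ',
    'LYS:protein_cutpoint_lower_39 ',
    'ASP:protein_cutpoint_upper_40 ',
    'ALA:CtermProteinFull_124 ',
    'HIS_D_224 ',
    'pdb_EQU_250 ',  # ligand line: intentionally left unmatched
]
for label in examples:
    m = score_table_pattern.match(label)
    print(label.strip(), '->', m.group(1) if m else 'no match')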
KE-works/pykechain
pykechain/client.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/client.py#L870-L901
def teams(self, name=None, id=None, is_hidden=False, **kwargs): """ Teams of KE-chain. Provide a list of :class:`Team`s of KE-chain. You can filter on teamname or id or any other advanced filter. :param name: (optional) teamname to filter :type name: basestring or None :param id: (optional) id of the team to filter :type id: basestring or None :param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden) :type is_hidden: bool or None :param kwargs: Additional filtering keyword=value arguments :type kwargs: dict or None :return: List of :class:`Teams` :raises NotFoundError: when a team could not be found """ request_params = { 'name': name, 'id': id, 'is_hidden': is_hidden } if kwargs: request_params.update(**kwargs) r = self._request('GET', self._build_url('teams'), params=request_params) if r.status_code != requests.codes.ok: # pragma: no cover raise NotFoundError("Could not find teams: '{}'".format(r.json())) data = r.json() return [Team(team, client=self) for team in data['results']]
[ "def", "teams", "(", "self", ",", "name", "=", "None", ",", "id", "=", "None", ",", "is_hidden", "=", "False", ",", "*", "*", "kwargs", ")", ":", "request_params", "=", "{", "'name'", ":", "name", ",", "'id'", ":", "id", ",", "'is_hidden'", ":", "is_hidden", "}", "if", "kwargs", ":", "request_params", ".", "update", "(", "*", "*", "kwargs", ")", "r", "=", "self", ".", "_request", "(", "'GET'", ",", "self", ".", "_build_url", "(", "'teams'", ")", ",", "params", "=", "request_params", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "# pragma: no cover", "raise", "NotFoundError", "(", "\"Could not find teams: '{}'\"", ".", "format", "(", "r", ".", "json", "(", ")", ")", ")", "data", "=", "r", ".", "json", "(", ")", "return", "[", "Team", "(", "team", ",", "client", "=", "self", ")", "for", "team", "in", "data", "[", "'results'", "]", "]" ]
Teams of KE-chain. Provide a list of :class:`Team`s of KE-chain. You can filter on teamname or id or any other advanced filter. :param name: (optional) teamname to filter :type name: basestring or None :param id: (optional) id of the team to filter :type id: basestring or None :param is_hidden: (optional) boolean to show non-hidden or hidden teams or both (None) (default is non-hidden) :type is_hidden: bool or None :param kwargs: Additional filtering keyword=value arguments :type kwargs: dict or None :return: List of :class:`Teams` :raises NotFoundError: when a team could not be found
[ "Teams", "of", "KE", "-", "chain", "." ]
python
train
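A brief usage sketch for the teams() filter above; the KE-chain URL and token are placeholders, and the client construction follows pykechain's usual Client(url=...) / login(token=...) pattern, so check it against the version you have installed.

from pykechain import Client

client = Client(url="https://example.kechain.com")  # placeholder server
client.login(token="your-token-here")               # placeholder token

visible_teams = client.teams()              # non-hidden teams by default
one_team = client.teams(name="First Team")  # filter by team name
all_teams = client.teams(is_hidden=None)    # hidden and non-hidden alike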
python-bugzilla/python-bugzilla
bugzilla/rhbugzilla.py
https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/rhbugzilla.py#L160-L207
def update_external_tracker(self, ids=None, ext_type_id=None, ext_type_description=None, ext_type_url=None, ext_bz_bug_id=None, bug_ids=None, ext_status=None, ext_description=None, ext_priority=None): """ Wrapper method to allow adding of external tracking bugs using the ExternalBugs::WebService::update_external_bug method. This is documented at https://bugzilla.redhat.com/docs/en/html/api/extensions/ExternalBugs/lib/WebService.html#update_external_bug ids: A single external tracker bug id or list of external tracker bug ids. ext_type_id: The external tracker id as used by Bugzilla. ext_type_description: The external tracker description as used by Bugzilla. ext_type_url: The external tracker url as used by Bugzilla. ext_bz_bug_id: A single external bug id or list of external bug ids (ie: the bug number in the external tracker). bug_ids: A single bug id or list of bug ids to have external tracker info updated. ext_status: The status of the external bug. ext_description: The description of the external bug. ext_priority: The priority of the external bug. """ params = {} if ids is not None: params['ids'] = self._listify(ids) if ext_type_id is not None: params['ext_type_id'] = ext_type_id if ext_type_description is not None: params['ext_type_description'] = ext_type_description if ext_type_url is not None: params['ext_type_url'] = ext_type_url if ext_bz_bug_id is not None: params['ext_bz_bug_id'] = self._listify(ext_bz_bug_id) if bug_ids is not None: params['bug_ids'] = self._listify(bug_ids) if ext_status is not None: params['ext_status'] = ext_status if ext_description is not None: params['ext_description'] = ext_description if ext_priority is not None: params['ext_priority'] = ext_priority log.debug("Calling ExternalBugs.update_external_bug(%s)", params) return self._proxy.ExternalBugs.update_external_bug(params)
[ "def", "update_external_tracker", "(", "self", ",", "ids", "=", "None", ",", "ext_type_id", "=", "None", ",", "ext_type_description", "=", "None", ",", "ext_type_url", "=", "None", ",", "ext_bz_bug_id", "=", "None", ",", "bug_ids", "=", "None", ",", "ext_status", "=", "None", ",", "ext_description", "=", "None", ",", "ext_priority", "=", "None", ")", ":", "params", "=", "{", "}", "if", "ids", "is", "not", "None", ":", "params", "[", "'ids'", "]", "=", "self", ".", "_listify", "(", "ids", ")", "if", "ext_type_id", "is", "not", "None", ":", "params", "[", "'ext_type_id'", "]", "=", "ext_type_id", "if", "ext_type_description", "is", "not", "None", ":", "params", "[", "'ext_type_description'", "]", "=", "ext_type_description", "if", "ext_type_url", "is", "not", "None", ":", "params", "[", "'ext_type_url'", "]", "=", "ext_type_url", "if", "ext_bz_bug_id", "is", "not", "None", ":", "params", "[", "'ext_bz_bug_id'", "]", "=", "self", ".", "_listify", "(", "ext_bz_bug_id", ")", "if", "bug_ids", "is", "not", "None", ":", "params", "[", "'bug_ids'", "]", "=", "self", ".", "_listify", "(", "bug_ids", ")", "if", "ext_status", "is", "not", "None", ":", "params", "[", "'ext_status'", "]", "=", "ext_status", "if", "ext_description", "is", "not", "None", ":", "params", "[", "'ext_description'", "]", "=", "ext_description", "if", "ext_priority", "is", "not", "None", ":", "params", "[", "'ext_priority'", "]", "=", "ext_priority", "log", ".", "debug", "(", "\"Calling ExternalBugs.update_external_bug(%s)\"", ",", "params", ")", "return", "self", ".", "_proxy", ".", "ExternalBugs", ".", "update_external_bug", "(", "params", ")" ]
Wrapper method to allow adding of external tracking bugs using the ExternalBugs::WebService::update_external_bug method. This is documented at https://bugzilla.redhat.com/docs/en/html/api/extensions/ExternalBugs/lib/WebService.html#update_external_bug ids: A single external tracker bug id or list of external tracker bug ids. ext_type_id: The external tracker id as used by Bugzilla. ext_type_description: The external tracker description as used by Bugzilla. ext_type_url: The external tracker url as used by Bugzilla. ext_bz_bug_id: A single external bug id or list of external bug ids (ie: the bug number in the external tracker). bug_ids: A single bug id or list of bug ids to have external tracker info updated. ext_status: The status of the external bug. ext_description: The description of the external bug. ext_priority: The priority of the external bug.
[ "Wrapper", "method", "to", "allow", "adding", "of", "external", "tracking", "bugs", "using", "the", "ExternalBugs", "::", "WebService", "::", "update_external_bug", "method", "." ]
python
train
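Everything in update_external_tracker() is optional keyword plumbing into a single ExternalBugs.update_external_bug RPC, so a call only needs the fields it wants to change. A hedged usage sketch; the server URL, bug numbers and tracker id are placeholders, and the call requires a logged-in session against a server with the ExternalBugs extension.

import bugzilla

bzapi = bugzilla.RHBugzilla("https://bugzilla.example.com")  # placeholder URL
bzapi.interactive_login()  # or authenticate another way per the python-bugzilla docs

# Mark external bug 12345 (tracker type 131) attached to bug 1000000 as closed.
bzapi.update_external_tracker(
    bug_ids=1000000,      # single id or list of ids
    ext_type_id=131,      # tracker id as configured on the server (placeholder)
    ext_bz_bug_id=12345,  # id of the bug in the external tracker (placeholder)
    ext_status="Closed",
)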
pantsbuild/pants
src/python/pants/source/wrapped_globs.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/source/wrapped_globs.py#L176-L211
def create_fileset_with_spec(cls, rel_path, *patterns, **kwargs): """ :param rel_path: The relative path to create a FilesetWithSpec for. :param patterns: glob patterns to apply. :param exclude: A list of {,r,z}globs objects, strings, or lists of strings to exclude. NB: this argument is contained within **kwargs! """ for pattern in patterns: if not isinstance(pattern, string_types): raise ValueError("Expected string patterns for {}: got {}".format(cls.__name__, patterns)) raw_exclude = kwargs.pop('exclude', []) buildroot = get_buildroot() root = os.path.normpath(os.path.join(buildroot, rel_path)) # making sure there are no unknown arguments. unknown_args = set(kwargs.keys()) - cls.KNOWN_PARAMETERS if unknown_args: raise ValueError('Unexpected arguments while parsing globs: {}'.format( ', '.join(unknown_args))) for glob in patterns: if cls._is_glob_dir_outside_root(glob, root): raise ValueError('Invalid glob {}, points outside BUILD file root {}'.format(glob, root)) exclude = cls.process_raw_exclude(raw_exclude) files_calculator = cls._file_calculator(root, patterns, kwargs, exclude) rel_root = fast_relpath(root, buildroot) if rel_root == '.': rel_root = '' filespec = cls.to_filespec(patterns, root=rel_root, exclude=exclude) return LazyFilesetWithSpec(rel_root, filespec, files_calculator)
[ "def", "create_fileset_with_spec", "(", "cls", ",", "rel_path", ",", "*", "patterns", ",", "*", "*", "kwargs", ")", ":", "for", "pattern", "in", "patterns", ":", "if", "not", "isinstance", "(", "pattern", ",", "string_types", ")", ":", "raise", "ValueError", "(", "\"Expected string patterns for {}: got {}\"", ".", "format", "(", "cls", ".", "__name__", ",", "patterns", ")", ")", "raw_exclude", "=", "kwargs", ".", "pop", "(", "'exclude'", ",", "[", "]", ")", "buildroot", "=", "get_buildroot", "(", ")", "root", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "buildroot", ",", "rel_path", ")", ")", "# making sure there are no unknown arguments.", "unknown_args", "=", "set", "(", "kwargs", ".", "keys", "(", ")", ")", "-", "cls", ".", "KNOWN_PARAMETERS", "if", "unknown_args", ":", "raise", "ValueError", "(", "'Unexpected arguments while parsing globs: {}'", ".", "format", "(", "', '", ".", "join", "(", "unknown_args", ")", ")", ")", "for", "glob", "in", "patterns", ":", "if", "cls", ".", "_is_glob_dir_outside_root", "(", "glob", ",", "root", ")", ":", "raise", "ValueError", "(", "'Invalid glob {}, points outside BUILD file root {}'", ".", "format", "(", "glob", ",", "root", ")", ")", "exclude", "=", "cls", ".", "process_raw_exclude", "(", "raw_exclude", ")", "files_calculator", "=", "cls", ".", "_file_calculator", "(", "root", ",", "patterns", ",", "kwargs", ",", "exclude", ")", "rel_root", "=", "fast_relpath", "(", "root", ",", "buildroot", ")", "if", "rel_root", "==", "'.'", ":", "rel_root", "=", "''", "filespec", "=", "cls", ".", "to_filespec", "(", "patterns", ",", "root", "=", "rel_root", ",", "exclude", "=", "exclude", ")", "return", "LazyFilesetWithSpec", "(", "rel_root", ",", "filespec", ",", "files_calculator", ")" ]
:param rel_path: The relative path to create a FilesetWithSpec for. :param patterns: glob patterns to apply. :param exclude: A list of {,r,z}globs objects, strings, or lists of strings to exclude. NB: this argument is contained within **kwargs!
[ ":", "param", "rel_path", ":", "The", "relative", "path", "to", "create", "a", "FilesetWithSpec", "for", ".", ":", "param", "patterns", ":", "glob", "patterns", "to", "apply", ".", ":", "param", "exclude", ":", "A", "list", "of", "{", "r", "z", "}", "globs", "objects", "strings", "or", "lists", "of", "strings", "to", "exclude", ".", "NB", ":", "this", "argument", "is", "contained", "within", "**", "kwargs!" ]
python
train
sentinel-hub/eo-learn
core/eolearn/core/graph.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/graph.py#L187-L208
def _is_cyclic(dag): """True if the directed graph dag contains a cycle. False otherwise. The algorithm is naive, running in O(V^2) time, and not intended for serious use! For production purposes on larger graphs consider implementing Tarjan's O(V+E)-time algorithm instead. :type dag: DirectedGraph """ # pylint: disable=invalid-name vertices = dag.vertices() for w in vertices: stack = [w] seen = set() while stack: u = stack.pop() seen.add(u) for v in dag[u]: if v == w: return True if v not in seen: stack.append(v) return False
[ "def", "_is_cyclic", "(", "dag", ")", ":", "# pylint: disable=invalid-name\r", "vertices", "=", "dag", ".", "vertices", "(", ")", "for", "w", "in", "vertices", ":", "stack", "=", "[", "w", "]", "seen", "=", "set", "(", ")", "while", "stack", ":", "u", "=", "stack", ".", "pop", "(", ")", "seen", ".", "add", "(", "u", ")", "for", "v", "in", "dag", "[", "u", "]", ":", "if", "v", "==", "w", ":", "return", "True", "if", "v", "not", "in", "seen", ":", "stack", ".", "append", "(", "v", ")", "return", "False" ]
True if the directed graph dag contains a cycle. False otherwise. The algorithm is naive, running in O(V^2) time, and not intended for serious use! For production purposes on larger graphs consider implementing Tarjan's O(V+E)-time algorithm instead. :type dag: DirectedGraph
[ "True", "if", "the", "directed", "graph", "dag", "contains", "a", "cycle", ".", "False", "otherwise", ".", "The", "algorithm", "is", "naive", "running", "in", "O", "(", "V^2", ")", "time", "and", "not", "intended", "for", "serious", "use!", "For", "production", "purposes", "on", "larger", "graphs", "consider", "implementing", "Tarjan", "s", "O", "(", "V", "+", "E", ")", "-", "time", "algorithm", "instead", ".", ":", "type", "dag", ":", "DirectedGraph" ]
python
train
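_is_cyclic() runs a DFS from every vertex and reports a cycle as soon as any walk can get back to its starting vertex, which is where the naive bound in the docstring comes from. The same idea over a plain adjacency dict (not eo-learn's DirectedGraph class):

def is_cyclic(adjacency):
    """Naive cycle check: DFS from every vertex, looking for a path back to it."""
    for start in adjacency:
        stack, seen = [start], set()
        while stack:
            node = stack.pop()
            seen.add(node)
            for neighbour in adjacency.get(node, ()):
                if neighbour == start:
                    return True
                if neighbour not in seen:
                    stack.append(neighbour)
    return False

print(is_cyclic({'a': ['b'], 'b': ['c'], 'c': []}))     # False: a -> b -> c
print(is_cyclic({'a': ['b'], 'b': ['c'], 'c': ['a']}))  # True:  a -> b -> c -> a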
gboeing/osmnx
osmnx/core.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/core.py#L1525-L1601
def graph_from_point(center_point, distance=1000, distance_type='bbox', network_type='all_private', simplify=True, retain_all=False, truncate_by_edge=False, name='unnamed', timeout=180, memory=None, max_query_area_size=50*1000*50*1000, clean_periphery=True, infrastructure='way["highway"]', custom_filter=None): """ Create a networkx graph from OSM data within some distance of some (lat, lon) center point. Parameters ---------- center_point : tuple the (lat, lon) central point around which to construct the graph distance : int retain only those nodes within this many meters of the center of the graph, with distance determined according to distance_type argument distance_type : string {'network', 'bbox'} if 'bbox', retain only those nodes within a bounding box of the distance parameter. if 'network', retain only those nodes within some network distance from the center-most node. network_type : string what type of street network to get simplify : bool if true, simplify the graph topology retain_all : bool if True, return the entire graph even if it is not connected truncate_by_edge : bool if True retain node if it's outside bbox but at least one of node's neighbors are within bbox name : string the name of the graph timeout : int the timeout interval for requests and to pass to API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max size for any part of the geometry, in square degrees: any polygon bigger will get divided up for multiple queries to API clean_periphery : bool, if True (and simplify=True), buffer 0.5km to get a graph larger than requested, then simplify, then truncate it to requested spatial extent infrastructure : string download infrastructure of given type (default is streets (ie, 'way["highway"]') but other infrastructures may be selected like power grids (ie, 'way["power"~"line"]')) custom_filter : string a custom network filter to be used instead of the network_type presets Returns ------- networkx multidigraph """ if distance_type not in ['bbox', 'network']: raise InvalidDistanceType('distance_type must be "bbox" or "network"') # create a bounding box from the center point and the distance in each # direction north, south, east, west = bbox_from_point(center_point, distance) # create a graph from the bounding box G = graph_from_bbox(north, south, east, west, network_type=network_type, simplify=simplify, retain_all=retain_all, truncate_by_edge=truncate_by_edge, name=name, timeout=timeout, memory=memory, max_query_area_size=max_query_area_size, clean_periphery=clean_periphery, infrastructure=infrastructure, custom_filter=custom_filter) # if the network distance_type is network, find the node in the graph # nearest to the center point, and truncate the graph by network distance # from this node if distance_type == 'network': centermost_node = get_nearest_node(G, center_point) G = truncate_graph_dist(G, centermost_node, max_distance=distance) log('graph_from_point() returning graph with {:,} nodes and {:,} edges'.format(len(list(G.nodes())), len(list(G.edges())))) return G
[ "def", "graph_from_point", "(", "center_point", ",", "distance", "=", "1000", ",", "distance_type", "=", "'bbox'", ",", "network_type", "=", "'all_private'", ",", "simplify", "=", "True", ",", "retain_all", "=", "False", ",", "truncate_by_edge", "=", "False", ",", "name", "=", "'unnamed'", ",", "timeout", "=", "180", ",", "memory", "=", "None", ",", "max_query_area_size", "=", "50", "*", "1000", "*", "50", "*", "1000", ",", "clean_periphery", "=", "True", ",", "infrastructure", "=", "'way[\"highway\"]'", ",", "custom_filter", "=", "None", ")", ":", "if", "distance_type", "not", "in", "[", "'bbox'", ",", "'network'", "]", ":", "raise", "InvalidDistanceType", "(", "'distance_type must be \"bbox\" or \"network\"'", ")", "# create a bounding box from the center point and the distance in each", "# direction", "north", ",", "south", ",", "east", ",", "west", "=", "bbox_from_point", "(", "center_point", ",", "distance", ")", "# create a graph from the bounding box", "G", "=", "graph_from_bbox", "(", "north", ",", "south", ",", "east", ",", "west", ",", "network_type", "=", "network_type", ",", "simplify", "=", "simplify", ",", "retain_all", "=", "retain_all", ",", "truncate_by_edge", "=", "truncate_by_edge", ",", "name", "=", "name", ",", "timeout", "=", "timeout", ",", "memory", "=", "memory", ",", "max_query_area_size", "=", "max_query_area_size", ",", "clean_periphery", "=", "clean_periphery", ",", "infrastructure", "=", "infrastructure", ",", "custom_filter", "=", "custom_filter", ")", "# if the network distance_type is network, find the node in the graph", "# nearest to the center point, and truncate the graph by network distance", "# from this node", "if", "distance_type", "==", "'network'", ":", "centermost_node", "=", "get_nearest_node", "(", "G", ",", "center_point", ")", "G", "=", "truncate_graph_dist", "(", "G", ",", "centermost_node", ",", "max_distance", "=", "distance", ")", "log", "(", "'graph_from_point() returning graph with {:,} nodes and {:,} edges'", ".", "format", "(", "len", "(", "list", "(", "G", ".", "nodes", "(", ")", ")", ")", ",", "len", "(", "list", "(", "G", ".", "edges", "(", ")", ")", ")", ")", ")", "return", "G" ]
Create a networkx graph from OSM data within some distance of some (lat, lon) center point. Parameters ---------- center_point : tuple the (lat, lon) central point around which to construct the graph distance : int retain only those nodes within this many meters of the center of the graph, with distance determined according to distance_type argument distance_type : string {'network', 'bbox'} if 'bbox', retain only those nodes within a bounding box of the distance parameter. if 'network', retain only those nodes within some network distance from the center-most node. network_type : string what type of street network to get simplify : bool if true, simplify the graph topology retain_all : bool if True, return the entire graph even if it is not connected truncate_by_edge : bool if True retain node if it's outside bbox but at least one of node's neighbors are within bbox name : string the name of the graph timeout : int the timeout interval for requests and to pass to API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max size for any part of the geometry, in square degrees: any polygon bigger will get divided up for multiple queries to API clean_periphery : bool, if True (and simplify=True), buffer 0.5km to get a graph larger than requested, then simplify, then truncate it to requested spatial extent infrastructure : string download infrastructure of given type (default is streets (ie, 'way["highway"]') but other infrastructures may be selected like power grids (ie, 'way["power"~"line"]')) custom_filter : string a custom network filter to be used instead of the network_type presets Returns ------- networkx multidigraph
[ "Create", "a", "networkx", "graph", "from", "OSM", "data", "within", "some", "distance", "of", "some", "(", "lat", "lon", ")", "center", "point", "." ]
python
train
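A usage sketch for graph_from_point() matching the signature shown here, which takes a distance parameter; newer osmnx releases renamed several of these arguments (e.g. dist), so treat the exact keywords as tied to this version. The coordinates are a placeholder point.

import osmnx as ox

center = (37.7749, -122.4194)  # placeholder (lat, lon)

# Everything inside a 750 m bounding box around the point.
G_bbox = ox.graph_from_point(center, distance=750, distance_type='bbox',
                             network_type='drive')

# Only nodes within 750 m of the center-most node, measured along the network.
G_net = ox.graph_from_point(center, distance=750, distance_type='network',
                            network_type='walk')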
cltk/cltk
cltk/stem/akkadian/atf_converter.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/akkadian/atf_converter.py#L66-L79
def _convert_number_to_subscript(num): """ Converts number into subscript input = ["a", "a1", "a2", "a3", "be2", "be3", "bad2", "bad3"] output = ["a", "a₁", "a₂", "a₃", "be₂", "be₃", "bad₂", "bad₃"] :param num: number called after sign :return: number in subscript """ subscript = '' for character in str(num): subscript += chr(0x2080 + int(character)) return subscript
[ "def", "_convert_number_to_subscript", "(", "num", ")", ":", "subscript", "=", "''", "for", "character", "in", "str", "(", "num", ")", ":", "subscript", "+=", "chr", "(", "0x2080", "+", "int", "(", "character", ")", ")", "return", "subscript" ]
Converts number into subscript input = ["a", "a1", "a2", "a3", "be2", "be3", "bad2", "bad3"] output = ["a", "a₁", "a₂", "a₃", "be₂", "be₃", "bad₂", "bad₃"] :param num: number called after sign :return: number in subscript
[ "Converts", "number", "into", "subscript" ]
python
train
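The conversion relies on the Unicode subscript digits ₀–₉ being contiguous from U+2080, so chr(0x2080 + d) maps digit d to its subscript form. A tiny standalone sketch reproducing the worked examples from the docstring; the sign_with_index() splitter is only for the demo and is not part of cltk's API.

def to_subscript(num):
    # U+2080 is SUBSCRIPT ZERO; the remaining subscript digits follow in order.
    return ''.join(chr(0x2080 + int(ch)) for ch in str(num))

def sign_with_index(sign):
    # Split a token like 'be2' into its letters and trailing index, if any.
    letters = sign.rstrip('0123456789')
    index = sign[len(letters):]
    return letters + (to_subscript(index) if index else '')

print([sign_with_index(s) for s in ['a', 'a1', 'a2', 'be2', 'bad3']])
# -> ['a', 'a₁', 'a₂', 'be₂', 'bad₃']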
Kortemme-Lab/klab
klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L862-L993
def analyze_multiple( benchmark_runs, analysis_sets = [], # Singleton arguments analysis_directory = None, remove_existing_analysis_directory = True, quick_plots = False, use_multiprocessing = True, verbose = True, compile_pdf = True, limit_to_complete_presence = True, all_by_all_comparisons = False, ): '''This function runs the analysis for multiple input settings''' if remove_existing_analysis_directory and os.path.isdir(analysis_directory): shutil.rmtree(analysis_directory) unique_ajps = BenchmarkRun.get_unique_ajps( benchmark_runs ) if limit_to_complete_presence: BenchmarkRun.get_common_subset( benchmark_runs, verbose = not use_multiprocessing ) unique_ajps = BenchmarkRun.get_unique_ajps( benchmark_runs ) ### Process each benchmark run object individually if use_multiprocessing: pool = mp.Pool() singleton_chapters = [] calculated_brs = [] def save_latex_report(t): br, unique_name, latex_report = t latex_report.set_title_page( title = unique_name ) singleton_chapters.append( latex_report ) calculated_brs.append( br ) for br in benchmark_runs: for analysis_set in analysis_sets: unique_name = br.get_definitive_name(unique_ajps, join_character = '\n') filepath_unique_name = br.get_definitive_name(unique_ajps, join_character = '-') subdir = os.path.join(analysis_directory, os.path.join('analysis_sets', os.path.join(analysis_set, filepath_unique_name) ) ) if use_multiprocessing: pool.apply_async( _full_analysis_mp_alias, ( br, analysis_set, subdir, unique_name, False, quick_plots ), callback = save_latex_report ) else: print 'Individual report saving in:', subdir save_latex_report( _full_analysis_mp_alias( br, analysis_set, subdir, unique_name, True, quick_plots ) ) if use_multiprocessing: pool.close() pool.join() benchmark_runs = calculated_brs ### Pointwise all-by-all comparison comparison_chapters = [] if all_by_all_comparisons: if use_multiprocessing: pool = mp.Pool() def save_latex_report(t): latex_report = t comparison_chapters.append( latex_report ) comparisons_subdir = os.path.join(analysis_directory, 'comparison_analysis_sets') for analysis_set in analysis_sets: analysis_set_subdir = os.path.join(comparisons_subdir, analysis_set) for i, br_i in enumerate(benchmark_runs): for j, br_j in enumerate(benchmark_runs): if i > j: if use_multiprocessing: br_i_copy = copy.deepcopy( br_i ) br_j_copy = copy.deepcopy( br_j ) pool.apply_async( _compare_mp_alias, (br_i_copy, br_j_copy, analysis_set, analysis_set_subdir, unique_ajps, False), callback = save_latex_report ) else: save_latex_report( _compare_mp_alias(br_i, br_j, analysis_set, analysis_set_subdir, unique_ajps, True) ) if use_multiprocessing: pool.close() pool.join() intro_report = lr.LatexReport() intro_report.set_title_page('All data comparison') # All data series comparison # Get joined stats comparison dataframe stats_df = BenchmarkRun.get_stats_comparison_dataframe( benchmark_runs, unique_ajps, output_csv = os.path.join(analysis_directory, 'analysis_metrics.csv'), ) intro_report.add_section_page( title = 'Case comparison tables' ) intro_report.content.extend( BenchmarkRun.make_case_description_tables( stats_df ) ) intro_report.add_section_page('All data plots') subplot_directory = os.path.join(analysis_directory, 'subplots') if not os.path.isdir( subplot_directory ): os.makedirs(subplot_directory) runtime_df = benchmark_runs[0]._get_dataframe_columns( ['RunTime'] ) runtime_df.columns = [ benchmark_runs[0].get_definitive_name(unique_ajps, join_character = '\n', prepend_label = False) ] for br in benchmark_runs[1:]: inner_runtime_df = br._get_dataframe_columns( ['RunTime'] ) inner_runtime_df.columns = [ br.get_definitive_name(unique_ajps, join_character = '\n', prepend_label = False) ] runtime_df = runtime_df.merge( inner_runtime_df, left_index = True, right_index = True, ) intro_report.add_plot( general_matplotlib.plot_box( runtime_df, output_directory = subplot_directory, plot_title = 'Prediction Run Times', output_name = 'runtimes', fig_height = 6.7, fig_width = 10, ylabel = 'Run time (minutes)', xlabel = 'Prediction Set', verbose = verbose, xtick_fontsize = 4, log_y = True, label_n = False, rotation_angle = 45, ), plot_title = 'Run times' ) # Report concatenation main_latex_report = lr.LatexReport() main_latex_report.set_title_page('$\Delta\Delta G$ Report') main_latex_report.add_chapter(intro_report) for chapter in comparison_chapters: main_latex_report.add_chapter(chapter) for chapter in singleton_chapters: main_latex_report.add_chapter(chapter) main_latex_report.generate_pdf_report( os.path.join( analysis_directory, 'report.pdf' ), verbose = verbose, compile_pdf = compile_pdf, ) print os.path.join( analysis_directory, 'report.pdf' )
[ "def", "analyze_multiple", "(", "benchmark_runs", ",", "analysis_sets", "=", "[", "]", ",", "# Singleton arguments", "analysis_directory", "=", "None", ",", "remove_existing_analysis_directory", "=", "True", ",", "quick_plots", "=", "False", ",", "use_multiprocessing", "=", "True", ",", "verbose", "=", "True", ",", "compile_pdf", "=", "True", ",", "limit_to_complete_presence", "=", "True", ",", "all_by_all_comparisons", "=", "False", ",", ")", ":", "if", "remove_existing_analysis_directory", "and", "os", ".", "path", ".", "isdir", "(", "analysis_directory", ")", ":", "shutil", ".", "rmtree", "(", "analysis_directory", ")", "unique_ajps", "=", "BenchmarkRun", ".", "get_unique_ajps", "(", "benchmark_runs", ")", "if", "limit_to_complete_presence", ":", "BenchmarkRun", ".", "get_common_subset", "(", "benchmark_runs", ",", "verbose", "=", "not", "use_multiprocessing", ")", "unique_ajps", "=", "BenchmarkRun", ".", "get_unique_ajps", "(", "benchmark_runs", ")", "### Process each benchmark run object individually", "if", "use_multiprocessing", ":", "pool", "=", "mp", ".", "Pool", "(", ")", "singleton_chapters", "=", "[", "]", "calculated_brs", "=", "[", "]", "def", "save_latex_report", "(", "t", ")", ":", "br", ",", "unique_name", ",", "latex_report", "=", "t", "latex_report", ".", "set_title_page", "(", "title", "=", "unique_name", ")", "singleton_chapters", ".", "append", "(", "latex_report", ")", "calculated_brs", ".", "append", "(", "br", ")", "for", "br", "in", "benchmark_runs", ":", "for", "analysis_set", "in", "analysis_sets", ":", "unique_name", "=", "br", ".", "get_definitive_name", "(", "unique_ajps", ",", "join_character", "=", "'\\n'", ")", "filepath_unique_name", "=", "br", ".", "get_definitive_name", "(", "unique_ajps", ",", "join_character", "=", "'-'", ")", "subdir", "=", "os", ".", "path", ".", "join", "(", "analysis_directory", ",", "os", ".", "path", ".", "join", "(", "'analysis_sets'", ",", "os", ".", "path", ".", "join", "(", "analysis_set", ",", "filepath_unique_name", ")", ")", ")", "if", "use_multiprocessing", ":", "pool", ".", "apply_async", "(", "_full_analysis_mp_alias", ",", "(", "br", ",", "analysis_set", ",", "subdir", ",", "unique_name", ",", "False", ",", "quick_plots", ")", ",", "callback", "=", "save_latex_report", ")", "else", ":", "print", "'Individual report saving in:'", ",", "subdir", "save_latex_report", "(", "_full_analysis_mp_alias", "(", "br", ",", "analysis_set", ",", "subdir", ",", "unique_name", ",", "True", ",", "quick_plots", ")", ")", "if", "use_multiprocessing", ":", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "benchmark_runs", "=", "calculated_brs", "### Pointwise all-by-all comparison", "comparison_chapters", "=", "[", "]", "if", "all_by_all_comparisons", ":", "if", "use_multiprocessing", ":", "pool", "=", "mp", ".", "Pool", "(", ")", "def", "save_latex_report", "(", "t", ")", ":", "latex_report", "=", "t", "comparison_chapters", ".", "append", "(", "latex_report", ")", "comparisons_subdir", "=", "os", ".", "path", ".", "join", "(", "analysis_directory", ",", "'comparison_analysis_sets'", ")", "for", "analysis_set", "in", "analysis_sets", ":", "analysis_set_subdir", "=", "os", ".", "path", ".", "join", "(", "comparisons_subdir", ",", "analysis_set", ")", "for", "i", ",", "br_i", "in", "enumerate", "(", "benchmark_runs", ")", ":", "for", "j", ",", "br_j", "in", "enumerate", "(", "benchmark_runs", ")", ":", "if", "i", ">", "j", ":", "if", "use_multiprocessing", ":", "br_i_copy", "=", "copy", ".", "deepcopy", "(", "br_i", ")", 
"br_j_copy", "=", "copy", ".", "deepcopy", "(", "br_j", ")", "pool", ".", "apply_async", "(", "_compare_mp_alias", ",", "(", "br_i_copy", ",", "br_j_copy", ",", "analysis_set", ",", "analysis_set_subdir", ",", "unique_ajps", ",", "False", ")", ",", "callback", "=", "save_latex_report", ")", "else", ":", "save_latex_report", "(", "_compare_mp_alias", "(", "br_i", ",", "br_j", ",", "analysis_set", ",", "analysis_set_subdir", ",", "unique_ajps", ",", "True", ")", ")", "if", "use_multiprocessing", ":", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "intro_report", "=", "lr", ".", "LatexReport", "(", ")", "intro_report", ".", "set_title_page", "(", "'All data comparison'", ")", "# All data series comparison", "# Get joined stats comparison dataframe", "stats_df", "=", "BenchmarkRun", ".", "get_stats_comparison_dataframe", "(", "benchmark_runs", ",", "unique_ajps", ",", "output_csv", "=", "os", ".", "path", ".", "join", "(", "analysis_directory", ",", "'analysis_metrics.csv'", ")", ",", ")", "intro_report", ".", "add_section_page", "(", "title", "=", "'Case comparison tables'", ")", "intro_report", ".", "content", ".", "extend", "(", "BenchmarkRun", ".", "make_case_description_tables", "(", "stats_df", ")", ")", "intro_report", ".", "add_section_page", "(", "'All data plots'", ")", "subplot_directory", "=", "os", ".", "path", ".", "join", "(", "analysis_directory", ",", "'subplots'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "subplot_directory", ")", ":", "os", ".", "makedirs", "(", "subplot_directory", ")", "runtime_df", "=", "benchmark_runs", "[", "0", "]", ".", "_get_dataframe_columns", "(", "[", "'RunTime'", "]", ")", "runtime_df", ".", "columns", "=", "[", "benchmark_runs", "[", "0", "]", ".", "get_definitive_name", "(", "unique_ajps", ",", "join_character", "=", "'\\n'", ",", "prepend_label", "=", "False", ")", "]", "for", "br", "in", "benchmark_runs", "[", "1", ":", "]", ":", "inner_runtime_df", "=", "br", ".", "_get_dataframe_columns", "(", "[", "'RunTime'", "]", ")", "inner_runtime_df", ".", "columns", "=", "[", "br", ".", "get_definitive_name", "(", "unique_ajps", ",", "join_character", "=", "'\\n'", ",", "prepend_label", "=", "False", ")", "]", "runtime_df", "=", "runtime_df", ".", "merge", "(", "inner_runtime_df", ",", "left_index", "=", "True", ",", "right_index", "=", "True", ",", ")", "intro_report", ".", "add_plot", "(", "general_matplotlib", ".", "plot_box", "(", "runtime_df", ",", "output_directory", "=", "subplot_directory", ",", "plot_title", "=", "'Prediction Run Times'", ",", "output_name", "=", "'runtimes'", ",", "fig_height", "=", "6.7", ",", "fig_width", "=", "10", ",", "ylabel", "=", "'Run time (minutes)'", ",", "xlabel", "=", "'Prediction Set'", ",", "verbose", "=", "verbose", ",", "xtick_fontsize", "=", "4", ",", "log_y", "=", "True", ",", "label_n", "=", "False", ",", "rotation_angle", "=", "45", ",", ")", ",", "plot_title", "=", "'Run times'", ")", "# Report concatenation", "main_latex_report", "=", "lr", ".", "LatexReport", "(", ")", "main_latex_report", ".", "set_title_page", "(", "'$\\Delta\\Delta G$ Report'", ")", "main_latex_report", ".", "add_chapter", "(", "intro_report", ")", "for", "chapter", "in", "comparison_chapters", ":", "main_latex_report", ".", "add_chapter", "(", "chapter", ")", "for", "chapter", "in", "singleton_chapters", ":", "main_latex_report", ".", "add_chapter", "(", "chapter", ")", "main_latex_report", ".", "generate_pdf_report", "(", "os", ".", "path", ".", "join", "(", "analysis_directory", ",", 
"'report.pdf'", ")", ",", "verbose", "=", "verbose", ",", "compile_pdf", "=", "compile_pdf", ",", ")", "print", "os", ".", "path", ".", "join", "(", "analysis_directory", ",", "'report.pdf'", ")" ]
This function runs the analysis for multiple input settings
[ "This", "function", "runs", "the", "analysis", "for", "multiple", "input", "settings" ]
python
train
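
The analyze_multiple record above fans each benchmark analysis out with multiprocessing.Pool.apply_async and gathers the finished reports through a callback. The following is a minimal, self-contained sketch of that fan-out/callback pattern only; the names score_case and the squared result are invented for illustration, it is written for Python 3, and it is not part of the dataset or of the original kyleabeauchamp project code.

import multiprocessing as mp

def score_case(case_id):
    # Stand-in for the per-benchmark analysis job; returns a (case, result) pair.
    return case_id, case_id ** 2

if __name__ == '__main__':
    results = []
    pool = mp.Pool()
    for case_id in range(10):
        # The callback runs in the parent process and only appends the returned
        # tuple, mirroring how save_latex_report() accumulates finished chapters.
        pool.apply_async(score_case, (case_id,), callback=results.append)
    pool.close()
    pool.join()
    print(sorted(results))
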
wndhydrnt/python-oauth2
oauth2/client_authenticator.py
https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/client_authenticator.py#L123-L151
def http_basic_auth(request): """ Extracts the credentials of a client using HTTP Basic Auth. Expects the ``client_id`` to be the username and the ``client_secret`` to be the password part of the Authorization header. :param request: The incoming request :type request: oauth2.web.Request :return: A tuple in the format of (<CLIENT ID>, <CLIENT SECRET>)` :rtype: tuple """ auth_header = request.header("authorization") if auth_header is None: raise OAuthInvalidError(error="invalid_request", explanation="Authorization header is missing") auth_parts = auth_header.strip().encode("latin1").split(None) if auth_parts[0].strip().lower() != b'basic': raise OAuthInvalidError( error="invalid_request", explanation="Provider supports basic authentication only") client_id, client_secret = b64decode(auth_parts[1]).split(b':', 1) return client_id.decode("latin1"), client_secret.decode("latin1")
[ "def", "http_basic_auth", "(", "request", ")", ":", "auth_header", "=", "request", ".", "header", "(", "\"authorization\"", ")", "if", "auth_header", "is", "None", ":", "raise", "OAuthInvalidError", "(", "error", "=", "\"invalid_request\"", ",", "explanation", "=", "\"Authorization header is missing\"", ")", "auth_parts", "=", "auth_header", ".", "strip", "(", ")", ".", "encode", "(", "\"latin1\"", ")", ".", "split", "(", "None", ")", "if", "auth_parts", "[", "0", "]", ".", "strip", "(", ")", ".", "lower", "(", ")", "!=", "b'basic'", ":", "raise", "OAuthInvalidError", "(", "error", "=", "\"invalid_request\"", ",", "explanation", "=", "\"Provider supports basic authentication only\"", ")", "client_id", ",", "client_secret", "=", "b64decode", "(", "auth_parts", "[", "1", "]", ")", ".", "split", "(", "b':'", ",", "1", ")", "return", "client_id", ".", "decode", "(", "\"latin1\"", ")", ",", "client_secret", ".", "decode", "(", "\"latin1\"", ")" ]
Extracts the credentials of a client using HTTP Basic Auth. Expects the ``client_id`` to be the username and the ``client_secret`` to be the password part of the Authorization header. :param request: The incoming request :type request: oauth2.web.Request :return: A tuple in the format of (<CLIENT ID>, <CLIENT SECRET>)` :rtype: tuple
[ "Extracts", "the", "credentials", "of", "a", "client", "using", "HTTP", "Basic", "Auth", "." ]
python
train
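
The http_basic_auth record above only parses an Authorization header; the round trip below shows the header shape it expects. The client_id and client_secret values are made up, no oauth2.web.Request object is constructed, and this sketch illustrates the encoding rather than the library's API.

from base64 import b64decode, b64encode

client_id, client_secret = "abc", "s3cret"
# Build the header a client would send: "Basic " + base64("<id>:<secret>").
header = b"Basic " + b64encode("{0}:{1}".format(client_id, client_secret).encode("latin1"))
# header == b'Basic YWJjOnMzY3JldA=='

# Reverse the steps the parser performs: split off the scheme, decode, split on ':'.
scheme, payload = header.split(None, 1)
assert scheme.lower() == b"basic"
assert b64decode(payload).split(b":", 1) == [b"abc", b"s3cret"]
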
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/core/rest.py
https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/core/rest.py#L21-L92
def create_requests_session( retries=None, backoff_factor=None, status_forcelist=None, pools_size=4, maxsize=4, ssl_verify=None, ssl_cert=None, proxy=None, session=None, ): """Create a requests session that retries some errors.""" # pylint: disable=too-many-branches config = Configuration() if retries is None: if config.error_retry_max is None: retries = 5 else: retries = config.error_retry_max if backoff_factor is None: if config.error_retry_backoff is None: backoff_factor = 0.23 else: backoff_factor = config.error_retry_backoff if status_forcelist is None: if config.error_retry_codes is None: status_forcelist = [500, 502, 503, 504] else: status_forcelist = config.error_retry_codes if ssl_verify is None: ssl_verify = config.verify_ssl if ssl_cert is None: if config.cert_file and config.key_file: ssl_cert = (config.cert_file, config.key_file) elif config.cert_file: ssl_cert = config.cert_file if proxy is None: proxy = Configuration().proxy session = session or requests.Session() session.verify = ssl_verify session.cert = ssl_cert if proxy: session.proxies = {"http": proxy, "https": proxy} retry = Retry( backoff_factor=backoff_factor, connect=retries, method_whitelist=False, read=retries, status_forcelist=tuple(status_forcelist), total=retries, ) adapter = HTTPAdapter( max_retries=retry, pool_connections=pools_size, pool_maxsize=maxsize, pool_block=True, ) session.mount("http://", adapter) session.mount("https://", adapter) return session
[ "def", "create_requests_session", "(", "retries", "=", "None", ",", "backoff_factor", "=", "None", ",", "status_forcelist", "=", "None", ",", "pools_size", "=", "4", ",", "maxsize", "=", "4", ",", "ssl_verify", "=", "None", ",", "ssl_cert", "=", "None", ",", "proxy", "=", "None", ",", "session", "=", "None", ",", ")", ":", "# pylint: disable=too-many-branches", "config", "=", "Configuration", "(", ")", "if", "retries", "is", "None", ":", "if", "config", ".", "error_retry_max", "is", "None", ":", "retries", "=", "5", "else", ":", "retries", "=", "config", ".", "error_retry_max", "if", "backoff_factor", "is", "None", ":", "if", "config", ".", "error_retry_backoff", "is", "None", ":", "backoff_factor", "=", "0.23", "else", ":", "backoff_factor", "=", "config", ".", "error_retry_backoff", "if", "status_forcelist", "is", "None", ":", "if", "config", ".", "error_retry_codes", "is", "None", ":", "status_forcelist", "=", "[", "500", ",", "502", ",", "503", ",", "504", "]", "else", ":", "status_forcelist", "=", "config", ".", "error_retry_codes", "if", "ssl_verify", "is", "None", ":", "ssl_verify", "=", "config", ".", "verify_ssl", "if", "ssl_cert", "is", "None", ":", "if", "config", ".", "cert_file", "and", "config", ".", "key_file", ":", "ssl_cert", "=", "(", "config", ".", "cert_file", ",", "config", ".", "key_file", ")", "elif", "config", ".", "cert_file", ":", "ssl_cert", "=", "config", ".", "cert_file", "if", "proxy", "is", "None", ":", "proxy", "=", "Configuration", "(", ")", ".", "proxy", "session", "=", "session", "or", "requests", ".", "Session", "(", ")", "session", ".", "verify", "=", "ssl_verify", "session", ".", "cert", "=", "ssl_cert", "if", "proxy", ":", "session", ".", "proxies", "=", "{", "\"http\"", ":", "proxy", ",", "\"https\"", ":", "proxy", "}", "retry", "=", "Retry", "(", "backoff_factor", "=", "backoff_factor", ",", "connect", "=", "retries", ",", "method_whitelist", "=", "False", ",", "read", "=", "retries", ",", "status_forcelist", "=", "tuple", "(", "status_forcelist", ")", ",", "total", "=", "retries", ",", ")", "adapter", "=", "HTTPAdapter", "(", "max_retries", "=", "retry", ",", "pool_connections", "=", "pools_size", ",", "pool_maxsize", "=", "maxsize", ",", "pool_block", "=", "True", ",", ")", "session", ".", "mount", "(", "\"http://\"", ",", "adapter", ")", "session", ".", "mount", "(", "\"https://\"", ",", "adapter", ")", "return", "session" ]
Create a requests session that retries some errors.
[ "Create", "a", "requests", "session", "that", "retries", "some", "errors", "." ]
python
train
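
The create_requests_session record above layers Cloudsmith's Configuration defaults onto a standard requests/urllib3 retry setup. Below is a hedged, standalone sketch of the same pattern with those default values hard-coded for illustration; it is not an exact reproduction (for example, it omits method_whitelist=False, so only idempotent methods are retried), and the URL in the final comment is a placeholder.

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

retry = Retry(
    total=5,                                # error_retry_max default
    connect=5,
    read=5,
    backoff_factor=0.23,                    # error_retry_backoff default
    status_forcelist=(500, 502, 503, 504),  # error_retry_codes default
)
adapter = HTTPAdapter(max_retries=retry, pool_connections=4, pool_maxsize=4, pool_block=True)

session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)

# Requests made through this session now retry transient 5xx responses, e.g.:
# response = session.get("https://example.com/api")
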