Dataset schema: one row per Python function, with the following columns:

    repo              string (length 7 to 54)
    path              string (length 4 to 192)
    url               string (length 87 to 284)
    code              string (length 78 to 104k)
    code_tokens       sequence
    docstring         string (length 1 to 46.9k)
    docstring_tokens  sequence
    language          string (1 distinct value: python)
    partition         string (3 distinct values: train, valid, test)
oblalex/verboselib
verboselib/management/utils.py
https://github.com/oblalex/verboselib/blob/3c108bef060b091e1f7c08861ab07672c87ddcff/verboselib/management/utils.py#L10-L39
def find_command(cmd, path=None, pathext=None): """ Taken `from Django http://bit.ly/1njB3Y9>`_. """ if path is None: path = os.environ.get('PATH', '').split(os.pathsep) if isinstance(path, string_types): path = [path] # check if there are path extensions for Windows executables if pathext is None: pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD') pathext = pathext.split(os.pathsep) # don't use extensions if the command ends with one of them for ext in pathext: if cmd.endswith(ext): pathext = [''] break # check if we find the command on PATH for p in path: f = os.path.join(p, cmd) if os.path.isfile(f): return f for ext in pathext: fext = f + ext if os.path.isfile(fext): return fext return None
[ "def", "find_command", "(", "cmd", ",", "path", "=", "None", ",", "pathext", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pathsep", ")", "if", "isinstance", "(", "path", ",", "string_types", ")", ":", "path", "=", "[", "path", "]", "# check if there are path extensions for Windows executables", "if", "pathext", "is", "None", ":", "pathext", "=", "os", ".", "environ", ".", "get", "(", "'PATHEXT'", ",", "'.COM;.EXE;.BAT;.CMD'", ")", "pathext", "=", "pathext", ".", "split", "(", "os", ".", "pathsep", ")", "# don't use extensions if the command ends with one of them", "for", "ext", "in", "pathext", ":", "if", "cmd", ".", "endswith", "(", "ext", ")", ":", "pathext", "=", "[", "''", "]", "break", "# check if we find the command on PATH", "for", "p", "in", "path", ":", "f", "=", "os", ".", "path", ".", "join", "(", "p", ",", "cmd", ")", "if", "os", ".", "path", ".", "isfile", "(", "f", ")", ":", "return", "f", "for", "ext", "in", "pathext", ":", "fext", "=", "f", "+", "ext", "if", "os", ".", "path", ".", "isfile", "(", "fext", ")", ":", "return", "fext", "return", "None" ]
Taken `from Django http://bit.ly/1njB3Y9>`_.
[ "Taken", "from", "Django", "http", ":", "//", "bit", ".", "ly", "/", "1njB3Y9", ">", "_", "." ]
python
train
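The find_command record above re-implements executable lookup over PATH (and PATHEXT on Windows). On Python 3.3+ the standard library's shutil.which covers the same ground; a minimal sketch for comparison (the command name is only an example):

```python
import shutil

# shutil.which performs the same PATH/PATHEXT search that find_command
# does by hand; it returns the full path, or None when nothing is found.
python_path = shutil.which("python3")
print(python_path)  # e.g. /usr/bin/python3, or None if absent
```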
DataDog/integrations-core
varnish/datadog_checks/varnish/varnish.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/varnish/datadog_checks/varnish/varnish.py#L256-L333
def _parse_varnishadm(self, output, tags): """ Parse out service checks from varnishadm. Example output: Backend b0 is Sick Current states good: 2 threshold: 3 window: 5 Average responsetime of good probes: 0.000000 Oldest Newest ================================================================ -------------------------------------------------------------444 Good IPv4 -------------------------------------------------------------XXX Good Xmit -------------------------------------------------------------RRR Good Recv ----------------------------------------------------------HHH--- Happy Backend b1 is Sick Current states good: 2 threshold: 3 window: 5 Average responsetime of good probes: 0.000000 Oldest Newest ================================================================ ----------------------------------------------------------HHH--- Happy Example output (new output format): Backend name Admin Probe boot.default probe Healthy (no probe) boot.backend2 probe Healthy 4/4 Current states good: 4 threshold: 3 window: 4 Average response time of good probes: 0.002504 Oldest ================================================== Newest --------------------------------------------------------------44 Good IPv4 --------------------------------------------------------------XX Good Xmit --------------------------------------------------------------RR Good Recv ------------------------------------------------------------HHHH Happy """ # Process status by backend. backends_by_status = defaultdict(list) for line in output.split("\n"): backend, status, message = None, None, None # split string and remove all empty fields tokens = filter(None, line.strip().split(' ')) tokens = [t for t in tokens] if len(tokens): if tokens == ['Backend', 'name', 'Admin', 'Probe']: # skip the column headers that exist in new output format continue # parse new output format # the backend name will include the vcl name # so split on first . to remove prefix elif len(tokens) >= 4 and tokens[1] in ['healthy', 'sick']: # If the backend health was overriden, lets grab the # overriden value instead of the probed health backend = tokens[0].split('.', 1)[-1] status = tokens[1].lower() elif len(tokens) >= 4 and tokens[1] == 'probe': backend = tokens[0].split('.', 1)[-1] status = tokens[2].lower() # Parse older Varnish backend output elif tokens[0] == 'Backend': backend = tokens[1] status = tokens[-1].lower() if tokens[0] == 'Current' and backend is not None: try: message = ' '.join(tokens[2:]).strip() except Exception: # If we can't parse a message still send a status. self.log.exception('Error when parsing message from varnishadm') message = '' if backend is not None: backends_by_status[status].append((backend, message)) for status, backends in iteritems(backends_by_status): check_status = BackendStatus.to_check_status(status) for backend, message in backends: service_checks_tags = ['backend:%s' % backend] + tags self.service_check(self.SERVICE_CHECK_NAME, check_status, tags=service_checks_tags, message=message)
[ "def", "_parse_varnishadm", "(", "self", ",", "output", ",", "tags", ")", ":", "# Process status by backend.", "backends_by_status", "=", "defaultdict", "(", "list", ")", "for", "line", "in", "output", ".", "split", "(", "\"\\n\"", ")", ":", "backend", ",", "status", ",", "message", "=", "None", ",", "None", ",", "None", "# split string and remove all empty fields", "tokens", "=", "filter", "(", "None", ",", "line", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", ")", "tokens", "=", "[", "t", "for", "t", "in", "tokens", "]", "if", "len", "(", "tokens", ")", ":", "if", "tokens", "==", "[", "'Backend'", ",", "'name'", ",", "'Admin'", ",", "'Probe'", "]", ":", "# skip the column headers that exist in new output format", "continue", "# parse new output format", "# the backend name will include the vcl name", "# so split on first . to remove prefix", "elif", "len", "(", "tokens", ")", ">=", "4", "and", "tokens", "[", "1", "]", "in", "[", "'healthy'", ",", "'sick'", "]", ":", "# If the backend health was overriden, lets grab the", "# overriden value instead of the probed health", "backend", "=", "tokens", "[", "0", "]", ".", "split", "(", "'.'", ",", "1", ")", "[", "-", "1", "]", "status", "=", "tokens", "[", "1", "]", ".", "lower", "(", ")", "elif", "len", "(", "tokens", ")", ">=", "4", "and", "tokens", "[", "1", "]", "==", "'probe'", ":", "backend", "=", "tokens", "[", "0", "]", ".", "split", "(", "'.'", ",", "1", ")", "[", "-", "1", "]", "status", "=", "tokens", "[", "2", "]", ".", "lower", "(", ")", "# Parse older Varnish backend output", "elif", "tokens", "[", "0", "]", "==", "'Backend'", ":", "backend", "=", "tokens", "[", "1", "]", "status", "=", "tokens", "[", "-", "1", "]", ".", "lower", "(", ")", "if", "tokens", "[", "0", "]", "==", "'Current'", "and", "backend", "is", "not", "None", ":", "try", ":", "message", "=", "' '", ".", "join", "(", "tokens", "[", "2", ":", "]", ")", ".", "strip", "(", ")", "except", "Exception", ":", "# If we can't parse a message still send a status.", "self", ".", "log", ".", "exception", "(", "'Error when parsing message from varnishadm'", ")", "message", "=", "''", "if", "backend", "is", "not", "None", ":", "backends_by_status", "[", "status", "]", ".", "append", "(", "(", "backend", ",", "message", ")", ")", "for", "status", ",", "backends", "in", "iteritems", "(", "backends_by_status", ")", ":", "check_status", "=", "BackendStatus", ".", "to_check_status", "(", "status", ")", "for", "backend", ",", "message", "in", "backends", ":", "service_checks_tags", "=", "[", "'backend:%s'", "%", "backend", "]", "+", "tags", "self", ".", "service_check", "(", "self", ".", "SERVICE_CHECK_NAME", ",", "check_status", ",", "tags", "=", "service_checks_tags", ",", "message", "=", "message", ")" ]
Parse out service checks from varnishadm. Example output: Backend b0 is Sick Current states good: 2 threshold: 3 window: 5 Average responsetime of good probes: 0.000000 Oldest Newest ================================================================ -------------------------------------------------------------444 Good IPv4 -------------------------------------------------------------XXX Good Xmit -------------------------------------------------------------RRR Good Recv ----------------------------------------------------------HHH--- Happy Backend b1 is Sick Current states good: 2 threshold: 3 window: 5 Average responsetime of good probes: 0.000000 Oldest Newest ================================================================ ----------------------------------------------------------HHH--- Happy Example output (new output format): Backend name Admin Probe boot.default probe Healthy (no probe) boot.backend2 probe Healthy 4/4 Current states good: 4 threshold: 3 window: 4 Average response time of good probes: 0.002504 Oldest ================================================== Newest --------------------------------------------------------------44 Good IPv4 --------------------------------------------------------------XX Good Xmit --------------------------------------------------------------RR Good Recv ------------------------------------------------------------HHHH Happy
[ "Parse", "out", "service", "checks", "from", "varnishadm", "." ]
python
train
05bit/peewee-async
peewee_async.py
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L1168-L1175
async def connect(self): """Create connection pool asynchronously. """ self.pool = await aiomysql.create_pool( loop=self.loop, db=self.database, connect_timeout=self.timeout, **self.connect_params)
[ "async", "def", "connect", "(", "self", ")", ":", "self", ".", "pool", "=", "await", "aiomysql", ".", "create_pool", "(", "loop", "=", "self", ".", "loop", ",", "db", "=", "self", ".", "database", ",", "connect_timeout", "=", "self", ".", "timeout", ",", "*", "*", "self", ".", "connect_params", ")" ]
Create connection pool asynchronously.
[ "Create", "connection", "pool", "asynchronously", "." ]
python
train
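The peewee-async connect record above is a thin wrapper around aiomysql.create_pool. A standalone sketch of creating and using such a pool; the host, credentials and database name are placeholders, not values from the project:

```python
import asyncio
import aiomysql

async def main():
    # Same call the connect() coroutine above makes; connection details
    # here are illustrative only.
    pool = await aiomysql.create_pool(
        host="127.0.0.1", port=3306,
        user="app", password="secret", db="appdb",
        connect_timeout=10,
    )
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            await cur.execute("SELECT 1")
            print(await cur.fetchone())
    pool.close()
    await pool.wait_closed()

asyncio.run(main())
```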
KieranWynn/pyquaternion
pyquaternion/quaternion.py
https://github.com/KieranWynn/pyquaternion/blob/d2aad7f3fb0d4b9cc23aa72b390e9b2e1273eae9/pyquaternion/quaternion.py#L237-L257
def _from_axis_angle(cls, axis, angle): """Initialise from axis and angle representation Create a Quaternion by specifying the 3-vector rotation axis and rotation angle (in radians) from which the quaternion's rotation should be created. Params: axis: a valid numpy 3-vector angle: a real valued angle in radians """ mag_sq = np.dot(axis, axis) if mag_sq == 0.0: raise ZeroDivisionError("Provided rotation axis has no length") # Ensure axis is in unit vector form if (abs(1.0 - mag_sq) > 1e-12): axis = axis / sqrt(mag_sq) theta = angle / 2.0 r = cos(theta) i = axis * sin(theta) return cls(r, i[0], i[1], i[2])
[ "def", "_from_axis_angle", "(", "cls", ",", "axis", ",", "angle", ")", ":", "mag_sq", "=", "np", ".", "dot", "(", "axis", ",", "axis", ")", "if", "mag_sq", "==", "0.0", ":", "raise", "ZeroDivisionError", "(", "\"Provided rotation axis has no length\"", ")", "# Ensure axis is in unit vector form", "if", "(", "abs", "(", "1.0", "-", "mag_sq", ")", ">", "1e-12", ")", ":", "axis", "=", "axis", "/", "sqrt", "(", "mag_sq", ")", "theta", "=", "angle", "/", "2.0", "r", "=", "cos", "(", "theta", ")", "i", "=", "axis", "*", "sin", "(", "theta", ")", "return", "cls", "(", "r", ",", "i", "[", "0", "]", ",", "i", "[", "1", "]", ",", "i", "[", "2", "]", ")" ]
Initialise from axis and angle representation Create a Quaternion by specifying the 3-vector rotation axis and rotation angle (in radians) from which the quaternion's rotation should be created. Params: axis: a valid numpy 3-vector angle: a real valued angle in radians
[ "Initialise", "from", "axis", "and", "angle", "representation" ]
python
train
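The pyquaternion record above shows the private _from_axis_angle constructor; normal usage goes through the public Quaternion constructor's axis/angle keywords. A short sketch:

```python
import numpy as np
from pyquaternion import Quaternion

# Rotate 90 degrees about the z-axis; the axis/angle keywords build the
# quaternion via the axis-angle path shown above.
q = Quaternion(axis=[0.0, 0.0, 1.0], angle=np.pi / 2)
print(q)                          # unit quaternion (w, x, y, z components)
print(q.rotate([1.0, 0.0, 0.0]))  # approximately [0, 1, 0]
```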
Nic30/hwt
hwt/serializer/verilog/serializer.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/serializer/verilog/serializer.py#L55-L95
def hardcodeRomIntoProcess(cls, rom): """ Due to verilog restrictions it is not posible to use array constants and rom memories has to be hardcoded as process """ processes = [] signals = [] for e in rom.endpoints: assert isinstance(e, Operator) and e.operator == AllOps.INDEX, e me, index = e.operands assert me is rom # construct output of the rom romValSig = rom.ctx.sig(rom.name, dtype=e.result._dtype) signals.append(romValSig) romValSig.hidden = False # construct process which will represent content of the rom cases = [(toHVal(i), [romValSig(v), ]) for i, v in enumerate(rom.defVal.val)] statements = [SwitchContainer(index, cases), ] for (_, (stm, )) in cases: stm.parentStm = statements[0] p = HWProcess(rom.name, statements, {index, }, {index, }, {romValSig, }) processes.append(p) # override usage of original index operator on rom # to use signal generated from this process def replaceOrigRomIndexExpr(x): if x is e.result: return romValSig else: return x for _e in e.result.endpoints: _e.operands = tuple(map(replaceOrigRomIndexExpr, _e.operands)) e.result = romValSig return processes, signals
[ "def", "hardcodeRomIntoProcess", "(", "cls", ",", "rom", ")", ":", "processes", "=", "[", "]", "signals", "=", "[", "]", "for", "e", "in", "rom", ".", "endpoints", ":", "assert", "isinstance", "(", "e", ",", "Operator", ")", "and", "e", ".", "operator", "==", "AllOps", ".", "INDEX", ",", "e", "me", ",", "index", "=", "e", ".", "operands", "assert", "me", "is", "rom", "# construct output of the rom", "romValSig", "=", "rom", ".", "ctx", ".", "sig", "(", "rom", ".", "name", ",", "dtype", "=", "e", ".", "result", ".", "_dtype", ")", "signals", ".", "append", "(", "romValSig", ")", "romValSig", ".", "hidden", "=", "False", "# construct process which will represent content of the rom", "cases", "=", "[", "(", "toHVal", "(", "i", ")", ",", "[", "romValSig", "(", "v", ")", ",", "]", ")", "for", "i", ",", "v", "in", "enumerate", "(", "rom", ".", "defVal", ".", "val", ")", "]", "statements", "=", "[", "SwitchContainer", "(", "index", ",", "cases", ")", ",", "]", "for", "(", "_", ",", "(", "stm", ",", ")", ")", "in", "cases", ":", "stm", ".", "parentStm", "=", "statements", "[", "0", "]", "p", "=", "HWProcess", "(", "rom", ".", "name", ",", "statements", ",", "{", "index", ",", "}", ",", "{", "index", ",", "}", ",", "{", "romValSig", ",", "}", ")", "processes", ".", "append", "(", "p", ")", "# override usage of original index operator on rom", "# to use signal generated from this process", "def", "replaceOrigRomIndexExpr", "(", "x", ")", ":", "if", "x", "is", "e", ".", "result", ":", "return", "romValSig", "else", ":", "return", "x", "for", "_e", "in", "e", ".", "result", ".", "endpoints", ":", "_e", ".", "operands", "=", "tuple", "(", "map", "(", "replaceOrigRomIndexExpr", ",", "_e", ".", "operands", ")", ")", "e", ".", "result", "=", "romValSig", "return", "processes", ",", "signals" ]
Due to verilog restrictions it is not posible to use array constants and rom memories has to be hardcoded as process
[ "Due", "to", "verilog", "restrictions", "it", "is", "not", "posible", "to", "use", "array", "constants", "and", "rom", "memories", "has", "to", "be", "hardcoded", "as", "process" ]
python
test
openpermissions/perch
perch/organisation.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/organisation.py#L176-L195
def all(cls, state=None, include_deactivated=False): """ Get all organisations :param state: State of organisation :param include_deactivated: Flag to include deactivated :returns: list of Organisation instances :raises: SocketError, CouchException """ if state and state not in validators.VALID_STATES: raise exceptions.ValidationError('Invalid "state"') elif state: organisations = yield views.organisations.get(key=state, include_docs=True) elif include_deactivated: organisations = yield views.organisations.get(include_docs=True) else: organisations = yield views.active_organisations.get(include_docs=True) raise Return([cls(**org['doc']) for org in organisations['rows']])
[ "def", "all", "(", "cls", ",", "state", "=", "None", ",", "include_deactivated", "=", "False", ")", ":", "if", "state", "and", "state", "not", "in", "validators", ".", "VALID_STATES", ":", "raise", "exceptions", ".", "ValidationError", "(", "'Invalid \"state\"'", ")", "elif", "state", ":", "organisations", "=", "yield", "views", ".", "organisations", ".", "get", "(", "key", "=", "state", ",", "include_docs", "=", "True", ")", "elif", "include_deactivated", ":", "organisations", "=", "yield", "views", ".", "organisations", ".", "get", "(", "include_docs", "=", "True", ")", "else", ":", "organisations", "=", "yield", "views", ".", "active_organisations", ".", "get", "(", "include_docs", "=", "True", ")", "raise", "Return", "(", "[", "cls", "(", "*", "*", "org", "[", "'doc'", "]", ")", "for", "org", "in", "organisations", "[", "'rows'", "]", "]", ")" ]
Get all organisations :param state: State of organisation :param include_deactivated: Flag to include deactivated :returns: list of Organisation instances :raises: SocketError, CouchException
[ "Get", "all", "organisations" ]
python
train
assemblerflow/flowcraft
flowcraft/generator/inspect.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L937-L1018
def log_parser(self): """Method that parses the nextflow log file once and updates the submitted number of samples for each process """ # Check the timestamp of the log file. Only proceed with the parsing # if it changed from the previous time. size_stamp = os.path.getsize(self.log_file) self.log_retry = 0 if size_stamp and size_stamp == self.log_sizestamp: return else: logger.debug("Updating log size stamp to: {}".format(size_stamp)) self.log_sizestamp = size_stamp # Regular expression to catch four groups: # 1. Start timestamp # 2. Work directory hash # 3. Process name # 4. Tag name r = ".* (.*) \[.*\].*\[(.*)\].*process > (.*) \((.*)\).*" with open(self.log_file) as fh: for line in fh: if "Submitted process >" in line or \ "Re-submitted process >" in line or \ "Cached process >" in line: m = re.match(r, line) if not m: continue time_start = m.group(1) workdir = m.group(2) process = m.group(3) tag = m.group(4) # Skip if this line has already been parsed if time_start + tag not in self.stored_log_ids: self.stored_log_ids.append(time_start + tag) else: continue # For first time processes if process not in self.processes: continue p = self.processes[process] # Skip is process/tag combination has finished or is retrying if tag in list(p["finished"]) + list(p["retry"]): continue # Update failed process/tags when they have been re-submitted if tag in list(p["failed"]) and \ "Re-submitted process >" in line: p["retry"].add(tag) self.send = True continue # Set process barrier to running. Check for barrier status # are performed at the end of the trace parsing in the # _update_barrier_status method. p["barrier"] = "R" if tag not in p["submitted"]: p["submitted"].add(tag) # Update the process_tags attribute with the new tag. # Update only when the tag does not exist. This may rarely # occur when the tag is parsed first in the trace file if tag not in self.process_tags[process]: self.process_tags[process][tag] = { "workdir": self._expand_path(workdir), "start": time_start } self.send = True # When the tag is filled in the trace file parsing, # the timestamp may not be present in the trace. In # those cases, fill that information here. elif not self.process_tags[process][tag]["start"]: self.process_tags[process][tag]["start"] = time_start self.send = True self._update_pipeline_status()
[ "def", "log_parser", "(", "self", ")", ":", "# Check the timestamp of the log file. Only proceed with the parsing", "# if it changed from the previous time.", "size_stamp", "=", "os", ".", "path", ".", "getsize", "(", "self", ".", "log_file", ")", "self", ".", "log_retry", "=", "0", "if", "size_stamp", "and", "size_stamp", "==", "self", ".", "log_sizestamp", ":", "return", "else", ":", "logger", ".", "debug", "(", "\"Updating log size stamp to: {}\"", ".", "format", "(", "size_stamp", ")", ")", "self", ".", "log_sizestamp", "=", "size_stamp", "# Regular expression to catch four groups:", "# 1. Start timestamp", "# 2. Work directory hash", "# 3. Process name", "# 4. Tag name", "r", "=", "\".* (.*) \\[.*\\].*\\[(.*)\\].*process > (.*) \\((.*)\\).*\"", "with", "open", "(", "self", ".", "log_file", ")", "as", "fh", ":", "for", "line", "in", "fh", ":", "if", "\"Submitted process >\"", "in", "line", "or", "\"Re-submitted process >\"", "in", "line", "or", "\"Cached process >\"", "in", "line", ":", "m", "=", "re", ".", "match", "(", "r", ",", "line", ")", "if", "not", "m", ":", "continue", "time_start", "=", "m", ".", "group", "(", "1", ")", "workdir", "=", "m", ".", "group", "(", "2", ")", "process", "=", "m", ".", "group", "(", "3", ")", "tag", "=", "m", ".", "group", "(", "4", ")", "# Skip if this line has already been parsed", "if", "time_start", "+", "tag", "not", "in", "self", ".", "stored_log_ids", ":", "self", ".", "stored_log_ids", ".", "append", "(", "time_start", "+", "tag", ")", "else", ":", "continue", "# For first time processes", "if", "process", "not", "in", "self", ".", "processes", ":", "continue", "p", "=", "self", ".", "processes", "[", "process", "]", "# Skip is process/tag combination has finished or is retrying", "if", "tag", "in", "list", "(", "p", "[", "\"finished\"", "]", ")", "+", "list", "(", "p", "[", "\"retry\"", "]", ")", ":", "continue", "# Update failed process/tags when they have been re-submitted", "if", "tag", "in", "list", "(", "p", "[", "\"failed\"", "]", ")", "and", "\"Re-submitted process >\"", "in", "line", ":", "p", "[", "\"retry\"", "]", ".", "add", "(", "tag", ")", "self", ".", "send", "=", "True", "continue", "# Set process barrier to running. Check for barrier status", "# are performed at the end of the trace parsing in the", "# _update_barrier_status method.", "p", "[", "\"barrier\"", "]", "=", "\"R\"", "if", "tag", "not", "in", "p", "[", "\"submitted\"", "]", ":", "p", "[", "\"submitted\"", "]", ".", "add", "(", "tag", ")", "# Update the process_tags attribute with the new tag.", "# Update only when the tag does not exist. This may rarely", "# occur when the tag is parsed first in the trace file", "if", "tag", "not", "in", "self", ".", "process_tags", "[", "process", "]", ":", "self", ".", "process_tags", "[", "process", "]", "[", "tag", "]", "=", "{", "\"workdir\"", ":", "self", ".", "_expand_path", "(", "workdir", ")", ",", "\"start\"", ":", "time_start", "}", "self", ".", "send", "=", "True", "# When the tag is filled in the trace file parsing,", "# the timestamp may not be present in the trace. In", "# those cases, fill that information here.", "elif", "not", "self", ".", "process_tags", "[", "process", "]", "[", "tag", "]", "[", "\"start\"", "]", ":", "self", ".", "process_tags", "[", "process", "]", "[", "tag", "]", "[", "\"start\"", "]", "=", "time_start", "self", ".", "send", "=", "True", "self", ".", "_update_pipeline_status", "(", ")" ]
Method that parses the nextflow log file once and updates the submitted number of samples for each process
[ "Method", "that", "parses", "the", "nextflow", "log", "file", "once", "and", "updates", "the", "submitted", "number", "of", "samples", "for", "each", "process" ]
python
test
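The regular expression in the flowcraft log_parser record above extracts the start time, work-directory hash, process name and tag from Nextflow submission lines. A quick check of that pattern; the log line itself is made up for illustration:

```python
import re

# Same pattern as in log_parser: groups are start time, workdir hash,
# process name and tag.
pattern = r".* (.*) \[.*\].*\[(.*)\].*process > (.*) \((.*)\).*"

line = ("Apr-19 14:10:02.015 [Task submitter] INFO  nextflow.TaskProcessor - "
        "[22/d64c1a] Submitted process > integrity_coverage (sampleA)")

m = re.match(pattern, line)
if m:
    time_start, workdir, process, tag = m.groups()
    print(time_start, workdir, process, tag)
    # 14:10:02.015 22/d64c1a integrity_coverage sampleA
```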
openstack/networking-cisco
networking_cisco/plugins/cisco/db/device_manager/hosting_device_manager_db.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/db/device_manager/hosting_device_manager_db.py#L502-L507
def delete_all_hosting_devices(self, context, force_delete=False): """Deletes all hosting devices.""" for item in self._get_collection_query( context, hd_models.HostingDeviceTemplate): self.delete_all_hosting_devices_by_template( context, template=item, force_delete=force_delete)
[ "def", "delete_all_hosting_devices", "(", "self", ",", "context", ",", "force_delete", "=", "False", ")", ":", "for", "item", "in", "self", ".", "_get_collection_query", "(", "context", ",", "hd_models", ".", "HostingDeviceTemplate", ")", ":", "self", ".", "delete_all_hosting_devices_by_template", "(", "context", ",", "template", "=", "item", ",", "force_delete", "=", "force_delete", ")" ]
Deletes all hosting devices.
[ "Deletes", "all", "hosting", "devices", "." ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/bugzillarest.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/bugzillarest.py#L429-L448
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the login, password and token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if BugzillaRESTClient.PBUGZILLA_LOGIN in payload: payload.pop(BugzillaRESTClient.PBUGZILLA_LOGIN) if BugzillaRESTClient.PBUGZILLA_PASSWORD in payload: payload.pop(BugzillaRESTClient.PBUGZILLA_PASSWORD) if BugzillaRESTClient.PBUGZILLA_TOKEN in payload: payload.pop(BugzillaRESTClient.PBUGZILLA_TOKEN) return url, headers, payload
[ "def", "sanitize_for_archive", "(", "url", ",", "headers", ",", "payload", ")", ":", "if", "BugzillaRESTClient", ".", "PBUGZILLA_LOGIN", "in", "payload", ":", "payload", ".", "pop", "(", "BugzillaRESTClient", ".", "PBUGZILLA_LOGIN", ")", "if", "BugzillaRESTClient", ".", "PBUGZILLA_PASSWORD", "in", "payload", ":", "payload", ".", "pop", "(", "BugzillaRESTClient", ".", "PBUGZILLA_PASSWORD", ")", "if", "BugzillaRESTClient", ".", "PBUGZILLA_TOKEN", "in", "payload", ":", "payload", ".", "pop", "(", "BugzillaRESTClient", ".", "PBUGZILLA_TOKEN", ")", "return", "url", ",", "headers", ",", "payload" ]
Sanitize payload of a HTTP request by removing the login, password and token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload
[ "Sanitize", "payload", "of", "a", "HTTP", "request", "by", "removing", "the", "login", "password", "and", "token", "information", "before", "storing", "/", "retrieving", "archived", "items" ]
python
test
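The perceval sanitize_for_archive record above simply pops the credential fields from the request payload before archiving. A standalone sketch of the same idea, using plain key names as stand-ins for the PBUGZILLA_* constants on the real client:

```python
def sanitize_payload(url, headers, payload):
    """Drop credential fields before archiving; the key names here are
    illustrative stand-ins for the client's PBUGZILLA_* constants."""
    for key in ("login", "password", "token"):
        payload.pop(key, None)
    return url, headers, payload

url, headers, payload = sanitize_payload(
    "https://bugzilla.example.org/rest/bug",
    {"Accept": "application/json"},
    {"login": "user", "password": "secret", "token": "abc", "limit": 100},
)
print(payload)  # {'limit': 100}
```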
LionelAuroux/pyrser
pyrser/parsing/node.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/parsing/node.py#L81-L89
def set(self, othernode): """allow to completly mutate the node into any subclasses of Node""" self.__class__ = othernode.__class__ self.clean() if len(othernode) > 0: for k, v in othernode.items(): self[k] = v for k, v in vars(othernode).items(): setattr(self, k, v)
[ "def", "set", "(", "self", ",", "othernode", ")", ":", "self", ".", "__class__", "=", "othernode", ".", "__class__", "self", ".", "clean", "(", ")", "if", "len", "(", "othernode", ")", ">", "0", ":", "for", "k", ",", "v", "in", "othernode", ".", "items", "(", ")", ":", "self", "[", "k", "]", "=", "v", "for", "k", ",", "v", "in", "vars", "(", "othernode", ")", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
allow to completly mutate the node into any subclasses of Node
[ "allow", "to", "completly", "mutate", "the", "node", "into", "any", "subclasses", "of", "Node" ]
python
test
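The pyrser Node.set record above re-types an existing instance by assigning __class__, then copying mapping items and instance attributes from the other node. A tiny self-contained sketch of that pattern; the classes are invented for illustration:

```python
class Node(dict):
    """Minimal dict-based stand-in for pyrser's Node."""
    def clean(self):
        self.clear()

    def set(self, othernode):
        # mutate this object into the other node's (sub)class
        self.__class__ = othernode.__class__
        self.clean()
        for k, v in othernode.items():
            self[k] = v
        for k, v in vars(othernode).items():
            setattr(self, k, v)

class IntNode(Node):
    pass

a, b = Node(), IntNode(value=1)
b.extra = "attr"
a.set(b)
print(type(a).__name__, dict(a), a.extra)  # IntNode {'value': 1} attr
```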
TestInABox/stackInABox
stackinabox/services/service.py
https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/services/service.py#L316-L322
def reset(self): """Reset the service to its' initial state.""" logger.debug('StackInABoxService ({0}): Reset' .format(self.__id, self.name)) self.base_url = '/{0}'.format(self.name) logger.debug('StackInABoxService ({0}): Hosting Service {1}' .format(self.__id, self.name))
[ "def", "reset", "(", "self", ")", ":", "logger", ".", "debug", "(", "'StackInABoxService ({0}): Reset'", ".", "format", "(", "self", ".", "__id", ",", "self", ".", "name", ")", ")", "self", ".", "base_url", "=", "'/{0}'", ".", "format", "(", "self", ".", "name", ")", "logger", ".", "debug", "(", "'StackInABoxService ({0}): Hosting Service {1}'", ".", "format", "(", "self", ".", "__id", ",", "self", ".", "name", ")", ")" ]
Reset the service to its' initial state.
[ "Reset", "the", "service", "to", "its", "initial", "state", "." ]
python
train
jvamvas/rhymediscovery
rhymediscovery/evaluate_schemes.py
https://github.com/jvamvas/rhymediscovery/blob/b76509c98554b12efa06fe9ab557cca5fa5e4a79/rhymediscovery/evaluate_schemes.py#L107-L155
def compare(stanzas, gold_schemes, found_schemes): """get accuracy and precision/recall""" result = SuccessMeasure() total = float(len(gold_schemes)) correct = 0.0 for (g, f) in zip(gold_schemes, found_schemes): if g == f: correct += 1 result.accuracy = correct / total # for each word, let rhymeset[word] = set of words in rest of stanza rhyming with the word # precision = # correct words in rhymeset[word]/# words in proposed rhymeset[word] # recall = # correct words in rhymeset[word]/# words in reference words in rhymeset[word] # total precision and recall = avg over all words over all stanzas tot_p = 0.0 tot_r = 0.0 tot_words = 0.0 for (s, g, f) in zip(stanzas, gold_schemes, found_schemes): stanzasize = len(s) for wi, word in enumerate(s): grhymeset_word = set( map(lambda x: x[0], filter(lambda x: x[1] == g[wi], zip(range(wi + 1, stanzasize), g[wi + 1:])))) frhymeset_word = set( map(lambda x: x[0], filter(lambda x: x[1] == f[wi], zip(range(wi + 1, stanzasize), f[wi + 1:])))) if len(grhymeset_word) == 0: continue tot_words += 1 if len(frhymeset_word) == 0: continue # find intersection correct = float(len(grhymeset_word.intersection(frhymeset_word))) precision = correct / len(frhymeset_word) recall = correct / len(grhymeset_word) tot_p += precision tot_r += recall precision = tot_p / tot_words recall = tot_r / tot_words result.precision = precision result.recall = recall if precision + recall > 0: result.f_score = 2 * precision * recall / (precision + recall) return result
[ "def", "compare", "(", "stanzas", ",", "gold_schemes", ",", "found_schemes", ")", ":", "result", "=", "SuccessMeasure", "(", ")", "total", "=", "float", "(", "len", "(", "gold_schemes", ")", ")", "correct", "=", "0.0", "for", "(", "g", ",", "f", ")", "in", "zip", "(", "gold_schemes", ",", "found_schemes", ")", ":", "if", "g", "==", "f", ":", "correct", "+=", "1", "result", ".", "accuracy", "=", "correct", "/", "total", "# for each word, let rhymeset[word] = set of words in rest of stanza rhyming with the word", "# precision = # correct words in rhymeset[word]/# words in proposed rhymeset[word]", "# recall = # correct words in rhymeset[word]/# words in reference words in rhymeset[word]", "# total precision and recall = avg over all words over all stanzas", "tot_p", "=", "0.0", "tot_r", "=", "0.0", "tot_words", "=", "0.0", "for", "(", "s", ",", "g", ",", "f", ")", "in", "zip", "(", "stanzas", ",", "gold_schemes", ",", "found_schemes", ")", ":", "stanzasize", "=", "len", "(", "s", ")", "for", "wi", ",", "word", "in", "enumerate", "(", "s", ")", ":", "grhymeset_word", "=", "set", "(", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "filter", "(", "lambda", "x", ":", "x", "[", "1", "]", "==", "g", "[", "wi", "]", ",", "zip", "(", "range", "(", "wi", "+", "1", ",", "stanzasize", ")", ",", "g", "[", "wi", "+", "1", ":", "]", ")", ")", ")", ")", "frhymeset_word", "=", "set", "(", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "filter", "(", "lambda", "x", ":", "x", "[", "1", "]", "==", "f", "[", "wi", "]", ",", "zip", "(", "range", "(", "wi", "+", "1", ",", "stanzasize", ")", ",", "f", "[", "wi", "+", "1", ":", "]", ")", ")", ")", ")", "if", "len", "(", "grhymeset_word", ")", "==", "0", ":", "continue", "tot_words", "+=", "1", "if", "len", "(", "frhymeset_word", ")", "==", "0", ":", "continue", "# find intersection", "correct", "=", "float", "(", "len", "(", "grhymeset_word", ".", "intersection", "(", "frhymeset_word", ")", ")", ")", "precision", "=", "correct", "/", "len", "(", "frhymeset_word", ")", "recall", "=", "correct", "/", "len", "(", "grhymeset_word", ")", "tot_p", "+=", "precision", "tot_r", "+=", "recall", "precision", "=", "tot_p", "/", "tot_words", "recall", "=", "tot_r", "/", "tot_words", "result", ".", "precision", "=", "precision", "result", ".", "recall", "=", "recall", "if", "precision", "+", "recall", ">", "0", ":", "result", ".", "f_score", "=", "2", "*", "precision", "*", "recall", "/", "(", "precision", "+", "recall", ")", "return", "result" ]
get accuracy and precision/recall
[ "get", "accuracy", "and", "precision", "/", "recall" ]
python
train
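The rhymediscovery compare record above scores a proposed rhyme scheme by building, for each word, the set of later words in the stanza that share its label, then averaging per-word precision and recall. A toy illustration of those per-word rhyme sets, simplified from the full function; the schemes are invented:

```python
def rhymesets(scheme):
    """For each position, the set of later positions with the same label."""
    return [
        {j for j in range(i + 1, len(scheme)) if scheme[j] == scheme[i]}
        for i in range(len(scheme))
    ]

gold  = ["a", "b", "a", "b"]   # gold scheme for a 4-line stanza
found = ["a", "a", "a", "b"]   # proposed scheme

for i, (g, f) in enumerate(zip(rhymesets(gold), rhymesets(found))):
    if not g:          # words with no gold rhyme partner are skipped
        continue
    correct = len(g & f)
    precision = correct / len(f) if f else 0.0
    recall = correct / len(g)
    print(i, g, f, precision, recall)
# 0 {2} {1, 2} 0.5 1.0
# 1 {3} {2} 0.0 0.0
```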
osilkin98/PyBRY
generator.py
https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/generator.py#L167-L205
def generate_lbryd_wrapper(url=LBRY_API_RAW_JSON_URL, read_file=__LBRYD_BASE_FPATH__, write_file=LBRYD_FPATH): """ Generates the actual functions for lbryd_api.py based on lbry's documentation :param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default :param str read_file: This is the path to the file from which we will be reading :param str write_file: Path from project root to the file we'll be writing to. """ functions = get_lbry_api_function_docs(url) # Open the actual file for appending with open(write_file, 'w') as lbry_file: lbry_file.write("# This file was generated at build time using the generator function\n") lbry_file.write("# You may edit but do so with caution\n") with open(read_file, 'r') as template: header = template.read() lbry_file.write(header) # Iterate through all the functions we retrieved for func in functions: method_definition = generate_method_definition(func) # Write to file lbry_file.write(method_definition) try: from yapf.yapflib.yapf_api import FormatFile # Now we should format the file using the yapf formatter FormatFile(write_file, in_place=True) except ImportError as IE: print("[Warning]: yapf is not installed, so the generated code will not follow an easy-to-read standard") print(IE)
[ "def", "generate_lbryd_wrapper", "(", "url", "=", "LBRY_API_RAW_JSON_URL", ",", "read_file", "=", "__LBRYD_BASE_FPATH__", ",", "write_file", "=", "LBRYD_FPATH", ")", ":", "functions", "=", "get_lbry_api_function_docs", "(", "url", ")", "# Open the actual file for appending", "with", "open", "(", "write_file", ",", "'w'", ")", "as", "lbry_file", ":", "lbry_file", ".", "write", "(", "\"# This file was generated at build time using the generator function\\n\"", ")", "lbry_file", ".", "write", "(", "\"# You may edit but do so with caution\\n\"", ")", "with", "open", "(", "read_file", ",", "'r'", ")", "as", "template", ":", "header", "=", "template", ".", "read", "(", ")", "lbry_file", ".", "write", "(", "header", ")", "# Iterate through all the functions we retrieved", "for", "func", "in", "functions", ":", "method_definition", "=", "generate_method_definition", "(", "func", ")", "# Write to file", "lbry_file", ".", "write", "(", "method_definition", ")", "try", ":", "from", "yapf", ".", "yapflib", ".", "yapf_api", "import", "FormatFile", "# Now we should format the file using the yapf formatter", "FormatFile", "(", "write_file", ",", "in_place", "=", "True", ")", "except", "ImportError", "as", "IE", ":", "print", "(", "\"[Warning]: yapf is not installed, so the generated code will not follow an easy-to-read standard\"", ")", "print", "(", "IE", ")" ]
Generates the actual functions for lbryd_api.py based on lbry's documentation :param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default :param str read_file: This is the path to the file from which we will be reading :param str write_file: Path from project root to the file we'll be writing to.
[ "Generates", "the", "actual", "functions", "for", "lbryd_api", ".", "py", "based", "on", "lbry", "s", "documentation" ]
python
train
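The pybry generator record above writes the generated module and then, when yapf is available, reformats it in place. A minimal sketch of that optional formatting step; the file name is a placeholder:

```python
try:
    from yapf.yapflib.yapf_api import FormatFile
    # Reformat the freshly written module in place; the result tuple
    # returned by FormatFile is not needed here.
    FormatFile("lbryd_api.py", in_place=True)
except ImportError as exc:
    # Generation still succeeds, the output is just less tidy.
    print("yapf not installed, skipping code formatting:", exc)
```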
tensorflow/tensor2tensor
tensor2tensor/data_generators/generator_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L906-L943
def tfrecord_iterator(filenames, gzipped=False, example_spec=None): """Yields records from TFRecord files. Args: filenames: list<str>, list of TFRecord filenames to read from. gzipped: bool, whether the TFRecord files are gzip-encoded. example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>, if provided, will parse each record as a tensorflow.Example proto. Yields: Records (or parsed Examples, if example_spec is provided) from files. """ with tf.Graph().as_default(): dataset = tf.data.Dataset.from_tensor_slices(filenames) def _load_records(filename): return tf.data.TFRecordDataset( filename, compression_type=tf.constant("GZIP") if gzipped else None, buffer_size=16 * 1000 * 1000) dataset = dataset.flat_map(_load_records) def _parse_example(ex_ser): return tf.parse_single_example(ex_ser, example_spec) if example_spec: dataset = dataset.map(_parse_example, num_parallel_calls=32) dataset = dataset.prefetch(100) record_it = dataset.make_one_shot_iterator().get_next() with tf.Session() as sess: while True: try: ex = sess.run(record_it) yield ex except tf.errors.OutOfRangeError: break
[ "def", "tfrecord_iterator", "(", "filenames", ",", "gzipped", "=", "False", ",", "example_spec", "=", "None", ")", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "filenames", ")", "def", "_load_records", "(", "filename", ")", ":", "return", "tf", ".", "data", ".", "TFRecordDataset", "(", "filename", ",", "compression_type", "=", "tf", ".", "constant", "(", "\"GZIP\"", ")", "if", "gzipped", "else", "None", ",", "buffer_size", "=", "16", "*", "1000", "*", "1000", ")", "dataset", "=", "dataset", ".", "flat_map", "(", "_load_records", ")", "def", "_parse_example", "(", "ex_ser", ")", ":", "return", "tf", ".", "parse_single_example", "(", "ex_ser", ",", "example_spec", ")", "if", "example_spec", ":", "dataset", "=", "dataset", ".", "map", "(", "_parse_example", ",", "num_parallel_calls", "=", "32", ")", "dataset", "=", "dataset", ".", "prefetch", "(", "100", ")", "record_it", "=", "dataset", ".", "make_one_shot_iterator", "(", ")", ".", "get_next", "(", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "while", "True", ":", "try", ":", "ex", "=", "sess", ".", "run", "(", "record_it", ")", "yield", "ex", "except", "tf", ".", "errors", ".", "OutOfRangeError", ":", "break" ]
Yields records from TFRecord files. Args: filenames: list<str>, list of TFRecord filenames to read from. gzipped: bool, whether the TFRecord files are gzip-encoded. example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>, if provided, will parse each record as a tensorflow.Example proto. Yields: Records (or parsed Examples, if example_spec is provided) from files.
[ "Yields", "records", "from", "TFRecord", "files", "." ]
python
train
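The tensor2tensor tfrecord_iterator record above builds a tf.data pipeline over the given files and yields records from a session loop (TensorFlow 1.x APIs). A usage sketch, assuming T2T-style examples with integer 'inputs' and 'targets' features; the file name and feature spec are illustrative:

```python
import tensorflow as tf
from tensor2tensor.data_generators import generator_utils

# Parse each serialized tf.Example into sparse int64 features.
example_spec = {
    "inputs": tf.VarLenFeature(tf.int64),
    "targets": tf.VarLenFeature(tf.int64),
}

for i, example in enumerate(generator_utils.tfrecord_iterator(
        ["translate_ende-train-00000-of-00100"], example_spec=example_spec)):
    print(example["inputs"].values[:10])  # first token ids of this record
    if i >= 2:  # just peek at the first few records
        break
```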
etingof/pysmi
pysmi/lexer/smi.py
https://github.com/etingof/pysmi/blob/379a0a384c81875731be51a054bdacced6260fd8/pysmi/lexer/smi.py#L256-L259
def t_QUOTED_STRING(self, t): r'\"[^\"]*\"' t.lexer.lineno += len(re.findall(r'\r\n|\n|\r', t.value)) return t
[ "def", "t_QUOTED_STRING", "(", "self", ",", "t", ")", ":", "t", ".", "lexer", ".", "lineno", "+=", "len", "(", "re", ".", "findall", "(", "r'\\r\\n|\\n|\\r'", ",", "t", ".", "value", ")", ")", "return", "t" ]
r'\"[^\"]*\"
[ "r", "\\", "[", "^", "\\", "]", "*", "\\" ]
python
valid
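The pysmi record above is a PLY token rule: the raw-string docstring is the token's regular expression, and the body keeps the lexer's line counter in sync by counting newlines inside the matched quoted string. The newline-counting idiom on its own; the sample text is illustrative:

```python
import re

text = '"a multi-line\nSMI quoted\r\nstring"'
# Count every newline flavour the lexer may see inside the quoted string.
newlines = len(re.findall(r'\r\n|\n|\r', text))
print(newlines)  # 2
```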
bryanwweber/thermohw
thermohw/filters.py
https://github.com/bryanwweber/thermohw/blob/b6be276c14f8adf6ae23f5498065de74f868ccaa/thermohw/filters.py#L25-L54
def div_filter(key: str, value: list, format: str, meta: Any) -> Optional[list]: """Filter the JSON ``value`` for alert divs. Arguments --------- key Key of the structure value Values in the structure format Output format of the processing meta Meta information """ if key != "Div" or format != "latex": return None [[_, classes, _], contents] = value try: alert_type = [name.split("-")[1] for name in classes if "-" in name][0] except IndexError: return None if alert_type not in ALLOWED_ALERT_TYPES.__members__: return None filtered = [RawBlock("latex", rf"\begin{{{alert_type}box}}")] filtered.extend(contents) filtered.append(RawBlock("latex", rf"\end{{{alert_type}box}}")) return filtered
[ "def", "div_filter", "(", "key", ":", "str", ",", "value", ":", "list", ",", "format", ":", "str", ",", "meta", ":", "Any", ")", "->", "Optional", "[", "list", "]", ":", "if", "key", "!=", "\"Div\"", "or", "format", "!=", "\"latex\"", ":", "return", "None", "[", "[", "_", ",", "classes", ",", "_", "]", ",", "contents", "]", "=", "value", "try", ":", "alert_type", "=", "[", "name", ".", "split", "(", "\"-\"", ")", "[", "1", "]", "for", "name", "in", "classes", "if", "\"-\"", "in", "name", "]", "[", "0", "]", "except", "IndexError", ":", "return", "None", "if", "alert_type", "not", "in", "ALLOWED_ALERT_TYPES", ".", "__members__", ":", "return", "None", "filtered", "=", "[", "RawBlock", "(", "\"latex\"", ",", "rf\"\\begin{{{alert_type}box}}\"", ")", "]", "filtered", ".", "extend", "(", "contents", ")", "filtered", ".", "append", "(", "RawBlock", "(", "\"latex\"", ",", "rf\"\\end{{{alert_type}box}}\"", ")", ")", "return", "filtered" ]
Filter the JSON ``value`` for alert divs. Arguments --------- key Key of the structure value Values in the structure format Output format of the processing meta Meta information
[ "Filter", "the", "JSON", "value", "for", "alert", "divs", "." ]
python
train
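The thermohw div_filter record above follows the pandocfilters action signature (key, value, format, meta) and, for LaTeX output, rewrites alert-* divs into custom box environments. Wiring such an action into a runnable filter script typically looks like the sketch below; thermohw may expose its own entry point:

```python
#!/usr/bin/env python
"""Pandoc JSON filter that wraps alert divs in LaTeX box environments."""
from pandocfilters import toJSONFilter
from thermohw.filters import div_filter

if __name__ == "__main__":
    # pandoc --filter ./alert_boxes.py input.md -o output.tex
    toJSONFilter(div_filter)
```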
Yubico/yubikey-manager
ykman/cli/piv.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/piv.py#L677-L711
def change_puk(ctx, puk, new_puk): """ Change the PUK code. If the PIN is lost or blocked it can be reset using a PUK. The PUK must be between 6 and 8 characters long, and supports any type of alphanumeric characters. """ controller = ctx.obj['controller'] if not puk: puk = _prompt_pin(ctx, prompt='Enter your current PUK') if not new_puk: new_puk = click.prompt( 'Enter your new PUK', default='', hide_input=True, show_default=False, confirmation_prompt=True, err=True) if not _valid_pin_length(puk): ctx.fail('Current PUK must be between 6 and 8 characters long.') if not _valid_pin_length(new_puk): ctx.fail('New PUK must be between 6 and 8 characters long.') try: controller.change_puk(puk, new_puk) click.echo('New PUK set.') except AuthenticationBlocked as e: logger.debug('PUK is blocked.', exc_info=e) ctx.fail('PUK is blocked.') except WrongPuk as e: logger.debug( 'Failed to change PUK, %d tries left', e.tries_left, exc_info=e) ctx.fail('PUK change failed - %d tries left.' % e.tries_left)
[ "def", "change_puk", "(", "ctx", ",", "puk", ",", "new_puk", ")", ":", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "if", "not", "puk", ":", "puk", "=", "_prompt_pin", "(", "ctx", ",", "prompt", "=", "'Enter your current PUK'", ")", "if", "not", "new_puk", ":", "new_puk", "=", "click", ".", "prompt", "(", "'Enter your new PUK'", ",", "default", "=", "''", ",", "hide_input", "=", "True", ",", "show_default", "=", "False", ",", "confirmation_prompt", "=", "True", ",", "err", "=", "True", ")", "if", "not", "_valid_pin_length", "(", "puk", ")", ":", "ctx", ".", "fail", "(", "'Current PUK must be between 6 and 8 characters long.'", ")", "if", "not", "_valid_pin_length", "(", "new_puk", ")", ":", "ctx", ".", "fail", "(", "'New PUK must be between 6 and 8 characters long.'", ")", "try", ":", "controller", ".", "change_puk", "(", "puk", ",", "new_puk", ")", "click", ".", "echo", "(", "'New PUK set.'", ")", "except", "AuthenticationBlocked", "as", "e", ":", "logger", ".", "debug", "(", "'PUK is blocked.'", ",", "exc_info", "=", "e", ")", "ctx", ".", "fail", "(", "'PUK is blocked.'", ")", "except", "WrongPuk", "as", "e", ":", "logger", ".", "debug", "(", "'Failed to change PUK, %d tries left'", ",", "e", ".", "tries_left", ",", "exc_info", "=", "e", ")", "ctx", ".", "fail", "(", "'PUK change failed - %d tries left.'", "%", "e", ".", "tries_left", ")" ]
Change the PUK code. If the PIN is lost or blocked it can be reset using a PUK. The PUK must be between 6 and 8 characters long, and supports any type of alphanumeric characters.
[ "Change", "the", "PUK", "code", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xquerybuilderwidget/xquerylinewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xquerybuilderwidget/xquerylinewidget.py#L263-L276
def setTerms( self, terms ): """ Sets the term options for this widget. :param terms | [<str>, ..] """ self.uiTermDDL.blockSignals(True) term = self.uiTermDDL.currentText() self.uiTermDDL.clear() self.uiTermDDL.addItems(terms) self.uiTermDDL.setCurrentIndex(self.uiTermDDL.findText(term)) self.applyRule() self.uiTermDDL.blockSignals(False)
[ "def", "setTerms", "(", "self", ",", "terms", ")", ":", "self", ".", "uiTermDDL", ".", "blockSignals", "(", "True", ")", "term", "=", "self", ".", "uiTermDDL", ".", "currentText", "(", ")", "self", ".", "uiTermDDL", ".", "clear", "(", ")", "self", ".", "uiTermDDL", ".", "addItems", "(", "terms", ")", "self", ".", "uiTermDDL", ".", "setCurrentIndex", "(", "self", ".", "uiTermDDL", ".", "findText", "(", "term", ")", ")", "self", ".", "applyRule", "(", ")", "self", ".", "uiTermDDL", ".", "blockSignals", "(", "False", ")" ]
Sets the term options for this widget. :param terms | [<str>, ..]
[ "Sets", "the", "term", "options", "for", "this", "widget", ".", ":", "param", "terms", "|", "[", "<str", ">", "..", "]" ]
python
train
lepture/flask-oauthlib
flask_oauthlib/provider/oauth1.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth1.py#L673-L683
def get_default_realms(self, client_key, request): """Default realms of the client.""" log.debug('Get realms for %r', client_key) if not request.client: request.client = self._clientgetter(client_key=client_key) client = request.client if hasattr(client, 'default_realms'): return client.default_realms return []
[ "def", "get_default_realms", "(", "self", ",", "client_key", ",", "request", ")", ":", "log", ".", "debug", "(", "'Get realms for %r'", ",", "client_key", ")", "if", "not", "request", ".", "client", ":", "request", ".", "client", "=", "self", ".", "_clientgetter", "(", "client_key", "=", "client_key", ")", "client", "=", "request", ".", "client", "if", "hasattr", "(", "client", ",", "'default_realms'", ")", ":", "return", "client", ".", "default_realms", "return", "[", "]" ]
Default realms of the client.
[ "Default", "realms", "of", "the", "client", "." ]
python
test
Unidata/siphon
siphon/cdmr/ncstream.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L201-L221
def process_vlen(data_header, array): """Process vlen coming back from NCStream v2. This takes the array of values and slices into an object array, with entries containing the appropriate pieces of the original array. Sizes are controlled by the passed in `data_header`. Parameters ---------- data_header : Header array : :class:`numpy.ndarray` Returns ------- ndarray object array containing sub-sequences from the original primitive array """ source = iter(array) return np.array([np.fromiter(itertools.islice(source, size), dtype=array.dtype) for size in data_header.vlens])
[ "def", "process_vlen", "(", "data_header", ",", "array", ")", ":", "source", "=", "iter", "(", "array", ")", "return", "np", ".", "array", "(", "[", "np", ".", "fromiter", "(", "itertools", ".", "islice", "(", "source", ",", "size", ")", ",", "dtype", "=", "array", ".", "dtype", ")", "for", "size", "in", "data_header", ".", "vlens", "]", ")" ]
Process vlen coming back from NCStream v2. This takes the array of values and slices into an object array, with entries containing the appropriate pieces of the original array. Sizes are controlled by the passed in `data_header`. Parameters ---------- data_header : Header array : :class:`numpy.ndarray` Returns ------- ndarray object array containing sub-sequences from the original primitive array
[ "Process", "vlen", "coming", "back", "from", "NCStream", "v2", "." ]
python
train
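The siphon process_vlen record above slices a flat primitive array back into variable-length pieces using the sizes recorded in the data header. The core slicing idea, stripped of the NCStream types; the sizes and data are made up:

```python
import itertools
import numpy as np

flat = np.array([1, 2, 3, 4, 5, 6], dtype=np.int32)
vlens = [2, 1, 3]  # lengths of each variable-length entry

source = iter(flat)
# dtype=object keeps the ragged sub-arrays as separate elements.
ragged = np.array(
    [np.fromiter(itertools.islice(source, size), dtype=flat.dtype)
     for size in vlens],
    dtype=object,
)
print(ragged[0], ragged[2])  # [1 2] [4 5 6]
```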
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/global_lc_holder/linecard/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/global_lc_holder/linecard/__init__.py#L92-L113
def _set_linecards(self, v, load=False): """ Setter method for linecards, mapped from YANG variable /rbridge_id/global_lc_holder/linecard/linecards (list) If this variable is read-only (config: false) in the source YANG file, then _set_linecards is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_linecards() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("linecardName",linecards.linecards, yang_name="linecards", rest_name="linecards", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='linecardName', extensions={u'tailf-common': {u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'linecardManagement'}}), is_container='list', yang_name="linecards", rest_name="linecards", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'linecardManagement'}}, namespace='urn:brocade.com:mgmt:brocade-linecard-management', defining_module='brocade-linecard-management', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """linecards must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("linecardName",linecards.linecards, yang_name="linecards", rest_name="linecards", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='linecardName', extensions={u'tailf-common': {u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'linecardManagement'}}), is_container='list', yang_name="linecards", rest_name="linecards", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'linecardManagement'}}, namespace='urn:brocade.com:mgmt:brocade-linecard-management', defining_module='brocade-linecard-management', yang_type='list', is_config=True)""", }) self.__linecards = t if hasattr(self, '_set'): self._set()
[ "def", "_set_linecards", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"linecardName\"", ",", "linecards", ".", "linecards", ",", "yang_name", "=", "\"linecards\"", ",", "rest_name", "=", "\"linecards\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'linecardName'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'callpoint'", ":", "u'linecardManagement'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"linecards\"", ",", "rest_name", "=", "\"linecards\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'cli-sequence-commands'", ":", "None", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'callpoint'", ":", "u'linecardManagement'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-linecard-management'", ",", "defining_module", "=", "'brocade-linecard-management'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"linecards must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"linecardName\",linecards.linecards, yang_name=\"linecards\", rest_name=\"linecards\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='linecardName', extensions={u'tailf-common': {u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'linecardManagement'}}), is_container='list', yang_name=\"linecards\", rest_name=\"linecards\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'linecardManagement'}}, namespace='urn:brocade.com:mgmt:brocade-linecard-management', defining_module='brocade-linecard-management', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__linecards", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for linecards, mapped from YANG variable /rbridge_id/global_lc_holder/linecard/linecards (list) If this variable is read-only (config: false) in the source YANG file, then _set_linecards is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_linecards() directly.
[ "Setter", "method", "for", "linecards", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "global_lc_holder", "/", "linecard", "/", "linecards", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_linecards", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_linecards", "()", "directly", "." ]
python
train
diging/tethne
tethne/model/corpus/mallet.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L151-L161
def _generate_corpus(self): """ Writes a corpus to disk amenable to MALLET topic modeling. """ target = self.temp + 'mallet' paths = write_documents(self.corpus, target, self.featureset_name, ['date', 'title']) self.corpus_path, self.metapath = paths self._export_corpus()
[ "def", "_generate_corpus", "(", "self", ")", ":", "target", "=", "self", ".", "temp", "+", "'mallet'", "paths", "=", "write_documents", "(", "self", ".", "corpus", ",", "target", ",", "self", ".", "featureset_name", ",", "[", "'date'", ",", "'title'", "]", ")", "self", ".", "corpus_path", ",", "self", ".", "metapath", "=", "paths", "self", ".", "_export_corpus", "(", ")" ]
Writes a corpus to disk amenable to MALLET topic modeling.
[ "Writes", "a", "corpus", "to", "disk", "amenable", "to", "MALLET", "topic", "modeling", "." ]
python
train
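The entry above delegates to a write_documents() helper that is not shown here. As a rough, hypothetical sketch of what preparing a MALLET-importable corpus involves, the snippet below writes one document per line in the "<id> <label> <text>" form that `mallet import-file` accepts; the real helper's signature and output may differ.

import os
import tempfile


def write_mallet_corpus(docs, target_dir):
    """Write docs ({doc_id: (label, text)}) as one MALLET instance per line."""
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    corpus_path = os.path.join(target_dir, 'corpus.txt')
    with open(corpus_path, 'w') as handle:
        for doc_id, (label, text) in docs.items():
            handle.write('{}\t{}\t{}\n'.format(doc_id, label, text))
    return corpus_path


corpus_path = write_mallet_corpus(
    {'paper1': ('1995', 'topic models for bibliographic corpora'),
     'paper2': ('2001', 'citation networks and coauthorship structure')},
    tempfile.mkdtemp())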
pvlib/pvlib-python
pvlib/location.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/location.py#L282-L319
def get_sun_rise_set_transit(self, times, method='pyephem', **kwargs): """ Calculate sunrise, sunset and transit times. Parameters ---------- times : DatetimeIndex Must be localized to the Location method : str, default 'pyephem' 'pyephem', 'spa', or 'geometric' kwargs are passed to the relevant functions. See solarposition.sun_rise_set_transit_<method> for details. Returns ------- result : DataFrame Column names are: ``sunrise, sunset, transit``. """ if method == 'pyephem': result = solarposition.sun_rise_set_transit_ephem( times, self.latitude, self.longitude, **kwargs) elif method == 'spa': result = solarposition.sun_rise_set_transit_spa( times, self.latitude, self.longitude, **kwargs) elif method == 'geometric': sr, ss, tr = solarposition.sun_rise_set_transit_geometric( times, self.latitude, self.longitude, **kwargs) result = pd.DataFrame(index=times, data={'sunrise': sr, 'sunset': ss, 'transit': tr}) else: raise ValueError('{} is not a valid method. Must be ' 'one of pyephem, spa, geometric' .format(method)) return result
[ "def", "get_sun_rise_set_transit", "(", "self", ",", "times", ",", "method", "=", "'pyephem'", ",", "*", "*", "kwargs", ")", ":", "if", "method", "==", "'pyephem'", ":", "result", "=", "solarposition", ".", "sun_rise_set_transit_ephem", "(", "times", ",", "self", ".", "latitude", ",", "self", ".", "longitude", ",", "*", "*", "kwargs", ")", "elif", "method", "==", "'spa'", ":", "result", "=", "solarposition", ".", "sun_rise_set_transit_spa", "(", "times", ",", "self", ".", "latitude", ",", "self", ".", "longitude", ",", "*", "*", "kwargs", ")", "elif", "method", "==", "'geometric'", ":", "sr", ",", "ss", ",", "tr", "=", "solarposition", ".", "sun_rise_set_transit_geometric", "(", "times", ",", "self", ".", "latitude", ",", "self", ".", "longitude", ",", "*", "*", "kwargs", ")", "result", "=", "pd", ".", "DataFrame", "(", "index", "=", "times", ",", "data", "=", "{", "'sunrise'", ":", "sr", ",", "'sunset'", ":", "ss", ",", "'transit'", ":", "tr", "}", ")", "else", ":", "raise", "ValueError", "(", "'{} is not a valid method. Must be '", "'one of pyephem, spa, geometric'", ".", "format", "(", "method", ")", ")", "return", "result" ]
Calculate sunrise, sunset and transit times. Parameters ---------- times : DatetimeIndex Must be localized to the Location method : str, default 'pyephem' 'pyephem', 'spa', or 'geometric' kwargs are passed to the relevant functions. See solarposition.sun_rise_set_transit_<method> for details. Returns ------- result : DataFrame Column names are: ``sunrise, sunset, transit``.
[ "Calculate", "sunrise", "sunset", "and", "transit", "times", "." ]
python
train
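A short usage sketch for the method above, assuming pvlib and pandas are installed; the coordinates and dates are arbitrary, and the 'pyephem' method would additionally require the ephem package.

import pandas as pd
from pvlib.location import Location

# Location(latitude, longitude, ...); the times must be tz-localized.
tucson = Location(32.2, -110.9, tz='US/Arizona', altitude=700, name='Tucson')
times = pd.date_range('2019-06-01', periods=3, freq='D', tz=tucson.tz)

sun_times = tucson.get_sun_rise_set_transit(times, method='spa')
print(sun_times[['sunrise', 'sunset', 'transit']])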
berkeley-cocosci/Wallace
examples/rogers/experiment.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/examples/rogers/experiment.py#L40-L61
def setup(self): """First time setup.""" super(RogersExperiment, self).setup() for net in random.sample(self.networks(role="experiment"), self.catch_repeats): net.role = "catch" for net in self.networks(): source = RogersSource(network=net) source.create_information() if net.role == "practice": env = RogersEnvironment(network=net) env.create_state(proportion=self.practice_difficulty) if net.role == "catch": env = RogersEnvironment(network=net) env.create_state(proportion=self.catch_difficulty) if net.role == "experiment": difficulty = self.difficulties[self.networks(role="experiment") .index(net)] env = RogersEnvironment(network=net) env.create_state(proportion=difficulty)
[ "def", "setup", "(", "self", ")", ":", "super", "(", "RogersExperiment", ",", "self", ")", ".", "setup", "(", ")", "for", "net", "in", "random", ".", "sample", "(", "self", ".", "networks", "(", "role", "=", "\"experiment\"", ")", ",", "self", ".", "catch_repeats", ")", ":", "net", ".", "role", "=", "\"catch\"", "for", "net", "in", "self", ".", "networks", "(", ")", ":", "source", "=", "RogersSource", "(", "network", "=", "net", ")", "source", ".", "create_information", "(", ")", "if", "net", ".", "role", "==", "\"practice\"", ":", "env", "=", "RogersEnvironment", "(", "network", "=", "net", ")", "env", ".", "create_state", "(", "proportion", "=", "self", ".", "practice_difficulty", ")", "if", "net", ".", "role", "==", "\"catch\"", ":", "env", "=", "RogersEnvironment", "(", "network", "=", "net", ")", "env", ".", "create_state", "(", "proportion", "=", "self", ".", "catch_difficulty", ")", "if", "net", ".", "role", "==", "\"experiment\"", ":", "difficulty", "=", "self", ".", "difficulties", "[", "self", ".", "networks", "(", "role", "=", "\"experiment\"", ")", ".", "index", "(", "net", ")", "]", "env", "=", "RogersEnvironment", "(", "network", "=", "net", ")", "env", ".", "create_state", "(", "proportion", "=", "difficulty", ")" ]
First time setup.
[ "First", "time", "setup", "." ]
python
train
Microsoft/nni
tools/nni_cmd/nnictl.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/nnictl.py#L46-L198
def parse_args(): '''Definite the arguments users need to follow and input''' parser = argparse.ArgumentParser(prog='nnictl', description='use nnictl command to control nni experiments') parser.add_argument('--version', '-v', action='store_true') parser.set_defaults(func=nni_info) # create subparsers for args with sub values subparsers = parser.add_subparsers() # parse start command parser_start = subparsers.add_parser('create', help='create a new experiment') parser_start.add_argument('--config', '-c', required=True, dest='config', help='the path of yaml config file') parser_start.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', help='the port of restful server') parser_start.add_argument('--debug', '-d', action='store_true', help=' set debug mode') parser_start.set_defaults(func=create_experiment) # parse resume command parser_resume = subparsers.add_parser('resume', help='resume a new experiment') parser_resume.add_argument('id', nargs='?', help='The id of the experiment you want to resume') parser_resume.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', help='the port of restful server') parser_resume.add_argument('--debug', '-d', action='store_true', help=' set debug mode') parser_resume.set_defaults(func=resume_experiment) # parse update command parser_updater = subparsers.add_parser('update', help='update the experiment') #add subparsers for parser_updater parser_updater_subparsers = parser_updater.add_subparsers() parser_updater_searchspace = parser_updater_subparsers.add_parser('searchspace', help='update searchspace') parser_updater_searchspace.add_argument('id', nargs='?', help='the id of experiment') parser_updater_searchspace.add_argument('--filename', '-f', required=True) parser_updater_searchspace.set_defaults(func=update_searchspace) parser_updater_concurrency = parser_updater_subparsers.add_parser('concurrency', help='update concurrency') parser_updater_concurrency.add_argument('id', nargs='?', help='the id of experiment') parser_updater_concurrency.add_argument('--value', '-v', required=True) parser_updater_concurrency.set_defaults(func=update_concurrency) parser_updater_duration = parser_updater_subparsers.add_parser('duration', help='update duration') parser_updater_duration.add_argument('id', nargs='?', help='the id of experiment') parser_updater_duration.add_argument('--value', '-v', required=True, help='the unit of time should in {\'s\', \'m\', \'h\', \'d\'}') parser_updater_duration.set_defaults(func=update_duration) parser_updater_trialnum = parser_updater_subparsers.add_parser('trialnum', help='update maxtrialnum') parser_updater_trialnum.add_argument('--id', '-i', dest='id', help='the id of experiment') parser_updater_trialnum.add_argument('--value', '-v', required=True) parser_updater_trialnum.set_defaults(func=update_trialnum) #parse stop command parser_stop = subparsers.add_parser('stop', help='stop the experiment') parser_stop.add_argument('id', nargs='?', help='the id of experiment, use \'all\' to stop all running experiments') parser_stop.set_defaults(func=stop_experiment) #parse trial command parser_trial = subparsers.add_parser('trial', help='get trial information') #add subparsers for parser_trial parser_trial_subparsers = parser_trial.add_subparsers() parser_trial_ls = parser_trial_subparsers.add_parser('ls', help='list trial jobs') parser_trial_ls.add_argument('id', nargs='?', help='the id of experiment') parser_trial_ls.set_defaults(func=trial_ls) parser_trial_kill = parser_trial_subparsers.add_parser('kill', 
help='kill trial jobs') parser_trial_kill.add_argument('id', nargs='?', help='the id of experiment') parser_trial_kill.add_argument('--trial_id', '-T', required=True, dest='trial_id', help='the id of trial to be killed') parser_trial_kill.set_defaults(func=trial_kill) #parse experiment command parser_experiment = subparsers.add_parser('experiment', help='get experiment information') #add subparsers for parser_experiment parser_experiment_subparsers = parser_experiment.add_subparsers() parser_experiment_show = parser_experiment_subparsers.add_parser('show', help='show the information of experiment') parser_experiment_show.add_argument('id', nargs='?', help='the id of experiment') parser_experiment_show.set_defaults(func=list_experiment) parser_experiment_status = parser_experiment_subparsers.add_parser('status', help='show the status of experiment') parser_experiment_status.add_argument('id', nargs='?', help='the id of experiment') parser_experiment_status.set_defaults(func=experiment_status) parser_experiment_list = parser_experiment_subparsers.add_parser('list', help='list all of running experiment ids') parser_experiment_list.add_argument('all', nargs='?', help='list all of experiments') parser_experiment_list.set_defaults(func=experiment_list) #import tuning data parser_import_data = parser_experiment_subparsers.add_parser('import', help='import additional data') parser_import_data.add_argument('id', nargs='?', help='the id of experiment') parser_import_data.add_argument('--filename', '-f', required=True) parser_import_data.set_defaults(func=import_data) #export trial data parser_trial_export = parser_experiment_subparsers.add_parser('export', help='export trial job results to csv or json') parser_trial_export.add_argument('id', nargs='?', help='the id of experiment') parser_trial_export.add_argument('--type', '-t', choices=['json', 'csv'], required=True, dest='type', help='target file type') parser_trial_export.add_argument('--filename', '-f', required=True, dest='path', help='target file path') parser_trial_export.set_defaults(func=export_trials_data) #TODO:finish webui function #parse board command parser_webui = subparsers.add_parser('webui', help='get web ui information') #add subparsers for parser_board parser_webui_subparsers = parser_webui.add_subparsers() parser_webui_url = parser_webui_subparsers.add_parser('url', help='show the url of web ui') parser_webui_url.add_argument('id', nargs='?', help='the id of experiment') parser_webui_url.set_defaults(func=webui_url) #parse config command parser_config = subparsers.add_parser('config', help='get config information') parser_config_subparsers = parser_config.add_subparsers() parser_config_show = parser_config_subparsers.add_parser('show', help='show the information of config') parser_config_show.add_argument('id', nargs='?', help='the id of experiment') parser_config_show.set_defaults(func=get_config) #parse log command parser_log = subparsers.add_parser('log', help='get log information') # add subparsers for parser_log parser_log_subparsers = parser_log.add_subparsers() parser_log_stdout = parser_log_subparsers.add_parser('stdout', help='get stdout information') parser_log_stdout.add_argument('id', nargs='?', help='the id of experiment') parser_log_stdout.add_argument('--tail', '-T', dest='tail', type=int, help='get tail -100 content of stdout') parser_log_stdout.add_argument('--head', '-H', dest='head', type=int, help='get head -100 content of stdout') parser_log_stdout.add_argument('--path', action='store_true', default=False, 
help='get the path of stdout file') parser_log_stdout.set_defaults(func=log_stdout) parser_log_stderr = parser_log_subparsers.add_parser('stderr', help='get stderr information') parser_log_stderr.add_argument('id', nargs='?', help='the id of experiment') parser_log_stderr.add_argument('--tail', '-T', dest='tail', type=int, help='get tail -100 content of stderr') parser_log_stderr.add_argument('--head', '-H', dest='head', type=int, help='get head -100 content of stderr') parser_log_stderr.add_argument('--path', action='store_true', default=False, help='get the path of stderr file') parser_log_stderr.set_defaults(func=log_stderr) parser_log_trial = parser_log_subparsers.add_parser('trial', help='get trial log path') parser_log_trial.add_argument('id', nargs='?', help='the id of experiment') parser_log_trial.add_argument('--trial_id', '-T', dest='trial_id', help='find trial log path by id') parser_log_trial.set_defaults(func=log_trial) #parse package command parser_package = subparsers.add_parser('package', help='control nni tuner and assessor packages') # add subparsers for parser_package parser_package_subparsers = parser_package.add_subparsers() parser_package_install = parser_package_subparsers.add_parser('install', help='install packages') parser_package_install.add_argument('--name', '-n', dest='name', help='package name to be installed') parser_package_install.set_defaults(func=package_install) parser_package_show = parser_package_subparsers.add_parser('show', help='show the information of packages') parser_package_show.set_defaults(func=package_show) #parse tensorboard command parser_tensorboard = subparsers.add_parser('tensorboard', help='manage tensorboard') parser_tensorboard_subparsers = parser_tensorboard.add_subparsers() parser_tensorboard_start = parser_tensorboard_subparsers.add_parser('start', help='start tensorboard') parser_tensorboard_start.add_argument('id', nargs='?', help='the id of experiment') parser_tensorboard_start.add_argument('--trial_id', '-T', dest='trial_id', help='the id of trial') parser_tensorboard_start.add_argument('--port', dest='port', default=6006, help='the port to start tensorboard') parser_tensorboard_start.set_defaults(func=start_tensorboard) parser_tensorboard_start = parser_tensorboard_subparsers.add_parser('stop', help='stop tensorboard') parser_tensorboard_start.add_argument('id', nargs='?', help='the id of experiment') parser_tensorboard_start.set_defaults(func=stop_tensorboard) #parse top command parser_top = subparsers.add_parser('top', help='monitor the experiment') parser_top.add_argument('--time', '-t', dest='time', type=int, default=3, help='the time interval to update the experiment status, ' \ 'the unit is second') parser_top.set_defaults(func=monitor_experiment) args = parser.parse_args() args.func(args)
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'nnictl'", ",", "description", "=", "'use nnictl command to control nni experiments'", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "'-v'", ",", "action", "=", "'store_true'", ")", "parser", ".", "set_defaults", "(", "func", "=", "nni_info", ")", "# create subparsers for args with sub values", "subparsers", "=", "parser", ".", "add_subparsers", "(", ")", "# parse start command", "parser_start", "=", "subparsers", ".", "add_parser", "(", "'create'", ",", "help", "=", "'create a new experiment'", ")", "parser_start", ".", "add_argument", "(", "'--config'", ",", "'-c'", ",", "required", "=", "True", ",", "dest", "=", "'config'", ",", "help", "=", "'the path of yaml config file'", ")", "parser_start", ".", "add_argument", "(", "'--port'", ",", "'-p'", ",", "default", "=", "DEFAULT_REST_PORT", ",", "dest", "=", "'port'", ",", "help", "=", "'the port of restful server'", ")", "parser_start", ".", "add_argument", "(", "'--debug'", ",", "'-d'", ",", "action", "=", "'store_true'", ",", "help", "=", "' set debug mode'", ")", "parser_start", ".", "set_defaults", "(", "func", "=", "create_experiment", ")", "# parse resume command", "parser_resume", "=", "subparsers", ".", "add_parser", "(", "'resume'", ",", "help", "=", "'resume a new experiment'", ")", "parser_resume", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'The id of the experiment you want to resume'", ")", "parser_resume", ".", "add_argument", "(", "'--port'", ",", "'-p'", ",", "default", "=", "DEFAULT_REST_PORT", ",", "dest", "=", "'port'", ",", "help", "=", "'the port of restful server'", ")", "parser_resume", ".", "add_argument", "(", "'--debug'", ",", "'-d'", ",", "action", "=", "'store_true'", ",", "help", "=", "' set debug mode'", ")", "parser_resume", ".", "set_defaults", "(", "func", "=", "resume_experiment", ")", "# parse update command", "parser_updater", "=", "subparsers", ".", "add_parser", "(", "'update'", ",", "help", "=", "'update the experiment'", ")", "#add subparsers for parser_updater", "parser_updater_subparsers", "=", "parser_updater", ".", "add_subparsers", "(", ")", "parser_updater_searchspace", "=", "parser_updater_subparsers", ".", "add_parser", "(", "'searchspace'", ",", "help", "=", "'update searchspace'", ")", "parser_updater_searchspace", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_updater_searchspace", ".", "add_argument", "(", "'--filename'", ",", "'-f'", ",", "required", "=", "True", ")", "parser_updater_searchspace", ".", "set_defaults", "(", "func", "=", "update_searchspace", ")", "parser_updater_concurrency", "=", "parser_updater_subparsers", ".", "add_parser", "(", "'concurrency'", ",", "help", "=", "'update concurrency'", ")", "parser_updater_concurrency", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_updater_concurrency", ".", "add_argument", "(", "'--value'", ",", "'-v'", ",", "required", "=", "True", ")", "parser_updater_concurrency", ".", "set_defaults", "(", "func", "=", "update_concurrency", ")", "parser_updater_duration", "=", "parser_updater_subparsers", ".", "add_parser", "(", "'duration'", ",", "help", "=", "'update duration'", ")", "parser_updater_duration", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_updater_duration", ".", 
"add_argument", "(", "'--value'", ",", "'-v'", ",", "required", "=", "True", ",", "help", "=", "'the unit of time should in {\\'s\\', \\'m\\', \\'h\\', \\'d\\'}'", ")", "parser_updater_duration", ".", "set_defaults", "(", "func", "=", "update_duration", ")", "parser_updater_trialnum", "=", "parser_updater_subparsers", ".", "add_parser", "(", "'trialnum'", ",", "help", "=", "'update maxtrialnum'", ")", "parser_updater_trialnum", ".", "add_argument", "(", "'--id'", ",", "'-i'", ",", "dest", "=", "'id'", ",", "help", "=", "'the id of experiment'", ")", "parser_updater_trialnum", ".", "add_argument", "(", "'--value'", ",", "'-v'", ",", "required", "=", "True", ")", "parser_updater_trialnum", ".", "set_defaults", "(", "func", "=", "update_trialnum", ")", "#parse stop command", "parser_stop", "=", "subparsers", ".", "add_parser", "(", "'stop'", ",", "help", "=", "'stop the experiment'", ")", "parser_stop", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment, use \\'all\\' to stop all running experiments'", ")", "parser_stop", ".", "set_defaults", "(", "func", "=", "stop_experiment", ")", "#parse trial command", "parser_trial", "=", "subparsers", ".", "add_parser", "(", "'trial'", ",", "help", "=", "'get trial information'", ")", "#add subparsers for parser_trial", "parser_trial_subparsers", "=", "parser_trial", ".", "add_subparsers", "(", ")", "parser_trial_ls", "=", "parser_trial_subparsers", ".", "add_parser", "(", "'ls'", ",", "help", "=", "'list trial jobs'", ")", "parser_trial_ls", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_trial_ls", ".", "set_defaults", "(", "func", "=", "trial_ls", ")", "parser_trial_kill", "=", "parser_trial_subparsers", ".", "add_parser", "(", "'kill'", ",", "help", "=", "'kill trial jobs'", ")", "parser_trial_kill", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_trial_kill", ".", "add_argument", "(", "'--trial_id'", ",", "'-T'", ",", "required", "=", "True", ",", "dest", "=", "'trial_id'", ",", "help", "=", "'the id of trial to be killed'", ")", "parser_trial_kill", ".", "set_defaults", "(", "func", "=", "trial_kill", ")", "#parse experiment command", "parser_experiment", "=", "subparsers", ".", "add_parser", "(", "'experiment'", ",", "help", "=", "'get experiment information'", ")", "#add subparsers for parser_experiment", "parser_experiment_subparsers", "=", "parser_experiment", ".", "add_subparsers", "(", ")", "parser_experiment_show", "=", "parser_experiment_subparsers", ".", "add_parser", "(", "'show'", ",", "help", "=", "'show the information of experiment'", ")", "parser_experiment_show", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_experiment_show", ".", "set_defaults", "(", "func", "=", "list_experiment", ")", "parser_experiment_status", "=", "parser_experiment_subparsers", ".", "add_parser", "(", "'status'", ",", "help", "=", "'show the status of experiment'", ")", "parser_experiment_status", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_experiment_status", ".", "set_defaults", "(", "func", "=", "experiment_status", ")", "parser_experiment_list", "=", "parser_experiment_subparsers", ".", "add_parser", "(", "'list'", ",", "help", "=", "'list all of running experiment ids'", ")", "parser_experiment_list", ".", "add_argument", "(", 
"'all'", ",", "nargs", "=", "'?'", ",", "help", "=", "'list all of experiments'", ")", "parser_experiment_list", ".", "set_defaults", "(", "func", "=", "experiment_list", ")", "#import tuning data", "parser_import_data", "=", "parser_experiment_subparsers", ".", "add_parser", "(", "'import'", ",", "help", "=", "'import additional data'", ")", "parser_import_data", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_import_data", ".", "add_argument", "(", "'--filename'", ",", "'-f'", ",", "required", "=", "True", ")", "parser_import_data", ".", "set_defaults", "(", "func", "=", "import_data", ")", "#export trial data", "parser_trial_export", "=", "parser_experiment_subparsers", ".", "add_parser", "(", "'export'", ",", "help", "=", "'export trial job results to csv or json'", ")", "parser_trial_export", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_trial_export", ".", "add_argument", "(", "'--type'", ",", "'-t'", ",", "choices", "=", "[", "'json'", ",", "'csv'", "]", ",", "required", "=", "True", ",", "dest", "=", "'type'", ",", "help", "=", "'target file type'", ")", "parser_trial_export", ".", "add_argument", "(", "'--filename'", ",", "'-f'", ",", "required", "=", "True", ",", "dest", "=", "'path'", ",", "help", "=", "'target file path'", ")", "parser_trial_export", ".", "set_defaults", "(", "func", "=", "export_trials_data", ")", "#TODO:finish webui function", "#parse board command", "parser_webui", "=", "subparsers", ".", "add_parser", "(", "'webui'", ",", "help", "=", "'get web ui information'", ")", "#add subparsers for parser_board", "parser_webui_subparsers", "=", "parser_webui", ".", "add_subparsers", "(", ")", "parser_webui_url", "=", "parser_webui_subparsers", ".", "add_parser", "(", "'url'", ",", "help", "=", "'show the url of web ui'", ")", "parser_webui_url", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_webui_url", ".", "set_defaults", "(", "func", "=", "webui_url", ")", "#parse config command", "parser_config", "=", "subparsers", ".", "add_parser", "(", "'config'", ",", "help", "=", "'get config information'", ")", "parser_config_subparsers", "=", "parser_config", ".", "add_subparsers", "(", ")", "parser_config_show", "=", "parser_config_subparsers", ".", "add_parser", "(", "'show'", ",", "help", "=", "'show the information of config'", ")", "parser_config_show", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_config_show", ".", "set_defaults", "(", "func", "=", "get_config", ")", "#parse log command", "parser_log", "=", "subparsers", ".", "add_parser", "(", "'log'", ",", "help", "=", "'get log information'", ")", "# add subparsers for parser_log", "parser_log_subparsers", "=", "parser_log", ".", "add_subparsers", "(", ")", "parser_log_stdout", "=", "parser_log_subparsers", ".", "add_parser", "(", "'stdout'", ",", "help", "=", "'get stdout information'", ")", "parser_log_stdout", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_log_stdout", ".", "add_argument", "(", "'--tail'", ",", "'-T'", ",", "dest", "=", "'tail'", ",", "type", "=", "int", ",", "help", "=", "'get tail -100 content of stdout'", ")", "parser_log_stdout", ".", "add_argument", "(", "'--head'", ",", "'-H'", ",", "dest", "=", "'head'", ",", "type", "=", "int", ",", "help", 
"=", "'get head -100 content of stdout'", ")", "parser_log_stdout", ".", "add_argument", "(", "'--path'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'get the path of stdout file'", ")", "parser_log_stdout", ".", "set_defaults", "(", "func", "=", "log_stdout", ")", "parser_log_stderr", "=", "parser_log_subparsers", ".", "add_parser", "(", "'stderr'", ",", "help", "=", "'get stderr information'", ")", "parser_log_stderr", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_log_stderr", ".", "add_argument", "(", "'--tail'", ",", "'-T'", ",", "dest", "=", "'tail'", ",", "type", "=", "int", ",", "help", "=", "'get tail -100 content of stderr'", ")", "parser_log_stderr", ".", "add_argument", "(", "'--head'", ",", "'-H'", ",", "dest", "=", "'head'", ",", "type", "=", "int", ",", "help", "=", "'get head -100 content of stderr'", ")", "parser_log_stderr", ".", "add_argument", "(", "'--path'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'get the path of stderr file'", ")", "parser_log_stderr", ".", "set_defaults", "(", "func", "=", "log_stderr", ")", "parser_log_trial", "=", "parser_log_subparsers", ".", "add_parser", "(", "'trial'", ",", "help", "=", "'get trial log path'", ")", "parser_log_trial", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_log_trial", ".", "add_argument", "(", "'--trial_id'", ",", "'-T'", ",", "dest", "=", "'trial_id'", ",", "help", "=", "'find trial log path by id'", ")", "parser_log_trial", ".", "set_defaults", "(", "func", "=", "log_trial", ")", "#parse package command", "parser_package", "=", "subparsers", ".", "add_parser", "(", "'package'", ",", "help", "=", "'control nni tuner and assessor packages'", ")", "# add subparsers for parser_package", "parser_package_subparsers", "=", "parser_package", ".", "add_subparsers", "(", ")", "parser_package_install", "=", "parser_package_subparsers", ".", "add_parser", "(", "'install'", ",", "help", "=", "'install packages'", ")", "parser_package_install", ".", "add_argument", "(", "'--name'", ",", "'-n'", ",", "dest", "=", "'name'", ",", "help", "=", "'package name to be installed'", ")", "parser_package_install", ".", "set_defaults", "(", "func", "=", "package_install", ")", "parser_package_show", "=", "parser_package_subparsers", ".", "add_parser", "(", "'show'", ",", "help", "=", "'show the information of packages'", ")", "parser_package_show", ".", "set_defaults", "(", "func", "=", "package_show", ")", "#parse tensorboard command", "parser_tensorboard", "=", "subparsers", ".", "add_parser", "(", "'tensorboard'", ",", "help", "=", "'manage tensorboard'", ")", "parser_tensorboard_subparsers", "=", "parser_tensorboard", ".", "add_subparsers", "(", ")", "parser_tensorboard_start", "=", "parser_tensorboard_subparsers", ".", "add_parser", "(", "'start'", ",", "help", "=", "'start tensorboard'", ")", "parser_tensorboard_start", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_tensorboard_start", ".", "add_argument", "(", "'--trial_id'", ",", "'-T'", ",", "dest", "=", "'trial_id'", ",", "help", "=", "'the id of trial'", ")", "parser_tensorboard_start", ".", "add_argument", "(", "'--port'", ",", "dest", "=", "'port'", ",", "default", "=", "6006", ",", "help", "=", "'the port to start tensorboard'", ")", "parser_tensorboard_start", ".", "set_defaults", "(", 
"func", "=", "start_tensorboard", ")", "parser_tensorboard_start", "=", "parser_tensorboard_subparsers", ".", "add_parser", "(", "'stop'", ",", "help", "=", "'stop tensorboard'", ")", "parser_tensorboard_start", ".", "add_argument", "(", "'id'", ",", "nargs", "=", "'?'", ",", "help", "=", "'the id of experiment'", ")", "parser_tensorboard_start", ".", "set_defaults", "(", "func", "=", "stop_tensorboard", ")", "#parse top command", "parser_top", "=", "subparsers", ".", "add_parser", "(", "'top'", ",", "help", "=", "'monitor the experiment'", ")", "parser_top", ".", "add_argument", "(", "'--time'", ",", "'-t'", ",", "dest", "=", "'time'", ",", "type", "=", "int", ",", "default", "=", "3", ",", "help", "=", "'the time interval to update the experiment status, '", "'the unit is second'", ")", "parser_top", ".", "set_defaults", "(", "func", "=", "monitor_experiment", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "args", ".", "func", "(", "args", ")" ]
Define the arguments users need to follow and input
[ "Definite", "the", "arguments", "users", "need", "to", "follow", "and", "input" ]
python
train
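The parser above is long but follows a single argparse idiom: every subcommand registers a handler through set_defaults(func=...), and the last two lines dispatch to it. A minimal, self-contained sketch of that idiom (command and handler names here are illustrative, not part of nnictl):

import argparse


def create_experiment(args):
    print('creating experiment from', args.config)


def stop_experiment(args):
    print('stopping experiment', args.id or 'all')


parser = argparse.ArgumentParser(prog='minictl')
subparsers = parser.add_subparsers()

parser_create = subparsers.add_parser('create', help='create a new experiment')
parser_create.add_argument('--config', '-c', required=True)
parser_create.set_defaults(func=create_experiment)

parser_stop = subparsers.add_parser('stop', help='stop the experiment')
parser_stop.add_argument('id', nargs='?')
parser_stop.set_defaults(func=stop_experiment)

# Each parsed namespace carries its handler, so dispatch is one call.
args = parser.parse_args(['create', '--config', 'experiment.yml'])
args.func(args)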
johnnoone/aioconsul
aioconsul/common/util.py
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/common/util.py#L39-L48
def duration_to_timedelta(obj): """Converts duration to timedelta >>> duration_to_timedelta("10m") >>> datetime.timedelta(0, 600) """ matches = DURATION_PATTERN.search(obj) matches = matches.groupdict(default="0") matches = {k: int(v) for k, v in matches.items()} return timedelta(**matches)
[ "def", "duration_to_timedelta", "(", "obj", ")", ":", "matches", "=", "DURATION_PATTERN", ".", "search", "(", "obj", ")", "matches", "=", "matches", ".", "groupdict", "(", "default", "=", "\"0\"", ")", "matches", "=", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "matches", ".", "items", "(", ")", "}", "return", "timedelta", "(", "*", "*", "matches", ")" ]
Converts duration to timedelta >>> duration_to_timedelta("10m") datetime.timedelta(0, 600)
[ "Converts", "duration", "to", "timedelta" ]
python
train
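DURATION_PATTERN is defined elsewhere in the module and is not shown in this entry. A self-contained sketch with an assumed pattern (named groups feeding timedelta keyword arguments) reproduces the documented behaviour:

import re
from datetime import timedelta

# Hypothetical pattern accepting forms such as "10m" or "1h30m"; the real
# aioconsul regex may support more units.
DURATION_PATTERN = re.compile(
    r'((?P<hours>\d+)h)?((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?')


def duration_to_timedelta(obj):
    matches = DURATION_PATTERN.search(obj)
    matches = matches.groupdict(default="0")
    matches = {k: int(v) for k, v in matches.items()}
    return timedelta(**matches)


print(duration_to_timedelta("10m"))    # 0:10:00
print(duration_to_timedelta("1h30m"))  # 1:30:00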
iwanbk/nyamuk
nyamuk/base_nyamuk.py
https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/base_nyamuk.py#L232-L236
def socket_close(self): """Close our socket.""" if self.sock != NC.INVALID_SOCKET: self.sock.close() self.sock = NC.INVALID_SOCKET
[ "def", "socket_close", "(", "self", ")", ":", "if", "self", ".", "sock", "!=", "NC", ".", "INVALID_SOCKET", ":", "self", ".", "sock", ".", "close", "(", ")", "self", ".", "sock", "=", "NC", ".", "INVALID_SOCKET" ]
Close our socket.
[ "Close", "our", "socket", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/mtf_image_transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_image_transformer.py#L514-L520
def mtf_image_transformer_cifar_4x(): """Data parallel CIFAR parameters.""" hparams = mtf_image_transformer_base_cifar() hparams.mesh_shape = "batch:32" hparams.layout = "batch:batch" hparams.batch_size = 128 return hparams
[ "def", "mtf_image_transformer_cifar_4x", "(", ")", ":", "hparams", "=", "mtf_image_transformer_base_cifar", "(", ")", "hparams", ".", "mesh_shape", "=", "\"batch:32\"", "hparams", ".", "layout", "=", "\"batch:batch\"", "hparams", ".", "batch_size", "=", "128", "return", "hparams" ]
Data parallel CIFAR parameters.
[ "Data", "parallel", "CIFAR", "parameters", "." ]
python
train
theolind/pymysensors
mysensors/__init__.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/__init__.py#L433-L437
def handle_line(self, line): """Handle incoming string data one line at a time.""" if not self.gateway.can_log: _LOGGER.debug('Receiving %s', line) self.gateway.add_job(self.gateway.logic, line)
[ "def", "handle_line", "(", "self", ",", "line", ")", ":", "if", "not", "self", ".", "gateway", ".", "can_log", ":", "_LOGGER", ".", "debug", "(", "'Receiving %s'", ",", "line", ")", "self", ".", "gateway", ".", "add_job", "(", "self", ".", "gateway", ".", "logic", ",", "line", ")" ]
Handle incoming string data one line at a time.
[ "Handle", "incoming", "string", "data", "one", "line", "at", "a", "time", "." ]
python
train
common-workflow-language/cwltool
cwltool/provenance.py
https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/provenance.py#L568-L660
def declare_directory(self, value): # type: (MutableMapping) -> ProvEntity """Register any nested files/directories.""" # FIXME: Calculate a hash-like identifier for directory # so we get same value if it's the same filenames/hashes # in a different location. # For now, mint a new UUID to identify this directory, but # attempt to keep it inside the value dictionary dir_id = value.setdefault("@id", uuid.uuid4().urn) # New annotation file to keep the ORE Folder listing ore_doc_fn = dir_id.replace("urn:uuid:", "directory-") + ".ttl" dir_bundle = self.document.bundle(self.metadata_ns[ore_doc_fn]) coll = self.document.entity( dir_id, [(provM.PROV_TYPE, WFPROV["Artifact"]), (provM.PROV_TYPE, PROV["Collection"]), (provM.PROV_TYPE, PROV["Dictionary"]), (provM.PROV_TYPE, RO["Folder"])]) # ORE description of ro:Folder, saved separately coll_b = dir_bundle.entity( dir_id, [(provM.PROV_TYPE, RO["Folder"]), (provM.PROV_TYPE, ORE["Aggregation"])]) self.document.mentionOf(dir_id + "#ore", dir_id, dir_bundle.identifier) # dir_manifest = dir_bundle.entity( # dir_bundle.identifier, {PROV["type"]: ORE["ResourceMap"], # ORE["describes"]: coll_b.identifier}) coll_attribs = [(ORE["isDescribedBy"], dir_bundle.identifier)] coll_b_attribs = [] # type: List[Tuple[Identifier, ProvEntity]] # FIXME: .listing might not be populated yet - hopefully # a later call to this method will sort that is_empty = True if "listing" not in value: fsaccess = StdFsAccess("") get_listing(fsaccess, value) for entry in value.get("listing", []): is_empty = False # Declare child-artifacts entity = self.declare_artefact(entry) self.document.membership(coll, entity) # Membership relation aka our ORE Proxy m_id = uuid.uuid4().urn m_entity = self.document.entity(m_id) m_b = dir_bundle.entity(m_id) # PROV-O style Dictionary # https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition # ..as prov.py do not currently allow PROV-N extensions # like hadDictionaryMember(..) m_entity.add_asserted_type(PROV["KeyEntityPair"]) m_entity.add_attributes({ PROV["pairKey"]: entry["basename"], PROV["pairEntity"]: entity, }) # As well as a being a # http://wf4ever.github.io/ro/2016-01-28/ro/#FolderEntry m_b.add_asserted_type(RO["FolderEntry"]) m_b.add_asserted_type(ORE["Proxy"]) m_b.add_attributes({ RO["entryName"]: entry["basename"], ORE["proxyIn"]: coll, ORE["proxyFor"]: entity, }) coll_attribs.append((PROV["hadDictionaryMember"], m_entity)) coll_b_attribs.append((ORE["aggregates"], m_b)) coll.add_attributes(coll_attribs) coll_b.add_attributes(coll_b_attribs) # Also Save ORE Folder as annotation metadata ore_doc = ProvDocument() ore_doc.add_namespace(ORE) ore_doc.add_namespace(RO) ore_doc.add_namespace(UUID) ore_doc.add_bundle(dir_bundle) ore_doc = ore_doc.flattened() ore_doc_path = posixpath.join(_posix_path(METADATA), ore_doc_fn) with self.research_object.write_bag_file(ore_doc_path) as provenance_file: ore_doc.serialize(provenance_file, format="rdf", rdf_format="turtle") self.research_object.add_annotation(dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri) if is_empty: # Empty directory coll.add_asserted_type(PROV["EmptyCollection"]) coll.add_asserted_type(PROV["EmptyDictionary"]) self.research_object.add_uri(coll.identifier.uri) return coll
[ "def", "declare_directory", "(", "self", ",", "value", ")", ":", "# type: (MutableMapping) -> ProvEntity", "# FIXME: Calculate a hash-like identifier for directory", "# so we get same value if it's the same filenames/hashes", "# in a different location.", "# For now, mint a new UUID to identify this directory, but", "# attempt to keep it inside the value dictionary", "dir_id", "=", "value", ".", "setdefault", "(", "\"@id\"", ",", "uuid", ".", "uuid4", "(", ")", ".", "urn", ")", "# New annotation file to keep the ORE Folder listing", "ore_doc_fn", "=", "dir_id", ".", "replace", "(", "\"urn:uuid:\"", ",", "\"directory-\"", ")", "+", "\".ttl\"", "dir_bundle", "=", "self", ".", "document", ".", "bundle", "(", "self", ".", "metadata_ns", "[", "ore_doc_fn", "]", ")", "coll", "=", "self", ".", "document", ".", "entity", "(", "dir_id", ",", "[", "(", "provM", ".", "PROV_TYPE", ",", "WFPROV", "[", "\"Artifact\"", "]", ")", ",", "(", "provM", ".", "PROV_TYPE", ",", "PROV", "[", "\"Collection\"", "]", ")", ",", "(", "provM", ".", "PROV_TYPE", ",", "PROV", "[", "\"Dictionary\"", "]", ")", ",", "(", "provM", ".", "PROV_TYPE", ",", "RO", "[", "\"Folder\"", "]", ")", "]", ")", "# ORE description of ro:Folder, saved separately", "coll_b", "=", "dir_bundle", ".", "entity", "(", "dir_id", ",", "[", "(", "provM", ".", "PROV_TYPE", ",", "RO", "[", "\"Folder\"", "]", ")", ",", "(", "provM", ".", "PROV_TYPE", ",", "ORE", "[", "\"Aggregation\"", "]", ")", "]", ")", "self", ".", "document", ".", "mentionOf", "(", "dir_id", "+", "\"#ore\"", ",", "dir_id", ",", "dir_bundle", ".", "identifier", ")", "# dir_manifest = dir_bundle.entity(", "# dir_bundle.identifier, {PROV[\"type\"]: ORE[\"ResourceMap\"],", "# ORE[\"describes\"]: coll_b.identifier})", "coll_attribs", "=", "[", "(", "ORE", "[", "\"isDescribedBy\"", "]", ",", "dir_bundle", ".", "identifier", ")", "]", "coll_b_attribs", "=", "[", "]", "# type: List[Tuple[Identifier, ProvEntity]]", "# FIXME: .listing might not be populated yet - hopefully", "# a later call to this method will sort that", "is_empty", "=", "True", "if", "\"listing\"", "not", "in", "value", ":", "fsaccess", "=", "StdFsAccess", "(", "\"\"", ")", "get_listing", "(", "fsaccess", ",", "value", ")", "for", "entry", "in", "value", ".", "get", "(", "\"listing\"", ",", "[", "]", ")", ":", "is_empty", "=", "False", "# Declare child-artifacts", "entity", "=", "self", ".", "declare_artefact", "(", "entry", ")", "self", ".", "document", ".", "membership", "(", "coll", ",", "entity", ")", "# Membership relation aka our ORE Proxy", "m_id", "=", "uuid", ".", "uuid4", "(", ")", ".", "urn", "m_entity", "=", "self", ".", "document", ".", "entity", "(", "m_id", ")", "m_b", "=", "dir_bundle", ".", "entity", "(", "m_id", ")", "# PROV-O style Dictionary", "# https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition", "# ..as prov.py do not currently allow PROV-N extensions", "# like hadDictionaryMember(..)", "m_entity", ".", "add_asserted_type", "(", "PROV", "[", "\"KeyEntityPair\"", "]", ")", "m_entity", ".", "add_attributes", "(", "{", "PROV", "[", "\"pairKey\"", "]", ":", "entry", "[", "\"basename\"", "]", ",", "PROV", "[", "\"pairEntity\"", "]", ":", "entity", ",", "}", ")", "# As well as a being a", "# http://wf4ever.github.io/ro/2016-01-28/ro/#FolderEntry", "m_b", ".", "add_asserted_type", "(", "RO", "[", "\"FolderEntry\"", "]", ")", "m_b", ".", "add_asserted_type", "(", "ORE", "[", "\"Proxy\"", "]", ")", "m_b", ".", "add_attributes", "(", "{", "RO", "[", "\"entryName\"", "]", ":", "entry", 
"[", "\"basename\"", "]", ",", "ORE", "[", "\"proxyIn\"", "]", ":", "coll", ",", "ORE", "[", "\"proxyFor\"", "]", ":", "entity", ",", "}", ")", "coll_attribs", ".", "append", "(", "(", "PROV", "[", "\"hadDictionaryMember\"", "]", ",", "m_entity", ")", ")", "coll_b_attribs", ".", "append", "(", "(", "ORE", "[", "\"aggregates\"", "]", ",", "m_b", ")", ")", "coll", ".", "add_attributes", "(", "coll_attribs", ")", "coll_b", ".", "add_attributes", "(", "coll_b_attribs", ")", "# Also Save ORE Folder as annotation metadata", "ore_doc", "=", "ProvDocument", "(", ")", "ore_doc", ".", "add_namespace", "(", "ORE", ")", "ore_doc", ".", "add_namespace", "(", "RO", ")", "ore_doc", ".", "add_namespace", "(", "UUID", ")", "ore_doc", ".", "add_bundle", "(", "dir_bundle", ")", "ore_doc", "=", "ore_doc", ".", "flattened", "(", ")", "ore_doc_path", "=", "posixpath", ".", "join", "(", "_posix_path", "(", "METADATA", ")", ",", "ore_doc_fn", ")", "with", "self", ".", "research_object", ".", "write_bag_file", "(", "ore_doc_path", ")", "as", "provenance_file", ":", "ore_doc", ".", "serialize", "(", "provenance_file", ",", "format", "=", "\"rdf\"", ",", "rdf_format", "=", "\"turtle\"", ")", "self", ".", "research_object", ".", "add_annotation", "(", "dir_id", ",", "[", "ore_doc_fn", "]", ",", "ORE", "[", "\"isDescribedBy\"", "]", ".", "uri", ")", "if", "is_empty", ":", "# Empty directory", "coll", ".", "add_asserted_type", "(", "PROV", "[", "\"EmptyCollection\"", "]", ")", "coll", ".", "add_asserted_type", "(", "PROV", "[", "\"EmptyDictionary\"", "]", ")", "self", ".", "research_object", ".", "add_uri", "(", "coll", ".", "identifier", ".", "uri", ")", "return", "coll" ]
Register any nested files/directories.
[ "Register", "any", "nested", "files", "/", "directories", "." ]
python
train
redhat-openstack/python-tripleo-helper
tripleohelper/ssh.py
https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/ssh.py#L163-L221
def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,), error_callback=None, custom_log=None, retry=0): """Run a command on the remote host. The command is run on the remote host, if there is a redirected host then the command will be run on that redirected host. See __init__. :param cmd: the command to run :type cmd: str :param sudo: True if the command should be run with sudo, this parameter disable the use of environment files. :type sudo: str :param success_status: the list of the possible success status :type success_status: list :param error_callback: if provided, the callback to call in case of a failure. it will be called with two args, the output of the command and the returned error code. :return: the tuple (output of the command, returned code) :rtype: tuple :param custom_log: a optional string to record in the log instead of the command. This is useful for example if you want to hide a password. :type custom_log: str """ self._check_started() cmd_output = io.StringIO() channel = self._get_channel() cmd = self._prepare_cmd(cmd, sudo=sudo) if not custom_log: custom_log = cmd LOG.info("%s run '%s'" % (self.description, custom_log)) channel.exec_command(cmd) while True: received = None rl, _, _ = select.select([channel], [], [], 30) if rl: received = channel.recv(1024).decode('UTF-8', 'ignore').strip() if received: LOG.debug(received) cmd_output.write(received) if channel.exit_status_ready() and not received: break cmd_output = cmd_output.getvalue() exit_status = channel.exit_status try: return self._evaluate_run_result( exit_status, cmd_output, ignore_error=ignore_error, success_status=success_status, error_callback=error_callback, custom_log=custom_log) except (paramiko.ssh_exception.SSHException, socket.error) as e: if not retry: raise e else: return self.run( cmd, sudo=sudo, ignore_error=ignore_error, success_status=success_status, error_callback=error_callback, custom_log=custom_log, retry=(retry - 1))
[ "def", "run", "(", "self", ",", "cmd", ",", "sudo", "=", "False", ",", "ignore_error", "=", "False", ",", "success_status", "=", "(", "0", ",", ")", ",", "error_callback", "=", "None", ",", "custom_log", "=", "None", ",", "retry", "=", "0", ")", ":", "self", ".", "_check_started", "(", ")", "cmd_output", "=", "io", ".", "StringIO", "(", ")", "channel", "=", "self", ".", "_get_channel", "(", ")", "cmd", "=", "self", ".", "_prepare_cmd", "(", "cmd", ",", "sudo", "=", "sudo", ")", "if", "not", "custom_log", ":", "custom_log", "=", "cmd", "LOG", ".", "info", "(", "\"%s run '%s'\"", "%", "(", "self", ".", "description", ",", "custom_log", ")", ")", "channel", ".", "exec_command", "(", "cmd", ")", "while", "True", ":", "received", "=", "None", "rl", ",", "_", ",", "_", "=", "select", ".", "select", "(", "[", "channel", "]", ",", "[", "]", ",", "[", "]", ",", "30", ")", "if", "rl", ":", "received", "=", "channel", ".", "recv", "(", "1024", ")", ".", "decode", "(", "'UTF-8'", ",", "'ignore'", ")", ".", "strip", "(", ")", "if", "received", ":", "LOG", ".", "debug", "(", "received", ")", "cmd_output", ".", "write", "(", "received", ")", "if", "channel", ".", "exit_status_ready", "(", ")", "and", "not", "received", ":", "break", "cmd_output", "=", "cmd_output", ".", "getvalue", "(", ")", "exit_status", "=", "channel", ".", "exit_status", "try", ":", "return", "self", ".", "_evaluate_run_result", "(", "exit_status", ",", "cmd_output", ",", "ignore_error", "=", "ignore_error", ",", "success_status", "=", "success_status", ",", "error_callback", "=", "error_callback", ",", "custom_log", "=", "custom_log", ")", "except", "(", "paramiko", ".", "ssh_exception", ".", "SSHException", ",", "socket", ".", "error", ")", "as", "e", ":", "if", "not", "retry", ":", "raise", "e", "else", ":", "return", "self", ".", "run", "(", "cmd", ",", "sudo", "=", "sudo", ",", "ignore_error", "=", "ignore_error", ",", "success_status", "=", "success_status", ",", "error_callback", "=", "error_callback", ",", "custom_log", "=", "custom_log", ",", "retry", "=", "(", "retry", "-", "1", ")", ")" ]
Run a command on the remote host. The command is run on the remote host; if there is a redirected host, the command will be run on that redirected host. See __init__. :param cmd: the command to run :type cmd: str :param sudo: True if the command should be run with sudo, this parameter disables the use of environment files. :type sudo: str :param success_status: the list of the possible success status :type success_status: list :param error_callback: if provided, the callback to call in case of a failure. It will be called with two args, the output of the command and the returned error code. :return: the tuple (output of the command, returned code) :rtype: tuple :param custom_log: an optional string to record in the log instead of the command. This is useful for example if you want to hide a password. :type custom_log: str
[ "Run", "a", "command", "on", "the", "remote", "host", "." ]
python
train
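Under the wrapper above sits a plain paramiko channel. A bare-bones sketch of the same round trip (connect, run a command, read output and exit status); the host, user and key path are placeholders:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('undercloud.example.com', username='stack',
               key_filename='/home/stack/.ssh/id_rsa')

# exec_command returns file-like stdin/stdout/stderr for the channel.
stdin, stdout, stderr = client.exec_command('uname -a')
output = stdout.read().decode('utf-8', 'ignore')
exit_status = stdout.channel.recv_exit_status()
client.close()

print(exit_status, output.strip())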
mbj4668/pyang
pyang/statements.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L537-L556
def v_grammar_unique_defs(ctx, stmt): """Verify that all typedefs and groupings are unique Called for every statement. Stores all typedefs in stmt.i_typedef, groupings in stmt.i_grouping """ defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs), ('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)] if stmt.parent is None: defs.extend( [('feature', 'FEATURE_ALREADY_DEFINED', stmt.i_features), ('identity', 'IDENTITY_ALREADY_DEFINED', stmt.i_identities), ('extension', 'EXTENSION_ALREADY_DEFINED', stmt.i_extensions)]) for (keyword, errcode, dict) in defs: for definition in stmt.search(keyword): if definition.arg in dict: other = dict[definition.arg] err_add(ctx.errors, definition.pos, errcode, (definition.arg, other.pos)) else: dict[definition.arg] = definition
[ "def", "v_grammar_unique_defs", "(", "ctx", ",", "stmt", ")", ":", "defs", "=", "[", "(", "'typedef'", ",", "'TYPE_ALREADY_DEFINED'", ",", "stmt", ".", "i_typedefs", ")", ",", "(", "'grouping'", ",", "'GROUPING_ALREADY_DEFINED'", ",", "stmt", ".", "i_groupings", ")", "]", "if", "stmt", ".", "parent", "is", "None", ":", "defs", ".", "extend", "(", "[", "(", "'feature'", ",", "'FEATURE_ALREADY_DEFINED'", ",", "stmt", ".", "i_features", ")", ",", "(", "'identity'", ",", "'IDENTITY_ALREADY_DEFINED'", ",", "stmt", ".", "i_identities", ")", ",", "(", "'extension'", ",", "'EXTENSION_ALREADY_DEFINED'", ",", "stmt", ".", "i_extensions", ")", "]", ")", "for", "(", "keyword", ",", "errcode", ",", "dict", ")", "in", "defs", ":", "for", "definition", "in", "stmt", ".", "search", "(", "keyword", ")", ":", "if", "definition", ".", "arg", "in", "dict", ":", "other", "=", "dict", "[", "definition", ".", "arg", "]", "err_add", "(", "ctx", ".", "errors", ",", "definition", ".", "pos", ",", "errcode", ",", "(", "definition", ".", "arg", ",", "other", ".", "pos", ")", ")", "else", ":", "dict", "[", "definition", ".", "arg", "]", "=", "definition" ]
Verify that all typedefs and groupings are unique Called for every statement. Stores all typedefs in stmt.i_typedef, groupings in stmt.i_grouping
[ "Verify", "that", "all", "typedefs", "and", "groupings", "are", "unique", "Called", "for", "every", "statement", ".", "Stores", "all", "typedefs", "in", "stmt", ".", "i_typedef", "groupings", "in", "stmt", ".", "i_grouping" ]
python
train
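Stripped of the pyang statement machinery, the check above is a "first definition wins, later ones are reported" pattern over per-keyword dictionaries. A simplified stand-alone sketch (the statement and error structures are stand-ins, not pyang's):

def check_unique(definitions, errors):
    """definitions: iterable of (keyword, name, position) tuples."""
    seen = {}
    for keyword, name, pos in definitions:
        key = (keyword, name)
        if key in seen:
            # Report the clash together with where the name was first defined.
            errors.append((pos, '%s_ALREADY_DEFINED' % keyword.upper(),
                           (name, seen[key])))
        else:
            seen[key] = pos


errors = []
check_unique([('typedef', 'percent', 'a.yang:10'),
              ('grouping', 'endpoint', 'a.yang:20'),
              ('typedef', 'percent', 'a.yang:42')], errors)
print(errors)  # [('a.yang:42', 'TYPEDEF_ALREADY_DEFINED', ('percent', 'a.yang:10'))]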
ClericPy/torequests
torequests/utils.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/utils.py#L261-L268
def slice_by_size(seq, size): """Slice a sequence into chunks, return as a generation of chunks with `size`.""" filling = null for it in zip(*(itertools_chain(seq, [filling] * size),) * size): if filling in it: it = tuple(i for i in it if i is not filling) if it: yield it
[ "def", "slice_by_size", "(", "seq", ",", "size", ")", ":", "filling", "=", "null", "for", "it", "in", "zip", "(", "*", "(", "itertools_chain", "(", "seq", ",", "[", "filling", "]", "*", "size", ")", ",", ")", "*", "size", ")", ":", "if", "filling", "in", "it", ":", "it", "=", "tuple", "(", "i", "for", "i", "in", "it", "if", "i", "is", "not", "filling", ")", "if", "it", ":", "yield", "it" ]
Slice a sequence into chunks, return as a generator of chunks with `size`.
[ "Slice", "a", "sequence", "into", "chunks", "return", "as", "a", "generation", "of", "chunks", "with", "size", "." ]
python
train
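A usage sketch of the same chunking trick, with a local sentinel object standing in for the library's `null` and itertools.chain for the `itertools_chain` alias used above:

from itertools import chain

_FILL = object()  # sentinel standing in for torequests' `null`


def slice_by_size(seq, size):
    # zip over `size` references to one chained iterator yields fixed-width
    # tuples; the padding sentinel is stripped from the final, short chunk.
    for it in zip(*(chain(seq, [_FILL] * size),) * size):
        if _FILL in it:
            it = tuple(i for i in it if i is not _FILL)
        if it:
            yield it


print(list(slice_by_size(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]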
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L1174-L1192
def get_wallet_balance(wallet_name, api_key, omit_addresses=False, coin_symbol='btc'): ''' This is particularly useful over get_wallet_transactions and get_wallet_addresses in cases where you have lots of addresses/transactions. Much less data to return. ''' assert is_valid_coin_symbol(coin_symbol) assert api_key assert len(wallet_name) <= 25, wallet_name assert isinstance(omit_addresses, bool), omit_addresses params = {'token': api_key} if omit_addresses: params['omitWalletAddresses'] = 'true' url = make_url(coin_symbol, 'addrs/{}/balance'.format(wallet_name)) r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) return get_valid_json(r)
[ "def", "get_wallet_balance", "(", "wallet_name", ",", "api_key", ",", "omit_addresses", "=", "False", ",", "coin_symbol", "=", "'btc'", ")", ":", "assert", "is_valid_coin_symbol", "(", "coin_symbol", ")", "assert", "api_key", "assert", "len", "(", "wallet_name", ")", "<=", "25", ",", "wallet_name", "assert", "isinstance", "(", "omit_addresses", ",", "bool", ")", ",", "omit_addresses", "params", "=", "{", "'token'", ":", "api_key", "}", "if", "omit_addresses", ":", "params", "[", "'omitWalletAddresses'", "]", "=", "'true'", "url", "=", "make_url", "(", "coin_symbol", ",", "'addrs/{}/balance'", ".", "format", "(", "wallet_name", ")", ")", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "verify", "=", "True", ",", "timeout", "=", "TIMEOUT_IN_SECONDS", ")", "return", "get_valid_json", "(", "r", ")" ]
This is particularly useful over get_wallet_transactions and get_wallet_addresses in cases where you have lots of addresses/transactions. Much less data to return.
[ "This", "is", "particularly", "useful", "over", "get_wallet_transactions", "and", "get_wallet_addresses", "in", "cases", "where", "you", "have", "lots", "of", "addresses", "/", "transactions", ".", "Much", "less", "data", "to", "return", "." ]
python
train
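A usage sketch; this performs a live HTTP call, so a valid BlockCypher token is required and the wallet name below is a placeholder. The returned JSON follows BlockCypher's balance endpoint (fields such as final_balance), but exact field names should be checked against the API docs.

from blockcypher import get_wallet_balance

balance = get_wallet_balance(
    wallet_name='alice_wallet',            # must be 25 characters or fewer
    api_key='YOUR_BLOCKCYPHER_TOKEN',
    omit_addresses=True,                    # skip the address list in the reply
    coin_symbol='btc',
)
print(balance.get('final_balance'), 'satoshis')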
johnnoone/json-spec
src/jsonspec/operations/bases.py
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/operations/bases.py#L189-L203
def copy(self, dest, src): """Copy element from sequence, member from mapping. :param dest: the destination :type dest: Pointer :param src: the source :type src: Pointer :return: resolved document :rtype: Target """ doc = fragment = deepcopy(self.document) for token in Pointer(src): fragment = token.extract(fragment, bypass_ref=True) return Target(doc).add(dest, fragment)
[ "def", "copy", "(", "self", ",", "dest", ",", "src", ")", ":", "doc", "=", "fragment", "=", "deepcopy", "(", "self", ".", "document", ")", "for", "token", "in", "Pointer", "(", "src", ")", ":", "fragment", "=", "token", ".", "extract", "(", "fragment", ",", "bypass_ref", "=", "True", ")", "return", "Target", "(", "doc", ")", ".", "add", "(", "dest", ",", "fragment", ")" ]
Copy element from sequence, member from mapping. :param dest: the destination :type dest: Pointer :param src: the source :type src: Pointer :return: resolved document :rtype: Target
[ "Copy", "element", "from", "sequence", "member", "from", "mapping", "." ]
python
train
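In RFC 6902 terms, "copy" resolves the source pointer against the document and then adds the resolved fragment at the destination. A plain-Python illustration of those semantics only; this is not the jsonspec API, whose real method goes through Pointer and Target.add as shown above.

from copy import deepcopy


def resolve(doc, pointer):
    """Walk a simplified JSON Pointer such as '/foo/bar' or '/items/0'."""
    fragment = doc
    for token in pointer.lstrip('/').split('/'):
        if isinstance(fragment, list):
            fragment = fragment[int(token)]
        else:
            fragment = fragment[token]
    return fragment


doc = {'foo': {'bar': 'baz'}, 'items': []}
new_doc = deepcopy(doc)
new_doc['items'].append(resolve(new_doc, '/foo/bar'))  # copy /foo/bar -> /items/0
print(new_doc)  # {'foo': {'bar': 'baz'}, 'items': ['baz']}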
UpCloudLtd/upcloud-python-api
upcloud_api/tag.py
https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/tag.py#L30-L47
def _reset(self, **kwargs): """ Reset the objects attributes. Accepts servers as either unflattened or flattened UUID strings or Server objects. """ super(Tag, self)._reset(**kwargs) # backup name for changing it (look: Tag.save) self._api_name = self.name # flatten { servers: { server: [] } } if 'server' in self.servers: self.servers = kwargs['servers']['server'] # convert UUIDs into server objects if self.servers and isinstance(self.servers[0], six.string_types): self.servers = [Server(uuid=server, populated=False) for server in self.servers]
[ "def", "_reset", "(", "self", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Tag", ",", "self", ")", ".", "_reset", "(", "*", "*", "kwargs", ")", "# backup name for changing it (look: Tag.save)", "self", ".", "_api_name", "=", "self", ".", "name", "# flatten { servers: { server: [] } }", "if", "'server'", "in", "self", ".", "servers", ":", "self", ".", "servers", "=", "kwargs", "[", "'servers'", "]", "[", "'server'", "]", "# convert UUIDs into server objects", "if", "self", ".", "servers", "and", "isinstance", "(", "self", ".", "servers", "[", "0", "]", ",", "six", ".", "string_types", ")", ":", "self", ".", "servers", "=", "[", "Server", "(", "uuid", "=", "server", ",", "populated", "=", "False", ")", "for", "server", "in", "self", ".", "servers", "]" ]
Reset the object's attributes. Accepts servers as either unflattened or flattened UUID strings or Server objects.
[ "Reset", "the", "objects", "attributes", "." ]
python
train
bwohlberg/sporco
sporco/prox/_l1proj.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/prox/_l1proj.py#L110-L154
def _proj_l1_sortsum(v, gamma, axis=None): r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of :cite:`duchi-2008-efficient`. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` axis : None or int, optional (default None) Axes of `v` over which to compute the :math:`\ell_1` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct norm values are computed over the indices of the remaining axes of input array `v`. **Note:** specifying a tuple of ints is not supported by this function. Returns ------- x : ndarray Output array """ if axis is None and norm_l1(v) <= gamma: return v if axis is not None and axis < 0: axis = v.ndim + axis av = np.abs(v) vs = np.sort(av, axis=axis) if axis is None: N = v.size c = 1.0 / np.arange(1, N + 1, dtype=v.dtype).reshape(v.shape) vs = vs[::-1].reshape(v.shape) else: N = v.shape[axis] ns = [v.shape[k] if k == axis else 1 for k in range(v.ndim)] c = 1.0 / np.arange(1, N + 1, dtype=v.dtype).reshape(ns) vs = vs[(slice(None),) * axis + (slice(None, None, -1),)] t = c * (np.cumsum(vs, axis=axis).reshape(v.shape) - gamma) K = np.sum(vs >= t, axis=axis, keepdims=True) t = (np.sum(vs * (vs >= t), axis=axis, keepdims=True) - gamma) / K t = np.asarray(np.maximum(0, t), dtype=v.dtype) return np.sign(v) * np.where(av > t, av - t, 0)
[ "def", "_proj_l1_sortsum", "(", "v", ",", "gamma", ",", "axis", "=", "None", ")", ":", "if", "axis", "is", "None", "and", "norm_l1", "(", "v", ")", "<=", "gamma", ":", "return", "v", "if", "axis", "is", "not", "None", "and", "axis", "<", "0", ":", "axis", "=", "v", ".", "ndim", "+", "axis", "av", "=", "np", ".", "abs", "(", "v", ")", "vs", "=", "np", ".", "sort", "(", "av", ",", "axis", "=", "axis", ")", "if", "axis", "is", "None", ":", "N", "=", "v", ".", "size", "c", "=", "1.0", "/", "np", ".", "arange", "(", "1", ",", "N", "+", "1", ",", "dtype", "=", "v", ".", "dtype", ")", ".", "reshape", "(", "v", ".", "shape", ")", "vs", "=", "vs", "[", ":", ":", "-", "1", "]", ".", "reshape", "(", "v", ".", "shape", ")", "else", ":", "N", "=", "v", ".", "shape", "[", "axis", "]", "ns", "=", "[", "v", ".", "shape", "[", "k", "]", "if", "k", "==", "axis", "else", "1", "for", "k", "in", "range", "(", "v", ".", "ndim", ")", "]", "c", "=", "1.0", "/", "np", ".", "arange", "(", "1", ",", "N", "+", "1", ",", "dtype", "=", "v", ".", "dtype", ")", ".", "reshape", "(", "ns", ")", "vs", "=", "vs", "[", "(", "slice", "(", "None", ")", ",", ")", "*", "axis", "+", "(", "slice", "(", "None", ",", "None", ",", "-", "1", ")", ",", ")", "]", "t", "=", "c", "*", "(", "np", ".", "cumsum", "(", "vs", ",", "axis", "=", "axis", ")", ".", "reshape", "(", "v", ".", "shape", ")", "-", "gamma", ")", "K", "=", "np", ".", "sum", "(", "vs", ">=", "t", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", "t", "=", "(", "np", ".", "sum", "(", "vs", "*", "(", "vs", ">=", "t", ")", ",", "axis", "=", "axis", ",", "keepdims", "=", "True", ")", "-", "gamma", ")", "/", "K", "t", "=", "np", ".", "asarray", "(", "np", ".", "maximum", "(", "0", ",", "t", ")", ",", "dtype", "=", "v", ".", "dtype", ")", "return", "np", ".", "sign", "(", "v", ")", "*", "np", ".", "where", "(", "av", ">", "t", ",", "av", "-", "t", ",", "0", ")" ]
r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of :cite:`duchi-2008-efficient`. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` axis : None or int, optional (default None) Axes of `v` over which to compute the :math:`\ell_1` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct norm values are computed over the indices of the remaining axes of input array `v`. **Note:** specifying a tuple of ints is not supported by this function. Returns ------- x : ndarray Output array
[ "r", "Projection", "operator", "of", "the", ":", "math", ":", "\\", "ell_1", "norm", ".", "The", "solution", "is", "computed", "via", "the", "method", "of", ":", "cite", ":", "duchi", "-", "2008", "-", "efficient", "." ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/view.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/view.py#L46-L85
def export(self, Height=None, options=None, outputFile=None, Resolution=None,\ Units=None, Width=None, Zoom=None, view="current", verbose=False): """ Exports the current view to a graphics file and returns the path to the saved file. PNG and JPEG formats have options for scaling, while other formats only have the option 'exportTextAsFont'. For the PDF format, exporting text as font does not work for two-byte characters such as Chinese or Japanese. To avoid corrupted texts in the exported PDF, please set false to 'exportTextAsFont' when exporting networks including those non-English characters. :param Height (string, optional): The height of the exported image. Valid only for bitmap formats, such as PNG and JPEG. :param options (string, optional): The format of the output file. = ['JPEG (*.jpeg, *.jpg)', 'PDF (*.pdf)', 'PNG (*.png)', 'PostScript (*.ps)', 'SVG (*.svg)'] :param OutputFile (string, optional): The path name of the file where the view must be saved to. By default, the view's title is used as the file name. :param Resolution (string, optional): The resolution of the exported image, in DPI. Valid only for bitmap formats, when the selected width and height 'units' is inches. The possible values are: 72 (default), 100, 150, 300, 600. = ['72', '100', '150', '300', '600'] :param Units (string, optional): The units for the 'width' and 'height' values. Valid only for bitmap formats, such as PNG and JPEG. The possible values are: pixels (default), inches. = ['pixels', 'inches'] :param Width (string, optional): The width of the exported image. Valid only for bitmap formats, such as PNG and JPEG. :param Zoom (string, optional): The zoom value to proportionally scale the image. The default value is 100.0. Valid only for bitmap formats, such as PNG and JPEG :param verbose: print more :returns: path to the saved file """ PARAMS=set_param(["Height","options","outputFile","Resolution",\ "Units","Width","Zoom","view"],\ [Height,options,outputFile,Resolution,Units,Width,Zoom,view ]) response=api(url=self.__url+"/export", PARAMS=PARAMS, method="POST", verbose=verbose) return response
[ "def", "export", "(", "self", ",", "Height", "=", "None", ",", "options", "=", "None", ",", "outputFile", "=", "None", ",", "Resolution", "=", "None", ",", "Units", "=", "None", ",", "Width", "=", "None", ",", "Zoom", "=", "None", ",", "view", "=", "\"current\"", ",", "verbose", "=", "False", ")", ":", "PARAMS", "=", "set_param", "(", "[", "\"Height\"", ",", "\"options\"", ",", "\"outputFile\"", ",", "\"Resolution\"", ",", "\"Units\"", ",", "\"Width\"", ",", "\"Zoom\"", ",", "\"view\"", "]", ",", "[", "Height", ",", "options", ",", "outputFile", ",", "Resolution", ",", "Units", ",", "Width", ",", "Zoom", ",", "view", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/export\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"POST\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Exports the current view to a graphics file and returns the path to the saved file. PNG and JPEG formats have options for scaling, while other formats only have the option 'exportTextAsFont'. For the PDF format, exporting text as font does not work for two-byte characters such as Chinese or Japanese. To avoid corrupted texts in the exported PDF, please set false to 'exportTextAsFont' when exporting networks including those non-English characters. :param Height (string, optional): The height of the exported image. Valid only for bitmap formats, such as PNG and JPEG. :param options (string, optional): The format of the output file. = ['JPEG (*.jpeg, *.jpg)', 'PDF (*.pdf)', 'PNG (*.png)', 'PostScript (*.ps)', 'SVG (*.svg)'] :param OutputFile (string, optional): The path name of the file where the view must be saved to. By default, the view's title is used as the file name. :param Resolution (string, optional): The resolution of the exported image, in DPI. Valid only for bitmap formats, when the selected width and height 'units' is inches. The possible values are: 72 (default), 100, 150, 300, 600. = ['72', '100', '150', '300', '600'] :param Units (string, optional): The units for the 'width' and 'height' values. Valid only for bitmap formats, such as PNG and JPEG. The possible values are: pixels (default), inches. = ['pixels', 'inches'] :param Width (string, optional): The width of the exported image. Valid only for bitmap formats, such as PNG and JPEG. :param Zoom (string, optional): The zoom value to proportionally scale the image. The default value is 100.0. Valid only for bitmap formats, such as PNG and JPEG :param verbose: print more :returns: path to the saved file
[ "Exports", "the", "current", "view", "to", "a", "graphics", "file", "and", "returns", "the", "path", "to", "the", "saved", "file", ".", "PNG", "and", "JPEG", "formats", "have", "options", "for", "scaling", "while", "other", "formats", "only", "have", "the", "option", "exportTextAsFont", ".", "For", "the", "PDF", "format", "exporting", "text", "as", "font", "does", "not", "work", "for", "two", "-", "byte", "characters", "such", "as", "Chinese", "or", "Japanese", ".", "To", "avoid", "corrupted", "texts", "in", "the", "exported", "PDF", "please", "set", "false", "to", "exportTextAsFont", "when", "exporting", "networks", "including", "those", "non", "-", "English", "characters", "." ]
python
train
djm/python-scrapyd-api
scrapyd_api/wrapper.py
https://github.com/djm/python-scrapyd-api/blob/42f287cf83c3a5bd46795f4f85cce02a56829921/scrapyd_api/wrapper.py#L161-L169
def list_versions(self, project): """ Lists all deployed versions of a specific project. First class, maps to Scrapyd's list versions endpoint. """ url = self._build_url(constants.LIST_VERSIONS_ENDPOINT) params = {'project': project} json = self.client.get(url, params=params, timeout=self.timeout) return json['versions']
[ "def", "list_versions", "(", "self", ",", "project", ")", ":", "url", "=", "self", ".", "_build_url", "(", "constants", ".", "LIST_VERSIONS_ENDPOINT", ")", "params", "=", "{", "'project'", ":", "project", "}", "json", "=", "self", ".", "client", ".", "get", "(", "url", ",", "params", "=", "params", ",", "timeout", "=", "self", ".", "timeout", ")", "return", "json", "[", "'versions'", "]" ]
Lists all deployed versions of a specific project. First class, maps to Scrapyd's list versions endpoint.
[ "Lists", "all", "deployed", "versions", "of", "a", "specific", "project", ".", "First", "class", "maps", "to", "Scrapyd", "s", "list", "versions", "endpoint", "." ]
python
train
aaugustin/websockets
src/websockets/protocol.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/protocol.py#L1011-L1074
async def close_connection(self) -> None: """ 7.1.1. Close the WebSocket Connection When the opening handshake succeeds, :meth:`connection_open` starts this coroutine in a task. It waits for the data transfer phase to complete then it closes the TCP connection cleanly. When the opening handshake fails, :meth:`fail_connection` does the same. There's no data transfer phase in that case. """ try: # Wait for the data transfer phase to complete. if hasattr(self, "transfer_data_task"): try: await self.transfer_data_task except asyncio.CancelledError: pass # Cancel the keepalive ping task. if hasattr(self, "keepalive_ping_task"): self.keepalive_ping_task.cancel() # A client should wait for a TCP close from the server. if self.is_client and hasattr(self, "transfer_data_task"): if await self.wait_for_connection_lost(): return logger.debug("%s ! timed out waiting for TCP close", self.side) # Half-close the TCP connection if possible (when there's no TLS). if self.writer.can_write_eof(): logger.debug("%s x half-closing TCP connection", self.side) self.writer.write_eof() if await self.wait_for_connection_lost(): return logger.debug("%s ! timed out waiting for TCP close", self.side) finally: # The try/finally ensures that the transport never remains open, # even if this coroutine is canceled (for example). # If connection_lost() was called, the TCP connection is closed. # However, if TLS is enabled, the transport still needs closing. # Else asyncio complains: ResourceWarning: unclosed transport. if self.connection_lost_waiter.done() and not self.secure: return # Close the TCP connection. Buffers are flushed asynchronously. logger.debug("%s x closing TCP connection", self.side) self.writer.close() if await self.wait_for_connection_lost(): return logger.debug("%s ! timed out waiting for TCP close", self.side) # Abort the TCP connection. Buffers are discarded. logger.debug("%s x aborting TCP connection", self.side) # mypy thinks self.writer.transport is a BaseTransport, not a Transport. self.writer.transport.abort() # type: ignore # connection_lost() is called quickly after aborting. await self.wait_for_connection_lost()
[ "async", "def", "close_connection", "(", "self", ")", "->", "None", ":", "try", ":", "# Wait for the data transfer phase to complete.", "if", "hasattr", "(", "self", ",", "\"transfer_data_task\"", ")", ":", "try", ":", "await", "self", ".", "transfer_data_task", "except", "asyncio", ".", "CancelledError", ":", "pass", "# Cancel the keepalive ping task.", "if", "hasattr", "(", "self", ",", "\"keepalive_ping_task\"", ")", ":", "self", ".", "keepalive_ping_task", ".", "cancel", "(", ")", "# A client should wait for a TCP close from the server.", "if", "self", ".", "is_client", "and", "hasattr", "(", "self", ",", "\"transfer_data_task\"", ")", ":", "if", "await", "self", ".", "wait_for_connection_lost", "(", ")", ":", "return", "logger", ".", "debug", "(", "\"%s ! timed out waiting for TCP close\"", ",", "self", ".", "side", ")", "# Half-close the TCP connection if possible (when there's no TLS).", "if", "self", ".", "writer", ".", "can_write_eof", "(", ")", ":", "logger", ".", "debug", "(", "\"%s x half-closing TCP connection\"", ",", "self", ".", "side", ")", "self", ".", "writer", ".", "write_eof", "(", ")", "if", "await", "self", ".", "wait_for_connection_lost", "(", ")", ":", "return", "logger", ".", "debug", "(", "\"%s ! timed out waiting for TCP close\"", ",", "self", ".", "side", ")", "finally", ":", "# The try/finally ensures that the transport never remains open,", "# even if this coroutine is canceled (for example).", "# If connection_lost() was called, the TCP connection is closed.", "# However, if TLS is enabled, the transport still needs closing.", "# Else asyncio complains: ResourceWarning: unclosed transport.", "if", "self", ".", "connection_lost_waiter", ".", "done", "(", ")", "and", "not", "self", ".", "secure", ":", "return", "# Close the TCP connection. Buffers are flushed asynchronously.", "logger", ".", "debug", "(", "\"%s x closing TCP connection\"", ",", "self", ".", "side", ")", "self", ".", "writer", ".", "close", "(", ")", "if", "await", "self", ".", "wait_for_connection_lost", "(", ")", ":", "return", "logger", ".", "debug", "(", "\"%s ! timed out waiting for TCP close\"", ",", "self", ".", "side", ")", "# Abort the TCP connection. Buffers are discarded.", "logger", ".", "debug", "(", "\"%s x aborting TCP connection\"", ",", "self", ".", "side", ")", "# mypy thinks self.writer.transport is a BaseTransport, not a Transport.", "self", ".", "writer", ".", "transport", ".", "abort", "(", ")", "# type: ignore", "# connection_lost() is called quickly after aborting.", "await", "self", ".", "wait_for_connection_lost", "(", ")" ]
7.1.1. Close the WebSocket Connection When the opening handshake succeeds, :meth:`connection_open` starts this coroutine in a task. It waits for the data transfer phase to complete then it closes the TCP connection cleanly. When the opening handshake fails, :meth:`fail_connection` does the same. There's no data transfer phase in that case.
[ "7", ".", "1", ".", "1", ".", "Close", "the", "WebSocket", "Connection" ]
python
train
f3at/feat
src/feat/agencies/common.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agencies/common.py#L294-L317
def call_agent_side(self, method, *args, **kwargs): ''' Call the method, wrap it in Deferred and bind error handler. ''' assert not self._finalize_called, ("Attempt to call agent side code " "after finalize() method has been " "called. Method: %r" % (method, )) ensure_state = kwargs.pop('ensure_state', None) d = defer.Deferred(canceller=self._cancel_agent_side_call) self._agent_jobs.append(d) if ensure_state: # call method only if state check is checks in d.addCallback( lambda _: (self._ensure_state(ensure_state) and method(*args, **kwargs))) else: d.addCallback(defer.drop_param, method, *args, **kwargs) d.addErrback(self._error_handler, method) d.addBoth(defer.bridge_param, self._remove_agent_job, d) time.call_next(d.callback, None) return d
[ "def", "call_agent_side", "(", "self", ",", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "not", "self", ".", "_finalize_called", ",", "(", "\"Attempt to call agent side code \"", "\"after finalize() method has been \"", "\"called. Method: %r\"", "%", "(", "method", ",", ")", ")", "ensure_state", "=", "kwargs", ".", "pop", "(", "'ensure_state'", ",", "None", ")", "d", "=", "defer", ".", "Deferred", "(", "canceller", "=", "self", ".", "_cancel_agent_side_call", ")", "self", ".", "_agent_jobs", ".", "append", "(", "d", ")", "if", "ensure_state", ":", "# call method only if state check is checks in", "d", ".", "addCallback", "(", "lambda", "_", ":", "(", "self", ".", "_ensure_state", "(", "ensure_state", ")", "and", "method", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", ")", "else", ":", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", "d", ".", "addErrback", "(", "self", ".", "_error_handler", ",", "method", ")", "d", ".", "addBoth", "(", "defer", ".", "bridge_param", ",", "self", ".", "_remove_agent_job", ",", "d", ")", "time", ".", "call_next", "(", "d", ".", "callback", ",", "None", ")", "return", "d" ]
Call the method, wrap it in Deferred and bind error handler.
[ "Call", "the", "method", "wrap", "it", "in", "Deferred", "and", "bind", "error", "handler", "." ]
python
train
luckydonald/pytgbot
pytgbot/api_types/sendable/passport.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/sendable/passport.py#L754-L774
def from_array(array): """ Deserialize a new PassportElementErrorFiles from a given dictionary. :return: new PassportElementErrorFiles instance. :rtype: PassportElementErrorFiles """ if array is None or not array: return None # end if assert_type_or_raise(array, dict, parameter_name="array") data = {} data['source'] = u(array.get('source')) data['type'] = u(array.get('type')) data['file_hashes'] = PassportElementErrorFiles._builtin_from_array_list(required_type=unicode_type, value=array.get('file_hashes'), list_level=1) data['message'] = u(array.get('message')) instance = PassportElementErrorFiles(**data) instance._raw = array return instance
[ "def", "from_array", "(", "array", ")", ":", "if", "array", "is", "None", "or", "not", "array", ":", "return", "None", "# end if", "assert_type_or_raise", "(", "array", ",", "dict", ",", "parameter_name", "=", "\"array\"", ")", "data", "=", "{", "}", "data", "[", "'source'", "]", "=", "u", "(", "array", ".", "get", "(", "'source'", ")", ")", "data", "[", "'type'", "]", "=", "u", "(", "array", ".", "get", "(", "'type'", ")", ")", "data", "[", "'file_hashes'", "]", "=", "PassportElementErrorFiles", ".", "_builtin_from_array_list", "(", "required_type", "=", "unicode_type", ",", "value", "=", "array", ".", "get", "(", "'file_hashes'", ")", ",", "list_level", "=", "1", ")", "data", "[", "'message'", "]", "=", "u", "(", "array", ".", "get", "(", "'message'", ")", ")", "instance", "=", "PassportElementErrorFiles", "(", "*", "*", "data", ")", "instance", ".", "_raw", "=", "array", "return", "instance" ]
Deserialize a new PassportElementErrorFiles from a given dictionary. :return: new PassportElementErrorFiles instance. :rtype: PassportElementErrorFiles
[ "Deserialize", "a", "new", "PassportElementErrorFiles", "from", "a", "given", "dictionary", "." ]
python
train
open-mmlab/mmcv
mmcv/video/io.py
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/video/io.py#L288-L332
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True): """Read the frame images from a directory and join them as a video Args: frame_dir (str): The directory containing video frames. video_file (str): Output filename. fps (float): FPS of the output video. fourcc (str): Fourcc of the output video, this should be compatible with the output file type. filename_tmpl (str): Filename template with the index as the variable. start (int): Starting frame index. end (int): Ending frame index. show_progress (bool): Whether to show a progress bar. """ if end == 0: ext = filename_tmpl.split('.')[-1] end = len([name for name in scandir(frame_dir, ext)]) first_file = osp.join(frame_dir, filename_tmpl.format(start)) check_file_exist(first_file, 'The start frame not found: ' + first_file) img = cv2.imread(first_file) height, width = img.shape[:2] resolution = (width, height) vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, resolution) def write_frame(file_idx): filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) img = cv2.imread(filename) vwriter.write(img) if show_progress: track_progress(write_frame, range(start, end)) else: for i in range(start, end): filename = osp.join(frame_dir, filename_tmpl.format(i)) img = cv2.imread(filename) vwriter.write(img) vwriter.release()
[ "def", "frames2video", "(", "frame_dir", ",", "video_file", ",", "fps", "=", "30", ",", "fourcc", "=", "'XVID'", ",", "filename_tmpl", "=", "'{:06d}.jpg'", ",", "start", "=", "0", ",", "end", "=", "0", ",", "show_progress", "=", "True", ")", ":", "if", "end", "==", "0", ":", "ext", "=", "filename_tmpl", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "end", "=", "len", "(", "[", "name", "for", "name", "in", "scandir", "(", "frame_dir", ",", "ext", ")", "]", ")", "first_file", "=", "osp", ".", "join", "(", "frame_dir", ",", "filename_tmpl", ".", "format", "(", "start", ")", ")", "check_file_exist", "(", "first_file", ",", "'The start frame not found: '", "+", "first_file", ")", "img", "=", "cv2", ".", "imread", "(", "first_file", ")", "height", ",", "width", "=", "img", ".", "shape", "[", ":", "2", "]", "resolution", "=", "(", "width", ",", "height", ")", "vwriter", "=", "cv2", ".", "VideoWriter", "(", "video_file", ",", "VideoWriter_fourcc", "(", "*", "fourcc", ")", ",", "fps", ",", "resolution", ")", "def", "write_frame", "(", "file_idx", ")", ":", "filename", "=", "osp", ".", "join", "(", "frame_dir", ",", "filename_tmpl", ".", "format", "(", "file_idx", ")", ")", "img", "=", "cv2", ".", "imread", "(", "filename", ")", "vwriter", ".", "write", "(", "img", ")", "if", "show_progress", ":", "track_progress", "(", "write_frame", ",", "range", "(", "start", ",", "end", ")", ")", "else", ":", "for", "i", "in", "range", "(", "start", ",", "end", ")", ":", "filename", "=", "osp", ".", "join", "(", "frame_dir", ",", "filename_tmpl", ".", "format", "(", "i", ")", ")", "img", "=", "cv2", ".", "imread", "(", "filename", ")", "vwriter", ".", "write", "(", "img", ")", "vwriter", ".", "release", "(", ")" ]
Read the frame images from a directory and join them as a video Args: frame_dir (str): The directory containing video frames. video_file (str): Output filename. fps (float): FPS of the output video. fourcc (str): Fourcc of the output video, this should be compatible with the output file type. filename_tmpl (str): Filename template with the index as the variable. start (int): Starting frame index. end (int): Ending frame index. show_progress (bool): Whether to show a progress bar.
[ "Read", "the", "frame", "images", "from", "a", "directory", "and", "join", "them", "as", "a", "video" ]
python
test
jasonbot/arcrest
arcrest/server.py
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L2052-L2083
def SolveClosestFacility(self, facilities=None, incidents=None, barriers=None, polylineBarriers=None, polygonBarriers=None, attributeParameterValues=None, returnDirections=None, directionsLanguage=None, directionsStyleName=None, directionsLengthUnits=None, directionsTimeAttributeName=None, returnCFRoutes=None, returnFacilities=None, returnIncidents=None, returnBarriers=None, returnPolylineBarriers=None, returnPolygonBarriers=None, facilityReturnType=None, outputLines=None, defaultCutoff=None, defaultTargetFacilityCount=None, travelDirection=None, outSR=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, useHierarchy=None, outputGeometryPrecision=None, outputGeometryPrecisionUnits=None): """The solve operation is performed on a network layer resource of type closest facility.""" raise NotImplementedError()
[ "def", "SolveClosestFacility", "(", "self", ",", "facilities", "=", "None", ",", "incidents", "=", "None", ",", "barriers", "=", "None", ",", "polylineBarriers", "=", "None", ",", "polygonBarriers", "=", "None", ",", "attributeParameterValues", "=", "None", ",", "returnDirections", "=", "None", ",", "directionsLanguage", "=", "None", ",", "directionsStyleName", "=", "None", ",", "directionsLengthUnits", "=", "None", ",", "directionsTimeAttributeName", "=", "None", ",", "returnCFRoutes", "=", "None", ",", "returnFacilities", "=", "None", ",", "returnIncidents", "=", "None", ",", "returnBarriers", "=", "None", ",", "returnPolylineBarriers", "=", "None", ",", "returnPolygonBarriers", "=", "None", ",", "facilityReturnType", "=", "None", ",", "outputLines", "=", "None", ",", "defaultCutoff", "=", "None", ",", "defaultTargetFacilityCount", "=", "None", ",", "travelDirection", "=", "None", ",", "outSR", "=", "None", ",", "impedanceAttributeName", "=", "None", ",", "restrictionAttributeNames", "=", "None", ",", "restrictUTurns", "=", "None", ",", "useHierarchy", "=", "None", ",", "outputGeometryPrecision", "=", "None", ",", "outputGeometryPrecisionUnits", "=", "None", ")", ":", "raise", "NotImplementedError", "(", ")" ]
The solve operation is performed on a network layer resource of type closest facility.
[ "The", "solve", "operation", "is", "performed", "on", "a", "network", "layer", "resource", "of", "type", "closest", "facility", "." ]
python
train
onecodex/onecodex
onecodex/models/collection.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/models/collection.py#L294-L371
def to_otu(self, biom_id=None): """Converts a list of objects associated with a classification result into a `dict` resembling an OTU table. Parameters ---------- biom_id : `string`, optional Optionally specify an `id` field for the generated v1 BIOM file. Returns ------- otu_table : `OrderedDict` A BIOM OTU table, returned as a Python OrderedDict (can be dumped to JSON) """ otu_format = "Biological Observation Matrix 1.0.0" # Note: This is exact format URL is required by https://github.com/biocore/biom-format otu_url = "http://biom-format.org" otu = OrderedDict( { "id": biom_id, "format": otu_format, "format_url": otu_url, "type": "OTU table", "generated_by": "One Codex API V1", "date": datetime.now().isoformat(), "rows": [], "columns": [], "matrix_type": "sparse", "matrix_element_type": "int", } ) rows = defaultdict(dict) tax_ids_to_names = {} for classification in self._classifications: col_id = len(otu["columns"]) # 0 index # Re-encoding the JSON is a bit of a hack, but # we need a ._to_dict() method that properly # resolves references and don't have one at the moment columns_entry = { "id": str(classification.id), "sample_id": str(classification.sample.id), "sample_filename": classification.sample.filename, "metadata": json.loads( classification.sample.metadata._to_json(include_references=False) ), } otu["columns"].append(columns_entry) sample_df = classification.table() for row in sample_df.iterrows(): tax_id = row[1]["tax_id"] tax_ids_to_names[tax_id] = row[1]["name"] rows[tax_id][col_id] = int(row[1]["readcount"]) num_rows = len(rows) num_cols = len(otu["columns"]) otu["shape"] = [num_rows, num_cols] otu["data"] = [] for present_taxa in sorted(rows): # add the row entry row_id = len(otu["rows"]) otu["rows"].append( {"id": present_taxa, "metadata": {"taxonomy": tax_ids_to_names[present_taxa]}} ) for sample_with_hit in rows[present_taxa]: counts = rows[present_taxa][sample_with_hit] otu["data"].append([row_id, sample_with_hit, counts]) return otu
[ "def", "to_otu", "(", "self", ",", "biom_id", "=", "None", ")", ":", "otu_format", "=", "\"Biological Observation Matrix 1.0.0\"", "# Note: This is exact format URL is required by https://github.com/biocore/biom-format", "otu_url", "=", "\"http://biom-format.org\"", "otu", "=", "OrderedDict", "(", "{", "\"id\"", ":", "biom_id", ",", "\"format\"", ":", "otu_format", ",", "\"format_url\"", ":", "otu_url", ",", "\"type\"", ":", "\"OTU table\"", ",", "\"generated_by\"", ":", "\"One Codex API V1\"", ",", "\"date\"", ":", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", ",", "\"rows\"", ":", "[", "]", ",", "\"columns\"", ":", "[", "]", ",", "\"matrix_type\"", ":", "\"sparse\"", ",", "\"matrix_element_type\"", ":", "\"int\"", ",", "}", ")", "rows", "=", "defaultdict", "(", "dict", ")", "tax_ids_to_names", "=", "{", "}", "for", "classification", "in", "self", ".", "_classifications", ":", "col_id", "=", "len", "(", "otu", "[", "\"columns\"", "]", ")", "# 0 index", "# Re-encoding the JSON is a bit of a hack, but", "# we need a ._to_dict() method that properly", "# resolves references and don't have one at the moment", "columns_entry", "=", "{", "\"id\"", ":", "str", "(", "classification", ".", "id", ")", ",", "\"sample_id\"", ":", "str", "(", "classification", ".", "sample", ".", "id", ")", ",", "\"sample_filename\"", ":", "classification", ".", "sample", ".", "filename", ",", "\"metadata\"", ":", "json", ".", "loads", "(", "classification", ".", "sample", ".", "metadata", ".", "_to_json", "(", "include_references", "=", "False", ")", ")", ",", "}", "otu", "[", "\"columns\"", "]", ".", "append", "(", "columns_entry", ")", "sample_df", "=", "classification", ".", "table", "(", ")", "for", "row", "in", "sample_df", ".", "iterrows", "(", ")", ":", "tax_id", "=", "row", "[", "1", "]", "[", "\"tax_id\"", "]", "tax_ids_to_names", "[", "tax_id", "]", "=", "row", "[", "1", "]", "[", "\"name\"", "]", "rows", "[", "tax_id", "]", "[", "col_id", "]", "=", "int", "(", "row", "[", "1", "]", "[", "\"readcount\"", "]", ")", "num_rows", "=", "len", "(", "rows", ")", "num_cols", "=", "len", "(", "otu", "[", "\"columns\"", "]", ")", "otu", "[", "\"shape\"", "]", "=", "[", "num_rows", ",", "num_cols", "]", "otu", "[", "\"data\"", "]", "=", "[", "]", "for", "present_taxa", "in", "sorted", "(", "rows", ")", ":", "# add the row entry", "row_id", "=", "len", "(", "otu", "[", "\"rows\"", "]", ")", "otu", "[", "\"rows\"", "]", ".", "append", "(", "{", "\"id\"", ":", "present_taxa", ",", "\"metadata\"", ":", "{", "\"taxonomy\"", ":", "tax_ids_to_names", "[", "present_taxa", "]", "}", "}", ")", "for", "sample_with_hit", "in", "rows", "[", "present_taxa", "]", ":", "counts", "=", "rows", "[", "present_taxa", "]", "[", "sample_with_hit", "]", "otu", "[", "\"data\"", "]", ".", "append", "(", "[", "row_id", ",", "sample_with_hit", ",", "counts", "]", ")", "return", "otu" ]
Converts a list of objects associated with a classification result into a `dict` resembling an OTU table. Parameters ---------- biom_id : `string`, optional Optionally specify an `id` field for the generated v1 BIOM file. Returns ------- otu_table : `OrderedDict` A BIOM OTU table, returned as a Python OrderedDict (can be dumped to JSON)
[ "Converts", "a", "list", "of", "objects", "associated", "with", "a", "classification", "result", "into", "a", "dict", "resembling", "an", "OTU", "table", "." ]
python
train
RedHatInsights/insights-core
insights/client/connection.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L171-L245
def get_proxies(self): """ Determine proxy configuration """ # Get proxy from ENV or Config proxies = None proxy_auth = None no_proxy = os.environ.get('NO_PROXY') logger.debug("NO PROXY: %s", no_proxy) # CONF PROXY TAKES PRECEDENCE OVER ENV PROXY conf_proxy = self.config.proxy if ((conf_proxy is not None and conf_proxy.lower() != 'None'.lower() and conf_proxy != "")): if '@' in conf_proxy: scheme = conf_proxy.split(':')[0] + '://' logger.debug("Proxy Scheme: %s", scheme) location = conf_proxy.split('@')[1] logger.debug("Proxy Location: %s", location) username = conf_proxy.split( '@')[0].split(':')[1].replace('/', '') logger.debug("Proxy User: %s", username) password = conf_proxy.split('@')[0].split(':')[2] proxy_auth = requests.auth._basic_auth_str(username, password) conf_proxy = scheme + location logger.debug("CONF Proxy: %s", conf_proxy) proxies = {"https": conf_proxy} # HANDLE NO PROXY CONF PROXY EXCEPTION VERBIAGE if no_proxy and conf_proxy: logger.debug("You have environment variable NO_PROXY set " "as well as 'proxy' set in your configuration file. " "NO_PROXY environment variable will be ignored.") # IF NO CONF PROXY, GET ENV PROXY AND NO PROXY if proxies is None: env_proxy = os.environ.get('HTTPS_PROXY') if env_proxy: if '@' in env_proxy: scheme = env_proxy.split(':')[0] + '://' logger.debug("Proxy Scheme: %s", scheme) location = env_proxy.split('@')[1] logger.debug("Proxy Location: %s", location) username = env_proxy.split('@')[0].split(':')[1].replace('/', '') logger.debug("Proxy User: %s", username) password = env_proxy.split('@')[0].split(':')[2] proxy_auth = requests.auth._basic_auth_str(username, password) env_proxy = scheme + location logger.debug("ENV Proxy: %s", env_proxy) proxies = {"https": env_proxy} if no_proxy: insights_service_host = urlparse(self.base_url).hostname logger.debug('Found NO_PROXY set. Checking NO_PROXY %s against base URL %s.', no_proxy, insights_service_host) for no_proxy_host in no_proxy.split(','): logger.debug('Checking %s against %s', no_proxy_host, insights_service_host) if no_proxy_host == '*': proxies = None proxy_auth = None logger.debug('Found NO_PROXY asterisk(*) wildcard, disabling all proxies.') break elif no_proxy_host.startswith('.') or no_proxy_host.startswith('*'): if insights_service_host.endswith(no_proxy_host.replace('*', '')): proxies = None proxy_auth = None logger.debug('Found NO_PROXY range %s matching %s', no_proxy_host, insights_service_host) break elif no_proxy_host == insights_service_host: proxies = None proxy_auth = None logger.debug('Found NO_PROXY %s exactly matching %s', no_proxy_host, insights_service_host) break self.proxies = proxies self.proxy_auth = proxy_auth
[ "def", "get_proxies", "(", "self", ")", ":", "# Get proxy from ENV or Config", "proxies", "=", "None", "proxy_auth", "=", "None", "no_proxy", "=", "os", ".", "environ", ".", "get", "(", "'NO_PROXY'", ")", "logger", ".", "debug", "(", "\"NO PROXY: %s\"", ",", "no_proxy", ")", "# CONF PROXY TAKES PRECEDENCE OVER ENV PROXY", "conf_proxy", "=", "self", ".", "config", ".", "proxy", "if", "(", "(", "conf_proxy", "is", "not", "None", "and", "conf_proxy", ".", "lower", "(", ")", "!=", "'None'", ".", "lower", "(", ")", "and", "conf_proxy", "!=", "\"\"", ")", ")", ":", "if", "'@'", "in", "conf_proxy", ":", "scheme", "=", "conf_proxy", ".", "split", "(", "':'", ")", "[", "0", "]", "+", "'://'", "logger", ".", "debug", "(", "\"Proxy Scheme: %s\"", ",", "scheme", ")", "location", "=", "conf_proxy", ".", "split", "(", "'@'", ")", "[", "1", "]", "logger", ".", "debug", "(", "\"Proxy Location: %s\"", ",", "location", ")", "username", "=", "conf_proxy", ".", "split", "(", "'@'", ")", "[", "0", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "replace", "(", "'/'", ",", "''", ")", "logger", ".", "debug", "(", "\"Proxy User: %s\"", ",", "username", ")", "password", "=", "conf_proxy", ".", "split", "(", "'@'", ")", "[", "0", "]", ".", "split", "(", "':'", ")", "[", "2", "]", "proxy_auth", "=", "requests", ".", "auth", ".", "_basic_auth_str", "(", "username", ",", "password", ")", "conf_proxy", "=", "scheme", "+", "location", "logger", ".", "debug", "(", "\"CONF Proxy: %s\"", ",", "conf_proxy", ")", "proxies", "=", "{", "\"https\"", ":", "conf_proxy", "}", "# HANDLE NO PROXY CONF PROXY EXCEPTION VERBIAGE", "if", "no_proxy", "and", "conf_proxy", ":", "logger", ".", "debug", "(", "\"You have environment variable NO_PROXY set \"", "\"as well as 'proxy' set in your configuration file. \"", "\"NO_PROXY environment variable will be ignored.\"", ")", "# IF NO CONF PROXY, GET ENV PROXY AND NO PROXY", "if", "proxies", "is", "None", ":", "env_proxy", "=", "os", ".", "environ", ".", "get", "(", "'HTTPS_PROXY'", ")", "if", "env_proxy", ":", "if", "'@'", "in", "env_proxy", ":", "scheme", "=", "env_proxy", ".", "split", "(", "':'", ")", "[", "0", "]", "+", "'://'", "logger", ".", "debug", "(", "\"Proxy Scheme: %s\"", ",", "scheme", ")", "location", "=", "env_proxy", ".", "split", "(", "'@'", ")", "[", "1", "]", "logger", ".", "debug", "(", "\"Proxy Location: %s\"", ",", "location", ")", "username", "=", "env_proxy", ".", "split", "(", "'@'", ")", "[", "0", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ".", "replace", "(", "'/'", ",", "''", ")", "logger", ".", "debug", "(", "\"Proxy User: %s\"", ",", "username", ")", "password", "=", "env_proxy", ".", "split", "(", "'@'", ")", "[", "0", "]", ".", "split", "(", "':'", ")", "[", "2", "]", "proxy_auth", "=", "requests", ".", "auth", ".", "_basic_auth_str", "(", "username", ",", "password", ")", "env_proxy", "=", "scheme", "+", "location", "logger", ".", "debug", "(", "\"ENV Proxy: %s\"", ",", "env_proxy", ")", "proxies", "=", "{", "\"https\"", ":", "env_proxy", "}", "if", "no_proxy", ":", "insights_service_host", "=", "urlparse", "(", "self", ".", "base_url", ")", ".", "hostname", "logger", ".", "debug", "(", "'Found NO_PROXY set. 
Checking NO_PROXY %s against base URL %s.'", ",", "no_proxy", ",", "insights_service_host", ")", "for", "no_proxy_host", "in", "no_proxy", ".", "split", "(", "','", ")", ":", "logger", ".", "debug", "(", "'Checking %s against %s'", ",", "no_proxy_host", ",", "insights_service_host", ")", "if", "no_proxy_host", "==", "'*'", ":", "proxies", "=", "None", "proxy_auth", "=", "None", "logger", ".", "debug", "(", "'Found NO_PROXY asterisk(*) wildcard, disabling all proxies.'", ")", "break", "elif", "no_proxy_host", ".", "startswith", "(", "'.'", ")", "or", "no_proxy_host", ".", "startswith", "(", "'*'", ")", ":", "if", "insights_service_host", ".", "endswith", "(", "no_proxy_host", ".", "replace", "(", "'*'", ",", "''", ")", ")", ":", "proxies", "=", "None", "proxy_auth", "=", "None", "logger", ".", "debug", "(", "'Found NO_PROXY range %s matching %s'", ",", "no_proxy_host", ",", "insights_service_host", ")", "break", "elif", "no_proxy_host", "==", "insights_service_host", ":", "proxies", "=", "None", "proxy_auth", "=", "None", "logger", ".", "debug", "(", "'Found NO_PROXY %s exactly matching %s'", ",", "no_proxy_host", ",", "insights_service_host", ")", "break", "self", ".", "proxies", "=", "proxies", "self", ".", "proxy_auth", "=", "proxy_auth" ]
Determine proxy configuration
[ "Determine", "proxy", "configuration" ]
python
train
ellmetha/django-machina
machina/apps/forum_conversation/forms.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/forms.py#L79-L87
def clean(self): """ Validates the form. """ if not self.instance.pk: # Only set user on post creation if not self.user.is_anonymous: self.instance.poster = self.user else: self.instance.anonymous_key = get_anonymous_user_forum_key(self.user) return super().clean()
[ "def", "clean", "(", "self", ")", ":", "if", "not", "self", ".", "instance", ".", "pk", ":", "# Only set user on post creation", "if", "not", "self", ".", "user", ".", "is_anonymous", ":", "self", ".", "instance", ".", "poster", "=", "self", ".", "user", "else", ":", "self", ".", "instance", ".", "anonymous_key", "=", "get_anonymous_user_forum_key", "(", "self", ".", "user", ")", "return", "super", "(", ")", ".", "clean", "(", ")" ]
Validates the form.
[ "Validates", "the", "form", "." ]
python
train
xtuml/pyxtuml
bridgepoint/ooaofooa.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/ooaofooa.py#L521-L541
def build_component(self, name=None, derived_attributes=False): ''' Instantiate and build a component from ooaofooa named *name* as a pyxtuml model. Classes, associations, attributes and unique identifers, i.e. O_OBJ, R_REL, O_ATTR in ooaofooa, are defined in the resulting pyxtuml model. Optionally, control whether *derived attributes* shall be mapped into the resulting pyxtuml model as attributes or not. Futhermore, if no *name* is provided, the entire content of the ooaofooa model is instantiated into the pyxtuml model. ''' mm = self.build_metamodel() c_c = mm.select_any('C_C', where(Name=name)) if c_c: return mk_component(mm, c_c, derived_attributes) elif name: raise OoaOfOoaException('Unable to find the component %s' % name) else: return mk_component(mm, c_c, derived_attributes)
[ "def", "build_component", "(", "self", ",", "name", "=", "None", ",", "derived_attributes", "=", "False", ")", ":", "mm", "=", "self", ".", "build_metamodel", "(", ")", "c_c", "=", "mm", ".", "select_any", "(", "'C_C'", ",", "where", "(", "Name", "=", "name", ")", ")", "if", "c_c", ":", "return", "mk_component", "(", "mm", ",", "c_c", ",", "derived_attributes", ")", "elif", "name", ":", "raise", "OoaOfOoaException", "(", "'Unable to find the component %s'", "%", "name", ")", "else", ":", "return", "mk_component", "(", "mm", ",", "c_c", ",", "derived_attributes", ")" ]
Instantiate and build a component from ooaofooa named *name* as a pyxtuml model. Classes, associations, attributes and unique identifers, i.e. O_OBJ, R_REL, O_ATTR in ooaofooa, are defined in the resulting pyxtuml model. Optionally, control whether *derived attributes* shall be mapped into the resulting pyxtuml model as attributes or not. Futhermore, if no *name* is provided, the entire content of the ooaofooa model is instantiated into the pyxtuml model.
[ "Instantiate", "and", "build", "a", "component", "from", "ooaofooa", "named", "*", "name", "*", "as", "a", "pyxtuml", "model", ".", "Classes", "associations", "attributes", "and", "unique", "identifers", "i", ".", "e", ".", "O_OBJ", "R_REL", "O_ATTR", "in", "ooaofooa", "are", "defined", "in", "the", "resulting", "pyxtuml", "model", ".", "Optionally", "control", "whether", "*", "derived", "attributes", "*", "shall", "be", "mapped", "into", "the", "resulting", "pyxtuml", "model", "as", "attributes", "or", "not", ".", "Futhermore", "if", "no", "*", "name", "*", "is", "provided", "the", "entire", "content", "of", "the", "ooaofooa", "model", "is", "instantiated", "into", "the", "pyxtuml", "model", "." ]
python
test
caseyjlaw/sdmreader
sdmreader/sdmreader.py
https://github.com/caseyjlaw/sdmreader/blob/b6c3498f1915138727819715ee00d2c46353382d/sdmreader/sdmreader.py#L469-L483
def calc_intsize(self): """ Calculates the size of an integration (cross + auto) in bytes """ # assume first cross blob starts after headxml and second is one int of bytes later for k in self.binarychunks.iterkeys(): if int(k.split('/')[3]) == 1 and 'cross' in k.split('/')[-1]: headsize = self.binarychunks[k] break for k in self.binarychunks.iterkeys(): if int(k.split('/')[3]) == 2 and 'cross' in k.split('/')[-1]: intsize = self.binarychunks[k] - headsize break return (headsize, intsize)
[ "def", "calc_intsize", "(", "self", ")", ":", "# assume first cross blob starts after headxml and second is one int of bytes later\r", "for", "k", "in", "self", ".", "binarychunks", ".", "iterkeys", "(", ")", ":", "if", "int", "(", "k", ".", "split", "(", "'/'", ")", "[", "3", "]", ")", "==", "1", "and", "'cross'", "in", "k", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ":", "headsize", "=", "self", ".", "binarychunks", "[", "k", "]", "break", "for", "k", "in", "self", ".", "binarychunks", ".", "iterkeys", "(", ")", ":", "if", "int", "(", "k", ".", "split", "(", "'/'", ")", "[", "3", "]", ")", "==", "2", "and", "'cross'", "in", "k", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ":", "intsize", "=", "self", ".", "binarychunks", "[", "k", "]", "-", "headsize", "break", "return", "(", "headsize", ",", "intsize", ")" ]
Calculates the size of an integration (cross + auto) in bytes
[ "Calculates", "the", "size", "of", "an", "integration", "(", "cross", "+", "auto", ")", "in", "bytes" ]
python
train
base4sistemas/satcfe
satcfe/resposta/padrao.py
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L104-L112
def configurar_interface_de_rede(retorno): """Constrói uma :class:`RespostaSAT` para o retorno (unicode) da função :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`. """ resposta = analisar_retorno(forcar_unicode(retorno), funcao='ConfigurarInterfaceDeRede') if resposta.EEEEE not in ('12000',): raise ExcecaoRespostaSAT(resposta) return resposta
[ "def", "configurar_interface_de_rede", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'ConfigurarInterfaceDeRede'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'12000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Constrói uma :class:`RespostaSAT` para o retorno (unicode) da função :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`.
[ "Constrói", "uma", ":", "class", ":", "RespostaSAT", "para", "o", "retorno", "(", "unicode", ")", "da", "função", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "configurar_interface_de_rede", "." ]
python
train
mbr/latex
latex/build.py
https://github.com/mbr/latex/blob/f96cb9125b4f570fc2ffc5ae628e2f4069b2f3cf/latex/build.py#L207-L235
def build_pdf(source, texinputs=[], builder=None): """Builds a LaTeX source to PDF. Will automatically instantiate an available builder (or raise a :class:`exceptions.RuntimeError` if none are available) and build the supplied source with it. Parameters are passed on to the builder's :meth:`~latex.build.LatexBuilder.build_pdf` function. :param builder: Specify which builder should be used - ``latexmk``, ``pdflatex`` or ``xelatexmk``. """ if builder is None: builders = PREFERRED_BUILDERS elif builder not in BUILDERS: raise RuntimeError('Invalid Builder specified') else: builders = (builder, ) for bld in builders: bld_cls = BUILDERS[bld] builder = bld_cls() if not builder.is_available(): continue return builder.build_pdf(source, texinputs) else: raise RuntimeError('No available builder could be instantiated. ' 'Please make sure LaTeX is installed.')
[ "def", "build_pdf", "(", "source", ",", "texinputs", "=", "[", "]", ",", "builder", "=", "None", ")", ":", "if", "builder", "is", "None", ":", "builders", "=", "PREFERRED_BUILDERS", "elif", "builder", "not", "in", "BUILDERS", ":", "raise", "RuntimeError", "(", "'Invalid Builder specified'", ")", "else", ":", "builders", "=", "(", "builder", ",", ")", "for", "bld", "in", "builders", ":", "bld_cls", "=", "BUILDERS", "[", "bld", "]", "builder", "=", "bld_cls", "(", ")", "if", "not", "builder", ".", "is_available", "(", ")", ":", "continue", "return", "builder", ".", "build_pdf", "(", "source", ",", "texinputs", ")", "else", ":", "raise", "RuntimeError", "(", "'No available builder could be instantiated. '", "'Please make sure LaTeX is installed.'", ")" ]
Builds a LaTeX source to PDF. Will automatically instantiate an available builder (or raise a :class:`exceptions.RuntimeError` if none are available) and build the supplied source with it. Parameters are passed on to the builder's :meth:`~latex.build.LatexBuilder.build_pdf` function. :param builder: Specify which builder should be used - ``latexmk``, ``pdflatex`` or ``xelatexmk``.
[ "Builds", "a", "LaTeX", "source", "to", "PDF", "." ]
python
train
a1ezzz/wasp-general
wasp_general/thread.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/thread.py#L76-L93
def critical_section_lock(lock=None, blocking=True, timeout=None, raise_exception=True): """ An a wrapper for :func:`.critical_section_dynamic_lock` function call, but uses a static lock object instead of a function that returns a lock with which a function protection will be made :param lock: lock with which a function will be protected :param blocking: same as blocking in :func:`.critical_section_dynamic_lock` function :param timeout: same as timeout in :func:`.critical_section_dynamic_lock` function :param raise_exception: same as raise_exception in :func:`.critical_section_dynamic_lock` function :return: decorator with which a target function may be protected """ def lock_getter(*args, **kwargs): return lock return critical_section_dynamic_lock( lock_fn=lock_getter, blocking=blocking, timeout=timeout, raise_exception=raise_exception )
[ "def", "critical_section_lock", "(", "lock", "=", "None", ",", "blocking", "=", "True", ",", "timeout", "=", "None", ",", "raise_exception", "=", "True", ")", ":", "def", "lock_getter", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "lock", "return", "critical_section_dynamic_lock", "(", "lock_fn", "=", "lock_getter", ",", "blocking", "=", "blocking", ",", "timeout", "=", "timeout", ",", "raise_exception", "=", "raise_exception", ")" ]
An a wrapper for :func:`.critical_section_dynamic_lock` function call, but uses a static lock object instead of a function that returns a lock with which a function protection will be made :param lock: lock with which a function will be protected :param blocking: same as blocking in :func:`.critical_section_dynamic_lock` function :param timeout: same as timeout in :func:`.critical_section_dynamic_lock` function :param raise_exception: same as raise_exception in :func:`.critical_section_dynamic_lock` function :return: decorator with which a target function may be protected
[ "An", "a", "wrapper", "for", ":", "func", ":", ".", "critical_section_dynamic_lock", "function", "call", "but", "uses", "a", "static", "lock", "object", "instead", "of", "a", "function", "that", "returns", "a", "lock", "with", "which", "a", "function", "protection", "will", "be", "made" ]
python
train
saltstack/salt
salt/utils/hashutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/hashutils.py#L87-L93
def md5_digest(instr): ''' Generate an md5 hash of a given string. ''' return salt.utils.stringutils.to_unicode( hashlib.md5(salt.utils.stringutils.to_bytes(instr)).hexdigest() )
[ "def", "md5_digest", "(", "instr", ")", ":", "return", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "hashlib", ".", "md5", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "instr", ")", ")", ".", "hexdigest", "(", ")", ")" ]
Generate an md5 hash of a given string.
[ "Generate", "an", "md5", "hash", "of", "a", "given", "string", "." ]
python
train
openpaperwork/paperwork-backend
paperwork_backend/common/doc.py
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/common/doc.py#L308-L324
def __get_name(self): """ Returns the localized name of the document (see l10n) """ if self.is_new: return _("New document") try: split = self.__docid.split("_") short_docid = "_".join(split[:3]) datetime_obj = datetime.datetime.strptime( short_docid, self.DOCNAME_FORMAT) final = datetime_obj.strftime("%x") return final except Exception as exc: logger.error("Unable to parse document id [%s]: %s" % (self.docid, exc)) return self.docid
[ "def", "__get_name", "(", "self", ")", ":", "if", "self", ".", "is_new", ":", "return", "_", "(", "\"New document\"", ")", "try", ":", "split", "=", "self", ".", "__docid", ".", "split", "(", "\"_\"", ")", "short_docid", "=", "\"_\"", ".", "join", "(", "split", "[", ":", "3", "]", ")", "datetime_obj", "=", "datetime", ".", "datetime", ".", "strptime", "(", "short_docid", ",", "self", ".", "DOCNAME_FORMAT", ")", "final", "=", "datetime_obj", ".", "strftime", "(", "\"%x\"", ")", "return", "final", "except", "Exception", "as", "exc", ":", "logger", ".", "error", "(", "\"Unable to parse document id [%s]: %s\"", "%", "(", "self", ".", "docid", ",", "exc", ")", ")", "return", "self", ".", "docid" ]
Returns the localized name of the document (see l10n)
[ "Returns", "the", "localized", "name", "of", "the", "document", "(", "see", "l10n", ")" ]
python
train
openstack/quark
quark/plugin_modules/floating_ips.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L394-L420
def update_floatingip(context, id, content): """Update an existing floating IP. :param context: neutron api request context. :param id: id of the floating ip :param content: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. :returns: Dictionary containing details for the new floating IP. If values are declared in the fields parameter, then only those keys will be present. """ LOG.info('update_floatingip %s for tenant %s and body %s' % (id, context.tenant_id, content)) if 'port_id' not in content: raise n_exc.BadRequest(resource='floating_ip', msg='port_id is required.') requested_ports = [] if content.get('port_id'): requested_ports = [{'port_id': content.get('port_id')}] flip = _update_flip(context, id, ip_types.FLOATING, requested_ports) return v._make_floating_ip_dict(flip)
[ "def", "update_floatingip", "(", "context", ",", "id", ",", "content", ")", ":", "LOG", ".", "info", "(", "'update_floatingip %s for tenant %s and body %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "content", ")", ")", "if", "'port_id'", "not", "in", "content", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "'floating_ip'", ",", "msg", "=", "'port_id is required.'", ")", "requested_ports", "=", "[", "]", "if", "content", ".", "get", "(", "'port_id'", ")", ":", "requested_ports", "=", "[", "{", "'port_id'", ":", "content", ".", "get", "(", "'port_id'", ")", "}", "]", "flip", "=", "_update_flip", "(", "context", ",", "id", ",", "ip_types", ".", "FLOATING", ",", "requested_ports", ")", "return", "v", ".", "_make_floating_ip_dict", "(", "flip", ")" ]
Update an existing floating IP. :param context: neutron api request context. :param id: id of the floating ip :param content: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. :returns: Dictionary containing details for the new floating IP. If values are declared in the fields parameter, then only those keys will be present.
[ "Update", "an", "existing", "floating", "IP", "." ]
python
valid
ThreatConnect-Inc/tcex
tcex/tcex.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex.py#L724-L733
def playbook(self): """Include the Playbook Module. .. Note:: Playbook methods can be accessed using ``tcex.playbook.<method>``. """ if self._playbook is None: from .tcex_playbook import TcExPlaybook self._playbook = TcExPlaybook(self) return self._playbook
[ "def", "playbook", "(", "self", ")", ":", "if", "self", ".", "_playbook", "is", "None", ":", "from", ".", "tcex_playbook", "import", "TcExPlaybook", "self", ".", "_playbook", "=", "TcExPlaybook", "(", "self", ")", "return", "self", ".", "_playbook" ]
Include the Playbook Module. .. Note:: Playbook methods can be accessed using ``tcex.playbook.<method>``.
[ "Include", "the", "Playbook", "Module", "." ]
python
train
harlowja/constructs
constructs/tree.py
https://github.com/harlowja/constructs/blob/53f20a8422bbd56294d5c0161081cb5875511fab/constructs/tree.py#L119-L128
def child_count(self, only_direct=True): """Returns how many children this node has, either only the direct children of this node or inclusive of all children nodes of this node. """ if not only_direct: count = 0 for _node in self.dfs_iter(): count += 1 return count return len(self._children)
[ "def", "child_count", "(", "self", ",", "only_direct", "=", "True", ")", ":", "if", "not", "only_direct", ":", "count", "=", "0", "for", "_node", "in", "self", ".", "dfs_iter", "(", ")", ":", "count", "+=", "1", "return", "count", "return", "len", "(", "self", ".", "_children", ")" ]
Returns how many children this node has, either only the direct children of this node or inclusive of all children nodes of this node.
[ "Returns", "how", "many", "children", "this", "node", "has", "either", "only", "the", "direct", "children", "of", "this", "node", "or", "inclusive", "of", "all", "children", "nodes", "of", "this", "node", "." ]
python
train
programa-stic/barf-project
barf/analysis/gadgets/finder.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/gadgets/finder.py#L74-L88
def find(self, start_address, end_address, byte_depth=20, instrs_depth=2): """Find gadgets. """ self._max_bytes = byte_depth self._instrs_depth = instrs_depth if self._architecture == ARCH_X86: candidates = self._find_x86_candidates(start_address, end_address) elif self._architecture == ARCH_ARM: candidates = self._find_arm_candidates(start_address, end_address) else: raise Exception("Architecture not supported.") # Sort and return. return sorted(candidates, key=lambda g: g.address)
[ "def", "find", "(", "self", ",", "start_address", ",", "end_address", ",", "byte_depth", "=", "20", ",", "instrs_depth", "=", "2", ")", ":", "self", ".", "_max_bytes", "=", "byte_depth", "self", ".", "_instrs_depth", "=", "instrs_depth", "if", "self", ".", "_architecture", "==", "ARCH_X86", ":", "candidates", "=", "self", ".", "_find_x86_candidates", "(", "start_address", ",", "end_address", ")", "elif", "self", ".", "_architecture", "==", "ARCH_ARM", ":", "candidates", "=", "self", ".", "_find_arm_candidates", "(", "start_address", ",", "end_address", ")", "else", ":", "raise", "Exception", "(", "\"Architecture not supported.\"", ")", "# Sort and return.", "return", "sorted", "(", "candidates", ",", "key", "=", "lambda", "g", ":", "g", ".", "address", ")" ]
Find gadgets.
[ "Find", "gadgets", "." ]
python
train
scanny/python-pptx
pptx/oxml/text.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/text.py#L273-L281
def text(self): """ The text of the ``<a:t>`` child element. """ t = self.t if t is None: return u'' text = t.text return to_unicode(text) if text is not None else u''
[ "def", "text", "(", "self", ")", ":", "t", "=", "self", ".", "t", "if", "t", "is", "None", ":", "return", "u''", "text", "=", "t", ".", "text", "return", "to_unicode", "(", "text", ")", "if", "text", "is", "not", "None", "else", "u''" ]
The text of the ``<a:t>`` child element.
[ "The", "text", "of", "the", "<a", ":", "t", ">", "child", "element", "." ]
python
train
uogbuji/versa
tools/py/contrib/datachefids.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/contrib/datachefids.py#L35-L55
def simple_hashstring(obj, bits=64): ''' Creates a simple hash in brief string form from obj bits is an optional bit width, defaulting to 64, and should be in multiples of 8 with a maximum of 64 >>> from bibframe.contrib.datachefids import simple_hashstring >>> simple_hashstring("The quick brown fox jumps over the lazy dog") 'bBsHvHu8S-M' >>> simple_hashstring("The quick brown fox jumps over the lazy dog", bits=48) 'B7x7vEvj' ''' #Useful discussion of techniques here: http://stackoverflow.com/questions/1303021/shortest-hash-in-python-to-name-cache-files #Use MurmurHash3 #Get a 64-bit integer, the first half of the 128-bit tuple from mmh and then bit shift it to get the desired bit length basis = mmh3.hash64(str(obj))[0] >> (64-bits) if bits == 64: raw_hash = struct.pack('!q', basis) else: raw_hash = struct.pack('!q', basis)[:-int((64-bits)/8)] hashstr = base64.urlsafe_b64encode(raw_hash).rstrip(b"=") return hashstr.decode('ascii')
[ "def", "simple_hashstring", "(", "obj", ",", "bits", "=", "64", ")", ":", "#Useful discussion of techniques here: http://stackoverflow.com/questions/1303021/shortest-hash-in-python-to-name-cache-files", "#Use MurmurHash3", "#Get a 64-bit integer, the first half of the 128-bit tuple from mmh and then bit shift it to get the desired bit length", "basis", "=", "mmh3", ".", "hash64", "(", "str", "(", "obj", ")", ")", "[", "0", "]", ">>", "(", "64", "-", "bits", ")", "if", "bits", "==", "64", ":", "raw_hash", "=", "struct", ".", "pack", "(", "'!q'", ",", "basis", ")", "else", ":", "raw_hash", "=", "struct", ".", "pack", "(", "'!q'", ",", "basis", ")", "[", ":", "-", "int", "(", "(", "64", "-", "bits", ")", "/", "8", ")", "]", "hashstr", "=", "base64", ".", "urlsafe_b64encode", "(", "raw_hash", ")", ".", "rstrip", "(", "b\"=\"", ")", "return", "hashstr", ".", "decode", "(", "'ascii'", ")" ]
Creates a simple hash in brief string form from obj bits is an optional bit width, defaulting to 64, and should be in multiples of 8 with a maximum of 64 >>> from bibframe.contrib.datachefids import simple_hashstring >>> simple_hashstring("The quick brown fox jumps over the lazy dog") 'bBsHvHu8S-M' >>> simple_hashstring("The quick brown fox jumps over the lazy dog", bits=48) 'B7x7vEvj'
[ "Creates", "a", "simple", "hash", "in", "brief", "string", "form", "from", "obj", "bits", "is", "an", "optional", "bit", "width", "defaulting", "to", "64", "and", "should", "be", "in", "multiples", "of", "8", "with", "a", "maximum", "of", "64" ]
python
train
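The simple_hashstring record above packs a truncated 64-bit hash into raw bytes and URL-safe base64-encodes them with the padding stripped. Below is a minimal standalone sketch of the same packing and encoding step, substituting hashlib.blake2b for mmh3 so it runs with the standard library alone (an assumed stand-in, so the digests differ from the record's doctest output):

import base64
import hashlib
import struct

def short_hashstring(obj, bits=64):
    # Derive a signed 64-bit integer from the object's string form (stand-in for mmh3.hash64).
    digest = hashlib.blake2b(str(obj).encode("utf-8"), digest_size=8).digest()
    basis = struct.unpack("!q", digest)[0] >> (64 - bits)
    # Pack back to bytes, keep only bits/8 bytes, then base64 without padding.
    raw = struct.pack("!q", basis)
    if bits < 64:
        raw = raw[:-((64 - bits) // 8)]
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

print(short_hashstring("The quick brown fox jumps over the lazy dog"))      # 11 characters
print(short_hashstring("The quick brown fox jumps over the lazy dog", 48))  # 8 characters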
DavidMStraub/pylha
pylha/parse.py
https://github.com/DavidMStraub/pylha/blob/8d65074609321e5eaf97fe962c56f6d79a3ad2b6/pylha/parse.py#L80-L88
def load(stream): """Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.""" if isinstance(stream, str): string = stream else: string = stream.read() tokens = tokenize(string) return parse(tokens)
[ "def", "load", "(", "stream", ")", ":", "if", "isinstance", "(", "stream", ",", "str", ")", ":", "string", "=", "stream", "else", ":", "string", "=", "stream", ".", "read", "(", ")", "tokens", "=", "tokenize", "(", "string", ")", "return", "parse", "(", "tokens", ")" ]
Parse the LHA document and produce the corresponding Python object. Accepts a string or a file-like object.
[ "Parse", "the", "LHA", "document", "and", "produce", "the", "corresponding", "Python", "object", ".", "Accepts", "a", "string", "or", "a", "file", "-", "like", "object", "." ]
python
train
pybel/pybel
src/pybel/parser/utils.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/utils.py#L57-L75
def one_of_tags(tags, canonical_tag, name=None): """Define the tags usable in the :class:`BelParser`. For example, statements like ``g(HGNC:SNCA)`` can be expressed also as ``geneAbundance(HGNC:SNCA)``. The language must define multiple different tags that get normalized to the same thing. :param list[str] tags: a list of strings that are the tags for a function. For example, ['g', 'geneAbundance'] for the abundance of a gene :param str canonical_tag: the preferred tag name. Does not have to be one of the tags. For example, 'GeneAbundance' (note capitalization) is used for the abundance of a gene :param str name: this is the key under which the value for this tag is put in the PyParsing framework. :rtype: :class:`pyparsing.ParseElement` """ element = oneOf(tags).setParseAction(replaceWith(canonical_tag)) if name is None: return element return element.setResultsName(name)
[ "def", "one_of_tags", "(", "tags", ",", "canonical_tag", ",", "name", "=", "None", ")", ":", "element", "=", "oneOf", "(", "tags", ")", ".", "setParseAction", "(", "replaceWith", "(", "canonical_tag", ")", ")", "if", "name", "is", "None", ":", "return", "element", "return", "element", ".", "setResultsName", "(", "name", ")" ]
Define the tags usable in the :class:`BelParser`. For example, statements like ``g(HGNC:SNCA)`` can be expressed also as ``geneAbundance(HGNC:SNCA)``. The language must define multiple different tags that get normalized to the same thing. :param list[str] tags: a list of strings that are the tags for a function. For example, ['g', 'geneAbundance'] for the abundance of a gene :param str canonical_tag: the preferred tag name. Does not have to be one of the tags. For example, 'GeneAbundance' (note capitalization) is used for the abundance of a gene :param str name: this is the key under which the value for this tag is put in the PyParsing framework. :rtype: :class:`pyparsing.ParseElement`
[ "Define", "the", "tags", "usable", "in", "the", ":", "class", ":", "BelParser", "." ]
python
train
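The one_of_tags record above collapses several surface spellings of a BEL function into one canonical tag with pyparsing's oneOf and replaceWith, optionally binding the result under a results name. A hedged usage sketch (pyparsing must be installed; the tag strings are illustrative only):

from pyparsing import oneOf, replaceWith

def one_of_tags(tags, canonical_tag, name=None):
    # Same shape as the record: match any alias, replace it with the canonical form.
    element = oneOf(tags).setParseAction(replaceWith(canonical_tag))
    return element if name is None else element.setResultsName(name)

gene_tag = one_of_tags(['g', 'geneAbundance'], 'GeneAbundance', name='function')
print(gene_tag.parseString('g')['function'])              # GeneAbundance
print(gene_tag.parseString('geneAbundance')['function'])  # GeneAbundance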
jbeluch/xbmcswift2
xbmcswift2/xbmcmixin.py
https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/xbmcmixin.py#L160-L169
def get_string(self, stringid): '''Returns the localized string from strings.xml for the given stringid. ''' stringid = int(stringid) if not hasattr(self, '_strings'): self._strings = {} if not stringid in self._strings: self._strings[stringid] = self.addon.getLocalizedString(stringid) return self._strings[stringid]
[ "def", "get_string", "(", "self", ",", "stringid", ")", ":", "stringid", "=", "int", "(", "stringid", ")", "if", "not", "hasattr", "(", "self", ",", "'_strings'", ")", ":", "self", ".", "_strings", "=", "{", "}", "if", "not", "stringid", "in", "self", ".", "_strings", ":", "self", ".", "_strings", "[", "stringid", "]", "=", "self", ".", "addon", ".", "getLocalizedString", "(", "stringid", ")", "return", "self", ".", "_strings", "[", "stringid", "]" ]
Returns the localized string from strings.xml for the given stringid.
[ "Returns", "the", "localized", "string", "from", "strings", ".", "xml", "for", "the", "given", "stringid", "." ]
python
train
datastax/python-driver
cassandra/cqlengine/query.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L1072-L1100
def _validate_select_where(self): """ Checks that a filterset will not create invalid select statement """ # check that there's either a =, a IN or a CONTAINS (collection) # relationship with a primary key or indexed field. We also allow # custom indexes to be queried with any operator (a difference # between a secondary index) equal_ops = [self.model._get_column_by_db_name(w.field) \ for w in self._where if not isinstance(w.value, Token) and (isinstance(w.operator, EqualsOperator) or self.model._get_column_by_db_name(w.field).custom_index)] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) if not any(w.primary_key or w.has_index for w in equal_ops) and not token_comparison and not self._allow_filtering: raise QueryException( ('Where clauses require either =, a IN or a CONTAINS ' '(collection) comparison with either a primary key or ' 'indexed field. You might want to consider setting ' 'custom_index on fields that you manage index outside ' 'cqlengine.')) if not self._allow_filtering: # if the query is not on an indexed field if not any(w.has_index for w in equal_ops): if not any([w.partition_key for w in equal_ops]) and not token_comparison: raise QueryException( ('Filtering on a clustering key without a partition ' 'key is not allowed unless allow_filtering() is ' 'called on the queryset. You might want to consider ' 'setting custom_index on fields that you manage ' 'index outside cqlengine.'))
[ "def", "_validate_select_where", "(", "self", ")", ":", "# check that there's either a =, a IN or a CONTAINS (collection)", "# relationship with a primary key or indexed field. We also allow", "# custom indexes to be queried with any operator (a difference", "# between a secondary index)", "equal_ops", "=", "[", "self", ".", "model", ".", "_get_column_by_db_name", "(", "w", ".", "field", ")", "for", "w", "in", "self", ".", "_where", "if", "not", "isinstance", "(", "w", ".", "value", ",", "Token", ")", "and", "(", "isinstance", "(", "w", ".", "operator", ",", "EqualsOperator", ")", "or", "self", ".", "model", ".", "_get_column_by_db_name", "(", "w", ".", "field", ")", ".", "custom_index", ")", "]", "token_comparison", "=", "any", "(", "[", "w", "for", "w", "in", "self", ".", "_where", "if", "isinstance", "(", "w", ".", "value", ",", "Token", ")", "]", ")", "if", "not", "any", "(", "w", ".", "primary_key", "or", "w", ".", "has_index", "for", "w", "in", "equal_ops", ")", "and", "not", "token_comparison", "and", "not", "self", ".", "_allow_filtering", ":", "raise", "QueryException", "(", "(", "'Where clauses require either =, a IN or a CONTAINS '", "'(collection) comparison with either a primary key or '", "'indexed field. You might want to consider setting '", "'custom_index on fields that you manage index outside '", "'cqlengine.'", ")", ")", "if", "not", "self", ".", "_allow_filtering", ":", "# if the query is not on an indexed field", "if", "not", "any", "(", "w", ".", "has_index", "for", "w", "in", "equal_ops", ")", ":", "if", "not", "any", "(", "[", "w", ".", "partition_key", "for", "w", "in", "equal_ops", "]", ")", "and", "not", "token_comparison", ":", "raise", "QueryException", "(", "(", "'Filtering on a clustering key without a partition '", "'key is not allowed unless allow_filtering() is '", "'called on the queryset. You might want to consider '", "'setting custom_index on fields that you manage '", "'index outside cqlengine.'", ")", ")" ]
Checks that a filterset will not create invalid select statement
[ "Checks", "that", "a", "filterset", "will", "not", "create", "invalid", "select", "statement" ]
python
train
manns/pyspread
pyspread/src/gui/_gui_interfaces.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_gui_interfaces.py#L91-L111
def get_preferences_from_user(self): """Launches preferences dialog and returns dict with preferences""" dlg = PreferencesDialog(self.main_window) change_choice = dlg.ShowModal() preferences = {} if change_choice == wx.ID_OK: for (parameter, _), ctrl in zip(dlg.parameters, dlg.textctrls): if isinstance(ctrl, wx.Choice): value = ctrl.GetStringSelection() if value: preferences[parameter] = repr(value) else: preferences[parameter] = repr(ctrl.Value) dlg.Destroy() return preferences
[ "def", "get_preferences_from_user", "(", "self", ")", ":", "dlg", "=", "PreferencesDialog", "(", "self", ".", "main_window", ")", "change_choice", "=", "dlg", ".", "ShowModal", "(", ")", "preferences", "=", "{", "}", "if", "change_choice", "==", "wx", ".", "ID_OK", ":", "for", "(", "parameter", ",", "_", ")", ",", "ctrl", "in", "zip", "(", "dlg", ".", "parameters", ",", "dlg", ".", "textctrls", ")", ":", "if", "isinstance", "(", "ctrl", ",", "wx", ".", "Choice", ")", ":", "value", "=", "ctrl", ".", "GetStringSelection", "(", ")", "if", "value", ":", "preferences", "[", "parameter", "]", "=", "repr", "(", "value", ")", "else", ":", "preferences", "[", "parameter", "]", "=", "repr", "(", "ctrl", ".", "Value", ")", "dlg", ".", "Destroy", "(", ")", "return", "preferences" ]
Launches preferences dialog and returns dict with preferences
[ "Launches", "preferences", "dialog", "and", "returns", "dict", "with", "preferences" ]
python
train
ungarj/mapchete
mapchete/io/vector.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/vector.py#L166-L195
def read_vector_window(input_files, tile, validity_check=True): """ Read a window of an input vector dataset. Also clips geometry. Parameters: ----------- input_file : string path to vector file tile : ``Tile`` tile extent to read data from validity_check : bool checks if reprojected geometry is valid and throws ``RuntimeError`` if invalid (default: True) Returns ------- features : list a list of reprojected GeoJSON-like features """ if not isinstance(input_files, list): input_files = [input_files] return [ feature for feature in chain.from_iterable([ _read_vector_window(path, tile, validity_check=validity_check) for path in input_files ]) ]
[ "def", "read_vector_window", "(", "input_files", ",", "tile", ",", "validity_check", "=", "True", ")", ":", "if", "not", "isinstance", "(", "input_files", ",", "list", ")", ":", "input_files", "=", "[", "input_files", "]", "return", "[", "feature", "for", "feature", "in", "chain", ".", "from_iterable", "(", "[", "_read_vector_window", "(", "path", ",", "tile", ",", "validity_check", "=", "validity_check", ")", "for", "path", "in", "input_files", "]", ")", "]" ]
Read a window of an input vector dataset. Also clips geometry. Parameters: ----------- input_file : string path to vector file tile : ``Tile`` tile extent to read data from validity_check : bool checks if reprojected geometry is valid and throws ``RuntimeError`` if invalid (default: True) Returns ------- features : list a list of reprojected GeoJSON-like features
[ "Read", "a", "window", "of", "an", "input", "vector", "dataset", "." ]
python
valid
libtcod/python-tcod
tcod/bsp.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/bsp.py#L215-L226
def level_order(self) -> Iterator["BSP"]: """Iterate over this BSP's hierarchy in level order. .. versionadded:: 8.3 """ next = [self] # type: List['BSP'] while next: level = next # type: List['BSP'] next = [] yield from level for node in level: next.extend(node.children)
[ "def", "level_order", "(", "self", ")", "->", "Iterator", "[", "\"BSP\"", "]", ":", "next", "=", "[", "self", "]", "# type: List['BSP']", "while", "next", ":", "level", "=", "next", "# type: List['BSP']", "next", "=", "[", "]", "yield", "from", "level", "for", "node", "in", "level", ":", "next", ".", "extend", "(", "node", ".", "children", ")" ]
Iterate over this BSP's hierarchy in level order. .. versionadded:: 8.3
[ "Iterate", "over", "this", "BSP", "s", "hierarchy", "in", "level", "order", "." ]
python
train
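The level_order record above walks the BSP hierarchy breadth-first, yielding one generation of nodes at a time. The same pattern on a plain node class, as a hypothetical sketch (SimpleNode is not part of python-tcod):

class SimpleNode:
    def __init__(self, name, children=None):
        self.name = name
        self.children = list(children or [])

    def level_order(self):
        # Yield this node, then its children, then grandchildren, and so on.
        level = [self]
        while level:
            yield from level
            level = [child for node in level for child in node.children]

root = SimpleNode("root", [SimpleNode("a", [SimpleNode("a1")]), SimpleNode("b")])
print([node.name for node in root.level_order()])  # ['root', 'a', 'b', 'a1']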
oceanprotocol/squid-py
squid_py/agreements/service_agreement_template.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/service_agreement_template.py#L110-L128
def as_dictionary(self): """ Return the service agreement template as a dictionary. :return: dict """ template = { 'contractName': self.contract_name, 'events': [e.as_dictionary() for e in self.agreement_events], 'fulfillmentOrder': self.fulfillment_order, 'conditionDependency': self.condition_dependency, 'conditions': [cond.as_dictionary() for cond in self.conditions] } return { # 'type': self.DOCUMENT_TYPE, 'name': self.name, 'creator': self.creator, 'serviceAgreementTemplate': template }
[ "def", "as_dictionary", "(", "self", ")", ":", "template", "=", "{", "'contractName'", ":", "self", ".", "contract_name", ",", "'events'", ":", "[", "e", ".", "as_dictionary", "(", ")", "for", "e", "in", "self", ".", "agreement_events", "]", ",", "'fulfillmentOrder'", ":", "self", ".", "fulfillment_order", ",", "'conditionDependency'", ":", "self", ".", "condition_dependency", ",", "'conditions'", ":", "[", "cond", ".", "as_dictionary", "(", ")", "for", "cond", "in", "self", ".", "conditions", "]", "}", "return", "{", "# 'type': self.DOCUMENT_TYPE,", "'name'", ":", "self", ".", "name", ",", "'creator'", ":", "self", ".", "creator", ",", "'serviceAgreementTemplate'", ":", "template", "}" ]
Return the service agreement template as a dictionary. :return: dict
[ "Return", "the", "service", "agreement", "template", "as", "a", "dictionary", "." ]
python
train
KelSolaar/Umbra
umbra/ui/widgets/codeEditor_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/codeEditor_QPlainTextEdit.py#L986-L1006
def set_highlighter(self, highlighter): """ Sets given highlighter as the current document highlighter. :param highlighter: Highlighter. :type highlighter: QSyntaxHighlighter :return: Method success. :rtype: bool """ if not issubclass(highlighter.__class__, QSyntaxHighlighter): raise foundations.exceptions.ProgrammingError("{0} | '{1}' is not a 'QSyntaxHighlighter' subclass!".format( self.__class__.__name__, highlighter)) if self.__highlighter: self.remove_highlighter() LOGGER.debug("> Setting '{0}' highlighter.".format(highlighter)) self.__highlighter = highlighter return True
[ "def", "set_highlighter", "(", "self", ",", "highlighter", ")", ":", "if", "not", "issubclass", "(", "highlighter", ".", "__class__", ",", "QSyntaxHighlighter", ")", ":", "raise", "foundations", ".", "exceptions", ".", "ProgrammingError", "(", "\"{0} | '{1}' is not a 'QSyntaxHighlighter' subclass!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "highlighter", ")", ")", "if", "self", ".", "__highlighter", ":", "self", ".", "remove_highlighter", "(", ")", "LOGGER", ".", "debug", "(", "\"> Setting '{0}' highlighter.\"", ".", "format", "(", "highlighter", ")", ")", "self", ".", "__highlighter", "=", "highlighter", "return", "True" ]
Sets given highlighter as the current document highlighter. :param highlighter: Highlighter. :type highlighter: QSyntaxHighlighter :return: Method success. :rtype: bool
[ "Sets", "given", "highlighter", "as", "the", "current", "document", "highlighter", "." ]
python
train
erikrose/more-itertools
more_itertools/more.py
https://github.com/erikrose/more-itertools/blob/6a91b4e25c8e12fcf9fc2b53cf8ee0fba293e6f9/more_itertools/more.py#L1016-L1032
def sliced(seq, n): """Yield slices of length *n* from the sequence *seq*. >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) [(1, 2, 3), (4, 5, 6)] If the length of the sequence is not divisible by the requested slice length, the last slice will be shorter. >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) [(1, 2, 3), (4, 5, 6), (7, 8)] This function will only work for iterables that support slicing. For non-sliceable iterables, see :func:`chunked`. """ return takewhile(bool, (seq[i: i + n] for i in count(0, n)))
[ "def", "sliced", "(", "seq", ",", "n", ")", ":", "return", "takewhile", "(", "bool", ",", "(", "seq", "[", "i", ":", "i", "+", "n", "]", "for", "i", "in", "count", "(", "0", ",", "n", ")", ")", ")" ]
Yield slices of length *n* from the sequence *seq*. >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) [(1, 2, 3), (4, 5, 6)] If the length of the sequence is not divisible by the requested slice length, the last slice will be shorter. >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) [(1, 2, 3), (4, 5, 6), (7, 8)] This function will only work for iterables that support slicing. For non-sliceable iterables, see :func:`chunked`.
[ "Yield", "slices", "of", "length", "*", "n", "*", "from", "the", "sequence", "*", "seq", "*", "." ]
python
train
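The sliced record above takes slices at offsets 0, n, 2n, ... and stops at the first empty slice, which is why it only works on sliceable sequences. A standard-library-only sketch of the same idea with a couple of usage calls:

from itertools import count, takewhile

def sliced(seq, n):
    # Slice at offsets 0, n, 2n, ... until a slice comes back empty (falsy).
    return takewhile(bool, (seq[i:i + n] for i in count(0, n)))

print(list(sliced((1, 2, 3, 4, 5, 6), 3)))  # [(1, 2, 3), (4, 5, 6)]
print(list(sliced("abcdefgh", 3)))          # ['abc', 'def', 'gh']
print(list(sliced([], 3)))                  # []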
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L1657-L1670
def accumulate(self, buf): '''add in some more bytes''' bytes = array.array('B') if isinstance(buf, array.array): bytes.extend(buf) else: bytes.fromstring(buf) accum = self.crc for b in bytes: tmp = b ^ (accum & 0xff) tmp = (tmp ^ (tmp<<4)) & 0xFF accum = (accum>>8) ^ (tmp<<8) ^ (tmp<<3) ^ (tmp>>4) accum = accum & 0xFFFF self.crc = accum
[ "def", "accumulate", "(", "self", ",", "buf", ")", ":", "bytes", "=", "array", ".", "array", "(", "'B'", ")", "if", "isinstance", "(", "buf", ",", "array", ".", "array", ")", ":", "bytes", ".", "extend", "(", "buf", ")", "else", ":", "bytes", ".", "fromstring", "(", "buf", ")", "accum", "=", "self", ".", "crc", "for", "b", "in", "bytes", ":", "tmp", "=", "b", "^", "(", "accum", "&", "0xff", ")", "tmp", "=", "(", "tmp", "^", "(", "tmp", "<<", "4", ")", ")", "&", "0xFF", "accum", "=", "(", "accum", ">>", "8", ")", "^", "(", "tmp", "<<", "8", ")", "^", "(", "tmp", "<<", "3", ")", "^", "(", "tmp", ">>", "4", ")", "accum", "=", "accum", "&", "0xFFFF", "self", ".", "crc", "=", "accum" ]
add in some more bytes
[ "add", "in", "some", "more", "bytes" ]
python
train
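The accumulate record above is the X.25-style CRC-16 update used by MAVLink, implemented with the classic shift-and-xor trick instead of a lookup table. A self-contained sketch over a bytes object (the expected value in the comment assumes the usual 0xFFFF seed and no output xor):

def x25_accumulate(crc, data):
    # Update a 16-bit X.25/MCRF4XX-style CRC over an iterable of byte values.
    for byte in bytearray(data):
        tmp = byte ^ (crc & 0xFF)
        tmp = (tmp ^ (tmp << 4)) & 0xFF
        crc = ((crc >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF
    return crc

crc = x25_accumulate(0xFFFF, b"123456789")
print(hex(crc))  # 0x6f91, the commonly quoted check value for this seed/no-xorout variant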
allenai/allennlp
allennlp/modules/conditional_random_field.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/conditional_random_field.py#L324-L384
def viterbi_tags(self, logits: torch.Tensor, mask: torch.Tensor) -> List[Tuple[List[int], float]]: """ Uses viterbi algorithm to find most likely tags for the given inputs. If constraints are applied, disallows all other transitions. """ _, max_seq_length, num_tags = logits.size() # Get the tensors out of the variables logits, mask = logits.data, mask.data # Augment transitions matrix with start and end transitions start_tag = num_tags end_tag = num_tags + 1 transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.) # Apply transition constraints constrained_transitions = ( self.transitions * self._constraint_mask[:num_tags, :num_tags] + -10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags]) ) transitions[:num_tags, :num_tags] = constrained_transitions.data if self.include_start_end_transitions: transitions[start_tag, :num_tags] = ( self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data + -10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach()) ) transitions[:num_tags, end_tag] = ( self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data + -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach()) ) else: transitions[start_tag, :num_tags] = (-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())) transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach()) best_paths = [] # Pad the max sequence length by 2 to account for start_tag + end_tag. tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2) for prediction, prediction_mask in zip(logits, mask): sequence_length = torch.sum(prediction_mask) # Start with everything totally unlikely tag_sequence.fill_(-10000.) # At timestep 0 we must have the START_TAG tag_sequence[0, start_tag] = 0. # At steps 1, ..., sequence_length we just use the incoming prediction tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length] # And at the last timestep we must have the END_TAG tag_sequence[sequence_length + 1, end_tag] = 0. # We pass the tags and the transitions to ``viterbi_decode``. viterbi_path, viterbi_score = util.viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions) # Get rid of START and END sentinels and append. viterbi_path = viterbi_path[1:-1] best_paths.append((viterbi_path, viterbi_score.item())) return best_paths
[ "def", "viterbi_tags", "(", "self", ",", "logits", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ")", "->", "List", "[", "Tuple", "[", "List", "[", "int", "]", ",", "float", "]", "]", ":", "_", ",", "max_seq_length", ",", "num_tags", "=", "logits", ".", "size", "(", ")", "# Get the tensors out of the variables", "logits", ",", "mask", "=", "logits", ".", "data", ",", "mask", ".", "data", "# Augment transitions matrix with start and end transitions", "start_tag", "=", "num_tags", "end_tag", "=", "num_tags", "+", "1", "transitions", "=", "torch", ".", "Tensor", "(", "num_tags", "+", "2", ",", "num_tags", "+", "2", ")", ".", "fill_", "(", "-", "10000.", ")", "# Apply transition constraints", "constrained_transitions", "=", "(", "self", ".", "transitions", "*", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", ":", "num_tags", "]", "+", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", ":", "num_tags", "]", ")", ")", "transitions", "[", ":", "num_tags", ",", ":", "num_tags", "]", "=", "constrained_transitions", ".", "data", "if", "self", ".", "include_start_end_transitions", ":", "transitions", "[", "start_tag", ",", ":", "num_tags", "]", "=", "(", "self", ".", "start_transitions", ".", "detach", "(", ")", "*", "self", ".", "_constraint_mask", "[", "start_tag", ",", ":", "num_tags", "]", ".", "data", "+", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", "start_tag", ",", ":", "num_tags", "]", ".", "detach", "(", ")", ")", ")", "transitions", "[", ":", "num_tags", ",", "end_tag", "]", "=", "(", "self", ".", "end_transitions", ".", "detach", "(", ")", "*", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", "end_tag", "]", ".", "data", "+", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", "end_tag", "]", ".", "detach", "(", ")", ")", ")", "else", ":", "transitions", "[", "start_tag", ",", ":", "num_tags", "]", "=", "(", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", "start_tag", ",", ":", "num_tags", "]", ".", "detach", "(", ")", ")", ")", "transitions", "[", ":", "num_tags", ",", "end_tag", "]", "=", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", "end_tag", "]", ".", "detach", "(", ")", ")", "best_paths", "=", "[", "]", "# Pad the max sequence length by 2 to account for start_tag + end_tag.", "tag_sequence", "=", "torch", ".", "Tensor", "(", "max_seq_length", "+", "2", ",", "num_tags", "+", "2", ")", "for", "prediction", ",", "prediction_mask", "in", "zip", "(", "logits", ",", "mask", ")", ":", "sequence_length", "=", "torch", ".", "sum", "(", "prediction_mask", ")", "# Start with everything totally unlikely", "tag_sequence", ".", "fill_", "(", "-", "10000.", ")", "# At timestep 0 we must have the START_TAG", "tag_sequence", "[", "0", ",", "start_tag", "]", "=", "0.", "# At steps 1, ..., sequence_length we just use the incoming prediction", "tag_sequence", "[", "1", ":", "(", "sequence_length", "+", "1", ")", ",", ":", "num_tags", "]", "=", "prediction", "[", ":", "sequence_length", "]", "# And at the last timestep we must have the END_TAG", "tag_sequence", "[", "sequence_length", "+", "1", ",", "end_tag", "]", "=", "0.", "# We pass the tags and the transitions to ``viterbi_decode``.", "viterbi_path", ",", "viterbi_score", "=", "util", ".", "viterbi_decode", "(", "tag_sequence", "[", ":", "(", "sequence_length", "+", "2", ")", "]", ",", "transitions", ")", "# 
Get rid of START and END sentinels and append.", "viterbi_path", "=", "viterbi_path", "[", "1", ":", "-", "1", "]", "best_paths", ".", "append", "(", "(", "viterbi_path", ",", "viterbi_score", ".", "item", "(", ")", ")", ")", "return", "best_paths" ]
Uses viterbi algorithm to find most likely tags for the given inputs. If constraints are applied, disallows all other transitions.
[ "Uses", "viterbi", "algorithm", "to", "find", "most", "likely", "tags", "for", "the", "given", "inputs", ".", "If", "constraints", "are", "applied", "disallows", "all", "other", "transitions", "." ]
python
train
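The viterbi_tags record above builds a padded score matrix with sentinel start/end tags and hands the actual best-path search to util.viterbi_decode. A compact, dependency-free sketch of that dynamic program over additive scores, with toy numbers rather than AllenNLP tensors:

def viterbi_decode(tag_scores, transitions):
    # tag_scores[t][j]: score of tag j at step t; transitions[i][j]: score of moving i -> j.
    num_tags = len(tag_scores[0])
    best = list(tag_scores[0])
    backpointers = []
    for step_scores in tag_scores[1:]:
        pointers = []
        new_best = []
        for j in range(num_tags):
            candidates = [best[i] + transitions[i][j] for i in range(num_tags)]
            i_best = max(range(num_tags), key=candidates.__getitem__)
            pointers.append(i_best)
            new_best.append(candidates[i_best] + step_scores[j])
        best = new_best
        backpointers.append(pointers)
    # Walk the backpointers from the best final tag to recover the path.
    last = max(range(num_tags), key=best.__getitem__)
    path = [last]
    for pointers in reversed(backpointers):
        path.append(pointers[path[-1]])
    path.reverse()
    return path, best[last]

scores = [[2.0, 0.5], [0.5, 1.5], [1.0, 1.0]]
trans = [[0.0, -0.5], [-0.5, 0.0]]
print(viterbi_decode(scores, trans))  # ([0, 1, 1], 4.0)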
etingof/pyasn1
pyasn1/type/tag.py
https://github.com/etingof/pyasn1/blob/25cf116ef8d11bb0e08454c0f3635c9f4002c2d6/pyasn1/type/tag.py#L262-L283
def tagExplicitly(self, superTag): """Return explicitly tagged *TagSet* Create a new *TagSet* representing callee *TagSet* explicitly tagged with passed tag(s). With explicit tagging mode, new tags are appended to existing tag(s). Parameters ---------- superTag: :class:`~pyasn1.type.tag.Tag` *Tag* object to tag this *TagSet* Returns ------- : :class:`~pyasn1.type.tag.TagSet` New *TagSet* object """ if superTag.tagClass == tagClassUniversal: raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag") if superTag.tagFormat != tagFormatConstructed: superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId) return self + superTag
[ "def", "tagExplicitly", "(", "self", ",", "superTag", ")", ":", "if", "superTag", ".", "tagClass", "==", "tagClassUniversal", ":", "raise", "error", ".", "PyAsn1Error", "(", "\"Can't tag with UNIVERSAL class tag\"", ")", "if", "superTag", ".", "tagFormat", "!=", "tagFormatConstructed", ":", "superTag", "=", "Tag", "(", "superTag", ".", "tagClass", ",", "tagFormatConstructed", ",", "superTag", ".", "tagId", ")", "return", "self", "+", "superTag" ]
Return explicitly tagged *TagSet* Create a new *TagSet* representing callee *TagSet* explicitly tagged with passed tag(s). With explicit tagging mode, new tags are appended to existing tag(s). Parameters ---------- superTag: :class:`~pyasn1.type.tag.Tag` *Tag* object to tag this *TagSet* Returns ------- : :class:`~pyasn1.type.tag.TagSet` New *TagSet* object
[ "Return", "explicitly", "tagged", "*", "TagSet", "*" ]
python
train
iotile/coretools
iotileship/iotile/ship/autobuild/ship_file.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileship/iotile/ship/autobuild/ship_file.py#L113-L125
def create_shipfile(target, source, env): """Create a .ship file with all dependencies.""" source_dir = os.path.dirname(str(source[0])) recipe_name = os.path.basename(str(source[0]))[:-5] resman = RecipeManager() resman.add_recipe_actions(env['CUSTOM_STEPS']) resman.add_recipe_folder(source_dir, whitelist=[os.path.basename(str(source[0]))]) recipe = resman.get_recipe(recipe_name) recipe.archive(str(target[0]))
[ "def", "create_shipfile", "(", "target", ",", "source", ",", "env", ")", ":", "source_dir", "=", "os", ".", "path", ".", "dirname", "(", "str", "(", "source", "[", "0", "]", ")", ")", "recipe_name", "=", "os", ".", "path", ".", "basename", "(", "str", "(", "source", "[", "0", "]", ")", ")", "[", ":", "-", "5", "]", "resman", "=", "RecipeManager", "(", ")", "resman", ".", "add_recipe_actions", "(", "env", "[", "'CUSTOM_STEPS'", "]", ")", "resman", ".", "add_recipe_folder", "(", "source_dir", ",", "whitelist", "=", "[", "os", ".", "path", ".", "basename", "(", "str", "(", "source", "[", "0", "]", ")", ")", "]", ")", "recipe", "=", "resman", ".", "get_recipe", "(", "recipe_name", ")", "recipe", ".", "archive", "(", "str", "(", "target", "[", "0", "]", ")", ")" ]
Create a .ship file with all dependencies.
[ "Create", "a", ".", "ship", "file", "with", "all", "dependencies", "." ]
python
train
cjdrake/pyeda
pyeda/boolalg/table.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/table.py#L86-L96
def _truthtable(inputs, pcdata): """Return a truth table.""" if len(inputs) == 0 and pcdata[0] in {PC_ZERO, PC_ONE}: return { PC_ZERO : TTZERO, PC_ONE : TTONE }[pcdata[0]] elif len(inputs) == 1 and pcdata[0] == PC_ZERO and pcdata[1] == PC_ONE: return inputs[0] else: return TruthTable(inputs, pcdata)
[ "def", "_truthtable", "(", "inputs", ",", "pcdata", ")", ":", "if", "len", "(", "inputs", ")", "==", "0", "and", "pcdata", "[", "0", "]", "in", "{", "PC_ZERO", ",", "PC_ONE", "}", ":", "return", "{", "PC_ZERO", ":", "TTZERO", ",", "PC_ONE", ":", "TTONE", "}", "[", "pcdata", "[", "0", "]", "]", "elif", "len", "(", "inputs", ")", "==", "1", "and", "pcdata", "[", "0", "]", "==", "PC_ZERO", "and", "pcdata", "[", "1", "]", "==", "PC_ONE", ":", "return", "inputs", "[", "0", "]", "else", ":", "return", "TruthTable", "(", "inputs", ",", "pcdata", ")" ]
Return a truth table.
[ "Return", "a", "truth", "table", "." ]
python
train
AdvancedClimateSystems/uModbus
umodbus/client/serial/rtu.py
https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/client/serial/rtu.py#L205-L225
def send_message(adu, serial_port): """ Send ADU over serial to to server and return parsed response. :param adu: Request ADU. :param sock: Serial port instance. :return: Parsed response from server. """ serial_port.write(adu) serial_port.flush() # Check exception ADU (which is shorter than all other responses) first. exception_adu_size = 5 response_error_adu = recv_exactly(serial_port.read, exception_adu_size) raise_for_exception_adu(response_error_adu) expected_response_size = \ expected_response_pdu_size_from_request_pdu(adu[1:-2]) + 3 response_remainder = recv_exactly( serial_port.read, expected_response_size - exception_adu_size) return parse_response_adu(response_error_adu + response_remainder, adu)
[ "def", "send_message", "(", "adu", ",", "serial_port", ")", ":", "serial_port", ".", "write", "(", "adu", ")", "serial_port", ".", "flush", "(", ")", "# Check exception ADU (which is shorter than all other responses) first.", "exception_adu_size", "=", "5", "response_error_adu", "=", "recv_exactly", "(", "serial_port", ".", "read", ",", "exception_adu_size", ")", "raise_for_exception_adu", "(", "response_error_adu", ")", "expected_response_size", "=", "expected_response_pdu_size_from_request_pdu", "(", "adu", "[", "1", ":", "-", "2", "]", ")", "+", "3", "response_remainder", "=", "recv_exactly", "(", "serial_port", ".", "read", ",", "expected_response_size", "-", "exception_adu_size", ")", "return", "parse_response_adu", "(", "response_error_adu", "+", "response_remainder", ",", "adu", ")" ]
Send ADU over serial to to server and return parsed response. :param adu: Request ADU. :param sock: Serial port instance. :return: Parsed response from server.
[ "Send", "ADU", "over", "serial", "to", "to", "server", "and", "return", "parsed", "response", "." ]
python
train
spotify/luigi
luigi/contrib/azureblob.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/azureblob.py#L275-L292
def open(self, mode): """ Open the target for reading or writing :param char mode: 'r' for reading and 'w' for writing. 'b' is not supported and will be stripped if used. For binary mode, use `format` :return: * :class:`.ReadableAzureBlobFile` if 'r' * :class:`.AtomicAzureBlobFile` if 'w' """ if mode not in ('r', 'w'): raise ValueError("Unsupported open mode '%s'" % mode) if mode == 'r': return self.format.pipe_reader(ReadableAzureBlobFile(self.container, self.blob, self.client, self.download_when_reading, **self.azure_blob_options)) else: return self.format.pipe_writer(AtomicAzureBlobFile(self.container, self.blob, self.client, **self.azure_blob_options))
[ "def", "open", "(", "self", ",", "mode", ")", ":", "if", "mode", "not", "in", "(", "'r'", ",", "'w'", ")", ":", "raise", "ValueError", "(", "\"Unsupported open mode '%s'\"", "%", "mode", ")", "if", "mode", "==", "'r'", ":", "return", "self", ".", "format", ".", "pipe_reader", "(", "ReadableAzureBlobFile", "(", "self", ".", "container", ",", "self", ".", "blob", ",", "self", ".", "client", ",", "self", ".", "download_when_reading", ",", "*", "*", "self", ".", "azure_blob_options", ")", ")", "else", ":", "return", "self", ".", "format", ".", "pipe_writer", "(", "AtomicAzureBlobFile", "(", "self", ".", "container", ",", "self", ".", "blob", ",", "self", ".", "client", ",", "*", "*", "self", ".", "azure_blob_options", ")", ")" ]
Open the target for reading or writing :param char mode: 'r' for reading and 'w' for writing. 'b' is not supported and will be stripped if used. For binary mode, use `format` :return: * :class:`.ReadableAzureBlobFile` if 'r' * :class:`.AtomicAzureBlobFile` if 'w'
[ "Open", "the", "target", "for", "reading", "or", "writing" ]
python
train
bugra/angel-list
angel/angel.py
https://github.com/bugra/angel-list/blob/75ac453e873727675ba18e1f45b5bc0cfda26fd7/angel/angel.py#L413-L420
def get_startups_filtered_by(self, filter_='raising'): """ Get startups based on which companies are raising funding """ url = _STARTUP_RAISING.format(c_api=_C_API_BEGINNING, api=_API_VERSION, filter_=filter_, at=self.access_token) return _get_request(url)
[ "def", "get_startups_filtered_by", "(", "self", ",", "filter_", "=", "'raising'", ")", ":", "url", "=", "_STARTUP_RAISING", ".", "format", "(", "c_api", "=", "_C_API_BEGINNING", ",", "api", "=", "_API_VERSION", ",", "filter_", "=", "filter_", ",", "at", "=", "self", ".", "access_token", ")", "return", "_get_request", "(", "url", ")" ]
Get startups based on which companies are raising funding
[ "Get", "startups", "based", "on", "which", "companies", "are", "raising", "funding" ]
python
train
synw/dataswim
dataswim/report.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/report.py#L133-L160
def get_html(self, chart_obj=None, slug=None): """ Get the html and script tag for a chart """ if chart_obj is None: if self.chart_obj is None: self.err( "No chart object registered, please provide " "one in parameters" ) return chart_obj = self.chart_obj try: if self.engine == "bokeh": html = self._get_bokeh_html(chart_obj) if html is None: self.err("No html returned for " + str(chart_obj)) return html elif self.engine == "altair": html = self._get_altair_html_(chart_obj, slug) if html is None: self.err("No html returned for " + str(chart_obj)) return html else: self.err("Chart engine " + self.engine + " unknown") return except Exception as e: self.err(e, "Can not get html from chart object")
[ "def", "get_html", "(", "self", ",", "chart_obj", "=", "None", ",", "slug", "=", "None", ")", ":", "if", "chart_obj", "is", "None", ":", "if", "self", ".", "chart_obj", "is", "None", ":", "self", ".", "err", "(", "\"No chart object registered, please provide \"", "\"one in parameters\"", ")", "return", "chart_obj", "=", "self", ".", "chart_obj", "try", ":", "if", "self", ".", "engine", "==", "\"bokeh\"", ":", "html", "=", "self", ".", "_get_bokeh_html", "(", "chart_obj", ")", "if", "html", "is", "None", ":", "self", ".", "err", "(", "\"No html returned for \"", "+", "str", "(", "chart_obj", ")", ")", "return", "html", "elif", "self", ".", "engine", "==", "\"altair\"", ":", "html", "=", "self", ".", "_get_altair_html_", "(", "chart_obj", ",", "slug", ")", "if", "html", "is", "None", ":", "self", ".", "err", "(", "\"No html returned for \"", "+", "str", "(", "chart_obj", ")", ")", "return", "html", "else", ":", "self", ".", "err", "(", "\"Chart engine \"", "+", "self", ".", "engine", "+", "\" unknown\"", ")", "return", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not get html from chart object\"", ")" ]
Get the html and script tag for a chart
[ "Get", "the", "html", "and", "script", "tag", "for", "a", "chart" ]
python
train
google/apitools
apitools/base/py/credentials_lib.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L670-L680
def _EnsureFileExists(self): """Touches a file; returns False on error, True on success.""" if not os.path.exists(self._filename): old_umask = os.umask(0o177) try: open(self._filename, 'a+b').close() except OSError: return False finally: os.umask(old_umask) return True
[ "def", "_EnsureFileExists", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_filename", ")", ":", "old_umask", "=", "os", ".", "umask", "(", "0o177", ")", "try", ":", "open", "(", "self", ".", "_filename", ",", "'a+b'", ")", ".", "close", "(", ")", "except", "OSError", ":", "return", "False", "finally", ":", "os", ".", "umask", "(", "old_umask", ")", "return", "True" ]
Touches a file; returns False on error, True on success.
[ "Touches", "a", "file", ";", "returns", "False", "on", "error", "True", "on", "success", "." ]
python
train
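The _EnsureFileExists record above touches the credentials cache under a temporary umask of 0o177 so a newly created file ends up owner read/write only (mode 0o600). The same pattern as a standalone sketch (the path is just an example):

import os

def touch_private(path):
    # Create the file if missing, restricting new-file permissions to 0o600.
    if not os.path.exists(path):
        old_umask = os.umask(0o177)
        try:
            open(path, "a+b").close()
        except OSError:
            return False
        finally:
            os.umask(old_umask)  # always restore the process-wide umask
    return True

print(touch_private("/tmp/example_credentials_cache"))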
pytroll/trollimage
trollimage/colorspaces.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/colorspaces.py#L87-L111
def rgb2xyz(r__, g__, b__): """RGB to XYZ """ r2_ = r__ / 255.0 g2_ = g__ / 255.0 b2_ = b__ / 255.0 def f__(arr): """Forward """ return np.where(arr > 0.04045, ((arr + 0.055) / 1.055) ** 2.4, arr / 12.92) r2_ = f__(r2_) * 100 g2_ = f__(g2_) * 100 b2_ = f__(b2_) * 100 x__ = r2_ * 0.4124 + g2_ * 0.3576 + b2_ * 0.1805 y__ = r2_ * 0.2126 + g2_ * 0.7152 + b2_ * 0.0722 z__ = r2_ * 0.0193 + g2_ * 0.1192 + b2_ * 0.9505 return x__, y__, z__
[ "def", "rgb2xyz", "(", "r__", ",", "g__", ",", "b__", ")", ":", "r2_", "=", "r__", "/", "255.0", "g2_", "=", "g__", "/", "255.0", "b2_", "=", "b__", "/", "255.0", "def", "f__", "(", "arr", ")", ":", "\"\"\"Forward\n \"\"\"", "return", "np", ".", "where", "(", "arr", ">", "0.04045", ",", "(", "(", "arr", "+", "0.055", ")", "/", "1.055", ")", "**", "2.4", ",", "arr", "/", "12.92", ")", "r2_", "=", "f__", "(", "r2_", ")", "*", "100", "g2_", "=", "f__", "(", "g2_", ")", "*", "100", "b2_", "=", "f__", "(", "b2_", ")", "*", "100", "x__", "=", "r2_", "*", "0.4124", "+", "g2_", "*", "0.3576", "+", "b2_", "*", "0.1805", "y__", "=", "r2_", "*", "0.2126", "+", "g2_", "*", "0.7152", "+", "b2_", "*", "0.0722", "z__", "=", "r2_", "*", "0.0193", "+", "g2_", "*", "0.1192", "+", "b2_", "*", "0.9505", "return", "x__", ",", "y__", ",", "z__" ]
RGB to XYZ
[ "RGB", "to", "XYZ" ]
python
train
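The rgb2xyz record above linearizes 8-bit sRGB with the piecewise companding curve and applies the D65 RGB-to-XYZ matrix. The same computation for a single pixel without numpy, as a sketch:

def srgb_to_xyz(r, g, b):
    def linearize(c):
        c = c / 255.0
        # Piecewise sRGB companding: linear segment below 0.04045, gamma 2.4 above.
        return ((c + 0.055) / 1.055) ** 2.4 if c > 0.04045 else c / 12.92

    rl, gl, bl = (linearize(c) * 100 for c in (r, g, b))
    x = rl * 0.4124 + gl * 0.3576 + bl * 0.1805
    y = rl * 0.2126 + gl * 0.7152 + bl * 0.0722
    z = rl * 0.0193 + gl * 0.1192 + bl * 0.9505
    return x, y, z

print(tuple(round(v, 2) for v in srgb_to_xyz(255, 255, 255)))  # about (95.05, 100.0, 108.9)
print(tuple(round(v, 2) for v in srgb_to_xyz(0, 0, 0)))        # (0.0, 0.0, 0.0)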
niklasf/python-chess
chess/pgn.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/pgn.py#L441-L466
def setup(self, board: Union[chess.Board, str]) -> None: """ Sets up a specific starting position. This sets (or resets) the ``FEN``, ``SetUp``, and ``Variant`` header tags. """ try: fen = board.fen() except AttributeError: board = chess.Board(board) board.chess960 = board.has_chess960_castling_rights() fen = board.fen() if fen == type(board).starting_fen: self.headers.pop("SetUp", None) self.headers.pop("FEN", None) else: self.headers["SetUp"] = "1" self.headers["FEN"] = fen if type(board).aliases[0] == "Standard" and board.chess960: self.headers["Variant"] = "Chess960" elif type(board).aliases[0] != "Standard": self.headers["Variant"] = type(board).aliases[0] self.headers["FEN"] = board.fen() else: self.headers.pop("Variant", None)
[ "def", "setup", "(", "self", ",", "board", ":", "Union", "[", "chess", ".", "Board", ",", "str", "]", ")", "->", "None", ":", "try", ":", "fen", "=", "board", ".", "fen", "(", ")", "except", "AttributeError", ":", "board", "=", "chess", ".", "Board", "(", "board", ")", "board", ".", "chess960", "=", "board", ".", "has_chess960_castling_rights", "(", ")", "fen", "=", "board", ".", "fen", "(", ")", "if", "fen", "==", "type", "(", "board", ")", ".", "starting_fen", ":", "self", ".", "headers", ".", "pop", "(", "\"SetUp\"", ",", "None", ")", "self", ".", "headers", ".", "pop", "(", "\"FEN\"", ",", "None", ")", "else", ":", "self", ".", "headers", "[", "\"SetUp\"", "]", "=", "\"1\"", "self", ".", "headers", "[", "\"FEN\"", "]", "=", "fen", "if", "type", "(", "board", ")", ".", "aliases", "[", "0", "]", "==", "\"Standard\"", "and", "board", ".", "chess960", ":", "self", ".", "headers", "[", "\"Variant\"", "]", "=", "\"Chess960\"", "elif", "type", "(", "board", ")", ".", "aliases", "[", "0", "]", "!=", "\"Standard\"", ":", "self", ".", "headers", "[", "\"Variant\"", "]", "=", "type", "(", "board", ")", ".", "aliases", "[", "0", "]", "self", ".", "headers", "[", "\"FEN\"", "]", "=", "board", ".", "fen", "(", ")", "else", ":", "self", ".", "headers", ".", "pop", "(", "\"Variant\"", ",", "None", ")" ]
Sets up a specific starting position. This sets (or resets) the ``FEN``, ``SetUp``, and ``Variant`` header tags.
[ "Sets", "up", "a", "specific", "starting", "position", ".", "This", "sets", "(", "or", "resets", ")", "the", "FEN", "SetUp", "and", "Variant", "header", "tags", "." ]
python
train
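The setup record above writes or clears the FEN, SetUp and Variant headers depending on whether the given position is the variant's starting position. A hedged usage sketch with python-chess (the endgame FEN is arbitrary):

import chess
import chess.pgn

game = chess.pgn.Game()
game.setup("8/8/8/8/8/4k3/8/4K2R w K - 0 1")  # a non-initial position
print(game.headers["FEN"])    # the FEN above
print(game.headers["SetUp"])  # 1

game.setup(chess.Board())     # standard starting position: SetUp/FEN are removed again
print("FEN" in game.headers)  # False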
scikit-learn-contrib/categorical-encoding
examples/benchmarking_large/util.py
https://github.com/scikit-learn-contrib/categorical-encoding/blob/5e9e803c9131b377af305d5302723ba2415001da/examples/benchmarking_large/util.py#L58-L96
def train_model(folds, model): """ Evaluation with: Matthews correlation coefficient: represents thresholding measures AUC: represents ranking measures Brier score: represents calibration measures """ scores = [] fit_model_time = 0 # Sum of all the time spend on fitting the training data, later on normalized score_model_time = 0 # Sum of all the time spend on scoring the testing data, later on normalized for X_train, y_train, X_test, y_test in folds: # Training start_time = time.time() with ignore_warnings(category=ConvergenceWarning): # Yes, neural networks do not always converge model.fit(X_train, y_train) fit_model_time += time.time() - start_time prediction_train_proba = model.predict_proba(X_train)[:, 1] prediction_train = (prediction_train_proba >= 0.5).astype('uint8') # Testing start_time = time.time() prediction_test_proba = model.predict_proba(X_test)[:, 1] score_model_time += time.time() - start_time prediction_test = (prediction_test_proba >= 0.5).astype('uint8') # When all the predictions are of a single class, we get a RuntimeWarning in matthews_corr with warnings.catch_warnings(): warnings.simplefilter("ignore") scores.append([ sklearn.metrics.matthews_corrcoef(y_test, prediction_test), sklearn.metrics.matthews_corrcoef(y_train, prediction_train), sklearn.metrics.roc_auc_score(y_test, prediction_test_proba), sklearn.metrics.roc_auc_score(y_train, prediction_train_proba), sklearn.metrics.brier_score_loss(y_test, prediction_test_proba), sklearn.metrics.brier_score_loss(y_train, prediction_train_proba) ]) return np.mean(scores, axis=0), fit_model_time/len(folds), score_model_time/len(folds)
[ "def", "train_model", "(", "folds", ",", "model", ")", ":", "scores", "=", "[", "]", "fit_model_time", "=", "0", "# Sum of all the time spend on fitting the training data, later on normalized", "score_model_time", "=", "0", "# Sum of all the time spend on scoring the testing data, later on normalized", "for", "X_train", ",", "y_train", ",", "X_test", ",", "y_test", "in", "folds", ":", "# Training", "start_time", "=", "time", ".", "time", "(", ")", "with", "ignore_warnings", "(", "category", "=", "ConvergenceWarning", ")", ":", "# Yes, neural networks do not always converge", "model", ".", "fit", "(", "X_train", ",", "y_train", ")", "fit_model_time", "+=", "time", ".", "time", "(", ")", "-", "start_time", "prediction_train_proba", "=", "model", ".", "predict_proba", "(", "X_train", ")", "[", ":", ",", "1", "]", "prediction_train", "=", "(", "prediction_train_proba", ">=", "0.5", ")", ".", "astype", "(", "'uint8'", ")", "# Testing", "start_time", "=", "time", ".", "time", "(", ")", "prediction_test_proba", "=", "model", ".", "predict_proba", "(", "X_test", ")", "[", ":", ",", "1", "]", "score_model_time", "+=", "time", ".", "time", "(", ")", "-", "start_time", "prediction_test", "=", "(", "prediction_test_proba", ">=", "0.5", ")", ".", "astype", "(", "'uint8'", ")", "# When all the predictions are of a single class, we get a RuntimeWarning in matthews_corr", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "scores", ".", "append", "(", "[", "sklearn", ".", "metrics", ".", "matthews_corrcoef", "(", "y_test", ",", "prediction_test", ")", ",", "sklearn", ".", "metrics", ".", "matthews_corrcoef", "(", "y_train", ",", "prediction_train", ")", ",", "sklearn", ".", "metrics", ".", "roc_auc_score", "(", "y_test", ",", "prediction_test_proba", ")", ",", "sklearn", ".", "metrics", ".", "roc_auc_score", "(", "y_train", ",", "prediction_train_proba", ")", ",", "sklearn", ".", "metrics", ".", "brier_score_loss", "(", "y_test", ",", "prediction_test_proba", ")", ",", "sklearn", ".", "metrics", ".", "brier_score_loss", "(", "y_train", ",", "prediction_train_proba", ")", "]", ")", "return", "np", ".", "mean", "(", "scores", ",", "axis", "=", "0", ")", ",", "fit_model_time", "/", "len", "(", "folds", ")", ",", "score_model_time", "/", "len", "(", "folds", ")" ]
Evaluation with: Matthews correlation coefficient: represents thresholding measures AUC: represents ranking measures Brier score: represents calibration measures
[ "Evaluation", "with", ":", "Matthews", "correlation", "coefficient", ":", "represents", "thresholding", "measures", "AUC", ":", "represents", "ranking", "measures", "Brier", "score", ":", "represents", "calibration", "measures" ]
python
valid
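The train_model record above expects folds as an iterable of (X_train, y_train, X_test, y_test) tuples and averages thresholding, ranking and calibration metrics over them. A hedged sketch of how such folds might be assembled with scikit-learn; the dataset and classifier are arbitrary placeholders:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = make_classification(n_samples=500, n_features=10, random_state=0)

# Materialize folds in the (X_train, y_train, X_test, y_test) shape the record expects.
folds = []
for train_idx, test_idx in KFold(n_splits=5, shuffle=True, random_state=0).split(X):
    folds.append((X[train_idx], y[train_idx], X[test_idx], y[test_idx]))

# scores, fit_time, score_time = train_model(folds, LogisticRegression(max_iter=1000))
for X_train, y_train, X_test, y_test in folds:
    model = LogisticRegression(max_iter=1000).fit(X_train, y_train)
    print(round(model.score(X_test, y_test), 3))  # simple accuracy per fold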
delph-in/pydelphin
delphin/interfaces/ace.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/interfaces/ace.py#L505-L519
def transfer_from_iterable(grm, data, **kwargs): """ Transfer from each MRS in *data* with ACE using grammar *grm*. Args: grm (str): path to a compiled grammar image data (iterable): source MRSs as SimpleMRS strings **kwargs: additional keyword arguments to pass to the AceTransferer Yields: :class:`~delphin.interfaces.ParseResponse` """ with AceTransferer(grm, **kwargs) as transferer: for datum in data: yield transferer.interact(datum)
[ "def", "transfer_from_iterable", "(", "grm", ",", "data", ",", "*", "*", "kwargs", ")", ":", "with", "AceTransferer", "(", "grm", ",", "*", "*", "kwargs", ")", "as", "transferer", ":", "for", "datum", "in", "data", ":", "yield", "transferer", ".", "interact", "(", "datum", ")" ]
Transfer from each MRS in *data* with ACE using grammar *grm*. Args: grm (str): path to a compiled grammar image data (iterable): source MRSs as SimpleMRS strings **kwargs: additional keyword arguments to pass to the AceTransferer Yields: :class:`~delphin.interfaces.ParseResponse`
[ "Transfer", "from", "each", "MRS", "in", "*", "data", "*", "with", "ACE", "using", "grammar", "*", "grm", "*", "." ]
python
train
VorskiImagineering/C3PO
c3po/converters/po_ods.py
https://github.com/VorskiImagineering/C3PO/blob/e3e35835e5ac24158848afed4f905ca44ac3ae00/c3po/converters/po_ods.py#L142-L166
def csv_to_ods(trans_csv, meta_csv, local_ods): """ Converts csv files to one ods file :param trans_csv: path to csv file with translations :param meta_csv: path to csv file with metadata :param local_ods: path to new ods file """ trans_reader = UnicodeReader(trans_csv) meta_reader = UnicodeReader(meta_csv) ods = ODS() trans_title = trans_reader.next() meta_reader.next() _prepare_ods_columns(ods, trans_title) for i, (trans_row, meta_row) in enumerate(izip(trans_reader, meta_reader)): _write_row_into_ods(ods, 0, i, trans_row) _write_row_into_ods(ods, 1, i, meta_row) trans_reader.close() meta_reader.close() ods.save(local_ods)
[ "def", "csv_to_ods", "(", "trans_csv", ",", "meta_csv", ",", "local_ods", ")", ":", "trans_reader", "=", "UnicodeReader", "(", "trans_csv", ")", "meta_reader", "=", "UnicodeReader", "(", "meta_csv", ")", "ods", "=", "ODS", "(", ")", "trans_title", "=", "trans_reader", ".", "next", "(", ")", "meta_reader", ".", "next", "(", ")", "_prepare_ods_columns", "(", "ods", ",", "trans_title", ")", "for", "i", ",", "(", "trans_row", ",", "meta_row", ")", "in", "enumerate", "(", "izip", "(", "trans_reader", ",", "meta_reader", ")", ")", ":", "_write_row_into_ods", "(", "ods", ",", "0", ",", "i", ",", "trans_row", ")", "_write_row_into_ods", "(", "ods", ",", "1", ",", "i", ",", "meta_row", ")", "trans_reader", ".", "close", "(", ")", "meta_reader", ".", "close", "(", ")", "ods", ".", "save", "(", "local_ods", ")" ]
Converts csv files to one ods file :param trans_csv: path to csv file with translations :param meta_csv: path to csv file with metadata :param local_ods: path to new ods file
[ "Converts", "csv", "files", "to", "one", "ods", "file", ":", "param", "trans_csv", ":", "path", "to", "csv", "file", "with", "translations", ":", "param", "meta_csv", ":", "path", "to", "csv", "file", "with", "metadata", ":", "param", "local_ods", ":", "path", "to", "new", "ods", "file" ]
python
test
googleapis/google-cloud-python
api_core/google/api_core/iam.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/iam.py#L120-L128
def editors(self): """Legacy access to editor role. DEPRECATED: use ``policy["roles/editors"]`` instead.""" result = set() for role in self._EDITOR_ROLES: for member in self._bindings.get(role, ()): result.add(member) return frozenset(result)
[ "def", "editors", "(", "self", ")", ":", "result", "=", "set", "(", ")", "for", "role", "in", "self", ".", "_EDITOR_ROLES", ":", "for", "member", "in", "self", ".", "_bindings", ".", "get", "(", "role", ",", "(", ")", ")", ":", "result", ".", "add", "(", "member", ")", "return", "frozenset", "(", "result", ")" ]
Legacy access to editor role. DEPRECATED: use ``policy["roles/editors"]`` instead.
[ "Legacy", "access", "to", "editor", "role", "." ]
python
train
nyaruka/smartmin
smartmin/views.py
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L531-L540
def derive_title(self): """ Derives our title from our list """ title = super(SmartListView, self).derive_title() if not title: return force_text(self.model._meta.verbose_name_plural).title() else: return title
[ "def", "derive_title", "(", "self", ")", ":", "title", "=", "super", "(", "SmartListView", ",", "self", ")", ".", "derive_title", "(", ")", "if", "not", "title", ":", "return", "force_text", "(", "self", ".", "model", ".", "_meta", ".", "verbose_name_plural", ")", ".", "title", "(", ")", "else", ":", "return", "title" ]
Derives our title from our list
[ "Derives", "our", "title", "from", "our", "list" ]
python
train
spyder-ide/spyder
spyder/widgets/mixins.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/mixins.py#L482-L487
def extend_selection_to_next(self, what='word', direction='left'): """ Extend selection to next *what* ('word' or 'character') toward *direction* ('left' or 'right') """ self.__move_cursor_anchor(what, direction, QTextCursor.KeepAnchor)
[ "def", "extend_selection_to_next", "(", "self", ",", "what", "=", "'word'", ",", "direction", "=", "'left'", ")", ":", "self", ".", "__move_cursor_anchor", "(", "what", ",", "direction", ",", "QTextCursor", ".", "KeepAnchor", ")" ]
Extend selection to next *what* ('word' or 'character') toward *direction* ('left' or 'right')
[ "Extend", "selection", "to", "next", "*", "what", "*", "(", "word", "or", "character", ")", "toward", "*", "direction", "*", "(", "left", "or", "right", ")" ]
python
train
svetlyak40wt/python-cl-conditions
example/example.py
https://github.com/svetlyak40wt/python-cl-conditions/blob/709dfd55f2b8cf7eb9b7d86a6b70c8a3feed4b10/example/example.py#L93-L102
def log_analyzer(path): """This procedure replaces every line which can't be parsed with special object MalformedLogEntry. """ with handle(MalformedLogEntryError, lambda (c): invoke_restart('use_value', MalformedLogEntry(c.text))): for filename in find_all_logs(path): analyze_log(filename)
[ "def", "log_analyzer", "(", "path", ")", ":", "with", "handle", "(", "MalformedLogEntryError", ",", "lambda", "(", "c", ")", ":", "invoke_restart", "(", "'use_value'", ",", "MalformedLogEntry", "(", "c", ".", "text", ")", ")", ")", ":", "for", "filename", "in", "find_all_logs", "(", "path", ")", ":", "analyze_log", "(", "filename", ")" ]
This procedure replaces every line which can't be parsed with special object MalformedLogEntry.
[ "This", "procedure", "replaces", "every", "line", "which", "can", "t", "be", "parsed", "with", "special", "object", "MalformedLogEntry", "." ]
python
train
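The record above uses Python 2's tuple-parameter lambda (`lambda (c):`), which is a syntax error on Python 3, and its handle/invoke_restart machinery lives inside the library. The sketch below is a self-contained Python 3 miniature of the same 'use_value' restart idea: a low-level parser signals a condition and the high-level handler substitutes a placeholder instead of aborting. The class names mirror the record, but the plumbing is a stand-in, not the python-cl-conditions API.

class MalformedLogEntryError(Exception):
    def __init__(self, text):
        super().__init__(text)
        self.text = text

class MalformedLogEntry:
    def __init__(self, text):
        self.text = text
    def __repr__(self):
        return f"MalformedLogEntry({self.text!r})"

def parse_log_entry(line, use_value):
    if line.startswith("INFO"):
        return ("INFO", line)
    # Signal the condition; the caller-supplied restart decides the outcome.
    return use_value(MalformedLogEntryError(line))

def analyze_log(lines):
    # Handler: replace every unparsable line with MalformedLogEntry(c.text).
    use_value = lambda c: MalformedLogEntry(c.text)
    return [parse_log_entry(line, use_value) for line in lines]

print(analyze_log(["INFO boot ok", "@@garbled@@"]))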
jilljenn/tryalgo
tryalgo/freivalds.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/freivalds.py#L36-L49
def freivalds(A, B, C): """Tests matrix product AB=C by Freivalds :param A: n by n numerical matrix :param B: same :param C: same :returns: False with high probability if AB != C :complexity: :math:`O(n^2)` """ n = len(A) x = [randint(0, 1000000) for j in range(n)] return mult(A, mult(B, x)) == mult(C, x)
[ "def", "freivalds", "(", "A", ",", "B", ",", "C", ")", ":", "n", "=", "len", "(", "A", ")", "x", "=", "[", "randint", "(", "0", ",", "1000000", ")", "for", "j", "in", "range", "(", "n", ")", "]", "return", "mult", "(", "A", ",", "mult", "(", "B", ",", "x", ")", ")", "==", "mult", "(", "C", ",", "x", ")" ]
Tests matrix product AB=C by Freivalds :param A: n by n numerical matrix :param B: same :param C: same :returns: False with high probability if AB != C :complexity: :math:`O(n^2)`
[ "Tests", "matrix", "product", "AB", "=", "C", "by", "Freivalds" ]
python
train
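The freivalds record above relies on a mult helper that is not shown. The self-contained sketch below assumes it is tryalgo's matrix-vector product and supplies a plain-Python stand-in, then runs the probabilistic check on a small worked example where A times B equals C_good but not C_bad.

from random import randint

def mult(M, v):
    # Matrix-vector product, O(n^2); stand-in for tryalgo's helper.
    return [sum(Mij * vj for Mij, vj in zip(row, v)) for row in M]

def freivalds(A, B, C):
    n = len(A)
    x = [randint(0, 1000000) for _ in range(n)]
    # Check A(Bx) == Cx for one random vector x: O(n^2) instead of O(n^3).
    return mult(A, mult(B, x)) == mult(C, x)

A = [[2, 3], [3, 4]]
B = [[1, 0], [1, 2]]
C_good = [[5, 6], [7, 8]]   # equals A times B
C_bad = [[5, 6], [7, 9]]

print(freivalds(A, B, C_good))  # True
print(freivalds(A, B, C_bad))   # False with high probability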
digidotcom/python-devicecloud
devicecloud/sci.py
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/sci.py#L133-L220
def send_sci(self, operation, target, payload, reply=None, synchronous=None, sync_timeout=None, cache=None, allow_offline=None, wait_for_reconnect=None): """Send SCI request to 1 or more targets :param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets, file_system, data_service, and reboot} :param target: The device(s) to be targeted with this request :type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances TODO: document other params """ if not isinstance(payload, six.string_types) and not isinstance(payload, six.binary_type): raise TypeError("payload is required to be a string or bytes") # validate targets and bulid targets xml section try: iter(target) targets = target except TypeError: targets = [target, ] if not all(isinstance(t, TargetABC) for t in targets): raise TypeError("Target(s) must each be instances of TargetABC") targets_xml = "".join(t.to_xml() for t in targets) # reply argument if not isinstance(reply, (type(None), six.string_types)): raise TypeError("reply must be either None or a string") if reply is not None: reply_xml = ' reply="{}"'.format(reply) else: reply_xml = '' # synchronous argument if not isinstance(synchronous, (type(None), bool)): raise TypeError("synchronous expected to be either None or a boolean") if synchronous is not None: synchronous_xml = ' synchronous="{}"'.format('true' if synchronous else 'false') else: synchronous_xml = '' # sync_timeout argument # TODO: What units is syncTimeout in? seconds? if sync_timeout is not None and not isinstance(sync_timeout, six.integer_types): raise TypeError("sync_timeout expected to either be None or a number") if sync_timeout is not None: sync_timeout_xml = ' syncTimeout="{}"'.format(sync_timeout) else: sync_timeout_xml = '' # cache argument if not isinstance(cache, (type(None), bool)): raise TypeError("cache expected to either be None or a boolean") if cache is not None: cache_xml = ' cache="{}"'.format('true' if cache else 'false') else: cache_xml = '' # allow_offline argument if not isinstance(allow_offline, (type(None), bool)): raise TypeError("allow_offline is expected to be either None or a boolean") if allow_offline is not None: allow_offline_xml = ' allowOffline="{}"'.format('true' if allow_offline else 'false') else: allow_offline_xml = '' # wait_for_reconnect argument if not isinstance(wait_for_reconnect, (type(None), bool)): raise TypeError("wait_for_reconnect expected to be either None or a boolean") if wait_for_reconnect is not None: wait_for_reconnect_xml = ' waitForReconnect="{}"'.format('true' if wait_for_reconnect else 'false') else: wait_for_reconnect_xml = '' full_request = SCI_TEMPLATE.format( operation=operation, targets=targets_xml, reply=reply_xml, synchronous=synchronous_xml, sync_timeout=sync_timeout_xml, cache=cache_xml, allow_offline=allow_offline_xml, wait_for_reconnect=wait_for_reconnect_xml, payload=payload ) # TODO: do parsing here? return self._conn.post("/ws/sci", full_request)
[ "def", "send_sci", "(", "self", ",", "operation", ",", "target", ",", "payload", ",", "reply", "=", "None", ",", "synchronous", "=", "None", ",", "sync_timeout", "=", "None", ",", "cache", "=", "None", ",", "allow_offline", "=", "None", ",", "wait_for_reconnect", "=", "None", ")", ":", "if", "not", "isinstance", "(", "payload", ",", "six", ".", "string_types", ")", "and", "not", "isinstance", "(", "payload", ",", "six", ".", "binary_type", ")", ":", "raise", "TypeError", "(", "\"payload is required to be a string or bytes\"", ")", "# validate targets and bulid targets xml section", "try", ":", "iter", "(", "target", ")", "targets", "=", "target", "except", "TypeError", ":", "targets", "=", "[", "target", ",", "]", "if", "not", "all", "(", "isinstance", "(", "t", ",", "TargetABC", ")", "for", "t", "in", "targets", ")", ":", "raise", "TypeError", "(", "\"Target(s) must each be instances of TargetABC\"", ")", "targets_xml", "=", "\"\"", ".", "join", "(", "t", ".", "to_xml", "(", ")", "for", "t", "in", "targets", ")", "# reply argument", "if", "not", "isinstance", "(", "reply", ",", "(", "type", "(", "None", ")", ",", "six", ".", "string_types", ")", ")", ":", "raise", "TypeError", "(", "\"reply must be either None or a string\"", ")", "if", "reply", "is", "not", "None", ":", "reply_xml", "=", "' reply=\"{}\"'", ".", "format", "(", "reply", ")", "else", ":", "reply_xml", "=", "''", "# synchronous argument", "if", "not", "isinstance", "(", "synchronous", ",", "(", "type", "(", "None", ")", ",", "bool", ")", ")", ":", "raise", "TypeError", "(", "\"synchronous expected to be either None or a boolean\"", ")", "if", "synchronous", "is", "not", "None", ":", "synchronous_xml", "=", "' synchronous=\"{}\"'", ".", "format", "(", "'true'", "if", "synchronous", "else", "'false'", ")", "else", ":", "synchronous_xml", "=", "''", "# sync_timeout argument", "# TODO: What units is syncTimeout in? 
seconds?", "if", "sync_timeout", "is", "not", "None", "and", "not", "isinstance", "(", "sync_timeout", ",", "six", ".", "integer_types", ")", ":", "raise", "TypeError", "(", "\"sync_timeout expected to either be None or a number\"", ")", "if", "sync_timeout", "is", "not", "None", ":", "sync_timeout_xml", "=", "' syncTimeout=\"{}\"'", ".", "format", "(", "sync_timeout", ")", "else", ":", "sync_timeout_xml", "=", "''", "# cache argument", "if", "not", "isinstance", "(", "cache", ",", "(", "type", "(", "None", ")", ",", "bool", ")", ")", ":", "raise", "TypeError", "(", "\"cache expected to either be None or a boolean\"", ")", "if", "cache", "is", "not", "None", ":", "cache_xml", "=", "' cache=\"{}\"'", ".", "format", "(", "'true'", "if", "cache", "else", "'false'", ")", "else", ":", "cache_xml", "=", "''", "# allow_offline argument", "if", "not", "isinstance", "(", "allow_offline", ",", "(", "type", "(", "None", ")", ",", "bool", ")", ")", ":", "raise", "TypeError", "(", "\"allow_offline is expected to be either None or a boolean\"", ")", "if", "allow_offline", "is", "not", "None", ":", "allow_offline_xml", "=", "' allowOffline=\"{}\"'", ".", "format", "(", "'true'", "if", "allow_offline", "else", "'false'", ")", "else", ":", "allow_offline_xml", "=", "''", "# wait_for_reconnect argument", "if", "not", "isinstance", "(", "wait_for_reconnect", ",", "(", "type", "(", "None", ")", ",", "bool", ")", ")", ":", "raise", "TypeError", "(", "\"wait_for_reconnect expected to be either None or a boolean\"", ")", "if", "wait_for_reconnect", "is", "not", "None", ":", "wait_for_reconnect_xml", "=", "' waitForReconnect=\"{}\"'", ".", "format", "(", "'true'", "if", "wait_for_reconnect", "else", "'false'", ")", "else", ":", "wait_for_reconnect_xml", "=", "''", "full_request", "=", "SCI_TEMPLATE", ".", "format", "(", "operation", "=", "operation", ",", "targets", "=", "targets_xml", ",", "reply", "=", "reply_xml", ",", "synchronous", "=", "synchronous_xml", ",", "sync_timeout", "=", "sync_timeout_xml", ",", "cache", "=", "cache_xml", ",", "allow_offline", "=", "allow_offline_xml", ",", "wait_for_reconnect", "=", "wait_for_reconnect_xml", ",", "payload", "=", "payload", ")", "# TODO: do parsing here?", "return", "self", ".", "_conn", ".", "post", "(", "\"/ws/sci\"", ",", "full_request", ")" ]
Send SCI request to 1 or more targets :param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets, file_system, data_service, and reboot} :param target: The device(s) to be targeted with this request :type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances TODO: document other params
[ "Send", "SCI", "request", "to", "1", "or", "more", "targets" ]
python
train
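A hedged usage sketch for send_sci. The DeviceCloud, DeviceTarget, and get_sci_api names follow python-devicecloud's documented surface, but the credentials, device id, and reboot payload are placeholders, and the response handling is an assumption.

from devicecloud import DeviceCloud
from devicecloud.sci import DeviceTarget

dc = DeviceCloud('user', 'password')       # placeholder credentials
sci = dc.get_sci_api()

target = DeviceTarget('00000000-00000000-00409DFF-FF122B5A')  # placeholder id
payload = '<reboot/>'

# Asks the gateway to reboot; the optional flags map onto the XML attributes
# (reply, synchronous, cache, allowOffline, ...) built by the function above.
response = sci.send_sci('reboot', target, payload, synchronous=False)
print(response.content)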
lreis2415/PyGeoC
pygeoc/vector.py
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/vector.py#L36-L77
def raster2shp(rasterfile, vectorshp, layername=None, fieldname=None, band_num=1, mask='default'): """Convert raster to ESRI shapefile""" FileClass.remove_files(vectorshp) FileClass.check_file_exists(rasterfile) # this allows GDAL to throw Python Exceptions gdal.UseExceptions() src_ds = gdal.Open(rasterfile) if src_ds is None: print('Unable to open %s' % rasterfile) sys.exit(1) try: srcband = src_ds.GetRasterBand(band_num) except RuntimeError as e: # for example, try GetRasterBand(10) print('Band ( %i ) not found, %s' % (band_num, e)) sys.exit(1) if mask == 'default': maskband = srcband.GetMaskBand() elif mask is None or mask.upper() == 'NONE': maskband = None else: mask_ds = gdal.Open(mask) maskband = mask_ds.GetRasterBand(1) # create output datasource if layername is None: layername = FileClass.get_core_name_without_suffix(rasterfile) drv = ogr_GetDriverByName(str('ESRI Shapefile')) dst_ds = drv.CreateDataSource(vectorshp) srs = None if src_ds.GetProjection() != '': srs = osr_SpatialReference() srs.ImportFromWkt(src_ds.GetProjection()) dst_layer = dst_ds.CreateLayer(str(layername), srs=srs) if fieldname is None: fieldname = layername.upper() fd = ogr_FieldDefn(str(fieldname), OFTInteger) dst_layer.CreateField(fd) dst_field = 0 result = gdal.Polygonize(srcband, maskband, dst_layer, dst_field, ['8CONNECTED=8'], callback=None) return result
[ "def", "raster2shp", "(", "rasterfile", ",", "vectorshp", ",", "layername", "=", "None", ",", "fieldname", "=", "None", ",", "band_num", "=", "1", ",", "mask", "=", "'default'", ")", ":", "FileClass", ".", "remove_files", "(", "vectorshp", ")", "FileClass", ".", "check_file_exists", "(", "rasterfile", ")", "# this allows GDAL to throw Python Exceptions", "gdal", ".", "UseExceptions", "(", ")", "src_ds", "=", "gdal", ".", "Open", "(", "rasterfile", ")", "if", "src_ds", "is", "None", ":", "print", "(", "'Unable to open %s'", "%", "rasterfile", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "srcband", "=", "src_ds", ".", "GetRasterBand", "(", "band_num", ")", "except", "RuntimeError", "as", "e", ":", "# for example, try GetRasterBand(10)", "print", "(", "'Band ( %i ) not found, %s'", "%", "(", "band_num", ",", "e", ")", ")", "sys", ".", "exit", "(", "1", ")", "if", "mask", "==", "'default'", ":", "maskband", "=", "srcband", ".", "GetMaskBand", "(", ")", "elif", "mask", "is", "None", "or", "mask", ".", "upper", "(", ")", "==", "'NONE'", ":", "maskband", "=", "None", "else", ":", "mask_ds", "=", "gdal", ".", "Open", "(", "mask", ")", "maskband", "=", "mask_ds", ".", "GetRasterBand", "(", "1", ")", "# create output datasource", "if", "layername", "is", "None", ":", "layername", "=", "FileClass", ".", "get_core_name_without_suffix", "(", "rasterfile", ")", "drv", "=", "ogr_GetDriverByName", "(", "str", "(", "'ESRI Shapefile'", ")", ")", "dst_ds", "=", "drv", ".", "CreateDataSource", "(", "vectorshp", ")", "srs", "=", "None", "if", "src_ds", ".", "GetProjection", "(", ")", "!=", "''", ":", "srs", "=", "osr_SpatialReference", "(", ")", "srs", ".", "ImportFromWkt", "(", "src_ds", ".", "GetProjection", "(", ")", ")", "dst_layer", "=", "dst_ds", ".", "CreateLayer", "(", "str", "(", "layername", ")", ",", "srs", "=", "srs", ")", "if", "fieldname", "is", "None", ":", "fieldname", "=", "layername", ".", "upper", "(", ")", "fd", "=", "ogr_FieldDefn", "(", "str", "(", "fieldname", ")", ",", "OFTInteger", ")", "dst_layer", ".", "CreateField", "(", "fd", ")", "dst_field", "=", "0", "result", "=", "gdal", ".", "Polygonize", "(", "srcband", ",", "maskband", ",", "dst_layer", ",", "dst_field", ",", "[", "'8CONNECTED=8'", "]", ",", "callback", "=", "None", ")", "return", "result" ]
Convert raster to ESRI shapefile
[ "Convert", "raster", "to", "ESRI", "shapefile" ]
python
train
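A hedged usage sketch for raster2shp. The import path assumes PyGeoC exposes it on VectorUtilClass in pygeoc.vector; the file paths and field name are placeholders, and GDAL/OGR must be installed for the underlying Polygonize call.

from pygeoc.vector import VectorUtilClass   # assumed location of raster2shp

result = VectorUtilClass.raster2shp(
    rasterfile='data/landuse.tif',    # input raster, band 1 is polygonized
    vectorshp='output/landuse.shp',   # destination ESRI shapefile
    layername='landuse',
    fieldname='LANDUSE',
)
print('gdal.Polygonize returned', result)   # 0 on success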
stratis-storage/into-dbus-python
src/into_dbus_python/_xformer.py
https://github.com/stratis-storage/into-dbus-python/blob/81366049671f79116bbb81c97bf621800a2f6315/src/into_dbus_python/_xformer.py#L27-L48
def _wrapper(func): """ Wraps a generated function so that it catches all Type- and ValueErrors and raises IntoDPValueErrors. :param func: the transforming function """ @functools.wraps(func) def the_func(expr): """ The actual function. :param object expr: the expression to be xformed to dbus-python types """ try: return func(expr) except (TypeError, ValueError) as err: raise IntoDPValueError(expr, "expr", "could not be transformed") \ from err return the_func
[ "def", "_wrapper", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "the_func", "(", "expr", ")", ":", "\"\"\"\n The actual function.\n\n :param object expr: the expression to be xformed to dbus-python types\n \"\"\"", "try", ":", "return", "func", "(", "expr", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "err", ":", "raise", "IntoDPValueError", "(", "expr", ",", "\"expr\"", ",", "\"could not be transformed\"", ")", "from", "err", "return", "the_func" ]
Wraps a generated function so that it catches all Type- and ValueErrors and raises IntoDPValueErrors. :param func: the transforming function
[ "Wraps", "a", "generated", "function", "so", "that", "it", "catches", "all", "Type", "-", "and", "ValueErrors", "and", "raises", "IntoDPValueErrors", "." ]
python
valid
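A self-contained sketch of the wrap-and-reraise decorator above. The IntoDPValueError here is a minimal stand-in for into_dbus_python's exception type, kept only so the example runs on its own; the decorator body mirrors the record.

import functools

class IntoDPValueError(ValueError):
    # Stand-in for into_dbus_python's IntoDPValueError.
    def __init__(self, value, name, message):
        super().__init__(f"{name}={value!r}: {message}")

def _wrapper(func):
    @functools.wraps(func)
    def the_func(expr):
        try:
            return func(expr)
        except (TypeError, ValueError) as err:
            raise IntoDPValueError(expr, "expr", "could not be transformed") from err
    return the_func

@_wrapper
def to_int(expr):
    return int(expr)

print(to_int("42"))   # 42
try:
    to_int("not-an-int")
except IntoDPValueError as exc:
    print(type(exc).__name__, "caused by", type(exc.__cause__).__name__)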
CamDavidsonPilon/lifelines
lifelines/fitters/aalen_additive_fitter.py
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/aalen_additive_fitter.py#L531-L579
def print_summary(self, decimals=2, **kwargs): """ Print summary statistics describing the fit, the coefficients, and the error bounds. Parameters ----------- decimals: int, optional (default=2) specify the number of decimal places to show kwargs: print additional meta data in the output (useful to provide model names, dataset names, etc.) when comparing multiple outputs. """ # Print information about data first justify = string_justify(18) print(self) print("{} = '{}'".format(justify("duration col"), self.duration_col)) print("{} = '{}'".format(justify("event col"), self.event_col)) if self.weights_col: print("{} = '{}'".format(justify("weights col"), self.weights_col)) if self.coef_penalizer > 0: print("{} = '{}'".format(justify("coef penalizer"), self.coef_penalizer)) if self.smoothing_penalizer > 0: print("{} = '{}'".format(justify("smoothing penalizer"), self.smoothing_penalizer)) print("{} = {}".format(justify("number of subjects"), self._n_examples)) print("{} = {}".format(justify("number of events"), self.event_observed.sum())) print("{} = {}".format(justify("time fit was run"), self._time_fit_was_called)) for k, v in kwargs.items(): print("{} = {}\n".format(justify(k), v)) print(end="\n") print("---") df = self.summary print( df.to_string( float_format=format_floats(decimals), formatters={"p": format_p_value(decimals), "exp(coef)": format_exp_floats(decimals)}, ) ) # Significance code explanation print("---") print("Concordance = {:.{prec}f}".format(self.score_, prec=decimals))
[ "def", "print_summary", "(", "self", ",", "decimals", "=", "2", ",", "*", "*", "kwargs", ")", ":", "# Print information about data first", "justify", "=", "string_justify", "(", "18", ")", "print", "(", "self", ")", "print", "(", "\"{} = '{}'\"", ".", "format", "(", "justify", "(", "\"duration col\"", ")", ",", "self", ".", "duration_col", ")", ")", "print", "(", "\"{} = '{}'\"", ".", "format", "(", "justify", "(", "\"event col\"", ")", ",", "self", ".", "event_col", ")", ")", "if", "self", ".", "weights_col", ":", "print", "(", "\"{} = '{}'\"", ".", "format", "(", "justify", "(", "\"weights col\"", ")", ",", "self", ".", "weights_col", ")", ")", "if", "self", ".", "coef_penalizer", ">", "0", ":", "print", "(", "\"{} = '{}'\"", ".", "format", "(", "justify", "(", "\"coef penalizer\"", ")", ",", "self", ".", "coef_penalizer", ")", ")", "if", "self", ".", "smoothing_penalizer", ">", "0", ":", "print", "(", "\"{} = '{}'\"", ".", "format", "(", "justify", "(", "\"smoothing penalizer\"", ")", ",", "self", ".", "smoothing_penalizer", ")", ")", "print", "(", "\"{} = {}\"", ".", "format", "(", "justify", "(", "\"number of subjects\"", ")", ",", "self", ".", "_n_examples", ")", ")", "print", "(", "\"{} = {}\"", ".", "format", "(", "justify", "(", "\"number of events\"", ")", ",", "self", ".", "event_observed", ".", "sum", "(", ")", ")", ")", "print", "(", "\"{} = {}\"", ".", "format", "(", "justify", "(", "\"time fit was run\"", ")", ",", "self", ".", "_time_fit_was_called", ")", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "print", "(", "\"{} = {}\\n\"", ".", "format", "(", "justify", "(", "k", ")", ",", "v", ")", ")", "print", "(", "end", "=", "\"\\n\"", ")", "print", "(", "\"---\"", ")", "df", "=", "self", ".", "summary", "print", "(", "df", ".", "to_string", "(", "float_format", "=", "format_floats", "(", "decimals", ")", ",", "formatters", "=", "{", "\"p\"", ":", "format_p_value", "(", "decimals", ")", ",", "\"exp(coef)\"", ":", "format_exp_floats", "(", "decimals", ")", "}", ",", ")", ")", "# Significance code explanation", "print", "(", "\"---\"", ")", "print", "(", "\"Concordance = {:.{prec}f}\"", ".", "format", "(", "self", ".", "score_", ",", "prec", "=", "decimals", ")", ")" ]
Print summary statistics describing the fit, the coefficients, and the error bounds. Parameters ----------- decimals: int, optional (default=2) specify the number of decimal places to show kwargs: print additional meta data in the output (useful to provide model names, dataset names, etc.) when comparing multiple outputs.
[ "Print", "summary", "statistics", "describing", "the", "fit", "the", "coefficients", "and", "the", "error", "bounds", "." ]
python
train
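A hedged usage sketch for print_summary on the Aalen additive model. It assumes lifelines is installed and that its bundled regression dataset keeps the T (duration) and E (event) columns; the extra keyword argument is echoed back as a metadata line, as the docstring above describes.

from lifelines import AalenAdditiveFitter
from lifelines.datasets import load_regression_dataset

df = load_regression_dataset()
aaf = AalenAdditiveFitter(coef_penalizer=1.0)
aaf.fit(df, duration_col='T', event_col='E')

# decimals controls float formatting; unknown kwargs are printed as metadata.
aaf.print_summary(decimals=3, dataset='lifelines regression demo')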
pyrocat101/moo
moo/__init__.py
https://github.com/pyrocat101/moo/blob/39e86d4ecb329309260bc30876c77aa3a7a2cfb1/moo/__init__.py#L209-L229
def main(port, export, css, files): """ \b Examples: $ moo README.md # live preview README.md $ moo -e *.md # export all markdown files $ moo --no-css -e README.md # export README.md without CSS $ cat README.md | moo -e - | less # export STDIN to STDOUT """ options = { 'css': css, 'port': port } try: if not export: if len(files) != 1: error("please specify just one file to preview") preview(files[0], options) else: export_files(files, options) except KeyboardInterrupt: sys.exit(0) except Exception as exc: die()
[ "def", "main", "(", "port", ",", "export", ",", "css", ",", "files", ")", ":", "options", "=", "{", "'css'", ":", "css", ",", "'port'", ":", "port", "}", "try", ":", "if", "not", "export", ":", "if", "len", "(", "files", ")", "!=", "1", ":", "error", "(", "\"please specify just one file to preview\"", ")", "preview", "(", "files", "[", "0", "]", ",", "options", ")", "else", ":", "export_files", "(", "files", ",", "options", ")", "except", "KeyboardInterrupt", ":", "sys", ".", "exit", "(", "0", ")", "except", "Exception", "as", "exc", ":", "die", "(", ")" ]
\b Examples: $ moo README.md # live preview README.md $ moo -e *.md # export all markdown files $ moo --no-css -e README.md # export README.md without CSS $ cat README.md | moo -e - | less # export STDIN to STDOUT
[ "\\", "b", "Examples", ":", "$", "moo", "README", ".", "md", "#", "live", "preview", "README", ".", "md", "$", "moo", "-", "e", "*", ".", "md", "#", "export", "all", "markdown", "files", "$", "moo", "--", "no", "-", "css", "-", "e", "README", ".", "md", "#", "export", "README", ".", "md", "without", "CSS", "$", "cat", "README", ".", "md", "|", "moo", "-", "e", "-", "|", "less", "#", "export", "STDIN", "to", "STDOUT" ]
python
test
PRIArobotics/HedgehogUtils
hedgehog/utils/zmq/__init__.py
https://github.com/PRIArobotics/HedgehogUtils/blob/cc368df270288c870cc66d707696ccb62823ca9c/hedgehog/utils/zmq/__init__.py#L11-L23
def configure(self, *, hwm: int=None, rcvtimeo: int=None, sndtimeo: int=None, linger: int=None) -> 'Socket': """ Allows to configure some common socket options and configurations, while allowing method chaining """ if hwm is not None: self.set_hwm(hwm) if rcvtimeo is not None: self.setsockopt(zmq.RCVTIMEO, rcvtimeo) if sndtimeo is not None: self.setsockopt(zmq.SNDTIMEO, sndtimeo) if linger is not None: self.setsockopt(zmq.LINGER, linger) return self
[ "def", "configure", "(", "self", ",", "*", ",", "hwm", ":", "int", "=", "None", ",", "rcvtimeo", ":", "int", "=", "None", ",", "sndtimeo", ":", "int", "=", "None", ",", "linger", ":", "int", "=", "None", ")", "->", "'Socket'", ":", "if", "hwm", "is", "not", "None", ":", "self", ".", "set_hwm", "(", "hwm", ")", "if", "rcvtimeo", "is", "not", "None", ":", "self", ".", "setsockopt", "(", "zmq", ".", "RCVTIMEO", ",", "rcvtimeo", ")", "if", "sndtimeo", "is", "not", "None", ":", "self", ".", "setsockopt", "(", "zmq", ".", "SNDTIMEO", ",", "sndtimeo", ")", "if", "linger", "is", "not", "None", ":", "self", ".", "setsockopt", "(", "zmq", ".", "LINGER", ",", "linger", ")", "return", "self" ]
Allows to configure some common socket options and configurations, while allowing method chaining
[ "Allows", "to", "configure", "some", "common", "socket", "options", "and", "configurations", "while", "allowing", "method", "chaining" ]
python
train
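A self-contained sketch of the same configure-then-chain idea using plain pyzmq. It assumes the hedgehog Socket subclass wraps exactly these setsockopt calls, as the method above suggests; the endpoint is a placeholder.

import zmq

def configure(sock, *, hwm=None, rcvtimeo=None, sndtimeo=None, linger=None):
    if hwm is not None:
        sock.set_hwm(hwm)
    if rcvtimeo is not None:
        sock.setsockopt(zmq.RCVTIMEO, rcvtimeo)
    if sndtimeo is not None:
        sock.setsockopt(zmq.SNDTIMEO, sndtimeo)
    if linger is not None:
        sock.setsockopt(zmq.LINGER, linger)
    return sock   # returning the socket is what enables method chaining

ctx = zmq.Context.instance()
sock = configure(ctx.socket(zmq.REQ), hwm=1000, rcvtimeo=500, linger=0)
sock.connect('tcp://127.0.0.1:5555')   # placeholder endpoint
sock.close()
ctx.term()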
welbornprod/colr
colr/colr.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/colr.py#L1439-L1460
def color_code(self, fore=None, back=None, style=None): """ Return the codes for this style/colors. """ # Map from style type to raw code formatter function. colorcodes = [] resetcodes = [] userstyles = {'style': style, 'back': back, 'fore': fore} for stype in userstyles: stylearg = userstyles.get(stype, None) if not stylearg: # No value for this style name, don't use it. continue # Get escape code for this style. code = self.get_escape_code(stype, stylearg) stylename = str(stylearg).lower() if (stype == 'style') and (stylename in ('0', )): resetcodes.append(code) elif stylename.startswith('reset'): resetcodes.append(code) else: colorcodes.append(code) # Reset codes come first, to not override colors. return ''.join((''.join(resetcodes), ''.join(colorcodes)))
[ "def", "color_code", "(", "self", ",", "fore", "=", "None", ",", "back", "=", "None", ",", "style", "=", "None", ")", ":", "# Map from style type to raw code formatter function.", "colorcodes", "=", "[", "]", "resetcodes", "=", "[", "]", "userstyles", "=", "{", "'style'", ":", "style", ",", "'back'", ":", "back", ",", "'fore'", ":", "fore", "}", "for", "stype", "in", "userstyles", ":", "stylearg", "=", "userstyles", ".", "get", "(", "stype", ",", "None", ")", "if", "not", "stylearg", ":", "# No value for this style name, don't use it.", "continue", "# Get escape code for this style.", "code", "=", "self", ".", "get_escape_code", "(", "stype", ",", "stylearg", ")", "stylename", "=", "str", "(", "stylearg", ")", ".", "lower", "(", ")", "if", "(", "stype", "==", "'style'", ")", "and", "(", "stylename", "in", "(", "'0'", ",", ")", ")", ":", "resetcodes", ".", "append", "(", "code", ")", "elif", "stylename", ".", "startswith", "(", "'reset'", ")", ":", "resetcodes", ".", "append", "(", "code", ")", "else", ":", "colorcodes", ".", "append", "(", "code", ")", "# Reset codes come first, to not override colors.", "return", "''", ".", "join", "(", "(", "''", ".", "join", "(", "resetcodes", ")", ",", "''", ".", "join", "(", "colorcodes", ")", ")", ")" ]
Return the codes for this style/colors.
[ "Return", "the", "codes", "for", "this", "style", "/", "colors", "." ]
python
train
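A miniature stand-in illustrating why the method above places reset codes before color codes: a reset emitted after the colors would wipe them out. The escape strings are ordinary ANSI sequences, not colr internals.

RED_FG = '\x1b[31m'
BRIGHT = '\x1b[1m'
RESET_ALL = '\x1b[0m'

def color_code(codes):
    resets = [c for c in codes if c == RESET_ALL]
    colors = [c for c in codes if c != RESET_ALL]
    # Resets first, so they cannot override the colors that follow.
    return ''.join(resets) + ''.join(colors)

prefix = color_code([RED_FG, RESET_ALL, BRIGHT])
print(repr(prefix))                       # '\x1b[0m\x1b[31m\x1b[1m'
print(prefix + 'warning' + RESET_ALL)     # bright red text, then reset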
HIPS/autograd
examples/data.py
https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/examples/data.py#L53-L67
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate, rs=npr.RandomState(0)): """Based on code by Ryan P. Adams.""" rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False) features = rs.randn(num_classes*num_per_class, 2) \ * np.array([radial_std, tangential_std]) features[:, 0] += 1 labels = np.repeat(np.arange(num_classes), num_per_class) angles = rads[labels] + rate * np.exp(features[:,0]) rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)]) rotations = np.reshape(rotations.T, (-1, 2, 2)) return np.einsum('ti,tij->tj', features, rotations)
[ "def", "make_pinwheel", "(", "radial_std", ",", "tangential_std", ",", "num_classes", ",", "num_per_class", ",", "rate", ",", "rs", "=", "npr", ".", "RandomState", "(", "0", ")", ")", ":", "rads", "=", "np", ".", "linspace", "(", "0", ",", "2", "*", "np", ".", "pi", ",", "num_classes", ",", "endpoint", "=", "False", ")", "features", "=", "rs", ".", "randn", "(", "num_classes", "*", "num_per_class", ",", "2", ")", "*", "np", ".", "array", "(", "[", "radial_std", ",", "tangential_std", "]", ")", "features", "[", ":", ",", "0", "]", "+=", "1", "labels", "=", "np", ".", "repeat", "(", "np", ".", "arange", "(", "num_classes", ")", ",", "num_per_class", ")", "angles", "=", "rads", "[", "labels", "]", "+", "rate", "*", "np", ".", "exp", "(", "features", "[", ":", ",", "0", "]", ")", "rotations", "=", "np", ".", "stack", "(", "[", "np", ".", "cos", "(", "angles", ")", ",", "-", "np", ".", "sin", "(", "angles", ")", ",", "np", ".", "sin", "(", "angles", ")", ",", "np", ".", "cos", "(", "angles", ")", "]", ")", "rotations", "=", "np", ".", "reshape", "(", "rotations", ".", "T", ",", "(", "-", "1", ",", "2", ",", "2", ")", ")", "return", "np", ".", "einsum", "(", "'ti,tij->tj'", ",", "features", ",", "rotations", ")" ]
Based on code by Ryan P. Adams.
[ "Based", "on", "code", "by", "Ryan", "P", ".", "Adams", "." ]
python
train
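The record above imports autograd's NumPy wrapper and a module-level RandomState. The plain-NumPy re-sketch below keeps the same shapes and einsum rotation step so the generator can be run standalone and its output inspected; it is a runnable stand-in, not the autograd examples module.

import numpy as np

def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class,
                  rate, rs=np.random.RandomState(0)):
    rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)
    features = rs.randn(num_classes * num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)
    angles = rads[labels] + rate * np.exp(features[:, 0])
    rotations = np.stack([np.cos(angles), -np.sin(angles),
                          np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))
    # Rotate each 2-D feature by its own per-point rotation matrix.
    return np.einsum('ti,tij->tj', features, rotations)

data = make_pinwheel(0.3, 0.05, num_classes=5, num_per_class=100, rate=0.25)
print(data.shape)   # (500, 2): five spiral arms of 100 points each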
ReFirmLabs/binwalk
src/binwalk/core/module.py
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/core/module.py#L1010-L1019
def show_help(fd=sys.stdout): ''' Convenience wrapper around binwalk.core.module.Modules.help. @fd - An object with a write method (e.g., sys.stdout, sys.stderr, etc). Returns None. ''' with Modules() as m: fd.write(m.help())
[ "def", "show_help", "(", "fd", "=", "sys", ".", "stdout", ")", ":", "with", "Modules", "(", ")", "as", "m", ":", "fd", ".", "write", "(", "m", ".", "help", "(", ")", ")" ]
Convenience wrapper around binwalk.core.module.Modules.help. @fd - An object with a write method (e.g., sys.stdout, sys.stderr, etc). Returns None.
[ "Convenience", "wrapper", "around", "binwalk", ".", "core", ".", "module", ".", "Modules", ".", "help", "." ]
python
train
wdm0006/git-pandas
gitpandas/project.py
https://github.com/wdm0006/git-pandas/blob/e56b817b1d66b8296d1d5e703d5db0e181d25899/gitpandas/project.py#L236-L279
def commit_history(self, branch, limit=None, days=None, ignore_globs=None, include_globs=None): """ Returns a pandas DataFrame containing all of the commits for a given branch. The results from all repositories are appended to each other, resulting in one large data frame of size <limit>. If a limit is provided, it is divided by the number of repositories in the project directory to find out how many commits to pull from each project. Future implementations will use date ordering across all projects to get the true most recent N commits across the project. Included in that DataFrame will be the columns: * repository * date (index) * author * committer * message * lines * insertions * deletions * net :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param days: (optional, default=None) number of days to return if limit is None :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything. :return: DataFrame """ if limit is not None: limit = int(limit / len(self.repo_dirs)) df = pd.DataFrame(columns=['author', 'committer', 'message', 'lines', 'insertions', 'deletions', 'net']) for repo in self.repos: try: ch = repo.commit_history(branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs) ch['repository'] = repo.repo_name df = df.append(ch) except GitCommandError: print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch)) df.reset_index() return df
[ "def", "commit_history", "(", "self", ",", "branch", ",", "limit", "=", "None", ",", "days", "=", "None", ",", "ignore_globs", "=", "None", ",", "include_globs", "=", "None", ")", ":", "if", "limit", "is", "not", "None", ":", "limit", "=", "int", "(", "limit", "/", "len", "(", "self", ".", "repo_dirs", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'author'", ",", "'committer'", ",", "'message'", ",", "'lines'", ",", "'insertions'", ",", "'deletions'", ",", "'net'", "]", ")", "for", "repo", "in", "self", ".", "repos", ":", "try", ":", "ch", "=", "repo", ".", "commit_history", "(", "branch", ",", "limit", "=", "limit", ",", "days", "=", "days", ",", "ignore_globs", "=", "ignore_globs", ",", "include_globs", "=", "include_globs", ")", "ch", "[", "'repository'", "]", "=", "repo", ".", "repo_name", "df", "=", "df", ".", "append", "(", "ch", ")", "except", "GitCommandError", ":", "print", "(", "'Warning! Repo: %s seems to not have the branch: %s'", "%", "(", "repo", ",", "branch", ")", ")", "df", ".", "reset_index", "(", ")", "return", "df" ]
Returns a pandas DataFrame containing all of the commits for a given branch. The results from all repositories are appended to each other, resulting in one large data frame of size <limit>. If a limit is provided, it is divided by the number of repositories in the project directory to find out how many commits to pull from each project. Future implementations will use date ordering across all projects to get the true most recent N commits across the project. Included in that DataFrame will be the columns: * repository * date (index) * author * committer * message * lines * insertions * deletions * net :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param days: (optional, default=None) number of days to return if limit is None :param ignore_globs: (optional, default=None) a list of globs to ignore, default None excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :return: DataFrame
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "of", "the", "commits", "for", "a", "given", "branch", ".", "The", "results", "from", "all", "repositories", "are", "appended", "to", "each", "other", "resulting", "in", "one", "large", "data", "frame", "of", "size", "<limit", ">", ".", "If", "a", "limit", "is", "provided", "it", "is", "divided", "by", "the", "number", "of", "repositories", "in", "the", "project", "directory", "to", "find", "out", "how", "many", "commits", "to", "pull", "from", "each", "project", ".", "Future", "implementations", "will", "use", "date", "ordering", "across", "all", "projects", "to", "get", "the", "true", "most", "recent", "N", "commits", "across", "the", "project", "." ]
python
train
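A hedged usage sketch for commit_history at the project level. The ProjectDirectory constructor argument and the checkout path are assumptions based on git-pandas' documented usage; the branch name and glob filter are placeholders.

from gitpandas import ProjectDirectory

project = ProjectDirectory(working_dir='/path/to/checkouts')  # placeholder path

# The limit is split across the repositories found under working_dir,
# as described in the docstring above.
ch = project.commit_history(
    'master',
    limit=100,
    include_globs=['*.py'],   # only count Python files
)
print(ch[['repository', 'author', 'net']].head())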