Columns (name, feature type, value range):

repo              stringlengths  7 – 54
path              stringlengths  4 – 192
url               stringlengths  87 – 284
code              stringlengths  78 – 104k
code_tokens       list
docstring         stringlengths  1 – 46.9k
docstring_tokens  list
language          stringclasses  1 value
partition         stringclasses  3 values
Crunch-io/crunch-cube
src/cr/cube/dimension.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/dimension.py#L113-L118
def _raw_dimensions(self):
    """Sequence of _RawDimension objects wrapping each dimension dict."""
    return tuple(
        _RawDimension(dimension_dict, self._dimension_dicts)
        for dimension_dict in self._dimension_dicts
    )
[ "def", "_raw_dimensions", "(", "self", ")", ":", "return", "tuple", "(", "_RawDimension", "(", "dimension_dict", ",", "self", ".", "_dimension_dicts", ")", "for", "dimension_dict", "in", "self", ".", "_dimension_dicts", ")" ]
Sequence of _RawDimension objects wrapping each dimension dict.
[ "Sequence", "of", "_RawDimension", "objects", "wrapping", "each", "dimension", "dict", "." ]
python
train
niccokunzmann/hanging_threads
hanging_threads.py
https://github.com/niccokunzmann/hanging_threads/blob/167f4faa9ef7bf44866d9cda75d30606acb3c416/hanging_threads.py#L51-L66
def start_monitoring(seconds_frozen=SECONDS_FROZEN,
                     test_interval=TEST_INTERVAL):
    """Start monitoring for hanging threads.

    seconds_frozen - How much time should thread hang to activate
    printing stack trace - default(10)

    test_interval - Sleep time of monitoring thread (in milliseconds)
    - default(100)
    """
    thread = StoppableThread(target=monitor,
                             args=(seconds_frozen, test_interval))
    thread.daemon = True
    thread.start()
    return thread
[ "def", "start_monitoring", "(", "seconds_frozen", "=", "SECONDS_FROZEN", ",", "test_interval", "=", "TEST_INTERVAL", ")", ":", "thread", "=", "StoppableThread", "(", "target", "=", "monitor", ",", "args", "=", "(", "seconds_frozen", ",", "test_interval", ")", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "start", "(", ")", "return", "thread" ]
Start monitoring for hanging threads. seconds_frozen - How much time should thread hang to activate printing stack trace - default(10) test_interval - Sleep time of monitoring thread (in milliseconds) - default(100)
[ "Start", "monitoring", "for", "hanging", "threads", "." ]
python
train
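A usage sketch for the record above, based only on what the snippet shows plus the package's documented entry point; the stop() call assumes StoppableThread exposes one, which this record does not itself prove:

    # assuming the hanging_threads module from the record above is importable
    from hanging_threads import start_monitoring

    monitoring_thread = start_monitoring(seconds_frozen=10, test_interval=100)
    # ... run the code under observation; threads frozen longer than
    # seconds_frozen get their stack traces printed ...
    monitoring_thread.stop()  # assumption: StoppableThread provides stop()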
Dallinger/Dallinger
dallinger/models.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/models.py#L332-L340
def json_data(self):
    """Return json description of a question."""
    return {
        "number": self.number,
        "type": self.type,
        "participant_id": self.participant_id,
        "question": self.question,
        "response": self.response,
    }
[ "def", "json_data", "(", "self", ")", ":", "return", "{", "\"number\"", ":", "self", ".", "number", ",", "\"type\"", ":", "self", ".", "type", ",", "\"participant_id\"", ":", "self", ".", "participant_id", ",", "\"question\"", ":", "self", ".", "question", ",", "\"response\"", ":", "self", ".", "response", ",", "}" ]
Return json description of a question.
[ "Return", "json", "description", "of", "a", "question", "." ]
python
train
fastai/fastai
fastai/core.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/core.py#L119-L122
def camel2snake(name:str)->str:
    "Change `name` from camel to snake style."
    s1 = re.sub(_camel_re1, r'\1_\2', name)
    return re.sub(_camel_re2, r'\1_\2', s1).lower()
[ "def", "camel2snake", "(", "name", ":", "str", ")", "->", "str", ":", "s1", "=", "re", ".", "sub", "(", "_camel_re1", ",", "r'\\1_\\2'", ",", "name", ")", "return", "re", ".", "sub", "(", "_camel_re2", ",", "r'\\1_\\2'", ",", "s1", ")", ".", "lower", "(", ")" ]
Change `name` from camel to snake style.
[ "Change", "name", "from", "camel", "to", "snake", "style", "." ]
python
train
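The function above depends on two module-level regexes, _camel_re1 and _camel_re2, that the record does not include. A self-contained sketch; the exact patterns are an assumption (the classic camel-to-snake pair), not taken from the record:

    import re

    # assumed patterns: uppercase-word and lowercase/digit-to-uppercase boundaries
    _camel_re1 = re.compile('(.)([A-Z][a-z]+)')
    _camel_re2 = re.compile('([a-z0-9])([A-Z])')

    def camel2snake(name: str) -> str:
        "Change `name` from camel to snake style."
        s1 = re.sub(_camel_re1, r'\1_\2', name)
        return re.sub(_camel_re2, r'\1_\2', s1).lower()

    assert camel2snake('DeviceDataLoader') == 'device_data_loader'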
openstack/networking-cisco
networking_cisco/apps/saf/server/cisco_dfa_rest.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/cisco_dfa_rest.py#L763-L784
def create_partition(self, org_name, part_name, dci_id, vrf_prof,
                     service_node_ip=None, desc=None):
    """Create partition on the DCNM.

    :param org_name: name of organization to be created
    :param part_name: name of partition to be created
    :param dci_id: DCI ID
    :param vrf_prof: VRF profile for the partition
    :param service_node_ip: Specifies the Default route IP address.
    :param desc: string that describes organization
    """
    desc = desc or org_name
    res = self._create_or_update_partition(org_name, part_name, desc,
                                           dci_id=dci_id,
                                           service_node_ip=service_node_ip,
                                           vrf_prof=vrf_prof)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Created %s partition in DCNM.", part_name)
    else:
        LOG.error("Failed to create %(part)s partition in DCNM. "
                  "Response: %(res)s", ({'part': part_name, 'res': res}))
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
[ "def", "create_partition", "(", "self", ",", "org_name", ",", "part_name", ",", "dci_id", ",", "vrf_prof", ",", "service_node_ip", "=", "None", ",", "desc", "=", "None", ")", ":", "desc", "=", "desc", "or", "org_name", "res", "=", "self", ".", "_create_or_update_partition", "(", "org_name", ",", "part_name", ",", "desc", ",", "dci_id", "=", "dci_id", ",", "service_node_ip", "=", "service_node_ip", ",", "vrf_prof", "=", "vrf_prof", ")", "if", "res", "and", "res", ".", "status_code", "in", "self", ".", "_resp_ok", ":", "LOG", ".", "debug", "(", "\"Created %s partition in DCNM.\"", ",", "part_name", ")", "else", ":", "LOG", ".", "error", "(", "\"Failed to create %(part)s partition in DCNM.\"", "\"Response: %(res)s\"", ",", "(", "{", "'part'", ":", "part_name", ",", "'res'", ":", "res", "}", ")", ")", "raise", "dexc", ".", "DfaClientRequestFailed", "(", "reason", "=", "self", ".", "_failure_msg", "(", "res", ")", ")" ]
Create partition on the DCNM. :param org_name: name of organization to be created :param part_name: name of partition to be created :param dci_id: DCI ID :param vrf_prof: VRF profile for the partition :param service_node_ip: Specifies the Default route IP address. :param desc: string that describes organization
[ "Create", "partition", "on", "the", "DCNM", "." ]
python
train
qualisys/qualisys_python_sdk
qtm/qrt.py
https://github.com/qualisys/qualisys_python_sdk/blob/127d7eeebc2b38b5cafdfa5d1d0198437fedd274/qtm/qrt.py#L56-L61
async def qtm_version(self):
    """Get the QTM version."""
    return await asyncio.wait_for(
        self._protocol.send_command("qtmversion"), timeout=self._timeout
    )
[ "async", "def", "qtm_version", "(", "self", ")", ":", "return", "await", "asyncio", ".", "wait_for", "(", "self", ".", "_protocol", ".", "send_command", "(", "\"qtmversion\"", ")", ",", "timeout", "=", "self", ".", "_timeout", ")" ]
Get the QTM version.
[ "Get", "the", "QTM", "version", "." ]
python
valid
AguaClara/aguaclara
aguaclara/research/peristaltic_pump.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/research/peristaltic_pump.py#L104-L123
def flow_rate(vol_per_rev, rpm):
    """Return the flow rate from a pump given the volume of fluid pumped per
    revolution and the desired pump speed.

    :param vol_per_rev: Volume of fluid output per revolution (dependent on pump and tubing)
    :type vol_per_rev: float
    :param rpm: Desired pump speed in revolutions per minute
    :type rpm: float

    :return: Flow rate of the pump (mL/s)
    :rtype: float

    :Examples:

    >>> from aguaclara.research.peristaltic_pump import flow_rate
    >>> from aguaclara.core.units import unit_registry as u
    >>> flow_rate(3*u.mL/u.rev, 5*u.rev/u.min)
    <Quantity(0.25, 'milliliter / second')>
    """
    return (vol_per_rev * rpm).to(u.mL/u.s)
[ "def", "flow_rate", "(", "vol_per_rev", ",", "rpm", ")", ":", "return", "(", "vol_per_rev", "*", "rpm", ")", ".", "to", "(", "u", ".", "mL", "/", "u", ".", "s", ")" ]
Return the flow rate from a pump given the volume of fluid pumped per revolution and the desired pump speed. :param vol_per_rev: Volume of fluid output per revolution (dependent on pump and tubing) :type vol_per_rev: float :param rpm: Desired pump speed in revolutions per minute :type rpm: float :return: Flow rate of the pump (mL/s) :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import flow_rate >>> from aguaclara.core.units import unit_registry as u >>> flow_rate(3*u.mL/u.rev, 5*u.rev/u.min) <Quantity(0.25, 'milliliter / second')>
[ "Return", "the", "flow", "rate", "from", "a", "pump", "given", "the", "volume", "of", "fluid", "pumped", "per", "revolution", "and", "the", "desired", "pump", "speed", "." ]
python
train
lowandrew/OLCTools
spadespipeline/sistr.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/sistr.py#L59-L95
def report(self):
    """Creates sistr reports"""
    # Initialise strings to store report data
    header = '\t'.join(self.headers) + '\n'
    data = ''
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            # Each strain is a fresh row
            row = ''
            try:
                # Read in the output .json file into the metadata
                sample[self.analysistype].jsondata = json.load(
                    open(sample[self.analysistype].jsonoutput, 'r'))
                # Set the name of the report.
                # Note that this is a tab-separated file, as there can be commas in the results
                sample[self.analysistype].report = os.path.join(
                    sample[self.analysistype].reportdir,
                    '{}.tsv'.format(sample.name))
                # Iterate through all the headers to use as keys in the json-formatted output
                for category in self.headers:
                    # Tab separate all the results
                    row += '{}\t'.format(sample[self.analysistype].jsondata[0][category])
                    # Create attributes for each category
                    setattr(sample[self.analysistype], category,
                            str(sample[self.analysistype].jsondata[0][category]))
                # End the results with a newline
                row += '\n'
                data += row
                # Create and write headers and results to the strain-specific report
                with open(sample[self.analysistype].report, 'w') as strainreport:
                    strainreport.write(header)
                    strainreport.write(row)
            except (KeyError, AttributeError):
                pass
    # Create and write headers and cumulative results to the combined report
    with open(os.path.join(self.reportdir, 'sistr.tsv'), 'w') as report:
        report.write(header)
        report.write(data)
[ "def", "report", "(", "self", ")", ":", "# Initialise strings to store report data", "header", "=", "'\\t'", ".", "join", "(", "self", ".", "headers", ")", "+", "'\\n'", "data", "=", "''", "for", "sample", "in", "self", ".", "metadata", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "# Each strain is a fresh row", "row", "=", "''", "try", ":", "# Read in the output .json file into the metadata", "sample", "[", "self", ".", "analysistype", "]", ".", "jsondata", "=", "json", ".", "load", "(", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsonoutput", ",", "'r'", ")", ")", "# Set the name of the report.", "# Note that this is a tab-separated file, as there can be commas in the results", "sample", "[", "self", ".", "analysistype", "]", ".", "report", "=", "os", ".", "path", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "reportdir", ",", "'{}.tsv'", ".", "format", "(", "sample", ".", "name", ")", ")", "# Iterate through all the headers to use as keys in the json-formatted output", "for", "category", "in", "self", ".", "headers", ":", "# Tab separate all the results", "row", "+=", "'{}\\t'", ".", "format", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsondata", "[", "0", "]", "[", "category", "]", ")", "# Create attributes for each category", "setattr", "(", "sample", "[", "self", ".", "analysistype", "]", ",", "category", ",", "str", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "jsondata", "[", "0", "]", "[", "category", "]", ")", ")", "# End the results with a newline", "row", "+=", "'\\n'", "data", "+=", "row", "# Create and write headers and results to the strain-specific report", "with", "open", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "report", ",", "'w'", ")", "as", "strainreport", ":", "strainreport", ".", "write", "(", "header", ")", "strainreport", ".", "write", "(", "row", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "# Create and write headers and cumulative results to the combined report", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportdir", ",", "'sistr.tsv'", ")", ",", "'w'", ")", "as", "report", ":", "report", ".", "write", "(", "header", ")", "report", ".", "write", "(", "data", ")" ]
Creates sistr reports
[ "Creates", "sistr", "reports" ]
python
train
ArchiveTeam/wpull
wpull/protocol/ftp/client.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/ftp/client.py#L362-L382
def _open_data_stream(self):
    '''Open the data stream connection.

    Coroutine.
    '''
    @asyncio.coroutine
    def connection_factory(address: Tuple[int, int]):
        self._data_connection = yield from self._acquire_connection(
            address[0], address[1])
        return self._data_connection

    self._data_stream = yield from self._commander.setup_data_stream(
        connection_factory
    )

    self._response.data_address = self._data_connection.address

    read_callback = functools.partial(self.event_dispatcher.notify,
                                      self.Event.transfer_receive_data)
    self._data_stream.data_event_dispatcher.add_read_listener(read_callback)

    write_callback = functools.partial(self.event_dispatcher.notify,
                                       self.Event.transfer_send_data)
    self._data_stream.data_event_dispatcher.add_write_listener(write_callback)
[ "def", "_open_data_stream", "(", "self", ")", ":", "@", "asyncio", ".", "coroutine", "def", "connection_factory", "(", "address", ":", "Tuple", "[", "int", ",", "int", "]", ")", ":", "self", ".", "_data_connection", "=", "yield", "from", "self", ".", "_acquire_connection", "(", "address", "[", "0", "]", ",", "address", "[", "1", "]", ")", "return", "self", ".", "_data_connection", "self", ".", "_data_stream", "=", "yield", "from", "self", ".", "_commander", ".", "setup_data_stream", "(", "connection_factory", ")", "self", ".", "_response", ".", "data_address", "=", "self", ".", "_data_connection", ".", "address", "read_callback", "=", "functools", ".", "partial", "(", "self", ".", "event_dispatcher", ".", "notify", ",", "self", ".", "Event", ".", "transfer_receive_data", ")", "self", ".", "_data_stream", ".", "data_event_dispatcher", ".", "add_read_listener", "(", "read_callback", ")", "write_callback", "=", "functools", ".", "partial", "(", "self", ".", "event_dispatcher", ".", "notify", ",", "self", ".", "Event", ".", "transfer_send_data", ")", "self", ".", "_data_stream", ".", "data_event_dispatcher", ".", "add_write_listener", "(", "write_callback", ")" ]
Open the data stream connection. Coroutine.
[ "Open", "the", "data", "stream", "connection", "." ]
python
train
MAVENSDC/cdflib
cdflib/cdfwrite.py
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1193-L1214
def _use_vxrentry(self, f, VXRoffset, recStart, recEnd, offset):
    '''
    Adds a VVR pointer to a VXR
    '''
    # Select the next unused entry in a VXR for a VVR/CVVR
    f.seek(VXRoffset+20)
    # num entries
    numEntries = int.from_bytes(f.read(4), 'big', signed=True)
    # used entries
    usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
    # VXR's First
    self._update_offset_value(f, VXRoffset+28+4*usedEntries, 4, recStart)
    # VXR's Last
    self._update_offset_value(f, VXRoffset+28+4*numEntries+4*usedEntries,
                              4, recEnd)
    # VXR's Offset
    self._update_offset_value(f, VXRoffset+28+2*4*numEntries+8*usedEntries,
                              8, offset)
    # VXR's NusedEntries
    usedEntries += 1
    self._update_offset_value(f, VXRoffset+24, 4, usedEntries)
    return usedEntries
[ "def", "_use_vxrentry", "(", "self", ",", "f", ",", "VXRoffset", ",", "recStart", ",", "recEnd", ",", "offset", ")", ":", "# Select the next unused entry in a VXR for a VVR/CVVR", "f", ".", "seek", "(", "VXRoffset", "+", "20", ")", "# num entries", "numEntries", "=", "int", ".", "from_bytes", "(", "f", ".", "read", "(", "4", ")", ",", "'big'", ",", "signed", "=", "True", ")", "# used entries", "usedEntries", "=", "int", ".", "from_bytes", "(", "f", ".", "read", "(", "4", ")", ",", "'big'", ",", "signed", "=", "True", ")", "# VXR's First", "self", ".", "_update_offset_value", "(", "f", ",", "VXRoffset", "+", "28", "+", "4", "*", "usedEntries", ",", "4", ",", "recStart", ")", "# VXR's Last", "self", ".", "_update_offset_value", "(", "f", ",", "VXRoffset", "+", "28", "+", "4", "*", "numEntries", "+", "4", "*", "usedEntries", ",", "4", ",", "recEnd", ")", "# VXR's Offset", "self", ".", "_update_offset_value", "(", "f", ",", "VXRoffset", "+", "28", "+", "2", "*", "4", "*", "numEntries", "+", "8", "*", "usedEntries", ",", "8", ",", "offset", ")", "# VXR's NusedEntries", "usedEntries", "+=", "1", "self", ".", "_update_offset_value", "(", "f", ",", "VXRoffset", "+", "24", ",", "4", ",", "usedEntries", ")", "return", "usedEntries" ]
Adds a VVR pointer to a VXR
[ "Adds", "a", "VVR", "pointer", "to", "a", "VXR" ]
python
train
Jaymon/dsnparse
dsnparse.py
https://github.com/Jaymon/dsnparse/blob/2e4e1be8cc9d2dd0f6138c881b06677a6e80b029/dsnparse.py#L197-L210
def setdefault(self, key, val):
    """
    set a default value for key

    this is different than dict's setdefault because it will set default
    either if the key doesn't exist, or if the value at the key evaluates
    to False, so an empty string or a None value will also be updated

    :param key: string, the attribute to update
    :param val: mixed, the attributes new value if key has a current value
        that evaluates to False
    """
    if not getattr(self, key, None):
        setattr(self, key, val)
[ "def", "setdefault", "(", "self", ",", "key", ",", "val", ")", ":", "if", "not", "getattr", "(", "self", ",", "key", ",", "None", ")", ":", "setattr", "(", "self", ",", "key", ",", "val", ")" ]
set a default value for key this is different than dict's setdefault because it will set default either if the key doesn't exist, or if the value at the key evaluates to False, so an empty string or a None value will also be updated :param key: string, the attribute to update :param val: mixed, the attributes new value if key has a current value that evaluates to False
[ "set", "a", "default", "value", "for", "key" ]
python
train
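To make the difference from dict.setdefault concrete, a small illustration; the Demo class is invented for the sketch and simply inlines the record's logic:

    class Demo(object):
        def setdefault(self, key, val):
            if not getattr(self, key, None):
                setattr(self, key, val)

    d = Demo()
    d.port = ''              # falsy current value
    d.setdefault('port', 5432)
    assert d.port == 5432    # overwritten, unlike dict.setdefault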
gem/oq-engine
openquake/hazardlib/gsim/akkar_2014.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/akkar_2014.py#L385-L393
def _compute_logarithmic_distance_term(self, C, mag, dists):
    """
    Compute and return fourth term in equations (2a) and (2b), page 20.
    """
    return (
        (C['a4'] + C['a5'] * (mag - self.c1)) *
        np.log(np.sqrt(dists.rhypo ** 2 + C['a6'] ** 2))
    )
[ "def", "_compute_logarithmic_distance_term", "(", "self", ",", "C", ",", "mag", ",", "dists", ")", ":", "return", "(", "(", "C", "[", "'a4'", "]", "+", "C", "[", "'a5'", "]", "*", "(", "mag", "-", "self", ".", "c1", ")", ")", "*", "np", ".", "log", "(", "np", ".", "sqrt", "(", "dists", ".", "rhypo", "**", "2", "+", "C", "[", "'a6'", "]", "**", "2", ")", ")", ")" ]
Compute and return fourth term in equations (2a) and (2b), page 20.
[ "Compute", "and", "return", "fourth", "term", "in", "equations", "(", "2a", ")", "and", "(", "2b", ")", "page", "20", "." ]
python
train
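For reference, the quantity the record above computes is the distance term (a sketch in the coefficients' notation, inferred from the code rather than checked against Akkar et al. 2014):

    [ a4 + a5 * (M - c1) ] * ln( sqrt( Rhypo^2 + a6^2 ) )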
waqasbhatti/astrobase
astrobase/periodbase/zgls.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/periodbase/zgls.py#L161-L265
def generalized_lsp_value_withtau(times, mags, errs, omega):
    '''Generalized LSP value for a single omega.

    This uses tau to provide an arbitrary time-reference point.

    The relations used are::

        P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)

        where: YC, YS, CC, and SS are all calculated at T

        and where: tan 2omegaT = 2*CS/(CC - SS)

        and where:

        Y = sum( w_i*y_i )
        C = sum( w_i*cos(wT_i) )
        S = sum( w_i*sin(wT_i) )

        YY = sum( w_i*y_i*y_i ) - Y*Y
        YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
        YS = sum( w_i*y_i*sin(wT_i) ) - Y*S

        CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
        CC = CpC - C*C
        SS = (1 - CpC) - S*S
        CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S

    Parameters
    ----------
    times,mags,errs : np.array
        The time-series to calculate the periodogram value for.

    omega : float
        The frequency to calculate the periodogram value at.

    Returns
    -------
    periodogramvalue : float
        The normalized periodogram at the specified test frequency `omega`.

    '''
    one_over_errs2 = 1.0/(errs*errs)

    W = npsum(one_over_errs2)
    wi = one_over_errs2/W

    sin_omegat = npsin(omega*times)
    cos_omegat = npcos(omega*times)

    sin2_omegat = sin_omegat*sin_omegat
    cos2_omegat = cos_omegat*cos_omegat
    sincos_omegat = sin_omegat*cos_omegat

    # calculate some more sums and terms
    Y = npsum( wi*mags )
    C = npsum( wi*cos_omegat )
    S = npsum( wi*sin_omegat )

    CpS = npsum( wi*sincos_omegat )
    CpC = npsum( wi*cos2_omegat )
    CS = CpS - C*S
    CC = CpC - C*C
    SS = 1 - CpC - S*S  # use SpS = 1 - CpC

    # calculate tau
    tan_omega_tau_top = 2.0*CS
    tan_omega_tau_bottom = CC - SS
    tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom
    tau = nparctan(tan_omega_tau)/(2.0*omega)

    # now we need to calculate all the bits at tau
    sin_omega_tau = npsin(omega*(times - tau))
    cos_omega_tau = npcos(omega*(times - tau))

    sin2_omega_tau = sin_omega_tau*sin_omega_tau
    cos2_omega_tau = cos_omega_tau*cos_omega_tau
    sincos_omega_tau = sin_omega_tau*cos_omega_tau

    C_tau = npsum(wi*cos_omega_tau)
    S_tau = npsum(wi*sin_omega_tau)

    CpS_tau = npsum( wi*sincos_omega_tau )
    CpC_tau = npsum( wi*cos2_omega_tau )
    CS_tau = CpS_tau - C_tau*S_tau
    CC_tau = CpC_tau - C_tau*C_tau
    SS_tau = 1 - CpC_tau - S_tau*S_tau  # use SpS = 1 - CpC

    YpY = npsum( wi*mags*mags)

    YpC_tau = npsum( wi*mags*cos_omega_tau )
    YpS_tau = npsum( wi*mags*sin_omega_tau )

    # SpS = npsum( wi*sin2_omegat )

    # the final terms
    YY = YpY - Y*Y
    YC_tau = YpC_tau - Y*C_tau
    YS_tau = YpS_tau - Y*S_tau

    periodogramvalue = (YC_tau*YC_tau/CC_tau + YS_tau*YS_tau/SS_tau)/YY

    return periodogramvalue
[ "def", "generalized_lsp_value_withtau", "(", "times", ",", "mags", ",", "errs", ",", "omega", ")", ":", "one_over_errs2", "=", "1.0", "/", "(", "errs", "*", "errs", ")", "W", "=", "npsum", "(", "one_over_errs2", ")", "wi", "=", "one_over_errs2", "/", "W", "sin_omegat", "=", "npsin", "(", "omega", "*", "times", ")", "cos_omegat", "=", "npcos", "(", "omega", "*", "times", ")", "sin2_omegat", "=", "sin_omegat", "*", "sin_omegat", "cos2_omegat", "=", "cos_omegat", "*", "cos_omegat", "sincos_omegat", "=", "sin_omegat", "*", "cos_omegat", "# calculate some more sums and terms", "Y", "=", "npsum", "(", "wi", "*", "mags", ")", "C", "=", "npsum", "(", "wi", "*", "cos_omegat", ")", "S", "=", "npsum", "(", "wi", "*", "sin_omegat", ")", "CpS", "=", "npsum", "(", "wi", "*", "sincos_omegat", ")", "CpC", "=", "npsum", "(", "wi", "*", "cos2_omegat", ")", "CS", "=", "CpS", "-", "C", "*", "S", "CC", "=", "CpC", "-", "C", "*", "C", "SS", "=", "1", "-", "CpC", "-", "S", "*", "S", "# use SpS = 1 - CpC", "# calculate tau", "tan_omega_tau_top", "=", "2.0", "*", "CS", "tan_omega_tau_bottom", "=", "CC", "-", "SS", "tan_omega_tau", "=", "tan_omega_tau_top", "/", "tan_omega_tau_bottom", "tau", "=", "nparctan", "(", "tan_omega_tau", ")", "/", "(", "2.0", "*", "omega", ")", "# now we need to calculate all the bits at tau", "sin_omega_tau", "=", "npsin", "(", "omega", "*", "(", "times", "-", "tau", ")", ")", "cos_omega_tau", "=", "npcos", "(", "omega", "*", "(", "times", "-", "tau", ")", ")", "sin2_omega_tau", "=", "sin_omega_tau", "*", "sin_omega_tau", "cos2_omega_tau", "=", "cos_omega_tau", "*", "cos_omega_tau", "sincos_omega_tau", "=", "sin_omega_tau", "*", "cos_omega_tau", "C_tau", "=", "npsum", "(", "wi", "*", "cos_omega_tau", ")", "S_tau", "=", "npsum", "(", "wi", "*", "sin_omega_tau", ")", "CpS_tau", "=", "npsum", "(", "wi", "*", "sincos_omega_tau", ")", "CpC_tau", "=", "npsum", "(", "wi", "*", "cos2_omega_tau", ")", "CS_tau", "=", "CpS_tau", "-", "C_tau", "*", "S_tau", "CC_tau", "=", "CpC_tau", "-", "C_tau", "*", "C_tau", "SS_tau", "=", "1", "-", "CpC_tau", "-", "S_tau", "*", "S_tau", "# use SpS = 1 - CpC", "YpY", "=", "npsum", "(", "wi", "*", "mags", "*", "mags", ")", "YpC_tau", "=", "npsum", "(", "wi", "*", "mags", "*", "cos_omega_tau", ")", "YpS_tau", "=", "npsum", "(", "wi", "*", "mags", "*", "sin_omega_tau", ")", "# SpS = npsum( wi*sin2_omegat )", "# the final terms", "YY", "=", "YpY", "-", "Y", "*", "Y", "YC_tau", "=", "YpC_tau", "-", "Y", "*", "C_tau", "YS_tau", "=", "YpS_tau", "-", "Y", "*", "S_tau", "periodogramvalue", "=", "(", "YC_tau", "*", "YC_tau", "/", "CC_tau", "+", "YS_tau", "*", "YS_tau", "/", "SS_tau", ")", "/", "YY", "return", "periodogramvalue" ]
Generalized LSP value for a single omega. This uses tau to provide an arbitrary time-reference point. The relations used are:: P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS) where: YC, YS, CC, and SS are all calculated at T and where: tan 2omegaT = 2*CS/(CC - SS) and where: Y = sum( w_i*y_i ) C = sum( w_i*cos(wT_i) ) S = sum( w_i*sin(wT_i) ) YY = sum( w_i*y_i*y_i ) - Y*Y YC = sum( w_i*y_i*cos(wT_i) ) - Y*C YS = sum( w_i*y_i*sin(wT_i) ) - Y*S CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) ) CC = CpC - C*C SS = (1 - CpC) - S*S CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S Parameters ---------- times,mags,errs : np.array The time-series to calculate the periodogram value for. omega : float The frequency to calculate the periodogram value at. Returns ------- periodogramvalue : float The normalized periodogram at the specified test frequency `omega`.
[ "Generalized", "LSP", "value", "for", "a", "single", "omega", "." ]
python
valid
EUDAT-B2SAFE/B2HANDLE
b2handle/handleclient.py
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handleclient.py#L934-L973
def search_handle(self, URL=None, prefix=None, **key_value_pairs):
    '''
    Search for handles containing the specified key with the specified
    value. The search terms are passed on to the reverse lookup servlet
    as-is. The servlet is supposed to be case-insensitive, but if it
    isn't, the wrong case will cause a
    :exc:`~b2handle.handleexceptions.ReverseLookupException`.

    *Note:* If allowed search keys are configured, only these are used. If
    no allowed search keys are specified, all key-value pairs are passed
    on to the reverse lookup servlet, possibly causing a
    :exc:`~b2handle.handleexceptions.ReverseLookupException`.

    Example calls:

    .. code:: python

        list_of_handles = search_handle('http://www.foo.com')
        list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999)
        list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)

    :param URL: Optional. The URL to search for (reverse lookup). [This
        is NOT the URL of the search servlet!]
    :param prefix: Optional. The Handle prefix to which the search should
        be limited to. If unspecified, the method will search across all
        prefixes present at the server given to the constructor.
    :param key_value_pairs: Optional. Several search fields and values can
        be specified as key-value-pairs, e.g. CHECKSUM=123456,
        URL=www.foo.com
    :raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If
        a search field is specified that cannot be used, or if something
        else goes wrong.
    :return: A list of all Handles (strings) that bear the given key with
        given value of given prefix or server. The list may be empty and
        may also contain more than one element.
    '''
    LOGGER.debug('search_handle...')

    list_of_handles = self.__searcher.search_handle(URL=URL, prefix=prefix,
                                                    **key_value_pairs)
    return list_of_handles
[ "def", "search_handle", "(", "self", ",", "URL", "=", "None", ",", "prefix", "=", "None", ",", "*", "*", "key_value_pairs", ")", ":", "LOGGER", ".", "debug", "(", "'search_handle...'", ")", "list_of_handles", "=", "self", ".", "__searcher", ".", "search_handle", "(", "URL", "=", "URL", ",", "prefix", "=", "prefix", ",", "*", "*", "key_value_pairs", ")", "return", "list_of_handles" ]
Search for handles containing the specified key with the specified value. The search terms are passed on to the reverse lookup servlet as-is. The servlet is supposed to be case-insensitive, but if it isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`. *Note:* If allowed search keys are configured, only these are used. If no allowed search keys are specified, all key-value pairs are passed on to the reverse lookup servlet, possibly causing a :exc:`~b2handle.handleexceptions.ReverseLookupException`. Example calls: .. code:: python list_of_handles = search_handle('http://www.foo.com') list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999) list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999) :param URL: Optional. The URL to search for (reverse lookup). [This is NOT the URL of the search servlet!] :param prefix: Optional. The Handle prefix to which the search should be limited to. If unspecified, the method will search across all prefixes present at the server given to the constructor. :param key_value_pairs: Optional. Several search fields and values can be specified as key-value-pairs, e.g. CHECKSUM=123456, URL=www.foo.com :raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that cannot be used, or if something else goes wrong. :return: A list of all Handles (strings) that bear the given key with given value of given prefix or server. The list may be empty and may also contain more than one element.
[ "Search", "for", "handles", "containing", "the", "specified", "key", "with", "the", "specified", "value", ".", "The", "search", "terms", "are", "passed", "on", "to", "the", "reverse", "lookup", "servlet", "as", "-", "is", ".", "The", "servlet", "is", "supposed", "to", "be", "case", "-", "insensitive", "but", "if", "it", "isn", "t", "the", "wrong", "case", "will", "cause", "a", ":", "exc", ":", "~b2handle", ".", "handleexceptions", ".", "ReverseLookupException", "." ]
python
train
serge-sans-paille/pythran
pythran/conversion.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/conversion.py#L34-L65
def size_container_folding(value):
    """
    Convert value to ast expression if size is not too big.

    Converter for sized container.
    """
    if len(value) < MAX_LEN:
        if isinstance(value, list):
            return ast.List([to_ast(elt) for elt in value], ast.Load())
        elif isinstance(value, tuple):
            return ast.Tuple([to_ast(elt) for elt in value], ast.Load())
        elif isinstance(value, set):
            return ast.Set([to_ast(elt) for elt in value])
        elif isinstance(value, dict):
            keys = [to_ast(elt) for elt in value.keys()]
            values = [to_ast(elt) for elt in value.values()]
            return ast.Dict(keys, values)
        elif isinstance(value, np.ndarray):
            return ast.Call(
                func=ast.Attribute(
                    ast.Name(mangle('numpy'), ast.Load(), None),
                    'array',
                    ast.Load()),
                args=[to_ast(totuple(value.tolist())),
                      ast.Attribute(
                          ast.Name(mangle('numpy'), ast.Load(), None),
                          value.dtype.name,
                          ast.Load())],
                keywords=[])
        else:
            raise ConversionError()
    else:
        raise ToNotEval()
[ "def", "size_container_folding", "(", "value", ")", ":", "if", "len", "(", "value", ")", "<", "MAX_LEN", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "ast", ".", "List", "(", "[", "to_ast", "(", "elt", ")", "for", "elt", "in", "value", "]", ",", "ast", ".", "Load", "(", ")", ")", "elif", "isinstance", "(", "value", ",", "tuple", ")", ":", "return", "ast", ".", "Tuple", "(", "[", "to_ast", "(", "elt", ")", "for", "elt", "in", "value", "]", ",", "ast", ".", "Load", "(", ")", ")", "elif", "isinstance", "(", "value", ",", "set", ")", ":", "return", "ast", ".", "Set", "(", "[", "to_ast", "(", "elt", ")", "for", "elt", "in", "value", "]", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "keys", "=", "[", "to_ast", "(", "elt", ")", "for", "elt", "in", "value", ".", "keys", "(", ")", "]", "values", "=", "[", "to_ast", "(", "elt", ")", "for", "elt", "in", "value", ".", "values", "(", ")", "]", "return", "ast", ".", "Dict", "(", "keys", ",", "values", ")", "elif", "isinstance", "(", "value", ",", "np", ".", "ndarray", ")", ":", "return", "ast", ".", "Call", "(", "func", "=", "ast", ".", "Attribute", "(", "ast", ".", "Name", "(", "mangle", "(", "'numpy'", ")", ",", "ast", ".", "Load", "(", ")", ",", "None", ")", ",", "'array'", ",", "ast", ".", "Load", "(", ")", ")", ",", "args", "=", "[", "to_ast", "(", "totuple", "(", "value", ".", "tolist", "(", ")", ")", ")", ",", "ast", ".", "Attribute", "(", "ast", ".", "Name", "(", "mangle", "(", "'numpy'", ")", ",", "ast", ".", "Load", "(", ")", ",", "None", ")", ",", "value", ".", "dtype", ".", "name", ",", "ast", ".", "Load", "(", ")", ")", "]", ",", "keywords", "=", "[", "]", ")", "else", ":", "raise", "ConversionError", "(", ")", "else", ":", "raise", "ToNotEval", "(", ")" ]
Convert value to ast expression if size is not too big. Converter for sized container.
[ "Convert", "value", "to", "ast", "expression", "if", "size", "is", "not", "too", "big", "." ]
python
train
ewiger/mlab
src/mlab/awmstools.py
https://github.com/ewiger/mlab/blob/72a98adf6499f548848ad44c604f74d68f07fe4f/src/mlab/awmstools.py#L1037-L1051
def some(predicate, *seqs):
    """
    >>> some(lambda x: x, [0, False, None])
    False
    >>> some(lambda x: x, [None, 0, 2, 3])
    2
    >>> some(operator.eq, [0,1,2], [2,1,0])
    True
    >>> some(operator.eq, [1,2], [2,1])
    False
    """
    try:
        if len(seqs) == 1:
            return ifilter(bool, imap(predicate, seqs[0])).next()
        else:
            return ifilter(bool, starmap(predicate, izip(*seqs))).next()
    except StopIteration:
        return False
[ "def", "some", "(", "predicate", ",", "*", "seqs", ")", ":", "try", ":", "if", "len", "(", "seqs", ")", "==", "1", ":", "return", "ifilter", "(", "bool", ",", "imap", "(", "predicate", ",", "seqs", "[", "0", "]", ")", ")", ".", "next", "(", ")", "else", ":", "return", "ifilter", "(", "bool", ",", "starmap", "(", "predicate", ",", "izip", "(", "*", "seqs", ")", ")", ")", ".", "next", "(", ")", "except", "StopIteration", ":", "return", "False" ]
>>> some(lambda x: x, [0, False, None]) False >>> some(lambda x: x, [None, 0, 2, 3]) 2 >>> some(operator.eq, [0,1,2], [2,1,0]) True >>> some(operator.eq, [1,2], [2,1]) False
[ ">>>", "some", "(", "lambda", "x", ":", "x", "[", "0", "False", "None", "]", ")", "False", ">>>", "some", "(", "lambda", "x", ":", "x", "[", "None", "0", "2", "3", "]", ")", "2", ">>>", "some", "(", "operator", ".", "eq", "[", "0", "1", "2", "]", "[", "2", "1", "0", "]", ")", "True", ">>>", "some", "(", "operator", ".", "eq", "[", "1", "2", "]", "[", "2", "1", "]", ")", "False" ]
python
train
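The record above is Python 2 (ifilter/imap/izip and .next()). A rough Python 3 equivalent, offered only as an illustration of the same first-truthy-result semantics:

    from itertools import starmap

    def some_py3(predicate, *seqs):
        """Return the first truthy predicate result over seqs, else False."""
        if len(seqs) == 1:
            results = map(predicate, seqs[0])
        else:
            results = starmap(predicate, zip(*seqs))
        # next() with a default replaces the Py2 .next()/StopIteration dance
        return next(filter(bool, results), False)

    assert some_py3(lambda x: x, [None, 0, 2, 3]) == 2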
mwouts/jupytext
jupytext/jupytext.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/jupytext.py#L45-L90
def reads(self, s, **_):
    """Read a notebook represented as text"""
    if self.fmt.get('format_name') == 'pandoc':
        return md_to_notebook(s)

    lines = s.splitlines()

    cells = []
    metadata, jupyter_md, header_cell, pos = header_to_metadata_and_cell(
        lines, self.implementation.header_prefix,
        self.implementation.extension)
    default_language = default_language_from_metadata_and_ext(
        metadata, self.implementation.extension)
    self.update_fmt_with_notebook_options(metadata)

    if header_cell:
        cells.append(header_cell)

    lines = lines[pos:]

    if self.implementation.format_name and \
            self.implementation.format_name.startswith('sphinx'):
        cells.append(new_code_cell(source='%matplotlib inline'))

    cell_metadata = set()
    while lines:
        reader = self.implementation.cell_reader_class(self.fmt,
                                                       default_language)
        cell, pos = reader.read(lines)
        cells.append(cell)
        cell_metadata.update(cell.metadata.keys())
        if pos <= 0:
            raise Exception('Blocked at lines ' +
                            '\n'.join(lines[:6]))  # pragma: no cover
        lines = lines[pos:]

    update_metadata_filters(metadata, jupyter_md, cell_metadata)
    set_main_and_cell_language(metadata, cells,
                               self.implementation.extension)

    if self.implementation.format_name and \
            self.implementation.format_name.startswith('sphinx'):
        filtered_cells = []
        for i, cell in enumerate(cells):
            if cell.source == '' and i > 0 and i + 1 < len(cells) \
                    and cells[i - 1].cell_type != 'markdown' \
                    and cells[i + 1].cell_type != 'markdown':
                continue
            filtered_cells.append(cell)
        cells = filtered_cells

    return new_notebook(cells=cells, metadata=metadata)
[ "def", "reads", "(", "self", ",", "s", ",", "*", "*", "_", ")", ":", "if", "self", ".", "fmt", ".", "get", "(", "'format_name'", ")", "==", "'pandoc'", ":", "return", "md_to_notebook", "(", "s", ")", "lines", "=", "s", ".", "splitlines", "(", ")", "cells", "=", "[", "]", "metadata", ",", "jupyter_md", ",", "header_cell", ",", "pos", "=", "header_to_metadata_and_cell", "(", "lines", ",", "self", ".", "implementation", ".", "header_prefix", ",", "self", ".", "implementation", ".", "extension", ")", "default_language", "=", "default_language_from_metadata_and_ext", "(", "metadata", ",", "self", ".", "implementation", ".", "extension", ")", "self", ".", "update_fmt_with_notebook_options", "(", "metadata", ")", "if", "header_cell", ":", "cells", ".", "append", "(", "header_cell", ")", "lines", "=", "lines", "[", "pos", ":", "]", "if", "self", ".", "implementation", ".", "format_name", "and", "self", ".", "implementation", ".", "format_name", ".", "startswith", "(", "'sphinx'", ")", ":", "cells", ".", "append", "(", "new_code_cell", "(", "source", "=", "'%matplotlib inline'", ")", ")", "cell_metadata", "=", "set", "(", ")", "while", "lines", ":", "reader", "=", "self", ".", "implementation", ".", "cell_reader_class", "(", "self", ".", "fmt", ",", "default_language", ")", "cell", ",", "pos", "=", "reader", ".", "read", "(", "lines", ")", "cells", ".", "append", "(", "cell", ")", "cell_metadata", ".", "update", "(", "cell", ".", "metadata", ".", "keys", "(", ")", ")", "if", "pos", "<=", "0", ":", "raise", "Exception", "(", "'Blocked at lines '", "+", "'\\n'", ".", "join", "(", "lines", "[", ":", "6", "]", ")", ")", "# pragma: no cover", "lines", "=", "lines", "[", "pos", ":", "]", "update_metadata_filters", "(", "metadata", ",", "jupyter_md", ",", "cell_metadata", ")", "set_main_and_cell_language", "(", "metadata", ",", "cells", ",", "self", ".", "implementation", ".", "extension", ")", "if", "self", ".", "implementation", ".", "format_name", "and", "self", ".", "implementation", ".", "format_name", ".", "startswith", "(", "'sphinx'", ")", ":", "filtered_cells", "=", "[", "]", "for", "i", ",", "cell", "in", "enumerate", "(", "cells", ")", ":", "if", "cell", ".", "source", "==", "''", "and", "i", ">", "0", "and", "i", "+", "1", "<", "len", "(", "cells", ")", "and", "cells", "[", "i", "-", "1", "]", ".", "cell_type", "!=", "'markdown'", "and", "cells", "[", "i", "+", "1", "]", ".", "cell_type", "!=", "'markdown'", ":", "continue", "filtered_cells", ".", "append", "(", "cell", ")", "cells", "=", "filtered_cells", "return", "new_notebook", "(", "cells", "=", "cells", ",", "metadata", "=", "metadata", ")" ]
Read a notebook represented as text
[ "Read", "a", "notebook", "represented", "as", "text" ]
python
train
gem/oq-engine
openquake/commonlib/readinput.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/readinput.py#L1328-L1372
def get_input_files(oqparam, hazard=False):
    """
    :param oqparam: an OqParam instance
    :param hazard: if True, consider only the hazard files
    :returns: input path names in a specific order
    """
    fnames = []  # files entering in the checksum
    for key in oqparam.inputs:
        fname = oqparam.inputs[key]
        if hazard and key not in ('site_model', 'source_model_logic_tree',
                                  'gsim_logic_tree', 'source'):
            continue
        # collect .hdf5 tables for the GSIMs, if any
        elif key == 'gsim_logic_tree':
            gsim_lt = get_gsim_lt(oqparam)
            for gsims in gsim_lt.values.values():
                for gsim in gsims:
                    table = getattr(gsim, 'GMPE_TABLE', None)
                    if table:
                        fnames.append(table)
            fnames.append(fname)
        elif key == 'source_model':  # UCERF
            f = oqparam.inputs['source_model']
            fnames.append(f)
            fname = nrml.read(f).sourceModel.UCERFSource['filename']
            fnames.append(os.path.join(os.path.dirname(f), fname))
        elif key == 'exposure':  # fname is a list
            for exp in asset.Exposure.read_headers(fname):
                fnames.extend(exp.datafiles)
            fnames.extend(fname)
        elif isinstance(fname, dict):
            fnames.extend(fname.values())
        elif isinstance(fname, list):
            for f in fname:
                if f == oqparam.input_dir:
                    raise InvalidFile('%s there is an empty path in %s' %
                                      (oqparam.inputs['job_ini'], key))
            fnames.extend(fname)
        elif key == 'source_model_logic_tree':
            for smpaths in logictree.collect_info(fname).smpaths.values():
                fnames.extend(smpaths)
            fnames.append(fname)
        else:
            fnames.append(fname)
    return sorted(fnames)
[ "def", "get_input_files", "(", "oqparam", ",", "hazard", "=", "False", ")", ":", "fnames", "=", "[", "]", "# files entering in the checksum", "for", "key", "in", "oqparam", ".", "inputs", ":", "fname", "=", "oqparam", ".", "inputs", "[", "key", "]", "if", "hazard", "and", "key", "not", "in", "(", "'site_model'", ",", "'source_model_logic_tree'", ",", "'gsim_logic_tree'", ",", "'source'", ")", ":", "continue", "# collect .hdf5 tables for the GSIMs, if any", "elif", "key", "==", "'gsim_logic_tree'", ":", "gsim_lt", "=", "get_gsim_lt", "(", "oqparam", ")", "for", "gsims", "in", "gsim_lt", ".", "values", ".", "values", "(", ")", ":", "for", "gsim", "in", "gsims", ":", "table", "=", "getattr", "(", "gsim", ",", "'GMPE_TABLE'", ",", "None", ")", "if", "table", ":", "fnames", ".", "append", "(", "table", ")", "fnames", ".", "append", "(", "fname", ")", "elif", "key", "==", "'source_model'", ":", "# UCERF", "f", "=", "oqparam", ".", "inputs", "[", "'source_model'", "]", "fnames", ".", "append", "(", "f", ")", "fname", "=", "nrml", ".", "read", "(", "f", ")", ".", "sourceModel", ".", "UCERFSource", "[", "'filename'", "]", "fnames", ".", "append", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "f", ")", ",", "fname", ")", ")", "elif", "key", "==", "'exposure'", ":", "# fname is a list", "for", "exp", "in", "asset", ".", "Exposure", ".", "read_headers", "(", "fname", ")", ":", "fnames", ".", "extend", "(", "exp", ".", "datafiles", ")", "fnames", ".", "extend", "(", "fname", ")", "elif", "isinstance", "(", "fname", ",", "dict", ")", ":", "fnames", ".", "extend", "(", "fname", ".", "values", "(", ")", ")", "elif", "isinstance", "(", "fname", ",", "list", ")", ":", "for", "f", "in", "fname", ":", "if", "f", "==", "oqparam", ".", "input_dir", ":", "raise", "InvalidFile", "(", "'%s there is an empty path in %s'", "%", "(", "oqparam", ".", "inputs", "[", "'job_ini'", "]", ",", "key", ")", ")", "fnames", ".", "extend", "(", "fname", ")", "elif", "key", "==", "'source_model_logic_tree'", ":", "for", "smpaths", "in", "logictree", ".", "collect_info", "(", "fname", ")", ".", "smpaths", ".", "values", "(", ")", ":", "fnames", ".", "extend", "(", "smpaths", ")", "fnames", ".", "append", "(", "fname", ")", "else", ":", "fnames", ".", "append", "(", "fname", ")", "return", "sorted", "(", "fnames", ")" ]
:param oqparam: an OqParam instance :param hazard: if True, consider only the hazard files :returns: input path names in a specific order
[ ":", "param", "oqparam", ":", "an", "OqParam", "instance", ":", "param", "hazard", ":", "if", "True", "consider", "only", "the", "hazard", "files", ":", "returns", ":", "input", "path", "names", "in", "a", "specific", "order" ]
python
train
rhgrant10/Groupy
groupy/api/memberships.py
https://github.com/rhgrant10/Groupy/blob/ffd8cac57586fa1c218e3b4bfaa531142c3be766/groupy/api/memberships.py#L66-L82
def check(self, results_id):
    """Check for results of a membership request.

    :param str results_id: the ID of a membership request
    :return: successfully created memberships
    :rtype: :class:`list`
    :raises groupy.exceptions.ResultsNotReady: if the results are not ready
    :raises groupy.exceptions.ResultsExpired: if the results have expired
    """
    path = 'results/{}'.format(results_id)
    url = utils.urljoin(self.url, path)
    response = self.session.get(url)
    if response.status_code == 503:
        raise exceptions.ResultsNotReady(response)
    if response.status_code == 404:
        raise exceptions.ResultsExpired(response)
    return response.data['members']
[ "def", "check", "(", "self", ",", "results_id", ")", ":", "path", "=", "'results/{}'", ".", "format", "(", "results_id", ")", "url", "=", "utils", ".", "urljoin", "(", "self", ".", "url", ",", "path", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", ")", "if", "response", ".", "status_code", "==", "503", ":", "raise", "exceptions", ".", "ResultsNotReady", "(", "response", ")", "if", "response", ".", "status_code", "==", "404", ":", "raise", "exceptions", ".", "ResultsExpired", "(", "response", ")", "return", "response", ".", "data", "[", "'members'", "]" ]
Check for results of a membership request. :param str results_id: the ID of a membership request :return: successfully created memberships :rtype: :class:`list` :raises groupy.exceptions.ResultsNotReady: if the results are not ready :raises groupy.exceptions.ResultsExpired: if the results have expired
[ "Check", "for", "results", "of", "a", "membership", "request", "." ]
python
train
xzased/lvm2py
lvm2py/vg.py
https://github.com/xzased/lvm2py/blob/34ce69304531a474c2fe4a4009ca445a8c103cd6/lvm2py/vg.py#L197-L204
def is_exported(self):
    """
    Returns True if the VG is exported, False otherwise.
    """
    self.open()
    exp = lvm_vg_is_exported(self.handle)
    self.close()
    return bool(exp)
[ "def", "is_exported", "(", "self", ")", ":", "self", ".", "open", "(", ")", "exp", "=", "lvm_vg_is_exported", "(", "self", ".", "handle", ")", "self", ".", "close", "(", ")", "return", "bool", "(", "exp", ")" ]
Returns True if the VG is exported, False otherwise.
[ "Returns", "True", "if", "the", "VG", "is", "exported", "False", "otherwise", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/fiesta.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/fiesta.py#L355-L364
def dump_BSE_data_in_GW_run(self, BSE_dump=True):
    """
    :param BSE_dump: boolean
    :return: set the "do_bse" variable to one in cell.in
    """
    if BSE_dump:
        self.BSE_TDDFT_options.update(do_bse=1, do_tddft=0)
    else:
        self.BSE_TDDFT_options.update(do_bse=0, do_tddft=0)
[ "def", "dump_BSE_data_in_GW_run", "(", "self", ",", "BSE_dump", "=", "True", ")", ":", "if", "BSE_dump", ":", "self", ".", "BSE_TDDFT_options", ".", "update", "(", "do_bse", "=", "1", ",", "do_tddft", "=", "0", ")", "else", ":", "self", ".", "BSE_TDDFT_options", ".", "update", "(", "do_bse", "=", "0", ",", "do_tddft", "=", "0", ")" ]
:param BSE_dump: boolean :return: set the "do_bse" variable to one in cell.in
[ ":", "param", "BSE_dump", ":", "boolean", ":", "return", ":", "set", "the", "do_bse", "variable", "to", "one", "in", "cell", ".", "in" ]
python
train
adrn/gala
gala/coordinates/quaternion.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/quaternion.py#L112-L137
def random(cls):
    """
    Randomly sample a Quaternion from a distribution uniform in
    3D rotation angles.

    https://www-preview.ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf

    Returns
    -------
    q : :class:`gala.coordinates.Quaternion`
        A randomly sampled ``Quaternion`` instance.
    """
    s = np.random.uniform()
    s1 = np.sqrt(1 - s)
    s2 = np.sqrt(s)
    t1 = np.random.uniform(0, 2*np.pi)
    t2 = np.random.uniform(0, 2*np.pi)

    w = np.cos(t2)*s2
    x = np.sin(t1)*s1
    y = np.cos(t1)*s1
    z = np.sin(t2)*s2

    return cls([w,x,y,z])
[ "def", "random", "(", "cls", ")", ":", "s", "=", "np", ".", "random", ".", "uniform", "(", ")", "s1", "=", "np", ".", "sqrt", "(", "1", "-", "s", ")", "s2", "=", "np", ".", "sqrt", "(", "s", ")", "t1", "=", "np", ".", "random", ".", "uniform", "(", "0", ",", "2", "*", "np", ".", "pi", ")", "t2", "=", "np", ".", "random", ".", "uniform", "(", "0", ",", "2", "*", "np", ".", "pi", ")", "w", "=", "np", ".", "cos", "(", "t2", ")", "*", "s2", "x", "=", "np", ".", "sin", "(", "t1", ")", "*", "s1", "y", "=", "np", ".", "cos", "(", "t1", ")", "*", "s1", "z", "=", "np", ".", "sin", "(", "t2", ")", "*", "s2", "return", "cls", "(", "[", "w", ",", "x", ",", "y", ",", "z", "]", ")" ]
Randomly sample a Quaternion from a distribution uniform in 3D rotation angles. https://www-preview.ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf Returns ------- q : :class:`gala.coordinates.Quaternion` A randomly sampled ``Quaternion`` instance.
[ "Randomly", "sample", "a", "Quaternion", "from", "a", "distribution", "uniform", "in", "3D", "rotation", "angles", "." ]
python
train
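The sampling above appears to be the standard three-uniform-deviate construction of uniform random unit quaternions (Shoemake's method). A quick numpy sketch checking the unit-norm property the construction guarantees:

    import numpy as np

    s = np.random.uniform()
    s1, s2 = np.sqrt(1 - s), np.sqrt(s)
    t1, t2 = np.random.uniform(0, 2*np.pi, size=2)
    q = np.array([np.cos(t2)*s2, np.sin(t1)*s1, np.cos(t1)*s1, np.sin(t2)*s2])
    # |q|^2 = s2^2(cos^2 t2 + sin^2 t2) + s1^2(sin^2 t1 + cos^2 t1)
    #       = s + (1 - s) = 1
    assert np.isclose(np.dot(q, q), 1.0)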
saltstack/salt
salt/cloud/clouds/scaleway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/scaleway.py#L91-L124
def list_nodes(call=None):
    '''
    Return a list of the BareMetal servers that are on the provider.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    items = query(method='servers')

    ret = {}
    for node in items['servers']:
        public_ips = []
        private_ips = []
        image_id = ''

        if node.get('public_ip'):
            public_ips = [node['public_ip']['address']]

        if node.get('private_ip'):
            private_ips = [node['private_ip']]

        if node.get('image'):
            image_id = node['image']['id']

        ret[node['name']] = {
            'id': node['id'],
            'image_id': image_id,
            'public_ips': public_ips,
            'private_ips': private_ips,
            'size': node['volumes']['0']['size'],
            'state': node['state'],
        }
    return ret
[ "def", "list_nodes", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_nodes function must be called with -f or --function.'", ")", "items", "=", "query", "(", "method", "=", "'servers'", ")", "ret", "=", "{", "}", "for", "node", "in", "items", "[", "'servers'", "]", ":", "public_ips", "=", "[", "]", "private_ips", "=", "[", "]", "image_id", "=", "''", "if", "node", ".", "get", "(", "'public_ip'", ")", ":", "public_ips", "=", "[", "node", "[", "'public_ip'", "]", "[", "'address'", "]", "]", "if", "node", ".", "get", "(", "'private_ip'", ")", ":", "private_ips", "=", "[", "node", "[", "'private_ip'", "]", "]", "if", "node", ".", "get", "(", "'image'", ")", ":", "image_id", "=", "node", "[", "'image'", "]", "[", "'id'", "]", "ret", "[", "node", "[", "'name'", "]", "]", "=", "{", "'id'", ":", "node", "[", "'id'", "]", ",", "'image_id'", ":", "image_id", ",", "'public_ips'", ":", "public_ips", ",", "'private_ips'", ":", "private_ips", ",", "'size'", ":", "node", "[", "'volumes'", "]", "[", "'0'", "]", "[", "'size'", "]", ",", "'state'", ":", "node", "[", "'state'", "]", ",", "}", "return", "ret" ]
Return a list of the BareMetal servers that are on the provider.
[ "Return", "a", "list", "of", "the", "BareMetal", "servers", "that", "are", "on", "the", "provider", "." ]
python
train
gunthercox/ChatterBot
chatterbot/utils.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/utils.py#L6-L17
def import_module(dotted_path):
    """
    Imports the specified module based on the
    dot notated import path for the module.
    """
    import importlib

    module_parts = dotted_path.split('.')
    module_path = '.'.join(module_parts[:-1])
    module = importlib.import_module(module_path)

    return getattr(module, module_parts[-1])
[ "def", "import_module", "(", "dotted_path", ")", ":", "import", "importlib", "module_parts", "=", "dotted_path", ".", "split", "(", "'.'", ")", "module_path", "=", "'.'", ".", "join", "(", "module_parts", "[", ":", "-", "1", "]", ")", "module", "=", "importlib", ".", "import_module", "(", "module_path", ")", "return", "getattr", "(", "module", ",", "module_parts", "[", "-", "1", "]", ")" ]
Imports the specified module based on the dot notated import path for the module.
[ "Imports", "the", "specified", "module", "based", "on", "the", "dot", "notated", "import", "path", "for", "the", "module", "." ]
python
train
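Note that despite its name, the function returns the final attribute of the dotted path, not the module. A usage sketch relying only on the standard library; the dotted path here is chosen for the demo:

    # assuming import_module from the record above is in scope:
    # 'collections.OrderedDict' resolves to module 'collections',
    # attribute 'OrderedDict'
    OrderedDict = import_module('collections.OrderedDict')
    od = OrderedDict(a=1)
    assert od['a'] == 1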
ui/django-thumbnails
thumbnails/post_processors.py
https://github.com/ui/django-thumbnails/blob/5cef55e7f167060458709ed760dd43981124796a/thumbnails/post_processors.py#L31-L79
def optimize(thumbnail_file, jpg_command=None, png_command=None,
             gif_command=None):
    """
    A post processing function to optimize file size. Accepts commands
    to optimize JPG, PNG and GIF images as arguments. Example:

    THUMBNAILS = {
        # Other options...
        'POST_PROCESSORS': [
            {
                'processor': 'thumbnails.post_processors.optimize',
                'png_command': 'optipng -force -o3 "%(filename)s"',
                'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
            },
        ],
    }

    Note: using output redirection in commands may cause unpredictable
    results. For example 'optipng -force -o3 "%(filename)s" &> /dev/null'
    may cause optimize command to fail on some systems.
    """
    temp_dir = get_or_create_temp_dir()
    thumbnail_filename = os.path.join(temp_dir, "%s" % shortuuid.uuid())
    f = open(thumbnail_filename, 'wb')
    f.write(thumbnail_file.read())
    f.close()

    # Detect filetype
    filetype = imghdr.what(thumbnail_filename)

    # Construct command to optimize image based on filetype
    command = None
    if filetype == "jpg" or filetype == "jpeg":
        command = jpg_command
    elif filetype == "png":
        command = png_command
    elif filetype == "gif":
        command = gif_command

    # Run Command
    if command:
        command = command % {'filename': thumbnail_filename}
        call(command, shell=True)

    optimized_file = File(open(thumbnail_filename, 'rb'))
    os.remove(thumbnail_filename)
    return optimized_file
[ "def", "optimize", "(", "thumbnail_file", ",", "jpg_command", "=", "None", ",", "png_command", "=", "None", ",", "gif_command", "=", "None", ")", ":", "temp_dir", "=", "get_or_create_temp_dir", "(", ")", "thumbnail_filename", "=", "os", ".", "path", ".", "join", "(", "temp_dir", ",", "\"%s\"", "%", "shortuuid", ".", "uuid", "(", ")", ")", "f", "=", "open", "(", "thumbnail_filename", ",", "'wb'", ")", "f", ".", "write", "(", "thumbnail_file", ".", "read", "(", ")", ")", "f", ".", "close", "(", ")", "# Detect filetype", "filetype", "=", "imghdr", ".", "what", "(", "thumbnail_filename", ")", "# Construct command to optimize image based on filetype", "command", "=", "None", "if", "filetype", "==", "\"jpg\"", "or", "filetype", "==", "\"jpeg\"", ":", "command", "=", "jpg_command", "elif", "filetype", "==", "\"png\"", ":", "command", "=", "png_command", "elif", "filetype", "==", "\"gif\"", ":", "command", "=", "gif_command", "# Run Command", "if", "command", ":", "command", "=", "command", "%", "{", "'filename'", ":", "thumbnail_filename", "}", "call", "(", "command", ",", "shell", "=", "True", ")", "optimized_file", "=", "File", "(", "open", "(", "thumbnail_filename", ",", "'rb'", ")", ")", "os", ".", "remove", "(", "thumbnail_filename", ")", "return", "optimized_file" ]
A post processing function to optimize file size. Accepts commands to optimize JPG, PNG and GIF images as arguments. Example: THUMBNAILS = { # Other options... 'POST_PROCESSORS': [ { 'processor': 'thumbnails.post_processors.optimize', 'png_command': 'optipng -force -o3 "%(filename)s"', 'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"', }, ], } Note: using output redirection in commands may cause unpredictable results. For example 'optipng -force -o3 "%(filename)s" &> /dev/null' may cause optimize command to fail on some systems.
[ "A", "post", "processing", "function", "to", "optimize", "file", "size", ".", "Accepts", "commands", "to", "optimize", "JPG", "PNG", "and", "GIF", "images", "as", "arguments", ".", "Example", ":" ]
python
test
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/aio/async_receive_handler.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/aio/async_receive_handler.py#L513-L537
async def get_session_state(self):
    """Get the session state.

    Returns None if no state has been set.

    :rtype: str

    Example:
        .. literalinclude:: ../examples/async_examples/test_examples_async.py
            :start-after: [START set_session_state]
            :end-before: [END set_session_state]
            :language: python
            :dedent: 4
            :caption: Getting and setting the state of a session.
    """
    await self._can_run()
    response = await self._mgmt_request_response(
        REQUEST_RESPONSE_GET_SESSION_STATE_OPERATION,
        {'session-id': self.session_id},
        mgmt_handlers.default)
    session_state = response.get(b'session-state')
    if isinstance(session_state, six.binary_type):
        session_state = session_state.decode('UTF-8')
    return session_state
[ "async", "def", "get_session_state", "(", "self", ")", ":", "await", "self", ".", "_can_run", "(", ")", "response", "=", "await", "self", ".", "_mgmt_request_response", "(", "REQUEST_RESPONSE_GET_SESSION_STATE_OPERATION", ",", "{", "'session-id'", ":", "self", ".", "session_id", "}", ",", "mgmt_handlers", ".", "default", ")", "session_state", "=", "response", ".", "get", "(", "b'session-state'", ")", "if", "isinstance", "(", "session_state", ",", "six", ".", "binary_type", ")", ":", "session_state", "=", "session_state", ".", "decode", "(", "'UTF-8'", ")", "return", "session_state" ]
Get the session state. Returns None if no state has been set. :rtype: str Example: .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START set_session_state] :end-before: [END set_session_state] :language: python :dedent: 4 :caption: Getting and setting the state of a session.
[ "Get", "the", "session", "state", "." ]
python
test
sparklingpandas/sparklingpandas
sparklingpandas/groupby.py
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/groupby.py#L129-L134
def ngroups(self): """Number of groups.""" if self._can_use_new_school(): return self._grouped_spark_sql.count() self._prep_pandas_groupby() return self._mergedRDD.count()
[ "def", "ngroups", "(", "self", ")", ":", "if", "self", ".", "_can_use_new_school", "(", ")", ":", "return", "self", ".", "_grouped_spark_sql", ".", "count", "(", ")", "self", ".", "_prep_pandas_groupby", "(", ")", "return", "self", ".", "_mergedRDD", ".", "count", "(", ")" ]
Number of groups.
[ "Number", "of", "groups", "." ]
python
train
jrigden/pyPodcastParser
pyPodcastParser/Item.py
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Item.py#L220-L227
def set_itunes_closed_captioned(self): """Parses isClosedCaptioned from itunes tags and sets value""" try: self.itunes_closed_captioned = self.soup.find( 'itunes:isclosedcaptioned').string self.itunes_closed_captioned = self.itunes_closed_captioned.lower() except AttributeError: self.itunes_closed_captioned = None
[ "def", "set_itunes_closed_captioned", "(", "self", ")", ":", "try", ":", "self", ".", "itunes_closed_captioned", "=", "self", ".", "soup", ".", "find", "(", "'itunes:isclosedcaptioned'", ")", ".", "string", "self", ".", "itunes_closed_captioned", "=", "self", ".", "itunes_closed_captioned", ".", "lower", "(", ")", "except", "AttributeError", ":", "self", ".", "itunes_closed_captioned", "=", "None" ]
Parses isClosedCaptioned from itunes tags and sets value
[ "Parses", "isClosedCaptioned", "from", "itunes", "tags", "and", "sets", "value" ]
python
train
pirate/mesh-networking
examples/mesh-botnet/shell_tools.py
https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/examples/mesh-botnet/shell_tools.py#L41-L86
def run_shell(cmd, timeout=60, verbose=False): """run a shell command and return the output, verbose enables live command output via yield""" retcode = None try: p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, executable='/bin/bash') continue_running = True except Exception as e: yield("Failed: %s" % e) continue_running = False while continue_running: try: line = p.stdout.readline() if verbose and line: yield(line) elif line.strip(): yield(line.strip()) except Exception: pass try: data = irc.recv(4096) except Exception as e: data = "" retcode = p.poll() # returns None while subprocess is running if '!cancel' in data: retcode = "Cancelled live output reading. You have to kill the process manually." yield "[X]: %s" % retcode break elif retcode is not None: try: line = p.stdout.read() except: retcode = "Too much output, read timed out. Process is still running in background." if verbose and line: yield line if retcode != 0: yield "[X]: %s" % retcode elif retcode == 0 and verbose: yield "[√]" break
[ "def", "run_shell", "(", "cmd", ",", "timeout", "=", "60", ",", "verbose", "=", "False", ")", ":", "retcode", "=", "None", "try", ":", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "STDOUT", ",", "executable", "=", "'/bin/bash'", ")", "continue_running", "=", "True", "except", "Exception", "as", "e", ":", "yield", "(", "\"Failed: %s\"", "%", "e", ")", "continue_running", "=", "False", "while", "continue_running", ":", "try", ":", "line", "=", "p", ".", "stdout", ".", "readline", "(", ")", "if", "verbose", "and", "line", ":", "yield", "(", "line", ")", "elif", "line", ".", "strip", "(", ")", ":", "yield", "(", "line", ".", "strip", "(", ")", ")", "except", "Exception", ":", "pass", "try", ":", "data", "=", "irc", ".", "recv", "(", "4096", ")", "except", "Exception", "as", "e", ":", "data", "=", "\"\"", "retcode", "=", "p", ".", "poll", "(", ")", "# returns None while subprocess is running", "if", "'!cancel'", "in", "data", ":", "retcode", "=", "\"Cancelled live output reading. You have to kill the process manually.\"", "yield", "\"[X]: %s\"", "%", "retcode", "break", "elif", "retcode", "is", "not", "None", ":", "try", ":", "line", "=", "p", ".", "stdout", ".", "read", "(", ")", "except", ":", "retcode", "=", "\"Too much output, read timed out. Process is still running in background.\"", "if", "verbose", "and", "line", ":", "yield", "line", "if", "retcode", "!=", "0", ":", "yield", "\"[X]: %s\"", "%", "retcode", "elif", "retcode", "==", "0", "and", "verbose", ":", "yield", "\"[√]\"", "break" ]
run a shell command and return the output, verbose enables live command output via yield
[ "run", "a", "shell", "command", "and", "return", "the", "output", "verbose", "enables", "live", "command", "output", "via", "yield" ]
python
train
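A minimal usage sketch for the run_shell generator above (Python 2-era code; the module-level `irc` socket it polls for '!cancel' is not strictly required here, since the recv call is wrapped in a try/except and falls back to an empty string):

for line in run_shell('echo start; sleep 1; echo done', verbose=True):
    # lines are yielded live as the subprocess emits them;
    # '[X]: <retcode>' marks failure and '[√]' marks success
    print(line.rstrip())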
rbarrois/aionotify
aionotify/aioutils.py
https://github.com/rbarrois/aionotify/blob/6cfa35b26a2660f77f29a92d3efb7d1dde685b43/aionotify/aioutils.py#L60-L63
def pause_reading(self): """Public API: pause reading the transport.""" self._loop.remove_reader(self._fileno) self._active = False
[ "def", "pause_reading", "(", "self", ")", ":", "self", ".", "_loop", ".", "remove_reader", "(", "self", ".", "_fileno", ")", "self", ".", "_active", "=", "False" ]
Public API: pause reading the transport.
[ "Public", "API", ":", "pause", "reading", "the", "transport", "." ]
python
test
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/ThingMeta.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/ThingMeta.py#L48-L63
def get_location(self): """Gets the current geo location of your Thing Returns tuple of `(lat, lon)` in `float` or `(None, None)` if location is not set for this Thing """ lat = None lon = None # note: always picks from first triple for _, _, o in self._graph.triples((None, GEO_NS.lat, None)): lat = float(o) break for _, _, o in self._graph.triples((None, GEO_NS.long, None)): lon = float(o) break return lat, lon
[ "def", "get_location", "(", "self", ")", ":", "lat", "=", "None", "lon", "=", "None", "# note: always picks from first triple", "for", "_", ",", "_", ",", "o", "in", "self", ".", "_graph", ".", "triples", "(", "(", "None", ",", "GEO_NS", ".", "lat", ",", "None", ")", ")", ":", "lat", "=", "float", "(", "o", ")", "break", "for", "_", ",", "_", ",", "o", "in", "self", ".", "_graph", ".", "triples", "(", "(", "None", ",", "GEO_NS", ".", "long", ",", "None", ")", ")", ":", "lon", "=", "float", "(", "o", ")", "break", "return", "lat", ",", "lon" ]
Gets the current geo location of your Thing Returns tuple of `(lat, lon)` in `float` or `(None, None)` if location is not set for this Thing
[ "Gets", "the", "current", "geo", "location", "of", "your", "Thing" ]
python
train
finklabs/metrics
metrics/position.py
https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L112-L132
def process_token(self, tok): """count lines and track position of classes and functions""" if tok[0] == Token.Text: count = tok[1].count('\n') if count: self._line += count # adjust linecount if self._detector.process(tok): pass # works been completed in the detector elif tok[0] == Token.Punctuation: if tok[0] == Token.Punctuation and tok[1] == '{': self._scope += 1 if tok[0] == Token.Punctuation and tok[1] == '}': self._scope += -1 if self._scope == 0 and self._curr is not None: self._curr['end'] = self._line # close last scope self._curr = None elif tok[0] == Token.Name.Class and self._scope == 0: self.add_scope('Class', tok[1], self._line) elif tok[0] == Token.Name.Function and self._scope in [0, 1]: self.add_scope('Function', tok[1], self._line, self._scope == 1)
[ "def", "process_token", "(", "self", ",", "tok", ")", ":", "if", "tok", "[", "0", "]", "==", "Token", ".", "Text", ":", "count", "=", "tok", "[", "1", "]", ".", "count", "(", "'\\n'", ")", "if", "count", ":", "self", ".", "_line", "+=", "count", "# adjust linecount", "if", "self", ".", "_detector", ".", "process", "(", "tok", ")", ":", "pass", "# works been completed in the detector", "elif", "tok", "[", "0", "]", "==", "Token", ".", "Punctuation", ":", "if", "tok", "[", "0", "]", "==", "Token", ".", "Punctuation", "and", "tok", "[", "1", "]", "==", "'{'", ":", "self", ".", "_scope", "+=", "1", "if", "tok", "[", "0", "]", "==", "Token", ".", "Punctuation", "and", "tok", "[", "1", "]", "==", "'}'", ":", "self", ".", "_scope", "+=", "-", "1", "if", "self", ".", "_scope", "==", "0", "and", "self", ".", "_curr", "is", "not", "None", ":", "self", ".", "_curr", "[", "'end'", "]", "=", "self", ".", "_line", "# close last scope", "self", ".", "_curr", "=", "None", "elif", "tok", "[", "0", "]", "==", "Token", ".", "Name", ".", "Class", "and", "self", ".", "_scope", "==", "0", ":", "self", ".", "add_scope", "(", "'Class'", ",", "tok", "[", "1", "]", ",", "self", ".", "_line", ")", "elif", "tok", "[", "0", "]", "==", "Token", ".", "Name", ".", "Function", "and", "self", ".", "_scope", "in", "[", "0", ",", "1", "]", ":", "self", ".", "add_scope", "(", "'Function'", ",", "tok", "[", "1", "]", ",", "self", ".", "_line", ",", "self", ".", "_scope", "==", "1", ")" ]
count lines and track position of classes and functions
[ "count", "lines", "and", "track", "position", "of", "classes", "and", "functions" ]
python
train
ecordell/pymacaroons
pymacaroons/serializers/binary_serializer.py
https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/binary_serializer.py#L301-L311
def _encode_uvarint(data, n):
    '''Encodes a non-negative integer into variable-length format (7 bits per byte), appending the bytes to data.'''
    if n < 0:
        raise ValueError('only support positive integer')
    while True:
        this_byte = n & 0x7f
        n >>= 7
        if n == 0:
            data.append(this_byte)
            break
        data.append(this_byte | 0x80)
[ "def", "_encode_uvarint", "(", "data", ",", "n", ")", ":", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "'only support positive integer'", ")", "while", "True", ":", "this_byte", "=", "n", "&", "0x7f", "n", ">>=", "7", "if", "n", "==", "0", ":", "data", ".", "append", "(", "this_byte", ")", "break", "data", ".", "append", "(", "this_byte", "|", "0x80", ")" ]
Encodes a non-negative integer into variable-length format (7 bits per byte), appending the bytes to data.
[ "Encodes", "integer", "into", "variable", "-", "length", "format", "into", "data", "." ]
python
train
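A worked example of the varint encoding above: 300 is 0b100101100, so the low seven bits (0x2C) are emitted first with the continuation bit set (0xAC), followed by the remaining bits (0x02):

buf = bytearray()
_encode_uvarint(buf, 300)
assert bytes(buf) == b'\xac\x02'  # low 7 bits first; high bit flags continuation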
angr/angr
angr/analyses/cfg/cfg_base.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_base.py#L507-L527
def _should_skip_region(self, region_start): """ Some regions usually do not contain any executable code, but are still marked as executable. We should skip those regions by default. :param int region_start: Address of the beginning of the region. :return: True/False :rtype: bool """ obj = self.project.loader.find_object_containing(region_start, membership_check=False) if obj is None: return False if isinstance(obj, PE): section = obj.find_section_containing(region_start) if section is None: return False if section.name in {'.textbss'}: return True return False
[ "def", "_should_skip_region", "(", "self", ",", "region_start", ")", ":", "obj", "=", "self", ".", "project", ".", "loader", ".", "find_object_containing", "(", "region_start", ",", "membership_check", "=", "False", ")", "if", "obj", "is", "None", ":", "return", "False", "if", "isinstance", "(", "obj", ",", "PE", ")", ":", "section", "=", "obj", ".", "find_section_containing", "(", "region_start", ")", "if", "section", "is", "None", ":", "return", "False", "if", "section", ".", "name", "in", "{", "'.textbss'", "}", ":", "return", "True", "return", "False" ]
Some regions usually do not contain any executable code, but are still marked as executable. We should skip those regions by default. :param int region_start: Address of the beginning of the region. :return: True/False :rtype: bool
[ "Some", "regions", "usually", "do", "not", "contain", "any", "executable", "code", "but", "are", "still", "marked", "as", "executable", ".", "We", "should", "skip", "those", "regions", "by", "default", "." ]
python
train
mitsei/dlkit
dlkit/services/authorization.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/authorization.py#L430-L438
def use_comparative_vault_view(self): """Pass through to provider AuthorizationVaultSession.use_comparative_vault_view""" self._vault_view = COMPARATIVE # self._get_provider_session('authorization_vault_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_comparative_vault_view() except AttributeError: pass
[ "def", "use_comparative_vault_view", "(", "self", ")", ":", "self", ".", "_vault_view", "=", "COMPARATIVE", "# self._get_provider_session('authorization_vault_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_vault_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider AuthorizationVaultSession.use_comparative_vault_view
[ "Pass", "through", "to", "provider", "AuthorizationVaultSession", ".", "use_comparative_vault_view" ]
python
train
batiste/django-page-cms
pages/models.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/models.py#L465-L478
def slug(self, language=None, fallback=True): """ Return the slug of the page depending on the given language. :param language: wanted language, if not defined default is used. :param fallback: if ``True``, the slug will also be searched in other \ languages. """ slug = self.get_content(language, 'slug', language_fallback=fallback) if slug == '': return "Page {0}".format(self.pk) return slug
[ "def", "slug", "(", "self", ",", "language", "=", "None", ",", "fallback", "=", "True", ")", ":", "slug", "=", "self", ".", "get_content", "(", "language", ",", "'slug'", ",", "language_fallback", "=", "fallback", ")", "if", "slug", "==", "''", ":", "return", "\"Page {0}\"", ".", "format", "(", "self", ".", "pk", ")", "return", "slug" ]
Return the slug of the page depending on the given language. :param language: wanted language, if not defined default is used. :param fallback: if ``True``, the slug will also be searched in other \ languages.
[ "Return", "the", "slug", "of", "the", "page", "depending", "on", "the", "given", "language", "." ]
python
train
XuShaohua/bcloud
bcloud/App.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/App.py#L492-L496
def update_clipboard(self, text):
    '''Copy the text to the system clipboard.'''
    clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
    clipboard.set_text(text, -1)
    self.toast(_('{0} copied to clipboard'.format(text)))
[ "def", "update_clipboard", "(", "self", ",", "text", ")", ":", "clipboard", "=", "Gtk", ".", "Clipboard", ".", "get", "(", "Gdk", ".", "SELECTION_CLIPBOARD", ")", "clipboard", ".", "set_text", "(", "text", ",", "-", "1", ")", "self", ".", "toast", "(", "_", "(", "'{0} copied to clipboard'", ".", "format", "(", "text", ")", ")", ")" ]
Copy the text to the system clipboard.
[ "将文本复制到系统剪贴板里面" ]
python
train
saltstack/salt
salt/modules/apf.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/apf.py#L57-L66
def _status_apf(): ''' Return True if apf is running otherwise return False ''' status = 0 table = iptc.Table(iptc.Table.FILTER) for chain in table.chains: if 'sanity' in chain.name.lower(): status = 1 return True if status else False
[ "def", "_status_apf", "(", ")", ":", "status", "=", "0", "table", "=", "iptc", ".", "Table", "(", "iptc", ".", "Table", ".", "FILTER", ")", "for", "chain", "in", "table", ".", "chains", ":", "if", "'sanity'", "in", "chain", ".", "name", ".", "lower", "(", ")", ":", "status", "=", "1", "return", "True", "if", "status", "else", "False" ]
Return True if apf is running otherwise return False
[ "Return", "True", "if", "apf", "is", "running", "otherwise", "return", "False" ]
python
train
ella/ella
ella/core/templatetags/core.py
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/core/templatetags/core.py#L332-L345
def do_render(parser, token): """ Renders a rich-text field using defined markup. Example:: {% render some_var %} """ bits = token.split_contents() if len(bits) != 2: raise template.TemplateSyntaxError() return RenderNode(bits[1])
[ "def", "do_render", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", "!=", "2", ":", "raise", "template", ".", "TemplateSyntaxError", "(", ")", "return", "RenderNode", "(", "bits", "[", "1", "]", ")" ]
Renders a rich-text field using defined markup. Example:: {% render some_var %}
[ "Renders", "a", "rich", "-", "text", "field", "using", "defined", "markup", "." ]
python
train
BetterWorks/django-anonymizer
anonymizer/replacers.py
https://github.com/BetterWorks/django-anonymizer/blob/2d25bb6e8b5e4230c58031c4b6d10cc536669b3e/anonymizer/replacers.py#L166-L170
def zip_code(anon, obj, field, val): """ Returns a randomly generated US zip code (not necessarily valid, but will look like one). """ return anon.faker.zipcode(field=field)
[ "def", "zip_code", "(", "anon", ",", "obj", ",", "field", ",", "val", ")", ":", "return", "anon", ".", "faker", ".", "zipcode", "(", "field", "=", "field", ")" ]
Returns a randomly generated US zip code (not necessarily valid, but will look like one).
[ "Returns", "a", "randomly", "generated", "US", "zip", "code", "(", "not", "necessarily", "valid", "but", "will", "look", "like", "one", ")", "." ]
python
train
sony/nnabla
python/src/nnabla/parameter.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parameter.py#L179-L245
def get_parameter_or_create(name, shape=None, initializer=None, need_grad=True, as_need_grad=None): """ Returns an existing parameter variable with the provided name. If a variable with the provided name does not exist, a new variable with the provided name is returned. Args: name(str): The name under the current scope. If it already exists, the name is queried from the parameter manager. shape (:obj:`tuple` of :obj:`int`): Shape of created parameter. The shape of the specified parameter must match with this shape. The default is None which is only valid if initializer is given as an :obj:`numpy.ndarray`. initializer (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): An initialization function to be applied to the parameter. :obj:`numpy.ndarray` can also be given to initialize parameters from numpy array data. need_grad (bool): Register the parameter with the specified ``need_grad`` flag. The default is True. If the flag is different from the previously specified one, the flag will be overwritten, but the values will be kept. as_need_grad (bool): Get a parameter variable with the specified ``need_grad`` flag. Note that this doesn't overwrite the flag of the registered parameter variable with the provided name. Instead, if the given flag mismatches with the previously registered ``need_grad`` flag, it returns a new variable referring to the same array contents but with ``need_grad=as_need_grad``. """ names = name.split('/') if len(names) > 1: with parameter_scope(names[0]): return get_parameter_or_create('/'.join(names[1:]), shape, initializer, need_grad, as_need_grad) param = get_parameter(names[0]) if param is None: class VariableInfo: pass info = VariableInfo() info.initializer = initializer if initializer is not None: if isinstance(initializer, numpy.ndarray): # numpy init param = nn.Variable(initializer.shape, need_grad=need_grad) param.d = initializer # initializer init elif isinstance(initializer, nn.initializer.BaseInitializer) or initializer.__name__ == "<lambda>": assert shape is not None param = nn.Variable(shape, need_grad=need_grad) param.d = initializer(shape=param.shape) else: raise ValueError( "`initializer` must be either the :obj:`numpy.ndarray` or an instance inherited from `nnabla.initializer.BaseInitializer`.") else: # default init assert shape is not None param = nn.Variable(shape, need_grad=need_grad) set_parameter(name, param) else: if param.shape != tuple(shape): raise ValueError( 'The size of existing parameter "{}" {} is different from the size of new parameter {}.\n' 'To clear all parameters, call nn.clear_parameters().'.format(name, param.shape, tuple(shape))) if need_grad != param.need_grad: param.need_grad = need_grad if as_need_grad is None: return param if param.need_grad != as_need_grad: param = param.get_unlinked_variable(need_grad=as_need_grad) return param
[ "def", "get_parameter_or_create", "(", "name", ",", "shape", "=", "None", ",", "initializer", "=", "None", ",", "need_grad", "=", "True", ",", "as_need_grad", "=", "None", ")", ":", "names", "=", "name", ".", "split", "(", "'/'", ")", "if", "len", "(", "names", ")", ">", "1", ":", "with", "parameter_scope", "(", "names", "[", "0", "]", ")", ":", "return", "get_parameter_or_create", "(", "'/'", ".", "join", "(", "names", "[", "1", ":", "]", ")", ",", "shape", ",", "initializer", ",", "need_grad", ",", "as_need_grad", ")", "param", "=", "get_parameter", "(", "names", "[", "0", "]", ")", "if", "param", "is", "None", ":", "class", "VariableInfo", ":", "pass", "info", "=", "VariableInfo", "(", ")", "info", ".", "initializer", "=", "initializer", "if", "initializer", "is", "not", "None", ":", "if", "isinstance", "(", "initializer", ",", "numpy", ".", "ndarray", ")", ":", "# numpy init", "param", "=", "nn", ".", "Variable", "(", "initializer", ".", "shape", ",", "need_grad", "=", "need_grad", ")", "param", ".", "d", "=", "initializer", "# initializer init", "elif", "isinstance", "(", "initializer", ",", "nn", ".", "initializer", ".", "BaseInitializer", ")", "or", "initializer", ".", "__name__", "==", "\"<lambda>\"", ":", "assert", "shape", "is", "not", "None", "param", "=", "nn", ".", "Variable", "(", "shape", ",", "need_grad", "=", "need_grad", ")", "param", ".", "d", "=", "initializer", "(", "shape", "=", "param", ".", "shape", ")", "else", ":", "raise", "ValueError", "(", "\"`initializer` must be either the :obj:`numpy.ndarray` or an instance inherited from `nnabla.initializer.BaseInitializer`.\"", ")", "else", ":", "# default init", "assert", "shape", "is", "not", "None", "param", "=", "nn", ".", "Variable", "(", "shape", ",", "need_grad", "=", "need_grad", ")", "set_parameter", "(", "name", ",", "param", ")", "else", ":", "if", "param", ".", "shape", "!=", "tuple", "(", "shape", ")", ":", "raise", "ValueError", "(", "'The size of existing parameter \"{}\" {} is different from the size of new parameter {}.\\n'", "'To clear all parameters, call nn.clear_parameters().'", ".", "format", "(", "name", ",", "param", ".", "shape", ",", "tuple", "(", "shape", ")", ")", ")", "if", "need_grad", "!=", "param", ".", "need_grad", ":", "param", ".", "need_grad", "=", "need_grad", "if", "as_need_grad", "is", "None", ":", "return", "param", "if", "param", ".", "need_grad", "!=", "as_need_grad", ":", "param", "=", "param", ".", "get_unlinked_variable", "(", "need_grad", "=", "as_need_grad", ")", "return", "param" ]
Returns an existing parameter variable with the provided name. If a variable with the provided name does not exist, a new variable with the provided name is returned. Args: name(str): The name under the current scope. If it already exists, the name is queried from the parameter manager. shape (:obj:`tuple` of :obj:`int`): Shape of created parameter. The shape of the specified parameter must match with this shape. The default is None which is only valid if initializer is given as an :obj:`numpy.ndarray`. initializer (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): An initialization function to be applied to the parameter. :obj:`numpy.ndarray` can also be given to initialize parameters from numpy array data. need_grad (bool): Register the parameter with the specified ``need_grad`` flag. The default is True. If the flag is different from the previously specified one, the flag will be overwritten, but the values will be kept. as_need_grad (bool): Get a parameter variable with the specified ``need_grad`` flag. Note that this doesn't overwrite the flag of the registered parameter variable with the provided name. Instead, if the given flag mismatches with the previously registered ``need_grad`` flag, it returns a new variable referring to the same array contents but with ``need_grad=as_need_grad``.
[ "Returns", "an", "existing", "parameter", "variable", "with", "the", "provided", "name", ".", "If", "a", "variable", "with", "the", "provided", "name", "does", "not", "exist", "a", "new", "variable", "with", "the", "provided", "name", "is", "returned", "." ]
python
train
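A usage sketch for get_parameter_or_create under the usual nnabla imports (the scope name 'affine1' and the shapes are illustrative assumptions, not part of the source):

import numpy as np
import nnabla as nn
import nnabla.initializer as I
from nnabla.parameter import get_parameter_or_create

with nn.parameter_scope('affine1'):
    # created with the initializer on first call; later calls return the same variable
    W = get_parameter_or_create('W', (16, 8), I.UniformInitializer((-0.1, 0.1)))
    # a numpy array seeds the parameter directly (its shape is used, so shape=None is fine)
    b = get_parameter_or_create('b', initializer=np.zeros(8, dtype=np.float32))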
wkentaro/fcn
fcn/trainer.py
https://github.com/wkentaro/fcn/blob/a29e167b67b11418a06566ad1ddbbc6949575e05/fcn/trainer.py#L91-L149
def validate(self, n_viz=9): """Validate current model using validation dataset. Parameters ---------- n_viz: int Number fo visualization. Returns ------- log: dict Log values. """ iter_valid = copy.copy(self.iter_valid) losses, lbl_trues, lbl_preds = [], [], [] vizs = [] dataset = iter_valid.dataset desc = 'valid [iteration=%08d]' % self.iteration for batch in tqdm.tqdm(iter_valid, desc=desc, total=len(dataset), ncols=80, leave=False): img, lbl_true = zip(*batch) batch = map(datasets.transform_lsvrc2012_vgg16, batch) with chainer.no_backprop_mode(), \ chainer.using_config('train', False): in_vars = utils.batch_to_vars(batch, device=self.device) loss = self.model(*in_vars) losses.append(float(loss.data)) score = self.model.score lbl_pred = chainer.functions.argmax(score, axis=1) lbl_pred = chainer.cuda.to_cpu(lbl_pred.data) for im, lt, lp in zip(img, lbl_true, lbl_pred): lbl_trues.append(lt) lbl_preds.append(lp) if len(vizs) < n_viz: viz = utils.visualize_segmentation( lbl_pred=lp, lbl_true=lt, img=im, n_class=self.model.n_class) vizs.append(viz) # save visualization out_viz = osp.join(self.out, 'visualizations_valid', 'iter%08d.jpg' % self.iteration) if not osp.exists(osp.dirname(out_viz)): os.makedirs(osp.dirname(out_viz)) viz = utils.get_tile_image(vizs) skimage.io.imsave(out_viz, viz) # generate log acc = utils.label_accuracy_score( lbl_trues, lbl_preds, self.model.n_class) self._write_log(**{ 'epoch': self.epoch, 'iteration': self.iteration, 'elapsed_time': time.time() - self.stamp_start, 'valid/loss': np.mean(losses), 'valid/acc': acc[0], 'valid/acc_cls': acc[1], 'valid/mean_iu': acc[2], 'valid/fwavacc': acc[3], }) self._save_model()
[ "def", "validate", "(", "self", ",", "n_viz", "=", "9", ")", ":", "iter_valid", "=", "copy", ".", "copy", "(", "self", ".", "iter_valid", ")", "losses", ",", "lbl_trues", ",", "lbl_preds", "=", "[", "]", ",", "[", "]", ",", "[", "]", "vizs", "=", "[", "]", "dataset", "=", "iter_valid", ".", "dataset", "desc", "=", "'valid [iteration=%08d]'", "%", "self", ".", "iteration", "for", "batch", "in", "tqdm", ".", "tqdm", "(", "iter_valid", ",", "desc", "=", "desc", ",", "total", "=", "len", "(", "dataset", ")", ",", "ncols", "=", "80", ",", "leave", "=", "False", ")", ":", "img", ",", "lbl_true", "=", "zip", "(", "*", "batch", ")", "batch", "=", "map", "(", "datasets", ".", "transform_lsvrc2012_vgg16", ",", "batch", ")", "with", "chainer", ".", "no_backprop_mode", "(", ")", ",", "chainer", ".", "using_config", "(", "'train'", ",", "False", ")", ":", "in_vars", "=", "utils", ".", "batch_to_vars", "(", "batch", ",", "device", "=", "self", ".", "device", ")", "loss", "=", "self", ".", "model", "(", "*", "in_vars", ")", "losses", ".", "append", "(", "float", "(", "loss", ".", "data", ")", ")", "score", "=", "self", ".", "model", ".", "score", "lbl_pred", "=", "chainer", ".", "functions", ".", "argmax", "(", "score", ",", "axis", "=", "1", ")", "lbl_pred", "=", "chainer", ".", "cuda", ".", "to_cpu", "(", "lbl_pred", ".", "data", ")", "for", "im", ",", "lt", ",", "lp", "in", "zip", "(", "img", ",", "lbl_true", ",", "lbl_pred", ")", ":", "lbl_trues", ".", "append", "(", "lt", ")", "lbl_preds", ".", "append", "(", "lp", ")", "if", "len", "(", "vizs", ")", "<", "n_viz", ":", "viz", "=", "utils", ".", "visualize_segmentation", "(", "lbl_pred", "=", "lp", ",", "lbl_true", "=", "lt", ",", "img", "=", "im", ",", "n_class", "=", "self", ".", "model", ".", "n_class", ")", "vizs", ".", "append", "(", "viz", ")", "# save visualization", "out_viz", "=", "osp", ".", "join", "(", "self", ".", "out", ",", "'visualizations_valid'", ",", "'iter%08d.jpg'", "%", "self", ".", "iteration", ")", "if", "not", "osp", ".", "exists", "(", "osp", ".", "dirname", "(", "out_viz", ")", ")", ":", "os", ".", "makedirs", "(", "osp", ".", "dirname", "(", "out_viz", ")", ")", "viz", "=", "utils", ".", "get_tile_image", "(", "vizs", ")", "skimage", ".", "io", ".", "imsave", "(", "out_viz", ",", "viz", ")", "# generate log", "acc", "=", "utils", ".", "label_accuracy_score", "(", "lbl_trues", ",", "lbl_preds", ",", "self", ".", "model", ".", "n_class", ")", "self", ".", "_write_log", "(", "*", "*", "{", "'epoch'", ":", "self", ".", "epoch", ",", "'iteration'", ":", "self", ".", "iteration", ",", "'elapsed_time'", ":", "time", ".", "time", "(", ")", "-", "self", ".", "stamp_start", ",", "'valid/loss'", ":", "np", ".", "mean", "(", "losses", ")", ",", "'valid/acc'", ":", "acc", "[", "0", "]", ",", "'valid/acc_cls'", ":", "acc", "[", "1", "]", ",", "'valid/mean_iu'", ":", "acc", "[", "2", "]", ",", "'valid/fwavacc'", ":", "acc", "[", "3", "]", ",", "}", ")", "self", ".", "_save_model", "(", ")" ]
Validate current model using validation dataset.

Parameters
----------
n_viz: int
    Number of visualizations.

Returns
-------
log: dict
    Log values.
[ "Validate", "current", "model", "using", "validation", "dataset", "." ]
python
train
hodgesds/elasticsearch_tornado
elasticsearch_tornado/client.py
https://github.com/hodgesds/elasticsearch_tornado/blob/5acc1385589c92ffe3587ad05b7921c2cd1a30da/elasticsearch_tornado/client.py#L1274-L1294
def list_benchmarks(self,
            index    = None,
            doc_type = None,
            params   = {},
            cb       = None,
            **kwargs
    ):
        """
        View the progress of long-running benchmarks.
        `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-benchmark.html>`_

        :arg index: A comma-separated list of index names; use `_all` or empty
            string to perform the operation on all indices
        :arg doc_type: The name of the document type
        """
        url = self.mk_url(*[index, doc_type, '_bench'], **params)
        self.client.fetch(
            self.mk_req(url, method='GET', **kwargs),
            callback = cb
        )
[ "def", "list_benchmarks", "(", "self", ",", "index", "=", "None", ",", "doc_type", "=", "None", ",", "params", "=", "{", "}", ",", "cb", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "mk_url", "(", "*", "[", "index", ",", "doc_type", ",", "'_bench'", "]", ",", "*", "*", "params", ")", "self", ".", "client", ".", "fetch", "(", "self", ".", "mk_req", "(", "url", ",", "method", "=", "'GET'", ",", "*", "*", "kwargs", ")", ",", "callback", "=", "callback", ")" ]
View the progress of long-running benchmarks. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-benchmark.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg doc_type: The name of the document type
[ "View", "the", "progress", "of", "long", "-", "running", "benchmarks", ".", "<http", ":", "//", "www", ".", "elasticsearch", ".", "org", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "master", "/", "search", "-", "benchmark", ".", "html", ">", "_", ":", "arg", "index", ":", "A", "comma", "-", "separated", "list", "of", "index", "names", ";", "use", "_all", "or", "empty", "string", "to", "perform", "the", "operation", "on", "all", "indices", ":", "arg", "doc_type", ":", "The", "name", "of", "the", "document", "type" ]
python
train
NiklasRosenstein/myo-python
myo/_device_listener.py
https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/_device_listener.py#L218-L242
def wait_for_single_device(self, timeout=None, interval=0.5):
    """
    Waits until a Myo was paired **and** connected with the Hub
    and returns it. If the *timeout* is exceeded, returns None.
    This function will not return a Myo that is only paired but
    not connected.

    # Parameters
    timeout: The maximum time to wait for a device.
    interval: The interval at which the function should exit sleeping.
      We can not sleep endlessly, otherwise the main thread can not
      exit, eg. through a KeyboardInterrupt.
    """

    timer = TimeoutManager(timeout)
    with self._cond:
      # As long as there are no Myo's connected, wait until we
      # get notified about a change.
      while not timer.check():
        # Check if we found a Myo that is connected.
        for device in self._devices.values():
          if device.connected:
            return device
        self._cond.wait(timer.remainder(interval))
    return None
[ "def", "wait_for_single_device", "(", "self", ",", "timeout", "=", "None", ",", "interval", "=", "0.5", ")", ":", "timer", "=", "TimeoutManager", "(", "timeout", ")", "with", "self", ".", "_cond", ":", "# As long as there are no Myo's connected, wait until we", "# get notified about a change.", "while", "not", "timer", ".", "check", "(", ")", ":", "# Check if we found a Myo that is connected.", "for", "device", "in", "self", ".", "_devices", ".", "values", "(", ")", ":", "if", "device", ".", "connected", ":", "return", "device", "self", ".", "_cond", ".", "wait", "(", "timer", ".", "remainder", "(", "interval", ")", ")", "return", "None" ]
Waits until a Myo was paired **and** connected with the Hub and returns it. If the *timeout* is exceeded, returns None. This function will not return a Myo that is only paired but not connected.

# Parameters
timeout: The maximum time to wait for a device.
interval: The interval at which the function should exit sleeping.
  We can not sleep endlessly, otherwise the main thread can not
  exit, eg. through a KeyboardInterrupt.
[ "Waits", "until", "a", "Myo", "is", "was", "paired", "**", "and", "**", "connected", "with", "the", "Hub", "and", "returns", "it", ".", "If", "the", "*", "timeout", "*", "is", "exceeded", "returns", "None", ".", "This", "function", "will", "not", "return", "a", "Myo", "that", "is", "only", "paired", "but", "not", "connected", "." ]
python
train
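A usage sketch following the pattern this method is built for in myo-python (the SDK path is a hypothetical placeholder for your own Myo SDK install):

import myo

myo.init(sdk_path='./myo-sdk')  # hypothetical path; point at your SDK
hub = myo.Hub()
listener = myo.ApiDeviceListener()
with hub.run_in_background(listener.on_event):
    # returns None if no Myo connects within 10 seconds
    device = listener.wait_for_single_device(timeout=10)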
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/ultratb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/ultratb.py#L391-L400
def color_toggle(self): """Toggle between the currently active color scheme and NoColor.""" if self.color_scheme_table.active_scheme_name == 'NoColor': self.color_scheme_table.set_active_scheme(self.old_scheme) self.Colors = self.color_scheme_table.active_colors else: self.old_scheme = self.color_scheme_table.active_scheme_name self.color_scheme_table.set_active_scheme('NoColor') self.Colors = self.color_scheme_table.active_colors
[ "def", "color_toggle", "(", "self", ")", ":", "if", "self", ".", "color_scheme_table", ".", "active_scheme_name", "==", "'NoColor'", ":", "self", ".", "color_scheme_table", ".", "set_active_scheme", "(", "self", ".", "old_scheme", ")", "self", ".", "Colors", "=", "self", ".", "color_scheme_table", ".", "active_colors", "else", ":", "self", ".", "old_scheme", "=", "self", ".", "color_scheme_table", ".", "active_scheme_name", "self", ".", "color_scheme_table", ".", "set_active_scheme", "(", "'NoColor'", ")", "self", ".", "Colors", "=", "self", ".", "color_scheme_table", ".", "active_colors" ]
Toggle between the currently active color scheme and NoColor.
[ "Toggle", "between", "the", "currently", "active", "color", "scheme", "and", "NoColor", "." ]
python
test
GNS3/gns3-server
gns3server/compute/qemu/qemu_vm.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/qemu/qemu_vm.py#L579-L590
def acpi_shutdown(self, acpi_shutdown):
    """
    Sets whether this QEMU VM can be ACPI shut down.

    :param acpi_shutdown: boolean
    """

    if acpi_shutdown:
        log.info('QEMU VM "{name}" [{id}] has enabled ACPI shutdown'.format(name=self._name, id=self._id))
    else:
        log.info('QEMU VM "{name}" [{id}] has disabled ACPI shutdown'.format(name=self._name, id=self._id))
    self._acpi_shutdown = acpi_shutdown
[ "def", "acpi_shutdown", "(", "self", ",", "acpi_shutdown", ")", ":", "if", "acpi_shutdown", ":", "log", ".", "info", "(", "'QEMU VM \"{name}\" [{id}] has enabled ACPI shutdown'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ")", ")", "else", ":", "log", ".", "info", "(", "'QEMU VM \"{name}\" [{id}] has disabled ACPI shutdown'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ")", ")", "self", ".", "_acpi_shutdown", "=", "acpi_shutdown" ]
Sets whether this QEMU VM can be ACPI shut down.

:param acpi_shutdown: boolean
[ "Sets", "either", "this", "QEMU", "VM", "can", "be", "ACPI", "shutdown", "." ]
python
train
praekelt/panya-music
music/importer.py
https://github.com/praekelt/panya-music/blob/9300b1866bc33178e721b6de4771ba866bfc4b11/music/importer.py#L38-L47
def lookup_track(self, track): """ Looks up Django Track object for provided raw importing track object. """ tracks = Track.objects.filter(title__iexact=track.title) for track_obj in tracks: for contributor in track_obj.get_primary_contributors(permitted=False): if contributor.title == track.artist: return track_obj return None
[ "def", "lookup_track", "(", "self", ",", "track", ")", ":", "tracks", "=", "Track", ".", "objects", ".", "filter", "(", "title__iexact", "=", "track", ".", "title", ")", "for", "track_obj", "in", "tracks", ":", "for", "contributor", "in", "track_obj", ".", "get_primary_contributors", "(", "permitted", "=", "False", ")", ":", "if", "contributor", ".", "title", "==", "track", ".", "artist", ":", "return", "track_obj", "return", "None" ]
Looks up Django Track object for provided raw importing track object.
[ "Looks", "up", "Django", "Track", "object", "for", "provided", "raw", "importing", "track", "object", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/spp.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/spp.py#L123-L132
def mass_3d(self, r, rho0, gamma):
    """
    mass enclosed in a 3d sphere of radius r
    :param r: radius
    :param rho0: density normalization
    :param gamma: power-law slope of the density profile
    :return: enclosed mass
    """
    mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3)
    return mass_3d
[ "def", "mass_3d", "(", "self", ",", "r", ",", "rho0", ",", "gamma", ")", ":", "mass_3d", "=", "4", "*", "np", ".", "pi", "*", "rho0", "/", "(", "-", "gamma", "+", "3", ")", "*", "r", "**", "(", "-", "gamma", "+", "3", ")", "return", "mass_3d" ]
mass enclosed in a 3d sphere of radius r
:param r: radius
:param rho0: density normalization
:param gamma: power-law slope of the density profile
:return: enclosed mass
[ "mass", "enclosed", "a", "3d", "sphere", "or", "radius", "r", ":", "param", "r", ":", ":", "param", "a", ":", ":", "param", "s", ":", ":", "return", ":" ]
python
train
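A quick numerical sanity check of the closed-form result above, M(r) = 4*pi*rho0*r**(3-gamma)/(3-gamma), against direct integration of rho(r) = rho0*r**-gamma (gamma=2 is an illustrative choice; scipy is assumed available):

import numpy as np
from scipy.integrate import quad

rho0, gamma, r = 1.0, 2.0, 5.0
analytic = 4 * np.pi * rho0 / (3 - gamma) * r ** (3 - gamma)
# integrand 4*pi*x**2 * rho0*x**-gamma, written as x**(2 - gamma) to stay finite near x=0
numeric, _ = quad(lambda x: 4 * np.pi * rho0 * x ** (2 - gamma), 0, r)
assert np.isclose(analytic, numeric)  # both equal 20*pi for these values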
cloudant/python-cloudant
src/cloudant/design_document.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/design_document.py#L425-L441
def delete_view(self, view_name): """ Removes an existing MapReduce view definition from the locally cached DesignDocument View dictionary. To delete a JSON query index use :func:`~cloudant.database.CloudantDatabase.delete_query_index` instead. A CloudantException is raised if an attempt to delete a QueryIndexView (JSON query index) using this method is made. :param str view_name: Name used to identify the View. """ view = self.get_view(view_name) if view is None: return if isinstance(view, QueryIndexView): raise CloudantDesignDocumentException(103) self.views.__delitem__(view_name)
[ "def", "delete_view", "(", "self", ",", "view_name", ")", ":", "view", "=", "self", ".", "get_view", "(", "view_name", ")", "if", "view", "is", "None", ":", "return", "if", "isinstance", "(", "view", ",", "QueryIndexView", ")", ":", "raise", "CloudantDesignDocumentException", "(", "103", ")", "self", ".", "views", ".", "__delitem__", "(", "view_name", ")" ]
Removes an existing MapReduce view definition from the locally cached DesignDocument View dictionary. To delete a JSON query index use :func:`~cloudant.database.CloudantDatabase.delete_query_index` instead. A CloudantException is raised if an attempt to delete a QueryIndexView (JSON query index) using this method is made. :param str view_name: Name used to identify the View.
[ "Removes", "an", "existing", "MapReduce", "view", "definition", "from", "the", "locally", "cached", "DesignDocument", "View", "dictionary", ".", "To", "delete", "a", "JSON", "query", "index", "use", ":", "func", ":", "~cloudant", ".", "database", ".", "CloudantDatabase", ".", "delete_query_index", "instead", ".", "A", "CloudantException", "is", "raised", "if", "an", "attempt", "to", "delete", "a", "QueryIndexView", "(", "JSON", "query", "index", ")", "using", "this", "method", "is", "made", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/reaction_calculator.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/reaction_calculator.py#L160-L165
def reactants(self): """ List of reactants """ return [self._all_comp[i] for i in range(len(self._all_comp)) if self._coeffs[i] < 0]
[ "def", "reactants", "(", "self", ")", ":", "return", "[", "self", ".", "_all_comp", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_all_comp", ")", ")", "if", "self", ".", "_coeffs", "[", "i", "]", "<", "0", "]" ]
List of reactants
[ "List", "of", "reactants" ]
python
train
ets-labs/python-domain-models
domain_models/views.py
https://github.com/ets-labs/python-domain-models/blob/7de1816ba0338f20fdb3e0f57fad0ffd5bea13f9/domain_models/views.py#L54-L73
def check_include_exclude(attributes): """Check __include__ and __exclude__ attributes. :type attributes: dict """ include = attributes.get('__include__', tuple()) exclude = attributes.get('__exclude__', tuple()) if not isinstance(include, tuple): raise TypeError("Attribute __include__ must be a tuple.") if not isinstance(exclude, tuple): raise TypeError("Attribute __exclude__ must be a tuple.") if all((not include, not exclude)): return None if all((include, exclude)): raise AttributeError("Usage of __include__ and __exclude__ " "at the same time is prohibited.")
[ "def", "check_include_exclude", "(", "attributes", ")", ":", "include", "=", "attributes", ".", "get", "(", "'__include__'", ",", "tuple", "(", ")", ")", "exclude", "=", "attributes", ".", "get", "(", "'__exclude__'", ",", "tuple", "(", ")", ")", "if", "not", "isinstance", "(", "include", ",", "tuple", ")", ":", "raise", "TypeError", "(", "\"Attribute __include__ must be a tuple.\"", ")", "if", "not", "isinstance", "(", "exclude", ",", "tuple", ")", ":", "raise", "TypeError", "(", "\"Attribute __exclude__ must be a tuple.\"", ")", "if", "all", "(", "(", "not", "include", ",", "not", "exclude", ")", ")", ":", "return", "None", "if", "all", "(", "(", "include", ",", "exclude", ")", ")", ":", "raise", "AttributeError", "(", "\"Usage of __include__ and __exclude__ \"", "\"at the same time is prohibited.\"", ")" ]
Check __include__ and __exclude__ attributes. :type attributes: dict
[ "Check", "__include__", "and", "__exclude__", "attributes", "." ]
python
train
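A behavior sketch derived directly from the validator above:

check_include_exclude({'__include__': ('id', 'name')})   # passes
check_include_exclude({})                                # passes (returns None)
check_include_exclude({'__include__': ('id',),
                       '__exclude__': ('name',)})        # raises AttributeError
check_include_exclude({'__include__': ['id']})           # raises TypeError (list, not tuple)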
guma44/GEOparse
GEOparse/GEOTypes.py
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L637-L649
def _get_object_as_soft(self): """Return object as SOFT formatted string.""" soft = [] if self.database is not None: soft.append(self.database._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] for subset in self.subsets.values(): soft.append(subset._get_object_as_soft()) soft += ["^%s = %s" % (self.geotype, self.name), self._get_columns_as_string(), self._get_table_as_string()] return "\n".join(soft)
[ "def", "_get_object_as_soft", "(", "self", ")", ":", "soft", "=", "[", "]", "if", "self", ".", "database", "is", "not", "None", ":", "soft", ".", "append", "(", "self", ".", "database", ".", "_get_object_as_soft", "(", ")", ")", "soft", "+=", "[", "\"^%s = %s\"", "%", "(", "self", ".", "geotype", ",", "self", ".", "name", ")", ",", "self", ".", "_get_metadata_as_string", "(", ")", "]", "for", "subset", "in", "self", ".", "subsets", ".", "values", "(", ")", ":", "soft", ".", "append", "(", "subset", ".", "_get_object_as_soft", "(", ")", ")", "soft", "+=", "[", "\"^%s = %s\"", "%", "(", "self", ".", "geotype", ",", "self", ".", "name", ")", ",", "self", ".", "_get_columns_as_string", "(", ")", ",", "self", ".", "_get_table_as_string", "(", ")", "]", "return", "\"\\n\"", ".", "join", "(", "soft", ")" ]
Return object as SOFT formatted string.
[ "Return", "object", "as", "SOFT", "formatted", "string", "." ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/variants/views.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/variants/views.py#L455-L459
def acmg(): """Calculate an ACMG classification from submitted criteria.""" criteria = request.args.getlist('criterion') classification = get_acmg(criteria) return jsonify(dict(classification=classification))
[ "def", "acmg", "(", ")", ":", "criteria", "=", "request", ".", "args", ".", "getlist", "(", "'criterion'", ")", "classification", "=", "get_acmg", "(", "criteria", ")", "return", "jsonify", "(", "dict", "(", "classification", "=", "classification", ")", ")" ]
Calculate an ACMG classification from submitted criteria.
[ "Calculate", "an", "ACMG", "classification", "from", "submitted", "criteria", "." ]
python
test
WoLpH/python-statsd
statsd/client.py
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L96-L102
def get_raw(self, name=None): '''Shortcut for getting a :class:`~statsd.raw.Raw` instance :keyword name: See :func:`~statsd.client.Client.get_client` :type name: str ''' return self.get_client(name=name, class_=statsd.Raw)
[ "def", "get_raw", "(", "self", ",", "name", "=", "None", ")", ":", "return", "self", ".", "get_client", "(", "name", "=", "name", ",", "class_", "=", "statsd", ".", "Raw", ")" ]
Shortcut for getting a :class:`~statsd.raw.Raw` instance :keyword name: See :func:`~statsd.client.Client.get_client` :type name: str
[ "Shortcut", "for", "getting", "a", ":", "class", ":", "~statsd", ".", "raw", ".", "Raw", "instance" ]
python
train
robotools/fontParts
Lib/fontParts/base/contour.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/contour.py#L534-L551
def appendSegment(self, type=None, points=None, smooth=False, segment=None):
    """
    Append a segment to the contour.
    """
    if segment is not None:
        if type is None:
            type = segment.type
        if points is None:
            points = [(point.x, point.y) for point in segment.points]
        smooth = segment.smooth
    type = normalizers.normalizeSegmentType(type)
    pts = []
    for pt in points:
        pt = normalizers.normalizeCoordinateTuple(pt)
        pts.append(pt)
    points = pts
    smooth = normalizers.normalizeBoolean(smooth)
    self._appendSegment(type=type, points=points, smooth=smooth)
[ "def", "appendSegment", "(", "self", ",", "type", "=", "None", ",", "points", "=", "None", ",", "smooth", "=", "False", ",", "segment", "=", "None", ")", ":", "if", "segment", "is", "not", "None", ":", "if", "type", "is", "not", "None", ":", "type", "=", "segment", ".", "type", "if", "points", "is", "None", ":", "points", "=", "[", "(", "point", ".", "x", ",", "point", ".", "y", ")", "for", "point", "in", "segment", ".", "points", "]", "smooth", "=", "segment", ".", "smooth", "type", "=", "normalizers", ".", "normalizeSegmentType", "(", "type", ")", "pts", "=", "[", "]", "for", "pt", "in", "points", ":", "pt", "=", "normalizers", ".", "normalizeCoordinateTuple", "(", "pt", ")", "pts", ".", "append", "(", "pt", ")", "points", "=", "pts", "smooth", "=", "normalizers", ".", "normalizeBoolean", "(", "smooth", ")", "self", ".", "_appendSegment", "(", "type", "=", "type", ",", "points", "=", "points", ",", "smooth", "=", "smooth", ")" ]
Append a segment to the contour.
[ "Append", "a", "segment", "to", "the", "contour", "." ]
python
train
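A short usage sketch for appendSegment on a fontParts contour (the coordinates and the second contour are illustrative assumptions):

# two off-curve points followed by the on-curve point of a cubic segment
contour.appendSegment('curve', [(10, 20), (30, 40), (50, 60)], smooth=True)
# or copy an existing segment wholesale; its type, points and smooth flag carry over
contour.appendSegment(segment=other_contour.segments[0])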
mathandy/svgpathtools
svgpathtools/bezier.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/bezier.py#L236-L239
def interval_intersection_width(a, b, c, d): """returns the width of the intersection of intervals [a,b] and [c,d] (thinking of these as intervals on the real number line)""" return max(0, min(b, d) - max(a, c))
[ "def", "interval_intersection_width", "(", "a", ",", "b", ",", "c", ",", "d", ")", ":", "return", "max", "(", "0", ",", "min", "(", "b", ",", "d", ")", "-", "max", "(", "a", ",", "c", ")", ")" ]
returns the width of the intersection of intervals [a,b] and [c,d] (thinking of these as intervals on the real number line)
[ "returns", "the", "width", "of", "the", "intersection", "of", "intervals", "[", "a", "b", "]", "and", "[", "c", "d", "]", "(", "thinking", "of", "these", "as", "intervals", "on", "the", "real", "number", "line", ")" ]
python
train
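Two worked cases for the helper above:

assert interval_intersection_width(0, 5, 3, 8) == 2  # overlap is [3, 5]
assert interval_intersection_width(0, 1, 2, 3) == 0  # disjoint; negative width clamps to 0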
airspeed-velocity/asv
asv/benchmark.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/benchmark.py#L303-L342
def get_source_code(items): """ Extract source code of given items, and concatenate and dedent it. """ sources = [] prev_class_name = None for func in items: try: lines, lineno = inspect.getsourcelines(func) except TypeError: continue if not lines: continue src = "\n".join(line.rstrip() for line in lines) src = textwrap.dedent(src) class_name = None if inspect.ismethod(func): # Add class name if hasattr(func, 'im_class'): class_name = func.im_class.__name__ elif hasattr(func, '__qualname__'): names = func.__qualname__.split('.') if len(names) > 1: class_name = names[-2] if class_name and prev_class_name != class_name: src = "class {0}:\n {1}".format( class_name, src.replace("\n", "\n ")) elif class_name: src = " {1}".format( class_name, src.replace("\n", "\n ")) sources.append(src) prev_class_name = class_name return "\n\n".join(sources).rstrip()
[ "def", "get_source_code", "(", "items", ")", ":", "sources", "=", "[", "]", "prev_class_name", "=", "None", "for", "func", "in", "items", ":", "try", ":", "lines", ",", "lineno", "=", "inspect", ".", "getsourcelines", "(", "func", ")", "except", "TypeError", ":", "continue", "if", "not", "lines", ":", "continue", "src", "=", "\"\\n\"", ".", "join", "(", "line", ".", "rstrip", "(", ")", "for", "line", "in", "lines", ")", "src", "=", "textwrap", ".", "dedent", "(", "src", ")", "class_name", "=", "None", "if", "inspect", ".", "ismethod", "(", "func", ")", ":", "# Add class name", "if", "hasattr", "(", "func", ",", "'im_class'", ")", ":", "class_name", "=", "func", ".", "im_class", ".", "__name__", "elif", "hasattr", "(", "func", ",", "'__qualname__'", ")", ":", "names", "=", "func", ".", "__qualname__", ".", "split", "(", "'.'", ")", "if", "len", "(", "names", ")", ">", "1", ":", "class_name", "=", "names", "[", "-", "2", "]", "if", "class_name", "and", "prev_class_name", "!=", "class_name", ":", "src", "=", "\"class {0}:\\n {1}\"", ".", "format", "(", "class_name", ",", "src", ".", "replace", "(", "\"\\n\"", ",", "\"\\n \"", ")", ")", "elif", "class_name", ":", "src", "=", "\" {1}\"", ".", "format", "(", "class_name", ",", "src", ".", "replace", "(", "\"\\n\"", ",", "\"\\n \"", ")", ")", "sources", ".", "append", "(", "src", ")", "prev_class_name", "=", "class_name", "return", "\"\\n\\n\"", ".", "join", "(", "sources", ")", ".", "rstrip", "(", ")" ]
Extract source code of given items, and concatenate and dedent it.
[ "Extract", "source", "code", "of", "given", "items", "and", "concatenate", "and", "dedent", "it", "." ]
python
train
vaexio/vaex
packages/vaex-core/vaex/image.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/image.py#L121-L142
def monochrome(I, color, vmin=None, vmax=None):
    """Turns an intensity array into a monochrome 'image' by replacing each intensity by a scaled 'color'

    Values in I between vmin and vmax get scaled between 0 and 1, and values outside this
    range are clipped to this.

    Example
    >>> I = np.arange(16.).reshape(4,4)
    >>> color = (0, 0, 1)      # blue
    >>> rgb = vx.image.monochrome(I, color) # shape is (4,4,3)

    :param I: ndarray of any shape (2d for image)
    :param color: sequence of (r, g, b) values
    :param vmin: normalization minimum for I, or np.nanmin(I) when None
    :param vmax: normalization maximum for I, or np.nanmax(I) when None
    :return:
    """
    if vmin is None:
        vmin = np.nanmin(I)
    if vmax is None:
        vmax = np.nanmax(I)
    normalized = (I - vmin) / (vmax - vmin)
    return np.clip(normalized[..., np.newaxis], 0, 1) * np.array(color)
[ "def", "monochrome", "(", "I", ",", "color", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ")", ":", "if", "vmin", "is", "None", ":", "vmin", "=", "np", ".", "nanmin", "(", "I", ")", "if", "vmax", "is", "None", ":", "vmax", "=", "np", ".", "nanmax", "(", "I", ")", "normalized", "=", "(", "I", "-", "vmin", ")", "/", "(", "vmax", "-", "vmin", ")", "return", "np", ".", "clip", "(", "normalized", "[", "...", ",", "np", ".", "newaxis", "]", ",", "0", ",", "1", ")", "*", "np", ".", "array", "(", "color", ")" ]
Turns an intensity array into a monochrome 'image' by replacing each intensity by a scaled 'color'

Values in I between vmin and vmax get scaled between 0 and 1, and values outside this
range are clipped to this.

Example
>>> I = np.arange(16.).reshape(4,4)
>>> color = (0, 0, 1)      # blue
>>> rgb = vx.image.monochrome(I, color) # shape is (4,4,3)

:param I: ndarray of any shape (2d for image)
:param color: sequence of (r, g, b) values
:param vmin: normalization minimum for I, or np.nanmin(I) when None
:param vmax: normalization maximum for I, or np.nanmax(I) when None
:return:
[ "Turns", "a", "intensity", "array", "to", "a", "monochrome", "image", "by", "replacing", "each", "intensity", "by", "a", "scaled", "color" ]
python
test
AmesCornish/buttersink
buttersink/Store.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/Store.py#L84-L88
def listContents(self): """ Return list of volumes or diffs in this Store's selected directory. """ vols = list(self.listVolumes()) vols.sort(key=lambda v: self.getSendPath(v)) return [vol.display(self, detail="line") for vol in vols]
[ "def", "listContents", "(", "self", ")", ":", "vols", "=", "list", "(", "self", ".", "listVolumes", "(", ")", ")", "vols", ".", "sort", "(", "key", "=", "lambda", "v", ":", "self", ".", "getSendPath", "(", "v", ")", ")", "return", "[", "vol", ".", "display", "(", "self", ",", "detail", "=", "\"line\"", ")", "for", "vol", "in", "vols", "]" ]
Return list of volumes or diffs in this Store's selected directory.
[ "Return", "list", "of", "volumes", "or", "diffs", "in", "this", "Store", "s", "selected", "directory", "." ]
python
train
otto-torino/django-baton
baton/views.py
https://github.com/otto-torino/django-baton/blob/e791b5db3a0814bb49d8dfbdfb989d45e03594b7/baton/views.py#L19-L36
def get(self, request): """ Returns a json representing the menu voices in a format eaten by the js menu. Raised ImproperlyConfigured exceptions can be viewed in the browser console """ self.app_list = site.get_app_list(request) self.apps_dict = self.create_app_list_dict() # no menu provided items = get_config('MENU') if not items: voices = self.get_default_voices() else: voices = [] for item in items: self.add_voice(voices, item) return JsonResponse(voices, safe=False)
[ "def", "get", "(", "self", ",", "request", ")", ":", "self", ".", "app_list", "=", "site", ".", "get_app_list", "(", "request", ")", "self", ".", "apps_dict", "=", "self", ".", "create_app_list_dict", "(", ")", "# no menu provided", "items", "=", "get_config", "(", "'MENU'", ")", "if", "not", "items", ":", "voices", "=", "self", ".", "get_default_voices", "(", ")", "else", ":", "voices", "=", "[", "]", "for", "item", "in", "items", ":", "self", ".", "add_voice", "(", "voices", ",", "item", ")", "return", "JsonResponse", "(", "voices", ",", "safe", "=", "False", ")" ]
Returns a json representing the menu voices in a format eaten by the js menu. Raised ImproperlyConfigured exceptions can be viewed in the browser console
[ "Returns", "a", "json", "representing", "the", "menu", "voices", "in", "a", "format", "eaten", "by", "the", "js", "menu", ".", "Raised", "ImproperlyConfigured", "exceptions", "can", "be", "viewed", "in", "the", "browser", "console" ]
python
train
ggaughan/pipe2py
pipe2py/lib/pprint2.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/lib/pprint2.py#L55-L68
def str_args(args): """formats a list of function arguments prettily not as code (kwargs are tuples (argname, argvalue) """ res = [] for x in args: if isinstance(x, tuple) and len(x) == 2: key, value = x if value and str_arg(value): res += ["%s=%s" % (key, str_arg(value))] else: res += [str_arg(x)] return ', '.join(res)
[ "def", "str_args", "(", "args", ")", ":", "res", "=", "[", "]", "for", "x", "in", "args", ":", "if", "isinstance", "(", "x", ",", "tuple", ")", "and", "len", "(", "x", ")", "==", "2", ":", "key", ",", "value", "=", "x", "if", "value", "and", "str_arg", "(", "value", ")", ":", "res", "+=", "[", "\"%s=%s\"", "%", "(", "key", ",", "str_arg", "(", "value", ")", ")", "]", "else", ":", "res", "+=", "[", "str_arg", "(", "x", ")", "]", "return", "', '", ".", "join", "(", "res", ")" ]
Formats a list of function arguments prettily, not as code (kwargs are tuples of (argname, argvalue)).
[ "formats", "a", "list", "of", "function", "arguments", "prettily", "not", "as", "code" ]
python
train
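str_arg, which formats a single value, is defined elsewhere in pprint2 and not shown in this record; the sketch below substitutes a repr-based stub for it (an assumption) so str_args can be exercised directly:

```python
def str_arg(x):
    # assumed stand-in for pipe2py's str_arg: format one value
    return repr(x)

def str_args(args):
    # as in the record above
    res = []
    for x in args:
        if isinstance(x, tuple) and len(x) == 2:
            key, value = x
            if value and str_arg(value):
                res += ["%s=%s" % (key, str_arg(value))]
        else:
            res += [str_arg(x)]
    return ', '.join(res)

# falsy kwarg values are silently dropped, mirroring the original logic
print(str_args(["feed", ("count", 5), ("empty", None)]))
# -> 'feed', count=5
```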
tanghaibao/jcvi
jcvi/apps/phylo.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/phylo.py#L318-L337
def build_ml_phyml(alignment, outfile, work_dir=".", **kwargs): """ build maximum likelihood tree of DNA seqs with PhyML """ phy_file = op.join(work_dir, "work", "aln.phy") AlignIO.write(alignment, open(phy_file, "w"), "phylip-relaxed") phyml_cl = PhymlCommandline(cmd=PHYML_BIN("phyml"), input=phy_file, **kwargs) logging.debug("Building ML tree using PhyML: %s" % phyml_cl) stdout, stderr = phyml_cl() tree_file = phy_file + "_phyml_tree.txt" if not op.exists(tree_file): print("***PhyML failed.", file=sys.stderr) return None sh("cp {0} {1}".format(tree_file, outfile), log=False) logging.debug("ML tree printed to %s" % outfile) return outfile, phy_file
[ "def", "build_ml_phyml", "(", "alignment", ",", "outfile", ",", "work_dir", "=", "\".\"", ",", "*", "*", "kwargs", ")", ":", "phy_file", "=", "op", ".", "join", "(", "work_dir", ",", "\"work\"", ",", "\"aln.phy\"", ")", "AlignIO", ".", "write", "(", "alignment", ",", "file", "(", "phy_file", ",", "\"w\"", ")", ",", "\"phylip-relaxed\"", ")", "phyml_cl", "=", "PhymlCommandline", "(", "cmd", "=", "PHYML_BIN", "(", "\"phyml\"", ")", ",", "input", "=", "phy_file", ",", "*", "*", "kwargs", ")", "logging", ".", "debug", "(", "\"Building ML tree using PhyML: %s\"", "%", "phyml_cl", ")", "stdout", ",", "stderr", "=", "phyml_cl", "(", ")", "tree_file", "=", "phy_file", "+", "\"_phyml_tree.txt\"", "if", "not", "op", ".", "exists", "(", "tree_file", ")", ":", "print", "(", "\"***PhyML failed.\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "None", "sh", "(", "\"cp {0} {1}\"", ".", "format", "(", "tree_file", ",", "outfile", ")", ",", "log", "=", "False", ")", "logging", ".", "debug", "(", "\"ML tree printed to %s\"", "%", "outfile", ")", "return", "outfile", ",", "phy_file" ]
build maximum likelihood tree of DNA seqs with PhyML
[ "build", "maximum", "likelihood", "tree", "of", "DNA", "seqs", "with", "PhyML" ]
python
train
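The alignment-writing step is the one Biopython call here that can be tried in isolation; the sketch below does exactly that with a toy in-memory alignment (Biopython assumed installed; phyml itself is not invoked, and note open() replaces the Python 2 file() builtin used in the original):

```python
from io import StringIO
from Bio import AlignIO

# three equal-length toy sequences stand in for a real DNA alignment
fasta = StringIO(">seq1\nATGCATGCAT\n>seq2\nATGCATGGAT\n>seq3\nATGGATGCAT\n")
alignment = AlignIO.read(fasta, "fasta")

with open("aln.phy", "w") as handle:      # open(), not the Python 2 file()
    AlignIO.write(alignment, handle, "phylip-relaxed")

print(open("aln.phy").read())             # phylip input ready for phyml
```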
PmagPy/PmagPy
pmagpy/convert_2_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/convert_2_magic.py#L3045-L3244
def iodp_dscr_lore(dscr_file,dscr_ex_file="", dir_path=".", input_dir_path="",volume=7,noave=False,\ meas_file="measurements.txt", offline_meas_file="",spec_file="specimens.txt"): """ Convert IODP discrete measurement files in MagIC file(s). This program assumes that you have created the specimens, samples, sites and location files using convert_2_magic.iodp_samples_csv from files downloaded from the LIMS online repository and that all samples are in that file. If there are offline treatments, you will also need the extended version of the SRM discrete download file from LORE. Parameters ---------- dscr_file : str input csv file downloaded from LIMS online repository dscr_ex_file : str input extended csv file downloaded from LIMS online repository dir_path : str output directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" offline_meas_file : str output measurement file for offline measurements , default "". must be specified if dscr_ex_file supplied spec_file : str specimens file name created by, for example, convert_2_magic.iodp_samples_csv, default "specimens.txt" file should already be in dir_path volume : float volume in cm^3 assumed during measurement on SRM. The so-called "Japanese" cubes have a volume of 7cc noave : Boolean if False, average replicate measurements Returns -------- type - Tuple : (True or False indicating if conversion was sucessful, meas_file name written) """ # initialize defaults version_num = pmag.get_version() # format variables input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) # convert cc to m^3 volume = volume * 1e-6 meas_reqd_columns=['specimen','measurement','experiment','sequence','quality','method_codes',\ 'instrument_codes','citations',\ 'treat_temp','treat_ac_field','treat_dc_field',\ 'treat_dc_field_phi','treat_dc_field_theta','meas_temp',\ 'dir_dec','dir_inc','magn_moment','magn_volume',\ 'description','timestamp','software_packages',\ 'external_database_ids','treat_step_num','meas_n_orient'] dscr_file = pmag.resolve_file_name(dscr_file, input_dir_path) if dscr_ex_file:dscr_ex_file = pmag.resolve_file_name(dscr_ex_file, input_dir_path) spec_file = pmag.resolve_file_name(spec_file, dir_path) specimens_df=pd.read_csv(spec_file,sep='\t',header=1) if len(specimens_df)==0: print ('you must download and process the samples table from LORE prior to using this') print ('see convert_2_magic.iodp_samples_csv for help') return False LORE_specimens=list(specimens_df.specimen.unique()) in_df=pd.read_csv(dscr_file) in_df['offline_treatment']="" if dscr_ex_file: ex_df=pd.read_csv(dscr_ex_file) ex_df['Test No.']=ex_df['test test_number'] ex_df['offline_treatment']=ex_df['comments'] ex_df=ex_df[['Test No.','offline_treatment']] in_df=in_df.merge(ex_df,on='Test No.') in_df['offline_treatment']=in_df['offline_treatment_y'] in_df['offline_treatment'].fillna("",inplace=True) in_df.drop_duplicates(inplace=True) if len(in_df)==0: print ('you must download a csv file from the LIMS database and place it in your input_dir_path') return False in_df.sort_values(by='Test No.',inplace=True) in_df.reset_index(inplace=True) measurements_df=pd.DataFrame(columns=meas_reqd_columns) meas_out = os.path.join(output_dir_path, meas_file) if offline_meas_file: offline_meas_out = os.path.join(output_dir_path, offline_meas_file) if dscr_ex_file and not offline_meas_file: print ("You must specify an output file for the offline measurements 
with dscr_ex_file") return hole,srm_specimens=iodp_sample_names(in_df) for spec in list(srm_specimens.unique()): if spec not in LORE_specimens: print (' -W- ',spec, ' not found in specimens table ') print ( 'check your sample name or add to specimens table by hand\n') # set up defaults measurements_df['specimen']=srm_specimens measurements_df['offline_treatment']=in_df['offline_treatment'] measurements_df['sequence']=in_df['Test No.'] measurements_df['offline_list']="" measurements_df['quality']='g' measurements_df['citations']='This study' measurements_df['meas_temp']=273 measurements_df['software_packages']=version_num measurements_df["treat_temp"] = '%8.3e' % (273) # room temp in kelvin measurements_df["meas_temp"] = '%8.3e' % (273) # room temp in kelvin measurements_df['meas_n_orient']=1 # at least one orientation measurements_df["treat_ac_field"] = '0' measurements_df["treat_dc_field"] = '0' measurements_df["treat_dc_field_phi"] = '0' measurements_df["treat_dc_field_theta"] = '0' measurements_df["treat_step_num"] = '1' measurements_df["standard"] = 'u' # assume all data are "good" measurements_df["dir_csd"] = '0' # assume all data are "good" measurements_df["method_codes"] = 'LT-NO' # assume all are NRMs measurements_df['instrument_codes']="IODP-SRM" # assume all measurements on shipboard 2G measurements_df['timestamp']=pd.to_datetime(\ in_df['Timestamp (UTC)']).dt.strftime("%Y-%m-%dT%H:%M:%S")+'Z' measurements_df['dir_dec']=in_df['Declination background & drift corrected (deg)'] # declination measurements_df['dir_inc']=in_df['Inclination background & drift corrected (deg)'] # inclination measurements_df['magn_volume']=in_df['Intensity background & drift corrected (A/m)'] # magnetization measurements_df['magn_moment']=measurements_df['magn_volume']*volume # moment in Am^2 measurements_df['description']=in_df['Treatment Type'] # temporary column measurements_df['treat_ac_field']=in_df['Treatment Value']*1e-3 # assume all treatments are AF measurements_df.loc[measurements_df['description']=='IN-LINE AF DEMAG',\ 'method_codes']='LT-AF-Z' measurements_df.loc[measurements_df['description']=='IN-LINE AF DEMAG',\ 'instrument_codes']='IODP-SRM:IODP-SRM-AF' measurements_df['external_database_ids']='LORE['+in_df['Test No.'].astype('str')+']' measurements_df.fillna("",inplace=True) measurements_df.sort_values(by='sequence',inplace=True) if dscr_ex_file: meas_df=measurements_df[measurements_df.offline_treatment==""] # all the records with no offline treatments offline_df=pd.DataFrame(columns=meas_df.columns) # make a container for offline measurements arm_df=measurements_df[measurements_df['offline_treatment'].str.contains('ARM')] if len(arm_df)>0: # there are ARM treatment steps arm_df['offline_list']=arm_df['offline_treatment'].str.split(":") arm_list=arm_df.specimen.unique() for spc in arm_list: # get all the ARM treated specimens spc_df=arm_df[arm_df.specimen.str.match(spc)] # get all the measurements for this specimen seq_no=spc_df[spc_df.specimen.str.match(spc)].sequence.values[0] # get the sequence number of the ARM step end_seq_no=spc_df[spc_df.specimen.str.match(spc)].sequence.values[-1] # get the sequence number of the last ARM demag step arm_df.loc[arm_df.sequence==seq_no,'method_codes']='LT-AF-I:LP-ARM-AFD' # label the ARM record arm_df.loc[arm_df.sequence==seq_no,'experiment']=spc+'_LT-AF-I_LT-AF-Z_LP-ARM-AFD' # label the ARM record arm_df.loc[arm_df.sequence==seq_no,'treat_ac_field']=arm_df['offline_list'].str.get(1).astype('float')*1e-3 # AF peak field in mT converted to tesla 
arm_df.loc[arm_df.sequence==seq_no,'treat_dc_field']=arm_df['offline_list'].str.get(2).astype('float')*1e-3 # AF peak field in mT converted to tesla arm_df.loc[arm_df.sequence==seq_no,'instrument_codes']='IODP-SRM:IODP-DTECH' arm_df.loc[(arm_df.specimen.str.match(spc)) & (arm_df.sequence>seq_no) & (arm_df.sequence<=end_seq_no),'method_codes']= 'LT-AF-Z:LP-ARM-AFD' arm_df.loc[(arm_df.specimen.str.match(spc)) & (arm_df.sequence>seq_no) & (arm_df.sequence<=end_seq_no),'experiment']= spc+'LT-AF-I_LT-AF-Z_LP-ARM-AFD' arm_df.loc[(arm_df.specimen.str.match(spc)) & (arm_df.sequence>seq_no) & (arm_df.sequence<=end_seq_no),'instrument_codes']= 'IODP-SRM:IODP-SRM-AF' strings=[] for i in range(len(arm_df)):strings.append(str(i)) arm_df['measurement']=arm_df['experiment']+strings arm_df['description']=arm_df['offline_treatment'] offline_df=pd.concat([offline_df,arm_df]) # put the arm data into the offline dataframe irm_in_df=measurements_df[measurements_df['offline_treatment'].str.contains('IRM')] if len(irm_in_df)>0: # there are IRM treatment steps irm_in_df['offline_list']=irm_in_df['offline_treatment'].str.split(":") irm_list=irm_in_df.specimen.unique() irm_out_df=pd.DataFrame(columns=irm_in_df.columns) # make an output container for spc in irm_list: # get all the IRM treated specimens # first do IRM acquisition steps spc_df=irm_in_df[irm_in_df.specimen.str.match(spc)] # get all the measurements for this specimen spc_acq_df=spc_df[spc_df.treat_ac_field==0] # IRM acquisition step spc_acq_df['method_codes']='LT-IRM:LP-IRM' # label the IRM records spc_acq_df['experiment']=spc+'_LT-IRM_LP-IRM' # label the IRM experiment spc_acq_df['treat_dc_field']=spc_acq_df['offline_list'].str.get(1).astype('float')*1e-3 # IRM field in mT converted to tesla spc_acq_df['instrument_codes']='IODP-SRM:IODP-IM-10' # do the AF demag of the IRM sirm_seq_no=spc_acq_df[spc_acq_df.specimen.str.match(spc)].sequence.values[-1] # get the sequence number of the last IRM step spc_afd_df=spc_df[(spc_df.treat_ac_field!=0)] # spc_afd_df['method_codes']= 'LP-IRM:LP-IRM-AFD' spc_afd_df['experiment']= spc+'LP-IRM:LP-IRM-AFD' spc_afd_df['instrument_codes']= 'IODP-SRM:IODP-SRM-AFD' irm_out_df=pd.concat([irm_out_df,spc_acq_df,spc_afd_df]) strings=[] for i in range(len(irm_out_df)):strings.append(str(i)) irm_out_df['measurement']=irm_out_df['experiment']+strings irm_out_df['description']=irm_out_df['offline_treatment'] offline_df=pd.concat([offline_df,irm_out_df]) # put the irm data into the offline dataframe if dscr_ex_file: offline_df.drop(columns=['offline_list'],inplace=True) offline_df.drop(columns=['offline_treatment'],inplace=True) offline_df.sort_values(by='sequence',inplace=True) offline_df.drop_duplicates(subset=['sequence'],inplace=True) offline_df.fillna("",inplace=True) offline_dicts = offline_df.to_dict('records') pmag.magic_write(offline_meas_out, offline_dicts, 'measurements') measurements_df=meas_df # put all the non-offline treatments back into measurements_df if 'offline_treatment' in measurements_df.columns: measurements_df.drop(columns=['offline_treatment'],inplace=True) if 'offline_list' in measurements_df.columns: measurements_df.drop(columns=['offline_list'],inplace=True) measurements_df.sort_values(by='sequence',inplace=True) measurements_df.drop_duplicates(subset=['sequence'],inplace=True) measurements_df['treat_step_num']=measurements_df['sequence'] measurements_df.fillna("",inplace=True) meas_dicts = measurements_df.to_dict('records') meas_dicts=pmag.measurements_methods3(meas_dicts,noave=noave) 
pmag.magic_write(meas_out, meas_dicts, 'measurements') return True
[ "def", "iodp_dscr_lore", "(", "dscr_file", ",", "dscr_ex_file", "=", "\"\"", ",", "dir_path", "=", "\".\"", ",", "input_dir_path", "=", "\"\"", ",", "volume", "=", "7", ",", "noave", "=", "False", ",", "meas_file", "=", "\"measurements.txt\"", ",", "offline_meas_file", "=", "\"\"", ",", "spec_file", "=", "\"specimens.txt\"", ")", ":", "# initialize defaults", "version_num", "=", "pmag", ".", "get_version", "(", ")", "# format variables", "input_dir_path", ",", "output_dir_path", "=", "pmag", ".", "fix_directories", "(", "input_dir_path", ",", "dir_path", ")", "# convert cc to m^3", "volume", "=", "volume", "*", "1e-6", "meas_reqd_columns", "=", "[", "'specimen'", ",", "'measurement'", ",", "'experiment'", ",", "'sequence'", ",", "'quality'", ",", "'method_codes'", ",", "'instrument_codes'", ",", "'citations'", ",", "'treat_temp'", ",", "'treat_ac_field'", ",", "'treat_dc_field'", ",", "'treat_dc_field_phi'", ",", "'treat_dc_field_theta'", ",", "'meas_temp'", ",", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", ",", "'magn_volume'", ",", "'description'", ",", "'timestamp'", ",", "'software_packages'", ",", "'external_database_ids'", ",", "'treat_step_num'", ",", "'meas_n_orient'", "]", "dscr_file", "=", "pmag", ".", "resolve_file_name", "(", "dscr_file", ",", "input_dir_path", ")", "if", "dscr_ex_file", ":", "dscr_ex_file", "=", "pmag", ".", "resolve_file_name", "(", "dscr_ex_file", ",", "input_dir_path", ")", "spec_file", "=", "pmag", ".", "resolve_file_name", "(", "spec_file", ",", "dir_path", ")", "specimens_df", "=", "pd", ".", "read_csv", "(", "spec_file", ",", "sep", "=", "'\\t'", ",", "header", "=", "1", ")", "if", "len", "(", "specimens_df", ")", "==", "0", ":", "print", "(", "'you must download and process the samples table from LORE prior to using this'", ")", "print", "(", "'see convert_2_magic.iodp_samples_csv for help'", ")", "return", "False", "LORE_specimens", "=", "list", "(", "specimens_df", ".", "specimen", ".", "unique", "(", ")", ")", "in_df", "=", "pd", ".", "read_csv", "(", "dscr_file", ")", "in_df", "[", "'offline_treatment'", "]", "=", "\"\"", "if", "dscr_ex_file", ":", "ex_df", "=", "pd", ".", "read_csv", "(", "dscr_ex_file", ")", "ex_df", "[", "'Test No.'", "]", "=", "ex_df", "[", "'test test_number'", "]", "ex_df", "[", "'offline_treatment'", "]", "=", "ex_df", "[", "'comments'", "]", "ex_df", "=", "ex_df", "[", "[", "'Test No.'", ",", "'offline_treatment'", "]", "]", "in_df", "=", "in_df", ".", "merge", "(", "ex_df", ",", "on", "=", "'Test No.'", ")", "in_df", "[", "'offline_treatment'", "]", "=", "in_df", "[", "'offline_treatment_y'", "]", "in_df", "[", "'offline_treatment'", "]", ".", "fillna", "(", "\"\"", ",", "inplace", "=", "True", ")", "in_df", ".", "drop_duplicates", "(", "inplace", "=", "True", ")", "if", "len", "(", "in_df", ")", "==", "0", ":", "print", "(", "'you must download a csv file from the LIMS database and place it in your input_dir_path'", ")", "return", "False", "in_df", ".", "sort_values", "(", "by", "=", "'Test No.'", ",", "inplace", "=", "True", ")", "in_df", ".", "reset_index", "(", "inplace", "=", "True", ")", "measurements_df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "meas_reqd_columns", ")", "meas_out", "=", "os", ".", "path", ".", "join", "(", "output_dir_path", ",", "meas_file", ")", "if", "offline_meas_file", ":", "offline_meas_out", "=", "os", ".", "path", ".", "join", "(", "output_dir_path", ",", "offline_meas_file", ")", "if", "dscr_ex_file", "and", "not", "offline_meas_file", ":", "print", "(", "\"You must 
specify an output file for the offline measurements with dscr_ex_file\"", ")", "return", "hole", ",", "srm_specimens", "=", "iodp_sample_names", "(", "in_df", ")", "for", "spec", "in", "list", "(", "srm_specimens", ".", "unique", "(", ")", ")", ":", "if", "spec", "not", "in", "LORE_specimens", ":", "print", "(", "' -W- '", ",", "spec", ",", "' not found in specimens table '", ")", "print", "(", "'check your sample name or add to specimens table by hand\\n'", ")", "# set up defaults", "measurements_df", "[", "'specimen'", "]", "=", "srm_specimens", "measurements_df", "[", "'offline_treatment'", "]", "=", "in_df", "[", "'offline_treatment'", "]", "measurements_df", "[", "'sequence'", "]", "=", "in_df", "[", "'Test No.'", "]", "measurements_df", "[", "'offline_list'", "]", "=", "\"\"", "measurements_df", "[", "'quality'", "]", "=", "'g'", "measurements_df", "[", "'citations'", "]", "=", "'This study'", "measurements_df", "[", "'meas_temp'", "]", "=", "273", "measurements_df", "[", "'software_packages'", "]", "=", "version_num", "measurements_df", "[", "\"treat_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "measurements_df", "[", "\"meas_temp\"", "]", "=", "'%8.3e'", "%", "(", "273", ")", "# room temp in kelvin", "measurements_df", "[", "'meas_n_orient'", "]", "=", "1", "# at least one orientation", "measurements_df", "[", "\"treat_ac_field\"", "]", "=", "'0'", "measurements_df", "[", "\"treat_dc_field\"", "]", "=", "'0'", "measurements_df", "[", "\"treat_dc_field_phi\"", "]", "=", "'0'", "measurements_df", "[", "\"treat_dc_field_theta\"", "]", "=", "'0'", "measurements_df", "[", "\"treat_step_num\"", "]", "=", "'1'", "measurements_df", "[", "\"standard\"", "]", "=", "'u'", "# assume all data are \"good\"", "measurements_df", "[", "\"dir_csd\"", "]", "=", "'0'", "# assume all data are \"good\"", "measurements_df", "[", "\"method_codes\"", "]", "=", "'LT-NO'", "# assume all are NRMs", "measurements_df", "[", "'instrument_codes'", "]", "=", "\"IODP-SRM\"", "# assume all measurements on shipboard 2G", "measurements_df", "[", "'timestamp'", "]", "=", "pd", ".", "to_datetime", "(", "in_df", "[", "'Timestamp (UTC)'", "]", ")", ".", "dt", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "+", "'Z'", "measurements_df", "[", "'dir_dec'", "]", "=", "in_df", "[", "'Declination background & drift corrected (deg)'", "]", "# declination", "measurements_df", "[", "'dir_inc'", "]", "=", "in_df", "[", "'Inclination background & drift corrected (deg)'", "]", "# inclination", "measurements_df", "[", "'magn_volume'", "]", "=", "in_df", "[", "'Intensity background & drift corrected (A/m)'", "]", "# magnetization", "measurements_df", "[", "'magn_moment'", "]", "=", "measurements_df", "[", "'magn_volume'", "]", "*", "volume", "# moment in Am^2", "measurements_df", "[", "'description'", "]", "=", "in_df", "[", "'Treatment Type'", "]", "# temporary column", "measurements_df", "[", "'treat_ac_field'", "]", "=", "in_df", "[", "'Treatment Value'", "]", "*", "1e-3", "# assume all treatments are AF", "measurements_df", ".", "loc", "[", "measurements_df", "[", "'description'", "]", "==", "'IN-LINE AF DEMAG'", ",", "'method_codes'", "]", "=", "'LT-AF-Z'", "measurements_df", ".", "loc", "[", "measurements_df", "[", "'description'", "]", "==", "'IN-LINE AF DEMAG'", ",", "'instrument_codes'", "]", "=", "'IODP-SRM:IODP-SRM-AF'", "measurements_df", "[", "'external_database_ids'", "]", "=", "'LORE['", "+", "in_df", "[", "'Test No.'", "]", ".", "astype", "(", "'str'", ")", "+", "']'", 
"measurements_df", ".", "fillna", "(", "\"\"", ",", "inplace", "=", "True", ")", "measurements_df", ".", "sort_values", "(", "by", "=", "'sequence'", ",", "inplace", "=", "True", ")", "if", "dscr_ex_file", ":", "meas_df", "=", "measurements_df", "[", "measurements_df", ".", "offline_treatment", "==", "\"\"", "]", "# all the records with no offline treatments", "offline_df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "meas_df", ".", "columns", ")", "# make a container for offline measurements", "arm_df", "=", "measurements_df", "[", "measurements_df", "[", "'offline_treatment'", "]", ".", "str", ".", "contains", "(", "'ARM'", ")", "]", "if", "len", "(", "arm_df", ")", ">", "0", ":", "# there are ARM treatment steps", "arm_df", "[", "'offline_list'", "]", "=", "arm_df", "[", "'offline_treatment'", "]", ".", "str", ".", "split", "(", "\":\"", ")", "arm_list", "=", "arm_df", ".", "specimen", ".", "unique", "(", ")", "for", "spc", "in", "arm_list", ":", "# get all the ARM treated specimens", "spc_df", "=", "arm_df", "[", "arm_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", "]", "# get all the measurements for this specimen", "seq_no", "=", "spc_df", "[", "spc_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", "]", ".", "sequence", ".", "values", "[", "0", "]", "# get the sequence number of the ARM step", "end_seq_no", "=", "spc_df", "[", "spc_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", "]", ".", "sequence", ".", "values", "[", "-", "1", "]", "# get the sequence number of the last ARM demag step", "arm_df", ".", "loc", "[", "arm_df", ".", "sequence", "==", "seq_no", ",", "'method_codes'", "]", "=", "'LT-AF-I:LP-ARM-AFD'", "# label the ARM record", "arm_df", ".", "loc", "[", "arm_df", ".", "sequence", "==", "seq_no", ",", "'experiment'", "]", "=", "spc", "+", "'_LT-AF-I_LT-AF-Z_LP-ARM-AFD'", "# label the ARM record", "arm_df", ".", "loc", "[", "arm_df", ".", "sequence", "==", "seq_no", ",", "'treat_ac_field'", "]", "=", "arm_df", "[", "'offline_list'", "]", ".", "str", ".", "get", "(", "1", ")", ".", "astype", "(", "'float'", ")", "*", "1e-3", "# AF peak field in mT converted to tesla", "arm_df", ".", "loc", "[", "arm_df", ".", "sequence", "==", "seq_no", ",", "'treat_dc_field'", "]", "=", "arm_df", "[", "'offline_list'", "]", ".", "str", ".", "get", "(", "2", ")", ".", "astype", "(", "'float'", ")", "*", "1e-3", "# AF peak field in mT converted to tesla", "arm_df", ".", "loc", "[", "arm_df", ".", "sequence", "==", "seq_no", ",", "'instrument_codes'", "]", "=", "'IODP-SRM:IODP-DTECH'", "arm_df", ".", "loc", "[", "(", "arm_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", ")", "&", "(", "arm_df", ".", "sequence", ">", "seq_no", ")", "&", "(", "arm_df", ".", "sequence", "<=", "end_seq_no", ")", ",", "'method_codes'", "]", "=", "'LT-AF-Z:LP-ARM-AFD'", "arm_df", ".", "loc", "[", "(", "arm_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", ")", "&", "(", "arm_df", ".", "sequence", ">", "seq_no", ")", "&", "(", "arm_df", ".", "sequence", "<=", "end_seq_no", ")", ",", "'experiment'", "]", "=", "spc", "+", "'LT-AF-I_LT-AF-Z_LP-ARM-AFD'", "arm_df", ".", "loc", "[", "(", "arm_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", ")", "&", "(", "arm_df", ".", "sequence", ">", "seq_no", ")", "&", "(", "arm_df", ".", "sequence", "<=", "end_seq_no", ")", ",", "'instrument_codes'", "]", "=", "'IODP-SRM:IODP-SRM-AF'", "strings", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "arm_df", ")", ")", ":", 
"strings", ".", "append", "(", "str", "(", "i", ")", ")", "arm_df", "[", "'measurement'", "]", "=", "arm_df", "[", "'experiment'", "]", "+", "strings", "arm_df", "[", "'description'", "]", "=", "arm_df", "[", "'offline_treatment'", "]", "offline_df", "=", "pd", ".", "concat", "(", "[", "offline_df", ",", "arm_df", "]", ")", "# put the arm data into the offline dataframe", "irm_in_df", "=", "measurements_df", "[", "measurements_df", "[", "'offline_treatment'", "]", ".", "str", ".", "contains", "(", "'IRM'", ")", "]", "if", "len", "(", "irm_in_df", ")", ">", "0", ":", "# there are IRM treatment steps", "irm_in_df", "[", "'offline_list'", "]", "=", "irm_in_df", "[", "'offline_treatment'", "]", ".", "str", ".", "split", "(", "\":\"", ")", "irm_list", "=", "irm_in_df", ".", "specimen", ".", "unique", "(", ")", "irm_out_df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "irm_in_df", ".", "columns", ")", "# make an output container", "for", "spc", "in", "irm_list", ":", "# get all the IRM treated specimens", "# first do IRM acquisition steps", "spc_df", "=", "irm_in_df", "[", "irm_in_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", "]", "# get all the measurements for this specimen", "spc_acq_df", "=", "spc_df", "[", "spc_df", ".", "treat_ac_field", "==", "0", "]", "# IRM acquisition step", "spc_acq_df", "[", "'method_codes'", "]", "=", "'LT-IRM:LP-IRM'", "# label the IRM records", "spc_acq_df", "[", "'experiment'", "]", "=", "spc", "+", "'_LT-IRM_LP-IRM'", "# label the IRM experiment", "spc_acq_df", "[", "'treat_dc_field'", "]", "=", "spc_acq_df", "[", "'offline_list'", "]", ".", "str", ".", "get", "(", "1", ")", ".", "astype", "(", "'float'", ")", "*", "1e-3", "# IRM field in mT converted to tesla", "spc_acq_df", "[", "'instrument_codes'", "]", "=", "'IODP-SRM:IODP-IM-10'", "# do the AF demag of the IRM", "sirm_seq_no", "=", "spc_acq_df", "[", "spc_acq_df", ".", "specimen", ".", "str", ".", "match", "(", "spc", ")", "]", ".", "sequence", ".", "values", "[", "-", "1", "]", "# get the sequence number of the last IRM step", "spc_afd_df", "=", "spc_df", "[", "(", "spc_df", ".", "treat_ac_field", "!=", "0", ")", "]", "#", "spc_afd_df", "[", "'method_codes'", "]", "=", "'LP-IRM:LP-IRM-AFD'", "spc_afd_df", "[", "'experiment'", "]", "=", "spc", "+", "'LP-IRM:LP-IRM-AFD'", "spc_afd_df", "[", "'instrument_codes'", "]", "=", "'IODP-SRM:IODP-SRM-AFD'", "irm_out_df", "=", "pd", ".", "concat", "(", "[", "irm_out_df", ",", "spc_acq_df", ",", "spc_afd_df", "]", ")", "strings", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "irm_out_df", ")", ")", ":", "strings", ".", "append", "(", "str", "(", "i", ")", ")", "irm_out_df", "[", "'measurement'", "]", "=", "irm_out_df", "[", "'experiment'", "]", "+", "strings", "irm_out_df", "[", "'description'", "]", "=", "irm_out_df", "[", "'offline_treatment'", "]", "offline_df", "=", "pd", ".", "concat", "(", "[", "offline_df", ",", "irm_out_df", "]", ")", "# put the irm data into the offline dataframe", "if", "dscr_ex_file", ":", "offline_df", ".", "drop", "(", "columns", "=", "[", "'offline_list'", "]", ",", "inplace", "=", "True", ")", "offline_df", ".", "drop", "(", "columns", "=", "[", "'offline_treatment'", "]", ",", "inplace", "=", "True", ")", "offline_df", ".", "sort_values", "(", "by", "=", "'sequence'", ",", "inplace", "=", "True", ")", "offline_df", ".", "drop_duplicates", "(", "subset", "=", "[", "'sequence'", "]", ",", "inplace", "=", "True", ")", "offline_df", ".", "fillna", "(", "\"\"", ",", "inplace", "=", "True", ")", 
"offline_dicts", "=", "offline_df", ".", "to_dict", "(", "'records'", ")", "pmag", ".", "magic_write", "(", "offline_meas_out", ",", "offline_dicts", ",", "'measurements'", ")", "measurements_df", "=", "meas_df", "# put all the non-offline treatments back into measurements_df", "if", "'offline_treatment'", "in", "measurements_df", ".", "columns", ":", "measurements_df", ".", "drop", "(", "columns", "=", "[", "'offline_treatment'", "]", ",", "inplace", "=", "True", ")", "if", "'offline_list'", "in", "measurements_df", ".", "columns", ":", "measurements_df", ".", "drop", "(", "columns", "=", "[", "'offline_list'", "]", ",", "inplace", "=", "True", ")", "measurements_df", ".", "sort_values", "(", "by", "=", "'sequence'", ",", "inplace", "=", "True", ")", "measurements_df", ".", "drop_duplicates", "(", "subset", "=", "[", "'sequence'", "]", ",", "inplace", "=", "True", ")", "measurements_df", "[", "'treat_step_num'", "]", "=", "measurements_df", "[", "'sequence'", "]", "measurements_df", ".", "fillna", "(", "\"\"", ",", "inplace", "=", "True", ")", "meas_dicts", "=", "measurements_df", ".", "to_dict", "(", "'records'", ")", "meas_dicts", "=", "pmag", ".", "measurements_methods3", "(", "meas_dicts", ",", "noave", "=", "noave", ")", "pmag", ".", "magic_write", "(", "meas_out", ",", "meas_dicts", ",", "'measurements'", ")", "return", "True" ]
Convert IODP discrete measurement files into MagIC file(s). This program assumes that you have created the specimens, samples, sites and location files using convert_2_magic.iodp_samples_csv from files downloaded from the LIMS online repository and that all samples are in that file. If there are offline treatments, you will also need the extended version of the SRM discrete download file from LORE. Parameters ---------- dscr_file : str input csv file downloaded from LIMS online repository dscr_ex_file : str input extended csv file downloaded from LIMS online repository dir_path : str output directory, default "." input_dir_path : str input file directory IF different from dir_path, default "" meas_file : str output measurement file name, default "measurements.txt" offline_meas_file : str output measurement file for offline measurements, default "". Must be specified if dscr_ex_file supplied spec_file : str specimens file name created by, for example, convert_2_magic.iodp_samples_csv, default "specimens.txt" file should already be in dir_path volume : float volume in cm^3 assumed during measurement on SRM. The so-called "Japanese" cubes have a volume of 7cc noave : Boolean if False, average replicate measurements Returns -------- type - Tuple : (True or False indicating if conversion was successful, meas_file name written)
[ "Convert", "IODP", "discrete", "measurement", "files", "in", "MagIC", "file", "(", "s", ")", ".", "This", "program", "assumes", "that", "you", "have", "created", "the", "specimens", "samples", "sites", "and", "location", "files", "using", "convert_2_magic", ".", "iodp_samples_csv", "from", "files", "downloaded", "from", "the", "LIMS", "online", "repository", "and", "that", "all", "samples", "are", "in", "that", "file", "." ]
python
train
gwpy/gwpy
gwpy/plot/axes.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L402-L492
def tile(self, x, y, w, h, color=None, anchor='center', edgecolors='face', linewidth=0.8, **kwargs): """Plot rectanguler tiles based onto these `Axes`. ``x`` and ``y`` give the anchor point for each tile, with ``w`` and ``h`` giving the extent in the X and Y axis respectively. Parameters ---------- x, y, w, h : `array_like`, shape (n, ) Input data color : `array_like`, shape (n, ) Array of amplitudes for tile color anchor : `str`, optional Anchor point for tiles relative to ``(x, y)`` coordinates, one of - ``'center'`` - center tile on ``(x, y)`` - ``'ll'`` - ``(x, y)`` defines lower-left corner of tile - ``'lr'`` - ``(x, y)`` defines lower-right corner of tile - ``'ul'`` - ``(x, y)`` defines upper-left corner of tile - ``'ur'`` - ``(x, y)`` defines upper-right corner of tile **kwargs Other keywords are passed to :meth:`~matplotlib.collections.PolyCollection` Returns ------- collection : `~matplotlib.collections.PolyCollection` the collection of tiles drawn Examples -------- >>> import numpy >>> from matplotlib import pyplot >>> import gwpy.plot # to get gwpy's Axes >>> x = numpy.arange(10) >>> y = numpy.arange(x.size) >>> w = numpy.ones_like(x) * .8 >>> h = numpy.ones_like(x) * .8 >>> fig = pyplot.figure() >>> ax = fig.gca() >>> ax.tile(x, y, w, h, anchor='ll') >>> pyplot.show() """ # get color and sort if color is not None and kwargs.get('c_sort', True): sortidx = color.argsort() x = x[sortidx] y = y[sortidx] w = w[sortidx] h = h[sortidx] color = color[sortidx] # define how to make a polygon for each tile if anchor == 'll': def _poly(x, y, w, h): return ((x, y), (x, y+h), (x+w, y+h), (x+w, y)) elif anchor == 'lr': def _poly(x, y, w, h): return ((x-w, y), (x-w, y+h), (x, y+h), (x, y)) elif anchor == 'ul': def _poly(x, y, w, h): return ((x, y-h), (x, y), (x+w, y), (x+w, y-h)) elif anchor == 'ur': def _poly(x, y, w, h): return ((x-w, y-h), (x-w, y), (x, y), (x, y-h)) elif anchor == 'center': def _poly(x, y, w, h): return ((x-w/2., y-h/2.), (x-w/2., y+h/2.), (x+w/2., y+h/2.), (x+w/2., y-h/2.)) else: raise ValueError("Unrecognised tile anchor {!r}".format(anchor)) # build collection cmap = kwargs.pop('cmap', rcParams['image.cmap']) coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)), edgecolors=edgecolors, linewidth=linewidth, **kwargs) if color is not None: coll.set_array(color) coll.set_cmap(cmap) out = self.add_collection(coll) self.autoscale_view() return out
[ "def", "tile", "(", "self", ",", "x", ",", "y", ",", "w", ",", "h", ",", "color", "=", "None", ",", "anchor", "=", "'center'", ",", "edgecolors", "=", "'face'", ",", "linewidth", "=", "0.8", ",", "*", "*", "kwargs", ")", ":", "# get color and sort", "if", "color", "is", "not", "None", "and", "kwargs", ".", "get", "(", "'c_sort'", ",", "True", ")", ":", "sortidx", "=", "color", ".", "argsort", "(", ")", "x", "=", "x", "[", "sortidx", "]", "y", "=", "y", "[", "sortidx", "]", "w", "=", "w", "[", "sortidx", "]", "h", "=", "h", "[", "sortidx", "]", "color", "=", "color", "[", "sortidx", "]", "# define how to make a polygon for each tile", "if", "anchor", "==", "'ll'", ":", "def", "_poly", "(", "x", ",", "y", ",", "w", ",", "h", ")", ":", "return", "(", "(", "x", ",", "y", ")", ",", "(", "x", ",", "y", "+", "h", ")", ",", "(", "x", "+", "w", ",", "y", "+", "h", ")", ",", "(", "x", "+", "w", ",", "y", ")", ")", "elif", "anchor", "==", "'lr'", ":", "def", "_poly", "(", "x", ",", "y", ",", "w", ",", "h", ")", ":", "return", "(", "(", "x", "-", "w", ",", "y", ")", ",", "(", "x", "-", "w", ",", "y", "+", "h", ")", ",", "(", "x", ",", "y", "+", "h", ")", ",", "(", "x", ",", "y", ")", ")", "elif", "anchor", "==", "'ul'", ":", "def", "_poly", "(", "x", ",", "y", ",", "w", ",", "h", ")", ":", "return", "(", "(", "x", ",", "y", "-", "h", ")", ",", "(", "x", ",", "y", ")", ",", "(", "x", "+", "w", ",", "y", ")", ",", "(", "x", "+", "w", ",", "y", "-", "h", ")", ")", "elif", "anchor", "==", "'ur'", ":", "def", "_poly", "(", "x", ",", "y", ",", "w", ",", "h", ")", ":", "return", "(", "(", "x", "-", "w", ",", "y", "-", "h", ")", ",", "(", "x", "-", "w", ",", "y", ")", ",", "(", "x", ",", "y", ")", ",", "(", "x", ",", "y", "-", "h", ")", ")", "elif", "anchor", "==", "'center'", ":", "def", "_poly", "(", "x", ",", "y", ",", "w", ",", "h", ")", ":", "return", "(", "(", "x", "-", "w", "/", "2.", ",", "y", "-", "h", "/", "2.", ")", ",", "(", "x", "-", "w", "/", "2.", ",", "y", "+", "h", "/", "2.", ")", ",", "(", "x", "+", "w", "/", "2.", ",", "y", "+", "h", "/", "2.", ")", ",", "(", "x", "+", "w", "/", "2.", ",", "y", "-", "h", "/", "2.", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Unrecognised tile anchor {!r}\"", ".", "format", "(", "anchor", ")", ")", "# build collection", "cmap", "=", "kwargs", ".", "pop", "(", "'cmap'", ",", "rcParams", "[", "'image.cmap'", "]", ")", "coll", "=", "PolyCollection", "(", "(", "_poly", "(", "*", "tile", ")", "for", "tile", "in", "zip", "(", "x", ",", "y", ",", "w", ",", "h", ")", ")", ",", "edgecolors", "=", "edgecolors", ",", "linewidth", "=", "linewidth", ",", "*", "*", "kwargs", ")", "if", "color", "is", "not", "None", ":", "coll", ".", "set_array", "(", "color", ")", "coll", ".", "set_cmap", "(", "cmap", ")", "out", "=", "self", ".", "add_collection", "(", "coll", ")", "self", ".", "autoscale_view", "(", ")", "return", "out" ]
Plot rectangular tiles onto these `Axes`. ``x`` and ``y`` give the anchor point for each tile, with ``w`` and ``h`` giving the extent along the X and Y axes respectively. Parameters ---------- x, y, w, h : `array_like`, shape (n, ) Input data color : `array_like`, shape (n, ) Array of amplitudes for tile color anchor : `str`, optional Anchor point for tiles relative to ``(x, y)`` coordinates, one of - ``'center'`` - center tile on ``(x, y)`` - ``'ll'`` - ``(x, y)`` defines lower-left corner of tile - ``'lr'`` - ``(x, y)`` defines lower-right corner of tile - ``'ul'`` - ``(x, y)`` defines upper-left corner of tile - ``'ur'`` - ``(x, y)`` defines upper-right corner of tile **kwargs Other keywords are passed to :meth:`~matplotlib.collections.PolyCollection` Returns ------- collection : `~matplotlib.collections.PolyCollection` the collection of tiles drawn Examples -------- >>> import numpy >>> from matplotlib import pyplot >>> import gwpy.plot # to get gwpy's Axes >>> x = numpy.arange(10) >>> y = numpy.arange(x.size) >>> w = numpy.ones_like(x) * .8 >>> h = numpy.ones_like(x) * .8 >>> fig = pyplot.figure() >>> ax = fig.gca() >>> ax.tile(x, y, w, h, anchor='ll') >>> pyplot.show()
[ "Plot", "rectanguler", "tiles", "based", "onto", "these", "Axes", "." ]
python
train
numenta/nupic
src/nupic/regions/record_sensor.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/record_sensor.py#L620-L629
def writeToProto(self, proto): """ Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.writeToProto`. """ self.encoder.write(proto.encoder) if self.disabledEncoder is not None: self.disabledEncoder.write(proto.disabledEncoder) proto.topDownMode = int(self.topDownMode) proto.verbosity = self.verbosity proto.numCategories = self.numCategories
[ "def", "writeToProto", "(", "self", ",", "proto", ")", ":", "self", ".", "encoder", ".", "write", "(", "proto", ".", "encoder", ")", "if", "self", ".", "disabledEncoder", "is", "not", "None", ":", "self", ".", "disabledEncoder", ".", "write", "(", "proto", ".", "disabledEncoder", ")", "proto", ".", "topDownMode", "=", "int", "(", "self", ".", "topDownMode", ")", "proto", ".", "verbosity", "=", "self", ".", "verbosity", "proto", ".", "numCategories", "=", "self", ".", "numCategories" ]
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.writeToProto`.
[ "Overrides", ":", "meth", ":", "nupic", ".", "bindings", ".", "regions", ".", "PyRegion", ".", "PyRegion", ".", "writeToProto", "." ]
python
valid
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L673-L678
def get_block_hash(block_height, coin_symbol='btc', api_key=None): ''' Takes a block_height and returns the block_hash ''' return get_block_overview(block_representation=block_height, coin_symbol=coin_symbol, txn_limit=1, api_key=api_key)['hash']
[ "def", "get_block_hash", "(", "block_height", ",", "coin_symbol", "=", "'btc'", ",", "api_key", "=", "None", ")", ":", "return", "get_block_overview", "(", "block_representation", "=", "block_height", ",", "coin_symbol", "=", "coin_symbol", ",", "txn_limit", "=", "1", ",", "api_key", "=", "api_key", ")", "[", "'hash'", "]" ]
Takes a block_height and returns the block_hash
[ "Takes", "a", "block_height", "and", "returns", "the", "block_hash" ]
python
train
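Assuming the function is importable from the package root and network access to the BlockCypher API is available, fetching the Bitcoin genesis block hash looks like this (an api_key is optional at low request volumes):

```python
from blockcypher import get_block_hash

print(get_block_hash(0))    # block height 0 = the genesis block
# 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
```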
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/Client.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/Client.py#L1446-L1455
def __handle_low_seq_resend(self, msg, req): """special error case - low sequence number (update sequence number & resend if applicable). Returns True if a resend was scheduled, False otherwise. MUST be called within self.__requests lock.""" if msg[M_TYPE] == E_FAILED and msg[M_PAYLOAD][P_CODE] == E_FAILED_CODE_LOWSEQNUM: with self.__seqnum_lock: self.__seqnum = int(msg[M_PAYLOAD][P_MESSAGE]) # return value indicating shutdown not useful here since this is run in receiver thread self.__retry_enqueue(PreparedMessage(req._inner_msg_out, req.id_)) return True return False
[ "def", "__handle_low_seq_resend", "(", "self", ",", "msg", ",", "req", ")", ":", "if", "msg", "[", "M_TYPE", "]", "==", "E_FAILED", "and", "msg", "[", "M_PAYLOAD", "]", "[", "P_CODE", "]", "==", "E_FAILED_CODE_LOWSEQNUM", ":", "with", "self", ".", "__seqnum_lock", ":", "self", ".", "__seqnum", "=", "int", "(", "msg", "[", "M_PAYLOAD", "]", "[", "P_MESSAGE", "]", ")", "# return value indicating shutdown not useful here since this is run in receiver thread", "self", ".", "__retry_enqueue", "(", "PreparedMessage", "(", "req", ".", "_inner_msg_out", ",", "req", ".", "id_", ")", ")", "return", "True", "return", "False" ]
special error case - low sequence number (update sequence number & resend if applicable). Returns True if a resend was scheduled, False otherwise. MUST be called within self.__requests lock.
[ "special", "error", "case", "-", "low", "sequence", "number", "(", "update", "sequence", "number", "&", "resend", "if", "applicable", ")", ".", "Returns", "True", "if", "a", "resend", "was", "scheduled", "False", "otherwise", ".", "MUST", "be", "called", "within", "self", ".", "__requests", "lock", "." ]
python
train
Blueqat/Blueqat
examples/maxcut_qaoa.py
https://github.com/Blueqat/Blueqat/blob/2ac8592c79e7acf4f385d982af82fbd68dafa5cc/examples/maxcut_qaoa.py#L3-L22
def maxcut_qaoa(n_step, edges, minimizer=None, sampler=None, verbose=True): """Setup QAOA. :param n_step: The number of step of QAOA :param n_sample: The number of sampling time of each measurement in VQE. If None, use calculated ideal value. :param edges: The edges list of the graph. :returns Vqe object """ sampler = sampler or vqe.non_sampling_sampler minimizer = minimizer or vqe.get_scipy_minimizer( method="Powell", options={"ftol": 5.0e-2, "xtol": 5.0e-2, "maxiter": 1000, "disp": True} ) hamiltonian = pauli.I() * 0 for i, j in edges: hamiltonian += pauli.Z(i) * pauli.Z(j) return vqe.Vqe(vqe.QaoaAnsatz(hamiltonian, n_step), minimizer, sampler)
[ "def", "maxcut_qaoa", "(", "n_step", ",", "edges", ",", "minimizer", "=", "None", ",", "sampler", "=", "None", ",", "verbose", "=", "True", ")", ":", "sampler", "=", "sampler", "or", "vqe", ".", "non_sampling_sampler", "minimizer", "=", "minimizer", "or", "vqe", ".", "get_scipy_minimizer", "(", "method", "=", "\"Powell\"", ",", "options", "=", "{", "\"ftol\"", ":", "5.0e-2", ",", "\"xtol\"", ":", "5.0e-2", ",", "\"maxiter\"", ":", "1000", ",", "\"disp\"", ":", "True", "}", ")", "hamiltonian", "=", "pauli", ".", "I", "(", ")", "*", "0", "for", "i", ",", "j", "in", "edges", ":", "hamiltonian", "+=", "pauli", ".", "Z", "(", "i", ")", "*", "pauli", ".", "Z", "(", "j", ")", "return", "vqe", ".", "Vqe", "(", "vqe", ".", "QaoaAnsatz", "(", "hamiltonian", ",", "n_step", ")", ",", "minimizer", ",", "sampler", ")" ]
Setup QAOA. :param n_step: The number of steps of QAOA :param n_sample: The number of sampling times for each measurement in VQE. If None, use the calculated ideal value. :param edges: The edge list of the graph. :returns: Vqe object
[ "Setup", "QAOA", "." ]
python
train
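A hedged usage sketch, assuming blueqat is installed and that this example module is importable as maxcut_qaoa; run() and most_common() follow blueqat's VQE result API:

```python
from maxcut_qaoa import maxcut_qaoa   # hypothetical import of the example module

edges = [(0, 1), (1, 2), (2, 0), (3, 0), (3, 2)]
result = maxcut_qaoa(n_step=2, edges=edges).run()
print(result.most_common(4))          # highest-probability cut bitstrings
```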
sci-bots/serial-device
serial_device/or_event.py
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/or_event.py#L39-L70
def OrEvent(*events): ''' Parameters ---------- events : list(threading.Event) List of events. Returns ------- threading.Event Event that is set when **at least one** of the events in :data:`events` is set. ''' or_event = threading.Event() def changed(): ''' Set ``or_event`` if any of the specified events have been set. ''' bools = [event_i.is_set() for event_i in events] if any(bools): or_event.set() else: or_event.clear() for event_i in events: # Override ``set`` and ``clear`` methods on event to update state of # `or_event` after performing default behaviour. orify(event_i, changed) # Set initial state of `or_event`. changed() return or_event
[ "def", "OrEvent", "(", "*", "events", ")", ":", "or_event", "=", "threading", ".", "Event", "(", ")", "def", "changed", "(", ")", ":", "'''\n Set ``or_event`` if any of the specified events have been set.\n '''", "bools", "=", "[", "event_i", ".", "is_set", "(", ")", "for", "event_i", "in", "events", "]", "if", "any", "(", "bools", ")", ":", "or_event", ".", "set", "(", ")", "else", ":", "or_event", ".", "clear", "(", ")", "for", "event_i", "in", "events", ":", "# Override ``set`` and ``clear`` methods on event to update state of", "# `or_event` after performing default behaviour.", "orify", "(", "event_i", ",", "changed", ")", "# Set initial state of `or_event`.", "changed", "(", ")", "return", "or_event" ]
Parameters ---------- events : list(threading.Event) List of events. Returns ------- threading.Event Event that is set when **at least one** of the events in :data:`events` is set.
[ "Parameters", "----------", "events", ":", "list", "(", "threading", ".", "Event", ")", "List", "of", "events", "." ]
python
train
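OrEvent leans on an orify helper defined earlier in or_event.py and not shown in this record; the sketch below supplies a minimal stand-in for it (an assumption) so the whole composition runs end to end with nothing but the standard library:

```python
import threading

def orify(event, changed):
    # assumed stand-in for or_event.orify: run `changed` after set()/clear()
    base_set, base_clear = event.set, event.clear
    event.set = lambda: (base_set(), changed())
    event.clear = lambda: (base_clear(), changed())

def OrEvent(*events):
    # as in the record above, condensed
    or_event = threading.Event()
    def changed():
        or_event.set() if any(e.is_set() for e in events) else or_event.clear()
    for e in events:
        orify(e, changed)
    changed()
    return or_event

a, b = threading.Event(), threading.Event()
either = OrEvent(a, b)
threading.Timer(0.1, b.set).start()   # set just one of the two events
either.wait(timeout=5)
print(either.is_set())                # True -- released by b alone
```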
materialsproject/pymatgen
pymatgen/analysis/defects/utils.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/defects/utils.py#L1222-L1240
def sort_sites_by_integrated_chg(self, r=0.4): """ Get the average charge density around each local minima in the charge density and store the result in _extrema_df Args: r (float): radius of sphere around each site to evaluate the average """ if self.extrema_type is None: self.get_local_extrema() int_den = [] for isite in self.extrema_coords: mask = self._dist_mat(isite) < r vol_sphere = self.chgcar.structure.volume * (mask.sum()/self.chgcar.ngridpts) chg_in_sphere = np.sum(self.chgcar.data['total'] * mask) / mask.size / vol_sphere int_den.append(chg_in_sphere) self._extrema_df['avg_charge_den'] = int_den self._extrema_df.sort_values(by=['avg_charge_den'], inplace=True) self._extrema_df.reset_index(drop=True, inplace=True)
[ "def", "sort_sites_by_integrated_chg", "(", "self", ",", "r", "=", "0.4", ")", ":", "if", "self", ".", "extrema_type", "is", "None", ":", "self", ".", "get_local_extrema", "(", ")", "int_den", "=", "[", "]", "for", "isite", "in", "self", ".", "extrema_coords", ":", "mask", "=", "self", ".", "_dist_mat", "(", "isite", ")", "<", "r", "vol_sphere", "=", "self", ".", "chgcar", ".", "structure", ".", "volume", "*", "(", "mask", ".", "sum", "(", ")", "/", "self", ".", "chgcar", ".", "ngridpts", ")", "chg_in_sphere", "=", "np", ".", "sum", "(", "self", ".", "chgcar", ".", "data", "[", "'total'", "]", "*", "mask", ")", "/", "mask", ".", "size", "/", "vol_sphere", "int_den", ".", "append", "(", "chg_in_sphere", ")", "self", ".", "_extrema_df", "[", "'avg_charge_den'", "]", "=", "int_den", "self", ".", "_extrema_df", ".", "sort_values", "(", "by", "=", "[", "'avg_charge_den'", "]", ",", "inplace", "=", "True", ")", "self", ".", "_extrema_df", ".", "reset_index", "(", "drop", "=", "True", ",", "inplace", "=", "True", ")" ]
Get the average charge density around each local minimum in the charge density and store the result in _extrema_df Args: r (float): radius of sphere around each site to evaluate the average
[ "Get", "the", "average", "charge", "density", "around", "each", "local", "minima", "in", "the", "charge", "density", "and", "store", "the", "result", "in", "_extrema_df", "Args", ":", "r", "(", "float", ")", ":", "radius", "of", "sphere", "around", "each", "site", "to", "evaluate", "the", "average" ]
python
train
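The heart of the method — mask the grid points inside a sphere of radius r around a site and average the field over them — is easy to see with plain numpy once the pymatgen Chgcar plumbing is swapped for a synthetic periodic grid (an assumption of this sketch):

```python
import numpy as np

n = 20                                        # synthetic n x n x n charge grid
grid = np.random.default_rng(0).random((n, n, n))
frac = np.meshgrid(*[np.arange(n) / n] * 3, indexing="ij")

def avg_in_sphere(site, r=0.4):
    # minimum-image distances on a periodic unit cube
    d = np.stack([f - s for f, s in zip(frac, site)])
    d -= np.round(d)
    mask = np.sqrt((d ** 2).sum(axis=0)) < r
    return grid[mask].mean()

print(avg_in_sphere((0.5, 0.5, 0.5)))         # ~0.5 for uniform random data
```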
nfcpy/nfcpy
src/nfc/clf/device.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/clf/device.py#L496-L526
def send_cmd_recv_rsp(self, target, data, timeout): """Exchange data with a remote Target Sends command *data* to the remote *target* discovered in the most recent call to one of the sense_xxx() methods. Note that *target* becomes invalid with any call to mute(), sense_xxx() or listen_xxx() Arguments: target (nfc.clf.RemoteTarget): The target returned by the last successful call of a sense_xxx() method. data (bytearray): The binary data to send to the remote device. timeout (float): The maximum number of seconds to wait for response data from the remote device. Returns: bytearray: Response data received from the remote device. Raises: nfc.clf.CommunicationError: When no data was received. """ fname = "send_cmd_recv_rsp" cname = self.__class__.__module__ + '.' + self.__class__.__name__ raise NotImplementedError("%s.%s() is required" % (cname, fname))
[ "def", "send_cmd_recv_rsp", "(", "self", ",", "target", ",", "data", ",", "timeout", ")", ":", "fname", "=", "\"send_cmd_recv_rsp\"", "cname", "=", "self", ".", "__class__", ".", "__module__", "+", "'.'", "+", "self", ".", "__class__", ".", "__name__", "raise", "NotImplementedError", "(", "\"%s.%s() is required\"", "%", "(", "cname", ",", "fname", ")", ")" ]
Exchange data with a remote Target Sends command *data* to the remote *target* discovered in the most recent call to one of the sense_xxx() methods. Note that *target* becomes invalid with any call to mute(), sense_xxx() or listen_xxx() Arguments: target (nfc.clf.RemoteTarget): The target returned by the last successful call of a sense_xxx() method. data (bytearray): The binary data to send to the remote device. timeout (float): The maximum number of seconds to wait for response data from the remote device. Returns: bytearray: Response data received from the remote device. Raises: nfc.clf.CommunicationError: When no data was received.
[ "Exchange", "data", "with", "a", "remote", "Target" ]
python
train
openid/python-openid
openid/extensions/ax.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/ax.py#L149-L179
def toTypeURIs(namespace_map, alias_list_s): """Given a namespace mapping and a string containing a comma-separated list of namespace aliases, return a list of type URIs that correspond to those aliases. @param namespace_map: The mapping from namespace URI to alias @type namespace_map: openid.message.NamespaceMap @param alias_list_s: The string containing the comma-separated list of aliases. May also be None for convenience. @type alias_list_s: str or NoneType @returns: The list of namespace URIs that corresponds to the supplied list of aliases. If the string was zero-length or None, an empty list will be returned. @raise KeyError: If an alias is present in the list of aliases but is not present in the namespace map. """ uris = [] if alias_list_s: for alias in alias_list_s.split(','): type_uri = namespace_map.getNamespaceURI(alias) if type_uri is None: raise KeyError( 'No type is defined for attribute name %r' % (alias,)) else: uris.append(type_uri) return uris
[ "def", "toTypeURIs", "(", "namespace_map", ",", "alias_list_s", ")", ":", "uris", "=", "[", "]", "if", "alias_list_s", ":", "for", "alias", "in", "alias_list_s", ".", "split", "(", "','", ")", ":", "type_uri", "=", "namespace_map", ".", "getNamespaceURI", "(", "alias", ")", "if", "type_uri", "is", "None", ":", "raise", "KeyError", "(", "'No type is defined for attribute name %r'", "%", "(", "alias", ",", ")", ")", "else", ":", "uris", ".", "append", "(", "type_uri", ")", "return", "uris" ]
Given a namespace mapping and a string containing a comma-separated list of namespace aliases, return a list of type URIs that correspond to those aliases. @param namespace_map: The mapping from namespace URI to alias @type namespace_map: openid.message.NamespaceMap @param alias_list_s: The string containing the comma-separated list of aliases. May also be None for convenience. @type alias_list_s: str or NoneType @returns: The list of namespace URIs that corresponds to the supplied list of aliases. If the string was zero-length or None, an empty list will be returned. @raise KeyError: If an alias is present in the list of aliases but is not present in the namespace map.
[ "Given", "a", "namespace", "mapping", "and", "a", "string", "containing", "a", "comma", "-", "separated", "list", "of", "namespace", "aliases", "return", "a", "list", "of", "type", "URIs", "that", "correspond", "to", "those", "aliases", "." ]
python
train
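A runnable sketch: FakeNamespaceMap below is a stand-in for openid.message.NamespaceMap, stubbing only the getNamespaceURI lookup the function uses:

```python
def toTypeURIs(namespace_map, alias_list_s):
    # as in the record above
    uris = []
    if alias_list_s:
        for alias in alias_list_s.split(','):
            type_uri = namespace_map.getNamespaceURI(alias)
            if type_uri is None:
                raise KeyError(
                    'No type is defined for attribute name %r' % (alias,))
            uris.append(type_uri)
    return uris

class FakeNamespaceMap:
    # stand-in exposing only the lookup toTypeURIs needs
    def __init__(self, alias_to_uri):
        self._map = alias_to_uri
    def getNamespaceURI(self, alias):
        return self._map.get(alias)

ns = FakeNamespaceMap({
    "email": "http://axschema.org/contact/email",
    "fullname": "http://axschema.org/namePerson",
})
print(toTypeURIs(ns, "email,fullname"))
print(toTypeURIs(ns, None))    # [] -- None is allowed for convenience
# toTypeURIs(ns, "email,bogus") would raise KeyError
```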
eavanvalkenburg/brunt-api
brunt/brunt.py
https://github.com/eavanvalkenburg/brunt-api/blob/c6bae43f56e0fd8f79b7af67d524611dd104dafa/brunt/brunt.py#L29-L72
def request(self, data, request_type: RequestTypes): """ internal request method :param session: session object from the Requests package :param data: internal data of your API call :param request: the type of request, based on the RequestType enum :returns: dict with sessionid for a login and the dict of the things for the other calls, or just success for PUT :raises: raises errors from Requests through the raise_for_status function """ #prepare the URL to send the request to url = data['host'] + data['path'] #fixed header content headers = self._DEFAULT_HEADER if self._sessionid: headers['Cookie'] = "skySSEIONID=" + self._sessionid #prepare the payload and add the length to the header, payload might be empty. if "data" in data: payload = json.dumps(data['data']) headers["Content-Length"] = str(len(data['data'])) else: payload = "" #send the request and capture the response resp = requests.request(request_type.value, url, data=payload, headers=headers) # raise an error if it occured in the Request. resp.raise_for_status() # no error, so set result to success ret = {'result': 'success'} # check if there is something in the response body if len(resp.text) > 0: respjson = resp.json() # if it is a list of things, then set the tag to things if type(respjson) is list: ret['things'] = respjson # otherwise to a single thing. If ID is part of it it is a login response, otherwise a Thing elif "ID" in respjson: ret['login'] = respjson # if it was a login a new cookie was send back, capture the sessionid from it self._sessionid = resp.cookies['skySSEIONID'] ret['cookie'] = resp.cookies else: ret['thing'] = respjson return ret
[ "def", "request", "(", "self", ",", "data", ",", "request_type", ":", "RequestTypes", ")", ":", "#prepare the URL to send the request to", "url", "=", "data", "[", "'host'", "]", "+", "data", "[", "'path'", "]", "#fixed header content", "headers", "=", "self", ".", "_DEFAULT_HEADER", "if", "self", ".", "_sessionid", ":", "headers", "[", "'Cookie'", "]", "=", "\"skySSEIONID=\"", "+", "self", ".", "_sessionid", "#prepare the payload and add the length to the header, payload might be empty.", "if", "\"data\"", "in", "data", ":", "payload", "=", "json", ".", "dumps", "(", "data", "[", "'data'", "]", ")", "headers", "[", "\"Content-Length\"", "]", "=", "str", "(", "len", "(", "data", "[", "'data'", "]", ")", ")", "else", ":", "payload", "=", "\"\"", "#send the request and capture the response", "resp", "=", "requests", ".", "request", "(", "request_type", ".", "value", ",", "url", ",", "data", "=", "payload", ",", "headers", "=", "headers", ")", "# raise an error if it occured in the Request.", "resp", ".", "raise_for_status", "(", ")", "# no error, so set result to success", "ret", "=", "{", "'result'", ":", "'success'", "}", "# check if there is something in the response body", "if", "len", "(", "resp", ".", "text", ")", ">", "0", ":", "respjson", "=", "resp", ".", "json", "(", ")", "# if it is a list of things, then set the tag to things", "if", "type", "(", "respjson", ")", "is", "list", ":", "ret", "[", "'things'", "]", "=", "respjson", "# otherwise to a single thing. If ID is part of it it is a login response, otherwise a Thing", "elif", "\"ID\"", "in", "respjson", ":", "ret", "[", "'login'", "]", "=", "respjson", "# if it was a login a new cookie was send back, capture the sessionid from it", "self", ".", "_sessionid", "=", "resp", ".", "cookies", "[", "'skySSEIONID'", "]", "ret", "[", "'cookie'", "]", "=", "resp", ".", "cookies", "else", ":", "ret", "[", "'thing'", "]", "=", "respjson", "return", "ret" ]
internal request method :param data: internal data of your API call :param request_type: the type of request, based on the RequestTypes enum :returns: dict with the sessionid for a login and the dict of the things for the other calls, or just success for PUT :raises: raises errors from Requests through the raise_for_status function
[ "internal", "request", "method", ":", "param", "session", ":", "session", "object", "from", "the", "Requests", "package", ":", "param", "data", ":", "internal", "data", "of", "your", "API", "call", ":", "param", "request", ":", "the", "type", "of", "request", "based", "on", "the", "RequestType", "enum", ":", "returns", ":", "dict", "with", "sessionid", "for", "a", "login", "and", "the", "dict", "of", "the", "things", "for", "the", "other", "calls", "or", "just", "success", "for", "PUT", ":", "raises", ":", "raises", "errors", "from", "Requests", "through", "the", "raise_for_status", "function" ]
python
train
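A minimal calling sketch for the request() method in the record above; the wrapper class name, host, path and payload are illustrative assumptions, not part of this dataset row:

# Hypothetical usage of request(); KlfApi, the host and the credentials are made up.
from enum import Enum

class RequestTypes(Enum):  # mirrors the enum the method expects
    GET = "GET"
    POST = "POST"
    PUT = "PUT"

# api = KlfApi()  # hypothetical class exposing the request() method above
# ret = api.request(
#     {"host": "http://192.168.0.10", "path": "/api/v1/auth/login",
#      "data": {"password": "secret"}},
#     RequestTypes.POST,
# )
# print(ret["result"])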
gitpython-developers/GitPython
git/remote.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/remote.py#L526-L554
def urls(self):
        """:return: Iterator yielding all configured URL targets on a remote as strings"""
        try:
            remote_details = self.repo.git.remote("get-url", "--all", self.name)
            for line in remote_details.split('\n'):
                yield line
        except GitCommandError as ex:
            ## We are on git < 2.7 (i.e. TravisCI as of Oct-2016),
            #  so the `get-url` command does not exist yet!
            #  see: https://github.com/gitpython-developers/GitPython/pull/528#issuecomment-252976319
            #  and: http://stackoverflow.com/a/32991784/548792
            #
            if 'Unknown subcommand: get-url' in str(ex):
                try:
                    remote_details = self.repo.git.remote("show", self.name)
                    for line in remote_details.split('\n'):
                        if '  Push  URL:' in line:
                            yield line.split(': ')[-1]
                except GitCommandError as ex:
                    if any(msg in str(ex) for msg in ['correct access rights', 'cannot run ssh']):
                        # If ssh is not set up to access this repository, see issue 694
                        result = Git().execute(
                            ['git', 'config', '--get', 'remote.%s.url' % self.name]
                        )
                        yield result
                    else:
                        raise ex
            else:
                raise ex
[ "def", "urls", "(", "self", ")", ":", "try", ":", "remote_details", "=", "self", ".", "repo", ".", "git", ".", "remote", "(", "\"get-url\"", ",", "\"--all\"", ",", "self", ".", "name", ")", "for", "line", "in", "remote_details", ".", "split", "(", "'\\n'", ")", ":", "yield", "line", "except", "GitCommandError", "as", "ex", ":", "## We are on git < 2.7 (i.e. TravisCI as of Oct-2016),", "# so the `get-url` command does not exist yet!", "# see: https://github.com/gitpython-developers/GitPython/pull/528#issuecomment-252976319", "# and: http://stackoverflow.com/a/32991784/548792", "#", "if", "'Unknown subcommand: get-url'", "in", "str", "(", "ex", ")", ":", "try", ":", "remote_details", "=", "self", ".", "repo", ".", "git", ".", "remote", "(", "\"show\"", ",", "self", ".", "name", ")", "for", "line", "in", "remote_details", ".", "split", "(", "'\\n'", ")", ":", "if", "' Push URL:'", "in", "line", ":", "yield", "line", ".", "split", "(", "': '", ")", "[", "-", "1", "]", "except", "GitCommandError", "as", "ex", ":", "if", "any", "(", "msg", "in", "str", "(", "ex", ")", "for", "msg", "in", "[", "'correct access rights'", ",", "'cannot run ssh'", "]", ")", ":", "# If ssh is not set up to access this repository, see issue 694", "result", "=", "Git", "(", ")", ".", "execute", "(", "[", "'git'", ",", "'config'", ",", "'--get'", ",", "'remote.%s.url'", "%", "self", ".", "name", "]", ")", "yield", "result", "else", ":", "raise", "ex", "else", ":", "raise", "ex" ]
:return: Iterator yielding all configured URL targets on a remote as strings
[ ":", "return", ":", "Iterator", "yielding", "all", "configured", "URL", "targets", "on", "a", "remote", "as", "strings" ]
python
train
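A short usage sketch for the urls generator above; it assumes GitPython is installed and the working directory is a git checkout with an "origin" remote:

from git import Repo  # GitPython

repo = Repo(".")  # assumption: cwd is a git repository
for url in repo.remote("origin").urls:  # iterates the generator shown above
    print(url)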
vbwagner/ctypescrypto
ctypescrypto/cms.py
https://github.com/vbwagner/ctypescrypto/blob/33c32904cf5e04901f87f90e2499634b8feecd3e/ctypescrypto/cms.py#L183-L206
def verify(self, store, flags, data=None, certs=None):
        """
        Verifies signature under CMS message using trusted cert store

        @param store -  X509Store object with trusted certs
        @param flags - OR-ed combination of flag constants
        @param data - message data, if message has detached signature
        @param certs - list of certificates to use during verification
                If Flags.NOINTERN is specified, these are the only
                certificates to search for signing certificates
        @returns True if signature valid, False otherwise
        """
        bio = None
        if data is not None:
            bio_obj = Membio(data)
            bio = bio_obj.bio
        if certs is not None and len(certs) > 0:
            certstack_obj = StackOfX509(certs) # keep reference to prevent immediate __del__ call
            certstack = certstack_obj.ptr
        else:
            certstack = None
        res = libcrypto.CMS_verify(self.ptr, certstack, store.store, bio,
                                   None, flags)
        return res > 0
[ "def", "verify", "(", "self", ",", "store", ",", "flags", ",", "data", "=", "None", ",", "certs", "=", "None", ")", ":", "bio", "=", "None", "if", "data", "is", "not", "None", ":", "bio_obj", "=", "Membio", "(", "data", ")", "bio", "=", "bio_obj", ".", "bio", "if", "certs", "is", "not", "None", "and", "len", "(", "certs", ")", ">", "0", ":", "certstack_obj", "=", "StackOfX509", "(", "certs", ")", "# keep reference to prevent immediate __del__ call", "certstack", "=", "certstack_obj", ".", "ptr", "else", ":", "certstack", "=", "None", "res", "=", "libcrypto", ".", "CMS_verify", "(", "self", ".", "ptr", ",", "certstack", ",", "store", ".", "store", ",", "bio", ",", "None", ",", "flags", ")", "return", "res", ">", "0" ]
Verifies signature under CMS message using trusted cert store

        @param store -  X509Store object with trusted certs
        @param flags - OR-ed combination of flag constants
        @param data - message data, if message has detached signature
        @param certs - list of certificates to use during verification
                If Flags.NOINTERN is specified, these are the only
                certificates to search for signing certificates
        @returns True if signature valid, False otherwise
[ "Verifies", "signature", "under", "CMS", "message", "using", "trusted", "cert", "store" ]
python
train
arneb/django-messages
django_messages/templatetags/inbox.py
https://github.com/arneb/django-messages/blob/8e4b8e6660740e6f716ea4a7f4d77221baf166c5/django_messages/templatetags/inbox.py#L19-L42
def do_print_inbox_count(parser, token): """ A templatetag to show the unread-count for a logged in user. Returns the number of unread messages in the user's inbox. Usage:: {% load inbox %} {% inbox_count %} {# or assign the value to a variable: #} {% inbox_count as my_var %} {{ my_var }} """ bits = token.contents.split() if len(bits) > 1: if len(bits) != 3: raise TemplateSyntaxError("inbox_count tag takes either no arguments or exactly two arguments") if bits[1] != 'as': raise TemplateSyntaxError("first argument to inbox_count tag must be 'as'") return InboxOutput(bits[2]) else: return InboxOutput()
[ "def", "do_print_inbox_count", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "contents", ".", "split", "(", ")", "if", "len", "(", "bits", ")", ">", "1", ":", "if", "len", "(", "bits", ")", "!=", "3", ":", "raise", "TemplateSyntaxError", "(", "\"inbox_count tag takes either no arguments or exactly two arguments\"", ")", "if", "bits", "[", "1", "]", "!=", "'as'", ":", "raise", "TemplateSyntaxError", "(", "\"first argument to inbox_count tag must be 'as'\"", ")", "return", "InboxOutput", "(", "bits", "[", "2", "]", ")", "else", ":", "return", "InboxOutput", "(", ")" ]
A templatetag to show the unread-count for a logged in user. Returns the number of unread messages in the user's inbox. Usage:: {% load inbox %} {% inbox_count %} {# or assign the value to a variable: #} {% inbox_count as my_var %} {{ my_var }}
[ "A", "templatetag", "to", "show", "the", "unread", "-", "count", "for", "a", "logged", "in", "user", ".", "Returns", "the", "number", "of", "unread", "messages", "in", "the", "user", "s", "inbox", ".", "Usage", "::" ]
python
train
python-fedex-devs/python-fedex
fedex/services/document_service.py
https://github.com/python-fedex-devs/python-fedex/blob/7ea2ca80c362f5dbbc8d959ab47648c7a4ab24eb/fedex/services/document_service.py#L41-L52
def _prepare_wsdl_objects(self):
        """
        This is the data that will be used to create your document upload request.
        Create the data structure and get it ready for the WSDL request.
        """
        self.UploadDocumentsRequest = self.client.factory.create('UploadDocumentsRequest')
        self.OriginCountryCode = None
        self.DestinationCountryCode = None
        self.Usage = 'ELECTRONIC_TRADE_DOCUMENTS'  #Default Usage
        self.Documents = []
        self.UploadDocumentsRequest.Documents = []
        self.logger.debug(self.UploadDocumentsRequest)
[ "def", "_prepare_wsdl_objects", "(", "self", ")", ":", "self", ".", "UploadDocumentsRequest", "=", "self", ".", "client", ".", "factory", ".", "create", "(", "'UploadDocumentsRequest'", ")", "self", ".", "OriginCountryCode", "=", "None", "self", ".", "DestinationCountryCode", "=", "None", "self", ".", "Usage", "=", "'ELECTRONIC_TRADE_DOCUMENTS'", "#Default Usage", "self", ".", "Documents", "=", "[", "]", "self", ".", "UploadDocumentsRequest", ".", "Documents", "=", "[", "]", "self", ".", "logger", ".", "debug", "(", "self", ".", "UploadDocumentsRequest", ")" ]
This is the data that will be used to create your document upload request.
        Create the data structure and get it ready for the WSDL request.
[ "This", "is", "the", "data", "that", "will", "be", "used", "to", "create", "your", "document", "upload", "request", ".", "Create", "the", "data", "structure", "and", "get", "it", "ready", "for", "the", "WSDL", "request", "." ]
python
train
oanda/v20-python
src/v20/account.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/account.py#L248-L376
def from_dict(data, ctx): """ Instantiate a new Account from a dict (generally from loading a JSON response). The data used to instantiate the Account is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('balance') is not None: data['balance'] = ctx.convert_decimal_number( data.get('balance') ) if data.get('pl') is not None: data['pl'] = ctx.convert_decimal_number( data.get('pl') ) if data.get('resettablePL') is not None: data['resettablePL'] = ctx.convert_decimal_number( data.get('resettablePL') ) if data.get('financing') is not None: data['financing'] = ctx.convert_decimal_number( data.get('financing') ) if data.get('commission') is not None: data['commission'] = ctx.convert_decimal_number( data.get('commission') ) if data.get('guaranteedExecutionFees') is not None: data['guaranteedExecutionFees'] = ctx.convert_decimal_number( data.get('guaranteedExecutionFees') ) if data.get('marginRate') is not None: data['marginRate'] = ctx.convert_decimal_number( data.get('marginRate') ) if data.get('unrealizedPL') is not None: data['unrealizedPL'] = ctx.convert_decimal_number( data.get('unrealizedPL') ) if data.get('NAV') is not None: data['NAV'] = ctx.convert_decimal_number( data.get('NAV') ) if data.get('marginUsed') is not None: data['marginUsed'] = ctx.convert_decimal_number( data.get('marginUsed') ) if data.get('marginAvailable') is not None: data['marginAvailable'] = ctx.convert_decimal_number( data.get('marginAvailable') ) if data.get('positionValue') is not None: data['positionValue'] = ctx.convert_decimal_number( data.get('positionValue') ) if data.get('marginCloseoutUnrealizedPL') is not None: data['marginCloseoutUnrealizedPL'] = ctx.convert_decimal_number( data.get('marginCloseoutUnrealizedPL') ) if data.get('marginCloseoutNAV') is not None: data['marginCloseoutNAV'] = ctx.convert_decimal_number( data.get('marginCloseoutNAV') ) if data.get('marginCloseoutMarginUsed') is not None: data['marginCloseoutMarginUsed'] = ctx.convert_decimal_number( data.get('marginCloseoutMarginUsed') ) if data.get('marginCloseoutPercent') is not None: data['marginCloseoutPercent'] = ctx.convert_decimal_number( data.get('marginCloseoutPercent') ) if data.get('marginCloseoutPositionValue') is not None: data['marginCloseoutPositionValue'] = ctx.convert_decimal_number( data.get('marginCloseoutPositionValue') ) if data.get('withdrawalLimit') is not None: data['withdrawalLimit'] = ctx.convert_decimal_number( data.get('withdrawalLimit') ) if data.get('marginCallMarginUsed') is not None: data['marginCallMarginUsed'] = ctx.convert_decimal_number( data.get('marginCallMarginUsed') ) if data.get('marginCallPercent') is not None: data['marginCallPercent'] = ctx.convert_decimal_number( data.get('marginCallPercent') ) if data.get('trades') is not None: data['trades'] = [ ctx.trade.TradeSummary.from_dict(d, ctx) for d in data.get('trades') ] if data.get('positions') is not None: data['positions'] = [ ctx.position.Position.from_dict(d, ctx) for d in data.get('positions') ] if data.get('orders') is not None: data['orders'] = [ ctx.order.Order.from_dict(d, ctx) for d in data.get('orders') ] return Account(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'balance'", ")", "is", "not", "None", ":", "data", "[", "'balance'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'balance'", ")", ")", "if", "data", ".", "get", "(", "'pl'", ")", "is", "not", "None", ":", "data", "[", "'pl'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'pl'", ")", ")", "if", "data", ".", "get", "(", "'resettablePL'", ")", "is", "not", "None", ":", "data", "[", "'resettablePL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'resettablePL'", ")", ")", "if", "data", ".", "get", "(", "'financing'", ")", "is", "not", "None", ":", "data", "[", "'financing'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'financing'", ")", ")", "if", "data", ".", "get", "(", "'commission'", ")", "is", "not", "None", ":", "data", "[", "'commission'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'commission'", ")", ")", "if", "data", ".", "get", "(", "'guaranteedExecutionFees'", ")", "is", "not", "None", ":", "data", "[", "'guaranteedExecutionFees'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'guaranteedExecutionFees'", ")", ")", "if", "data", ".", "get", "(", "'marginRate'", ")", "is", "not", "None", ":", "data", "[", "'marginRate'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginRate'", ")", ")", "if", "data", ".", "get", "(", "'unrealizedPL'", ")", "is", "not", "None", ":", "data", "[", "'unrealizedPL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'unrealizedPL'", ")", ")", "if", "data", ".", "get", "(", "'NAV'", ")", "is", "not", "None", ":", "data", "[", "'NAV'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'NAV'", ")", ")", "if", "data", ".", "get", "(", "'marginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginAvailable'", ")", "is", "not", "None", ":", "data", "[", "'marginAvailable'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginAvailable'", ")", ")", "if", "data", ".", "get", "(", "'positionValue'", ")", "is", "not", "None", ":", "data", "[", "'positionValue'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'positionValue'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutUnrealizedPL'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutUnrealizedPL'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutUnrealizedPL'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutNAV'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutNAV'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutNAV'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutMarginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutMarginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutMarginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutPercent'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutPercent'", "]", "=", "ctx", ".", 
"convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutPercent'", ")", ")", "if", "data", ".", "get", "(", "'marginCloseoutPositionValue'", ")", "is", "not", "None", ":", "data", "[", "'marginCloseoutPositionValue'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCloseoutPositionValue'", ")", ")", "if", "data", ".", "get", "(", "'withdrawalLimit'", ")", "is", "not", "None", ":", "data", "[", "'withdrawalLimit'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'withdrawalLimit'", ")", ")", "if", "data", ".", "get", "(", "'marginCallMarginUsed'", ")", "is", "not", "None", ":", "data", "[", "'marginCallMarginUsed'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCallMarginUsed'", ")", ")", "if", "data", ".", "get", "(", "'marginCallPercent'", ")", "is", "not", "None", ":", "data", "[", "'marginCallPercent'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'marginCallPercent'", ")", ")", "if", "data", ".", "get", "(", "'trades'", ")", "is", "not", "None", ":", "data", "[", "'trades'", "]", "=", "[", "ctx", ".", "trade", ".", "TradeSummary", ".", "from_dict", "(", "d", ",", "ctx", ")", "for", "d", "in", "data", ".", "get", "(", "'trades'", ")", "]", "if", "data", ".", "get", "(", "'positions'", ")", "is", "not", "None", ":", "data", "[", "'positions'", "]", "=", "[", "ctx", ".", "position", ".", "Position", ".", "from_dict", "(", "d", ",", "ctx", ")", "for", "d", "in", "data", ".", "get", "(", "'positions'", ")", "]", "if", "data", ".", "get", "(", "'orders'", ")", "is", "not", "None", ":", "data", "[", "'orders'", "]", "=", "[", "ctx", ".", "order", ".", "Order", ".", "from_dict", "(", "d", ",", "ctx", ")", "for", "d", "in", "data", ".", "get", "(", "'orders'", ")", "]", "return", "Account", "(", "*", "*", "data", ")" ]
Instantiate a new Account from a dict (generally from loading a JSON response). The data used to instantiate the Account is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
[ "Instantiate", "a", "new", "Account", "from", "a", "dict", "(", "generally", "from", "loading", "a", "JSON", "response", ")", ".", "The", "data", "used", "to", "instantiate", "the", "Account", "is", "a", "shallow", "copy", "of", "the", "dict", "passed", "in", "with", "any", "complex", "child", "types", "instantiated", "appropriately", "." ]
python
train
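The from_dict record above repeats one convert-if-present pattern per numeric field; a generic sketch of that pattern follows (the field names and the Decimal converter are illustrative, not the OANDA schema):

from decimal import Decimal

def convert_fields(data, fields, convert=Decimal):
    data = data.copy()  # shallow copy, as in from_dict above
    for name in fields:
        if data.get(name) is not None:  # leave absent/None fields untouched
            data[name] = convert(data[name])
    return data

print(convert_fields({"balance": "100.5", "pl": None}, ["balance", "pl"]))
# -> {'balance': Decimal('100.5'), 'pl': None}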
mishbahr/django-connected
connected_accounts/providers/base.py
https://github.com/mishbahr/django-connected/blob/7ec1f042786fef2eb6c00b1479ce47c90341ba81/connected_accounts/providers/base.py#L88-L94
def get_redirect_url(self, request, callback, parameters=None): """Build authentication redirect url.""" args = self.get_redirect_args(request, callback=callback) additional = parameters or {} args.update(additional) params = urlencode(args) return '{0}?{1}'.format(self.authorization_url, params)
[ "def", "get_redirect_url", "(", "self", ",", "request", ",", "callback", ",", "parameters", "=", "None", ")", ":", "args", "=", "self", ".", "get_redirect_args", "(", "request", ",", "callback", "=", "callback", ")", "additional", "=", "parameters", "or", "{", "}", "args", ".", "update", "(", "additional", ")", "params", "=", "urlencode", "(", "args", ")", "return", "'{0}?{1}'", ".", "format", "(", "self", ".", "authorization_url", ",", "params", ")" ]
Build authentication redirect url.
[ "Build", "authentication", "redirect", "url", "." ]
python
train
Scifabric/pybossa-client
pbclient/__init__.py
https://github.com/Scifabric/pybossa-client/blob/998d7cb0207ff5030dc800f0c2577c5692316c2c/pbclient/__init__.py#L674-L689
def update_result(result): """Update a result for a given result ID. :param result: PYBOSSA result """ try: result_id = result.id result = _forbidden_attributes(result) res = _pybossa_req('put', 'result', result_id, payload=result.data) if res.get('id'): return Result(res) else: return res except: # pragma: no cover raise
[ "def", "update_result", "(", "result", ")", ":", "try", ":", "result_id", "=", "result", ".", "id", "result", "=", "_forbidden_attributes", "(", "result", ")", "res", "=", "_pybossa_req", "(", "'put'", ",", "'result'", ",", "result_id", ",", "payload", "=", "result", ".", "data", ")", "if", "res", ".", "get", "(", "'id'", ")", ":", "return", "Result", "(", "res", ")", "else", ":", "return", "res", "except", ":", "# pragma: no cover", "raise" ]
Update a result for a given result ID. :param result: PYBOSSA result
[ "Update", "a", "result", "for", "a", "given", "result", "ID", "." ]
python
valid
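A hedged call sequence for update_result; it requires a reachable PYBOSSA server, and the endpoint, key and project id below are placeholders:

# import pbclient
# pbclient.set('endpoint', 'https://pybossa.example.com')  # placeholder URL
# pbclient.set('api_key', 'YOUR-KEY')                      # placeholder key
# result = pbclient.find_results(project_id=1)[0]
# result.info = {'answer': 42}
# updated = pbclient.update_result(result)  # the function shown above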
mdgoldberg/sportsref
sportsref/nfl/teams.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L297-L308
def def_coordinator(self, year): """Returns the coach ID for the team's DC in a given year. :year: An int representing the year. :returns: A string containing the coach ID of the DC. """ try: dc_anchor = self._year_info_pq(year, 'Defensive Coordinator')('a') if dc_anchor: return dc_anchor.attr['href'] except ValueError: return None
[ "def", "def_coordinator", "(", "self", ",", "year", ")", ":", "try", ":", "dc_anchor", "=", "self", ".", "_year_info_pq", "(", "year", ",", "'Defensive Coordinator'", ")", "(", "'a'", ")", "if", "dc_anchor", ":", "return", "dc_anchor", ".", "attr", "[", "'href'", "]", "except", "ValueError", ":", "return", "None" ]
Returns the coach ID for the team's DC in a given year. :year: An int representing the year. :returns: A string containing the coach ID of the DC.
[ "Returns", "the", "coach", "ID", "for", "the", "team", "s", "DC", "in", "a", "given", "year", "." ]
python
test
KelSolaar/Umbra
umbra/ui/widgets/basic_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/basic_QPlainTextEdit.py#L799-L849
def replace(self, pattern, replacement_pattern, **kwargs):
        """
        Replaces current given pattern occurrence in the document with the replacement pattern.

        Usage::

            >>> script_editor = Umbra.components_manager.get_interface("factory.script_editor")
            True
            >>> codeEditor = script_editor.get_current_editor()
            True
            >>> codeEditor.replace(search_pattern, replacement_pattern, case_sensitive=True, whole_word=True, \
regular_expressions=True, backward_search=True, wrap_around=True)
            True

        :param pattern: Pattern to replace.
        :type pattern: unicode
        :param replacement_pattern: Replacement pattern.
        :type replacement_pattern: unicode
        :param \*\*kwargs: Format settings.
        :type \*\*kwargs: dict
        :return: Method success.
        :rtype: bool
        """

        settings = foundations.data_structures.Structure(**{"case_sensitive": False,
                                                            "regular_expressions": False})
        settings.update(kwargs)

        selected_text = self.get_selected_text()
        regex = "^{0}$".format(
            pattern if settings.regular_expressions else re.escape(foundations.strings.to_string(pattern)))
        flags = int() if settings.case_sensitive else re.IGNORECASE
        if not selected_text or not re.search(regex, selected_text, flags=flags):
            self.search(pattern, **kwargs)
            return False

        cursor = self.textCursor()
        metrics = self.get_selected_text_metrics()
        if cursor.isNull():
            return False

        if not cursor.hasSelection():
            return False

        cursor.insertText(replacement_pattern)

        self.patterns_replaced.emit([metrics])

        self.search(pattern, **kwargs)

        return True
[ "def", "replace", "(", "self", ",", "pattern", ",", "replacement_pattern", ",", "*", "*", "kwargs", ")", ":", "settings", "=", "foundations", ".", "data_structures", ".", "Structure", "(", "*", "*", "{", "\"case_sensitive\"", ":", "False", ",", "\"regular_expressions\"", ":", "False", "}", ")", "settings", ".", "update", "(", "kwargs", ")", "selected_text", "=", "self", ".", "get_selected_text", "(", ")", "regex", "=", "\"^{0}$\"", ".", "format", "(", "pattern", "if", "settings", ".", "regular_expressions", "else", "re", ".", "escape", "(", "foundations", ".", "strings", ".", "to_string", "(", "pattern", ")", ")", ")", "flags", "=", "int", "(", ")", "if", "settings", ".", "case_sensitive", "else", "re", ".", "IGNORECASE", "if", "not", "selected_text", "or", "not", "re", ".", "search", "(", "regex", ",", "selected_text", ",", "flags", "=", "flags", ")", ":", "self", ".", "search", "(", "pattern", ",", "*", "*", "kwargs", ")", "return", "False", "cursor", "=", "self", ".", "textCursor", "(", ")", "metrics", "=", "self", ".", "get_selected_text_metrics", "(", ")", "if", "cursor", ".", "isNull", "(", ")", ":", "return", "False", "if", "not", "cursor", ".", "hasSelection", "(", ")", ":", "return", "False", "cursor", ".", "insertText", "(", "replacement_pattern", ")", "self", ".", "patterns_replaced", ".", "emit", "(", "[", "metrics", "]", ")", "self", ".", "search", "(", "pattern", ",", "*", "*", "kwargs", ")", "return", "True" ]
Replaces current given pattern occurrence in the document with the replacement pattern.

        Usage::

            >>> script_editor = Umbra.components_manager.get_interface("factory.script_editor")
            True
            >>> codeEditor = script_editor.get_current_editor()
            True
            >>> codeEditor.replace(search_pattern, replacement_pattern, case_sensitive=True, whole_word=True, \
regular_expressions=True, backward_search=True, wrap_around=True)
            True

        :param pattern: Pattern to replace.
        :type pattern: unicode
        :param replacement_pattern: Replacement pattern.
        :type replacement_pattern: unicode
        :param \*\*kwargs: Format settings.
        :type \*\*kwargs: dict
        :return: Method success.
        :rtype: bool
[ "Replaces", "current", "given", "pattern", "occurrence", "in", "the", "document", "with", "the", "replacement", "pattern", "." ]
python
train
limix/limix-core
limix_core/mean/linear.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L567-L572
def getParams(self): """ get params """ rv = np.array([]) if self.n_terms>0: rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)]) return rv
[ "def", "getParams", "(", "self", ")", ":", "rv", "=", "np", ".", "array", "(", "[", "]", ")", "if", "self", ".", "n_terms", ">", "0", ":", "rv", "=", "np", ".", "concatenate", "(", "[", "np", ".", "reshape", "(", "self", ".", "B", "[", "term_i", "]", ",", "self", ".", "B", "[", "term_i", "]", ".", "size", ",", "order", "=", "'F'", ")", "for", "term_i", "in", "range", "(", "self", ".", "n_terms", ")", "]", ")", "return", "rv" ]
get params
[ "get", "params" ]
python
train
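What the order='F' reshape in getParams does, shown standalone (numpy only, runnable as-is):

import numpy as np

B = np.array([[1, 2],
              [3, 4]])
# column-major flattening, as used when concatenating each B term above
print(np.reshape(B, B.size, order='F'))  # -> [1 3 2 4]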
ambitioninc/rabbitmq-admin
rabbitmq_admin/api.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L32-L45
def get_node(self, name, memory=False, binary=False): """ An individual node in the RabbitMQ cluster. Set "memory=true" to get memory statistics, and "binary=true" to get a breakdown of binary memory use (may be expensive if there are many small binaries in the system). """ return self._api_get( url='/api/nodes/{0}'.format(name), params=dict( binary=binary, memory=memory, ), )
[ "def", "get_node", "(", "self", ",", "name", ",", "memory", "=", "False", ",", "binary", "=", "False", ")", ":", "return", "self", ".", "_api_get", "(", "url", "=", "'/api/nodes/{0}'", ".", "format", "(", "name", ")", ",", "params", "=", "dict", "(", "binary", "=", "binary", ",", "memory", "=", "memory", ",", ")", ",", ")" ]
An individual node in the RabbitMQ cluster. Set "memory=true" to get memory statistics, and "binary=true" to get a breakdown of binary memory use (may be expensive if there are many small binaries in the system).
[ "An", "individual", "node", "in", "the", "RabbitMQ", "cluster", ".", "Set", "memory", "=", "true", "to", "get", "memory", "statistics", "and", "binary", "=", "true", "to", "get", "a", "breakdown", "of", "binary", "memory", "use", "(", "may", "be", "expensive", "if", "there", "are", "many", "small", "binaries", "in", "the", "system", ")", "." ]
python
train
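A usage sketch for get_node; the management-plugin URL, credentials and node name below are placeholders, and rabbitmq_admin is assumed installed:

# from rabbitmq_admin import AdminAPI
# api = AdminAPI(url='http://localhost:15672', auth=('guest', 'guest'))
# node = api.get_node('rabbit@localhost', memory=True)
# print(node.get('mem_used'))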
google/grr
grr/server/grr_response_server/gui/wsgiapp.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/wsgiapp.py#L65-L70
def StoreCSRFCookie(user, response):
  """Inserts a CSRF cookie for the given user into the response."""
  csrf_token = GenerateCSRFToken(user, None)
  response.set_cookie(
      "csrftoken", csrf_token, max_age=CSRF_TOKEN_DURATION.seconds)
[ "def", "StoreCSRFCookie", "(", "user", ",", "response", ")", ":", "csrf_token", "=", "GenerateCSRFToken", "(", "user", ",", "None", ")", "response", ".", "set_cookie", "(", "\"csrftoken\"", ",", "csrf_token", ",", "max_age", "=", "CSRF_TOKEN_DURATION", ".", "seconds", ")" ]
Inserts a CSRF cookie for the given user into the response.
[ "Inserts", "a", "CSRF", "cookie", "for", "the", "given", "user", "into", "the", "response", "." ]
python
train
edx/edx-enterprise
integrated_channels/integrated_channel/management/commands/transmit_learner_data.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/integrated_channel/management/commands/transmit_learner_data.py#L40-L53
def handle(self, *args, **options): """ Transmit the learner data for the EnterpriseCustomer(s) to the active integration channels. """ # Ensure that we were given an api_user name, and that User exists. api_username = options['api_user'] try: User.objects.get(username=api_username) except User.DoesNotExist: raise CommandError(_('A user with the username {username} was not found.').format(username=api_username)) # Transmit the learner data to each integrated channel for integrated_channel in self.get_integrated_channels(options): transmit_learner_data.delay(api_username, integrated_channel.channel_code(), integrated_channel.pk)
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "# Ensure that we were given an api_user name, and that User exists.", "api_username", "=", "options", "[", "'api_user'", "]", "try", ":", "User", ".", "objects", ".", "get", "(", "username", "=", "api_username", ")", "except", "User", ".", "DoesNotExist", ":", "raise", "CommandError", "(", "_", "(", "'A user with the username {username} was not found.'", ")", ".", "format", "(", "username", "=", "api_username", ")", ")", "# Transmit the learner data to each integrated channel", "for", "integrated_channel", "in", "self", ".", "get_integrated_channels", "(", "options", ")", ":", "transmit_learner_data", ".", "delay", "(", "api_username", ",", "integrated_channel", ".", "channel_code", "(", ")", ",", "integrated_channel", ".", "pk", ")" ]
Transmit the learner data for the EnterpriseCustomer(s) to the active integration channels.
[ "Transmit", "the", "learner", "data", "for", "the", "EnterpriseCustomer", "(", "s", ")", "to", "the", "active", "integration", "channels", "." ]
python
valid
Azure/azure-sdk-for-python
azure-keyvault/azure/keyvault/http_bearer_challenge_cache/__init__.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-keyvault/azure/keyvault/http_bearer_challenge_cache/__init__.py#L16-L31
def get_challenge_for_url(url): """ Gets the challenge for the cached URL. :param url: the URL the challenge is cached for. :rtype: HttpBearerChallenge """ if not url: raise ValueError('URL cannot be None') url = parse.urlparse(url) _lock.acquire() val = _cache.get(url.netloc) _lock.release() return val
[ "def", "get_challenge_for_url", "(", "url", ")", ":", "if", "not", "url", ":", "raise", "ValueError", "(", "'URL cannot be None'", ")", "url", "=", "parse", ".", "urlparse", "(", "url", ")", "_lock", ".", "acquire", "(", ")", "val", "=", "_cache", ".", "get", "(", "url", ".", "netloc", ")", "_lock", ".", "release", "(", ")", "return", "val" ]
Gets the challenge for the cached URL. :param url: the URL the challenge is cached for. :rtype: HttpBearerChallenge
[ "Gets", "the", "challenge", "for", "the", "cached", "URL", ".", ":", "param", "url", ":", "the", "URL", "the", "challenge", "is", "cached", "for", ".", ":", "rtype", ":", "HttpBearerChallenge" ]
python
test
cirruscluster/cirruscluster
cirruscluster/ami/builder.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ami/builder.py#L53-L61
def GetAmi(ec2, ami_spec):
  """ Get the boto ami object given an AmiSpecification object. """
  images = ec2.get_all_images(owners=[ami_spec.owner_id])
  requested_image = None
  for image in images:
    if image.name == ami_spec.ami_name:
      requested_image = image
      break
  return requested_image
[ "def", "GetAmi", "(", "ec2", ",", "ami_spec", ")", ":", "images", "=", "ec2", ".", "get_all_images", "(", "owners", "=", "[", "ami_spec", ".", "owner_id", "]", ")", "requested_image", "=", "None", "for", "image", "in", "images", ":", "if", "image", ".", "name", "==", "ami_spec", ".", "ami_name", ":", "requested_image", "=", "image", "break", "return", "requested_image" ]
Get the boto ami object given an AmiSpecification object.
[ "Get", "the", "boto", "ami", "object", "given", "an", "AmiSpecification", "object", "." ]
python
train
jsommers/switchyard
switchyard/lib/openflow/openflow10.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/openflow/openflow10.py#L1212-L1226
def _unpack_actions(raw): ''' deserialize 1 or more actions; return a list of Action* objects ''' actions = [] while len(raw) > 0: atype, alen = struct.unpack('!HH', raw[:4]) atype = OpenflowActionType(atype) action = _ActionClassMap.get(atype)() action.from_bytes(raw[:alen]) raw = raw[alen:] actions.append(action) return actions
[ "def", "_unpack_actions", "(", "raw", ")", ":", "actions", "=", "[", "]", "while", "len", "(", "raw", ")", ">", "0", ":", "atype", ",", "alen", "=", "struct", ".", "unpack", "(", "'!HH'", ",", "raw", "[", ":", "4", "]", ")", "atype", "=", "OpenflowActionType", "(", "atype", ")", "action", "=", "_ActionClassMap", ".", "get", "(", "atype", ")", "(", ")", "action", ".", "from_bytes", "(", "raw", "[", ":", "alen", "]", ")", "raw", "=", "raw", "[", "alen", ":", "]", "actions", ".", "append", "(", "action", ")", "return", "actions" ]
deserialize 1 or more actions; return a list of Action* objects
[ "deserialize", "1", "or", "more", "actions", ";", "return", "a", "list", "of", "Action", "*", "objects" ]
python
train
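The loop in _unpack_actions is a standard length-prefixed (type/length) parse; here is a self-contained demo of the same pattern, runnable as-is:

import struct

def unpack_tlv(raw):
    records = []
    while len(raw) > 0:
        rtype, rlen = struct.unpack('!HH', raw[:4])  # 16-bit type, 16-bit total length
        records.append((rtype, raw[4:rlen]))         # payload after the 4-byte header
        raw = raw[rlen:]                             # advance by the declared length
    return records

buf = struct.pack('!HH2s', 0, 6, b'ab') + struct.pack('!HH', 1, 4)
print(unpack_tlv(buf))  # -> [(0, b'ab'), (1, b'')]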
madedotcom/photon-pump
photonpump/connection.py
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L788-L840
async def get_all(
        self,
        direction: msg.StreamDirection = msg.StreamDirection.Forward,
        from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
        max_count: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        correlation_id: uuid.UUID = None,
    ):
        """
        Read a range of events from the whole database.

        Args:
            direction (optional): Controls whether to read events forward or
                backward. Defaults to Forward.
            from_position (optional): The position to read from.
                Defaults to the beginning of the stream when direction is forward
                and the end of the stream if direction is backward.
            max_count (optional): The maximum number of events to return.
            resolve_links (optional): True if eventstore should
                automatically resolve Link Events, otherwise False.
            require_master (optional): True if this command must be
                sent directly to the master node, otherwise False.
            correlation_id (optional): A unique identifier for this
                command.

        Examples:
            Read 5 events

            >>> async for event in conn.get_all(max_count=5):
            >>>     print(event)

            Read 10 most recent events in reverse order

            >>> async for event in conn.get_all(
                    max_count=10,
                    direction=StreamDirection.Backward
                ):
            >>>     print(event)
        """
        correlation_id = correlation_id
        cmd = convo.ReadAllEvents(
            msg.Position.for_direction(direction, from_position),
            max_count,
            resolve_links,
            require_master,
            direction=direction,
            credentials=self.credential,
        )
        result = await self.dispatcher.start_conversation(cmd)

        return await result
[ "async", "def", "get_all", "(", "self", ",", "direction", ":", "msg", ".", "StreamDirection", "=", "msg", ".", "StreamDirection", ".", "Forward", ",", "from_position", ":", "Optional", "[", "Union", "[", "msg", ".", "Position", ",", "msg", ".", "_PositionSentinel", "]", "]", "=", "None", ",", "max_count", ":", "int", "=", "100", ",", "resolve_links", ":", "bool", "=", "True", ",", "require_master", ":", "bool", "=", "False", ",", "correlation_id", ":", "uuid", ".", "UUID", "=", "None", ",", ")", ":", "correlation_id", "=", "correlation_id", "cmd", "=", "convo", ".", "ReadAllEvents", "(", "msg", ".", "Position", ".", "for_direction", "(", "direction", ",", "from_position", ")", ",", "max_count", ",", "resolve_links", ",", "require_master", ",", "direction", "=", "direction", ",", "credentials", "=", "self", ".", "credential", ",", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "return", "await", "result" ]
Read a range of events from the whole database.

        Args:
            direction (optional): Controls whether to read events forward or
                backward. Defaults to Forward.
            from_position (optional): The position to read from.
                Defaults to the beginning of the stream when direction is forward
                and the end of the stream if direction is backward.
            max_count (optional): The maximum number of events to return.
            resolve_links (optional): True if eventstore should
                automatically resolve Link Events, otherwise False.
            require_master (optional): True if this command must be
                sent directly to the master node, otherwise False.
            correlation_id (optional): A unique identifier for this
                command.

        Examples:
            Read 5 events

            >>> async for event in conn.get_all(max_count=5):
            >>>     print(event)

            Read 10 most recent events in reverse order

            >>> async for event in conn.get_all(
                    max_count=10,
                    direction=StreamDirection.Backward
                ):
            >>>     print(event)
[ "Read", "a", "range", "of", "events", "from", "the", "whole", "database", "." ]
python
train
welchbj/sublemon
demos/from_the_readme.py
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/demos/from_the_readme.py#L23-L29
async def one(s: Sublemon): """Spin up some subprocesses, sleep, and echo a message for this coro.""" shell_cmds = [ 'sleep 1 && echo subprocess 1 in coroutine one', 'sleep 1 && echo subprocess 2 in coroutine one'] async for line in s.iter_lines(*shell_cmds): print(line)
[ "async", "def", "one", "(", "s", ":", "Sublemon", ")", ":", "shell_cmds", "=", "[", "'sleep 1 && echo subprocess 1 in coroutine one'", ",", "'sleep 1 && echo subprocess 2 in coroutine one'", "]", "async", "for", "line", "in", "s", ".", "iter_lines", "(", "*", "shell_cmds", ")", ":", "print", "(", "line", ")" ]
Spin up some subprocesses, sleep, and echo a message for this coro.
[ "Spin", "up", "some", "subprocesses", "sleep", "and", "echo", "a", "message", "for", "this", "coro", "." ]
python
train
Raynes/quarantine
quarantine/cdc.py
https://github.com/Raynes/quarantine/blob/742a318fcb7d34dbdf4fac388daff03a36872d8b/quarantine/cdc.py#L32-L36
def list_exes(self):
        """List the executables installed by this project."""
        return [path.join(self.env_bin, f)
                for f in os.listdir(self.env_bin)]
[ "def", "list_exes", "(", "self", ")", ":", "return", "[", "path", ".", "join", "(", "self", ".", "env_bin", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "self", ".", "env_bin", ")", "]" ]
List the executables installed by this project.
[ "List", "the", "executables", "installed", "by", "this", "project", "." ]
python
train
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1290-L1298
def is_running(self, port): """Return True if a host on a specific port is running.""" try: con = self.client('localhost:%s' % port) con.admin.command('ping') return True except (AutoReconnect, ConnectionFailure, OperationFailure): # Catch OperationFailure to work around SERVER-31916. return False
[ "def", "is_running", "(", "self", ",", "port", ")", ":", "try", ":", "con", "=", "self", ".", "client", "(", "'localhost:%s'", "%", "port", ")", "con", ".", "admin", ".", "command", "(", "'ping'", ")", "return", "True", "except", "(", "AutoReconnect", ",", "ConnectionFailure", ",", "OperationFailure", ")", ":", "# Catch OperationFailure to work around SERVER-31916.", "return", "False" ]
Return True if a host on a specific port is running.
[ "Return", "True", "if", "a", "host", "on", "a", "specific", "port", "is", "running", "." ]
python
train
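An equivalent standalone port check (pymongo assumed installed; the host, port and timeout values are illustrative):

# from pymongo import MongoClient
# from pymongo.errors import AutoReconnect, ConnectionFailure, OperationFailure
# try:
#     client = MongoClient('localhost', 27017, serverSelectionTimeoutMS=1000)
#     client.admin.command('ping')  # same probe as is_running above
#     print('running')
# except (AutoReconnect, ConnectionFailure, OperationFailure):
#     print('not running')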
bpannier/simpletr64
simpletr64/devicetr64.py
https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/devicetr64.py#L644-L668
def _loadDeviceDefinitions(self, urlOfXMLDefinition, xml):
        """Internal call to parse the XML of the device definition.

        :param urlOfXMLDefinition: the URL to the XML device definitions
        :param xml: the XML content to parse
        """
        # extract the base path of the given XML to make sure any relative URL later will be created correctly
        url = urlparse(urlOfXMLDefinition)
        baseURIPath = url.path.rpartition('/')[0] + "/"

        try:
            root = ET.fromstring(xml)
        except Exception as e:
            raise ValueError("Cannot parse CPE definitions '" + urlOfXMLDefinition + "': " + str(e))

        self.__deviceServiceDefinitions = {}
        self.__deviceSCPD = {}
        self.__deviceInformations = {'rootURL': urlOfXMLDefinition}
        self.__deviceUnknownKeys = {}
        self.__deviceXMLInitialized = False

        # iterate through all the information
        self._iterateToFindSCPDElements(root, baseURIPath)
        self.__deviceXMLInitialized = True
[ "def", "_loadDeviceDefinitions", "(", "self", ",", "urlOfXMLDefinition", ",", "xml", ")", ":", "# extract the base path of the given XML to make sure any relative URL later will be created correctly", "url", "=", "urlparse", "(", "urlOfXMLDefinition", ")", "baseURIPath", "=", "url", ".", "path", ".", "rpartition", "(", "'/'", ")", "[", "0", "]", "+", "\"/\"", "try", ":", "root", "=", "ET", ".", "fromstring", "(", "xml", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Cannot parse CPE definitions '\"", "+", "urlOfXMLDefinition", "+", "\"': \"", "+", "str", "(", "e", ")", ")", "self", ".", "__deviceServiceDefinitions", "=", "{", "}", "self", ".", "__deviceSCPD", "=", "{", "}", "self", ".", "__deviceInformations", "=", "{", "'rootURL'", ":", "urlOfXMLDefinition", "}", "self", ".", "__deviceUnknownKeys", "=", "{", "}", "self", ".", "__deviceXMLInitialized", "=", "False", "# iterate through all the information", "self", ".", "_iterateToFindSCPDElements", "(", "root", ",", "baseURIPath", ")", "self", ".", "__deviceXMLInitialized", "=", "True" ]
Internal call to parse the XML of the device definition.

        :param urlOfXMLDefinition: the URL to the XML device definitions
        :param xml: the XML content to parse
[ "Internal", "call", "to", "parse", "the", "XML", "of", "the", "device", "definition", "." ]
python
train
fastai/fastai
fastai/callback.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L364-L367
def annealing_cos(start:Number, end:Number, pct:float)->Number: "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0." cos_out = np.cos(np.pi * pct) + 1 return end + (start-end)/2 * cos_out
[ "def", "annealing_cos", "(", "start", ":", "Number", ",", "end", ":", "Number", ",", "pct", ":", "float", ")", "->", "Number", ":", "cos_out", "=", "np", ".", "cos", "(", "np", ".", "pi", "*", "pct", ")", "+", "1", "return", "end", "+", "(", "start", "-", "end", ")", "/", "2", "*", "cos_out" ]
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
[ "Cosine", "anneal", "from", "start", "to", "end", "as", "pct", "goes", "from", "0", ".", "0", "to", "1", ".", "0", "." ]
python
train
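A standalone check of the cosine schedule above (numpy only, runnable as-is): at pct=0 it returns start, at pct=0.5 the midpoint, and at pct=1 end.

import numpy as np

def annealing_cos(start, end, pct):
    cos_out = np.cos(np.pi * pct) + 1       # 2 at pct=0, 0 at pct=1
    return end + (start - end) / 2 * cos_out

for pct in (0.0, 0.5, 1.0):
    print(pct, annealing_cos(1.0, 0.0, pct))  # -> 1.0, 0.5, 0.0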
napalm-automation/napalm-logs
napalm_logs/device.py
https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/device.py#L140-L186
def _parse(self, msg_dict): ''' Parse a syslog message and check what OpenConfig object should be generated. ''' error_present = False # log.debug('Matching the message:') # log.debug(msg_dict) for message in self.compiled_messages: # log.debug('Matching using:') # log.debug(message) match_on = message['match_on'] if match_on not in msg_dict: # log.debug('%s is not a valid key in the partially parsed dict', match_on) continue if message['tag'] != msg_dict[match_on]: continue if '__python_fun__' in message: return { 'model': message['model'], 'error': message['error'], '__python_fun__': message['__python_fun__'] } error_present = True match = message['line'].search(msg_dict['message']) if not match: continue positions = message.get('positions', {}) values = message.get('values') ret = { 'model': message['model'], 'mapping': message['mapping'], 'replace': message['replace'], 'error': message['error'] } for key in values.keys(): # Check if the value needs to be replaced if key in message['replace']: result = napalm_logs.utils.cast(match.group(positions.get(key)), message['replace'][key]) else: result = match.group(positions.get(key)) ret[key] = result return ret if error_present is True: log.info('Configured regex did not match for os: %s tag %s', self._name, msg_dict.get('tag', '')) else: log.info('Syslog message not configured for os: %s tag %s', self._name, msg_dict.get('tag', ''))
[ "def", "_parse", "(", "self", ",", "msg_dict", ")", ":", "error_present", "=", "False", "# log.debug('Matching the message:')", "# log.debug(msg_dict)", "for", "message", "in", "self", ".", "compiled_messages", ":", "# log.debug('Matching using:')", "# log.debug(message)", "match_on", "=", "message", "[", "'match_on'", "]", "if", "match_on", "not", "in", "msg_dict", ":", "# log.debug('%s is not a valid key in the partially parsed dict', match_on)", "continue", "if", "message", "[", "'tag'", "]", "!=", "msg_dict", "[", "match_on", "]", ":", "continue", "if", "'__python_fun__'", "in", "message", ":", "return", "{", "'model'", ":", "message", "[", "'model'", "]", ",", "'error'", ":", "message", "[", "'error'", "]", ",", "'__python_fun__'", ":", "message", "[", "'__python_fun__'", "]", "}", "error_present", "=", "True", "match", "=", "message", "[", "'line'", "]", ".", "search", "(", "msg_dict", "[", "'message'", "]", ")", "if", "not", "match", ":", "continue", "positions", "=", "message", ".", "get", "(", "'positions'", ",", "{", "}", ")", "values", "=", "message", ".", "get", "(", "'values'", ")", "ret", "=", "{", "'model'", ":", "message", "[", "'model'", "]", ",", "'mapping'", ":", "message", "[", "'mapping'", "]", ",", "'replace'", ":", "message", "[", "'replace'", "]", ",", "'error'", ":", "message", "[", "'error'", "]", "}", "for", "key", "in", "values", ".", "keys", "(", ")", ":", "# Check if the value needs to be replaced", "if", "key", "in", "message", "[", "'replace'", "]", ":", "result", "=", "napalm_logs", ".", "utils", ".", "cast", "(", "match", ".", "group", "(", "positions", ".", "get", "(", "key", ")", ")", ",", "message", "[", "'replace'", "]", "[", "key", "]", ")", "else", ":", "result", "=", "match", ".", "group", "(", "positions", ".", "get", "(", "key", ")", ")", "ret", "[", "key", "]", "=", "result", "return", "ret", "if", "error_present", "is", "True", ":", "log", ".", "info", "(", "'Configured regex did not match for os: %s tag %s'", ",", "self", ".", "_name", ",", "msg_dict", ".", "get", "(", "'tag'", ",", "''", ")", ")", "else", ":", "log", ".", "info", "(", "'Syslog message not configured for os: %s tag %s'", ",", "self", ".", "_name", ",", "msg_dict", ".", "get", "(", "'tag'", ",", "''", ")", ")" ]
Parse a syslog message and check what OpenConfig object should be generated.
[ "Parse", "a", "syslog", "message", "and", "check", "what", "OpenConfig", "object", "should", "be", "generated", "." ]
python
train
cds-astro/mocpy
mocpy/abstract_moc.py
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/abstract_moc.py#L527-L564
def serialize(self, format='fits', optional_kw_dict=None): """ Serializes the MOC into a specific format. Possible formats are FITS, JSON and STRING Parameters ---------- format : str 'fits' by default. The other possible choice is 'json' or 'str'. optional_kw_dict : dict Optional keywords arguments added to the FITS header. Only used if ``format`` equals to 'fits'. Returns ------- result : `astropy.io.fits.HDUList` or JSON dictionary The result of the serialization. """ formats = ('fits', 'json', 'str') if format not in formats: raise ValueError('format should be one of %s' % (str(formats))) uniq_l = [] for uniq in self._uniq_pixels_iterator(): uniq_l.append(uniq) uniq = np.array(uniq_l) if format == 'fits': result = self._to_fits(uniq=uniq, optional_kw_dict=optional_kw_dict) elif format == 'str': result = self.__class__._to_str(uniq=uniq) else: # json format serialization result = self.__class__._to_json(uniq=uniq) return result
[ "def", "serialize", "(", "self", ",", "format", "=", "'fits'", ",", "optional_kw_dict", "=", "None", ")", ":", "formats", "=", "(", "'fits'", ",", "'json'", ",", "'str'", ")", "if", "format", "not", "in", "formats", ":", "raise", "ValueError", "(", "'format should be one of %s'", "%", "(", "str", "(", "formats", ")", ")", ")", "uniq_l", "=", "[", "]", "for", "uniq", "in", "self", ".", "_uniq_pixels_iterator", "(", ")", ":", "uniq_l", ".", "append", "(", "uniq", ")", "uniq", "=", "np", ".", "array", "(", "uniq_l", ")", "if", "format", "==", "'fits'", ":", "result", "=", "self", ".", "_to_fits", "(", "uniq", "=", "uniq", ",", "optional_kw_dict", "=", "optional_kw_dict", ")", "elif", "format", "==", "'str'", ":", "result", "=", "self", ".", "__class__", ".", "_to_str", "(", "uniq", "=", "uniq", ")", "else", ":", "# json format serialization", "result", "=", "self", ".", "__class__", ".", "_to_json", "(", "uniq", "=", "uniq", ")", "return", "result" ]
Serializes the MOC into a specific format. Possible formats are FITS, JSON and STRING Parameters ---------- format : str 'fits' by default. The other possible choice is 'json' or 'str'. optional_kw_dict : dict Optional keywords arguments added to the FITS header. Only used if ``format`` equals to 'fits'. Returns ------- result : `astropy.io.fits.HDUList` or JSON dictionary The result of the serialization.
[ "Serializes", "the", "MOC", "into", "a", "specific", "format", "." ]
python
train
appstore-zencore/daemon-application
src/daemon_application/base.py
https://github.com/appstore-zencore/daemon-application/blob/e8d716dbaa7becfda95e144cce51558b0c9615e5/src/daemon_application/base.py#L141-L151
def daemon_stop(pidfile, sig=None): """Stop application. """ logger.debug("stop daemon application pidfile={pidfile}.".format(pidfile=pidfile)) pid = load_pid(pidfile) logger.debug("load pid={pid}".format(pid=pid)) if not pid: six.print_("Application is not running or crashed...", file=os.sys.stderr) os.sys.exit(195) process_kill(pid, sig) return pid
[ "def", "daemon_stop", "(", "pidfile", ",", "sig", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"stop daemon application pidfile={pidfile}.\"", ".", "format", "(", "pidfile", "=", "pidfile", ")", ")", "pid", "=", "load_pid", "(", "pidfile", ")", "logger", ".", "debug", "(", "\"load pid={pid}\"", ".", "format", "(", "pid", "=", "pid", ")", ")", "if", "not", "pid", ":", "six", ".", "print_", "(", "\"Application is not running or crashed...\"", ",", "file", "=", "os", ".", "sys", ".", "stderr", ")", "os", ".", "sys", ".", "exit", "(", "195", ")", "process_kill", "(", "pid", ",", "sig", ")", "return", "pid" ]
Stop application.
[ "Stop", "application", "." ]
python
train
resync/resync
resync/resource_container.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/resource_container.py#L114-L120
def link(self, rel):
        """Look for link with specified rel; return it, else None."""
        for link in self.ln:
            if ('rel' in link and
                    link['rel'] == rel):
                return(link)
        return(None)
[ "def", "link", "(", "self", ",", "rel", ")", ":", "for", "link", "in", "self", ".", "ln", ":", "if", "(", "'rel'", "in", "link", "and", "link", "[", "'rel'", "]", "==", "rel", ")", ":", "return", "(", "link", ")", "return", "(", "None", ")" ]
Look for link with specified rel; return it, else None.
[ "Look", "for", "link", "with", "specified", "rel", "return", "it", "else", "None", "." ]
python
train
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2522-L2527
def distinct_frequencies(self): """Return frequencies for each distinct haplotype.""" c = self.distinct_counts() n = self.shape[1] return c / n
[ "def", "distinct_frequencies", "(", "self", ")", ":", "c", "=", "self", ".", "distinct_counts", "(", ")", "n", "=", "self", ".", "shape", "[", "1", "]", "return", "c", "/", "n" ]
Return frequencies for each distinct haplotype.
[ "Return", "frequencies", "for", "each", "distinct", "haplotype", "." ]
python
train
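A hand-checkable example for distinct_frequencies (scikit-allel assumed installed; the small array is illustrative):

# import allel
# h = allel.HaplotypeArray([[0, 0, 0],
#                           [0, 1, 1]])   # 3 haplotype columns: (0,0), (0,1), (0,1)
# print(h.distinct_frequencies())         # -> [0.667, 0.333] (counts 2 and 1 over n=3)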