repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | docstring (string, 1-46.9k chars) | language (1 class) | partition (3 classes)
---|---|---|---|---|---|---
GNS3/gns3-server | gns3server/compute/iou/iou_vm.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/iou/iou_vm.py#L832-L867 | def adapter_add_nio_binding(self, adapter_number, port_number, nio):
"""
Adds an adapter NIO binding.
:param adapter_number: adapter number
:param port_number: port number
:param nio: NIO instance to add to the adapter/port
"""
try:
adapter = self._adapters[adapter_number]
except IndexError:
raise IOUError('Adapter {adapter_number} does not exist for IOU "{name}"'.format(name=self._name,
adapter_number=adapter_number))
if not adapter.port_exists(port_number):
raise IOUError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter,
port_number=port_number))
adapter.add_nio(port_number, nio)
log.info('IOU "{name}" [{id}]: {nio} added to {adapter_number}/{port_number}'.format(name=self._name,
id=self._id,
nio=nio,
adapter_number=adapter_number,
port_number=port_number))
if self.ubridge:
bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
yield from self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name,
iol_id=self.application_id,
bay=adapter_number,
unit=port_number,
lport=nio.lport,
rhost=nio.rhost,
rport=nio.rport))
yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters)
| Adds an adapter NIO binding.
:param adapter_number: adapter number
:param port_number: port number
:param nio: NIO instance to add to the adapter/port
| python | train |
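
The adapter/port bookkeeping above follows a validate-then-bind pattern: look up the adapter by index, verify the port exists, then attach the NIO. A minimal self-contained sketch of that pattern (plain Python, not the GNS3 API; the `Adapter` class and `IOUError` below are stand-ins):

```python
class IOUError(Exception):
    """Stand-in for gns3server's IOUError."""

class Adapter:
    def __init__(self, ports):
        self._nios = {p: None for p in range(ports)}

    def port_exists(self, port_number):
        return port_number in self._nios

    def add_nio(self, port_number, nio):
        self._nios[port_number] = nio

def add_nio_binding(adapters, adapter_number, port_number, nio):
    # Validate the adapter index first, then the port, then bind.
    try:
        adapter = adapters[adapter_number]
    except IndexError:
        raise IOUError("Adapter {} does not exist".format(adapter_number))
    if not adapter.port_exists(port_number):
        raise IOUError("Port {} does not exist".format(port_number))
    adapter.add_nio(port_number, nio)

adapters = [Adapter(ports=4)]
add_nio_binding(adapters, 0, 1, nio="udp-nio")  # binds successfully
```
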
googlefonts/fontbakery | Lib/fontbakery/profiles/googlefonts.py | https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3579-L3611 | def com_google_fonts_check_family_tnum_horizontal_metrics(fonts):
"""All tabular figures must have the same width across the RIBBI-family."""
from fontbakery.constants import RIBBI_STYLE_NAMES
from fontTools.ttLib import TTFont
RIBBI_ttFonts = [TTFont(f)
for f in fonts
if style(f) in RIBBI_STYLE_NAMES]
tnum_widths = {}
for ttFont in RIBBI_ttFonts:
glyphs = ttFont.getGlyphSet()
tnum_glyphs = [(glyph_id, glyphs[glyph_id])
for glyph_id in glyphs.keys()
if glyph_id.endswith(".tnum")]
for glyph_id, glyph in tnum_glyphs:
if glyph.width not in tnum_widths:
tnum_widths[glyph.width] = [glyph_id]
else:
tnum_widths[glyph.width].append(glyph_id)
if len(tnum_widths.keys()) > 1:
max_num = 0
most_common_width = None
for width, glyphs in tnum_widths.items():
if len(glyphs) > max_num:
max_num = len(glyphs)
most_common_width = width
del tnum_widths[most_common_width]
yield FAIL, (f"The most common tabular glyph width is {most_common_width}."
" But there are other tabular glyphs with different widths"
f" such as the following ones:\n\t{tnum_widths}.")
else:
yield PASS, "OK"
| All tabular figures must have the same width across the RIBBI-family.
| python | train |
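
The check buckets every `.tnum` glyph by its advance width and fails when more than one width bucket exists. A small self-contained sketch of that grouping step (plain dicts instead of a TTFont glyph set):

```python
# Map each tabular-figure glyph to its advance width (toy data).
glyph_widths = {"zero.tnum": 600, "one.tnum": 600, "two.tnum": 598}

# Group glyph names by width, mirroring the tnum_widths dict in the check.
tnum_widths = {}
for glyph_id, width in glyph_widths.items():
    tnum_widths.setdefault(width, []).append(glyph_id)

if len(tnum_widths) > 1:
    # Keep the most common width; report everything else as an outlier.
    most_common_width = max(tnum_widths, key=lambda w: len(tnum_widths[w]))
    outliers = {w: g for w, g in tnum_widths.items() if w != most_common_width}
    print("FAIL: most common width", most_common_width, "outliers", outliers)
else:
    print("PASS")
```
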
openstates/billy | billy/reports/utils.py | https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/reports/utils.py#L11-L19 | def update_common(obj, report):
""" do updated_at checks """
# updated checks
if obj['updated_at'] >= yesterday:
report['_updated_today_count'] += 1
if obj['updated_at'] >= last_month:
report['_updated_this_month_count'] += 1
if obj['updated_at'] >= last_year:
report['_updated_this_year_count'] += 1
| do updated_at checks
| python | train |
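
`update_common` relies on module-level `yesterday`, `last_month`, and `last_year` timestamps and mutates counters in the `report` dict. A self-contained sketch of the same counting logic, with the thresholds defined locally:

```python
from datetime import datetime, timedelta

now = datetime.utcnow()
yesterday = now - timedelta(days=1)
last_month = now - timedelta(days=30)
last_year = now - timedelta(days=365)

report = {'_updated_today_count': 0,
          '_updated_this_month_count': 0,
          '_updated_this_year_count': 0}
obj = {'updated_at': now - timedelta(hours=2)}  # updated two hours ago

if obj['updated_at'] >= yesterday:
    report['_updated_today_count'] += 1
if obj['updated_at'] >= last_month:
    report['_updated_this_month_count'] += 1
if obj['updated_at'] >= last_year:
    report['_updated_this_year_count'] += 1

print(report)  # all three counters become 1
```
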
gem/oq-engine | openquake/hazardlib/gsim/afshari_stewart_2016.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/afshari_stewart_2016.py#L174-L184 | def _get_tau(self, C, mag):
"""
Returns magnitude dependent inter-event standard deviation (tau)
(equation 14)
"""
if mag < 6.5:
return C["tau1"]
elif mag < 7.:
return C["tau1"] + (C["tau2"] - C["tau1"]) * ((mag - 6.5) / 0.5)
else:
return C["tau2"]
| Returns magnitude dependent inter-event standard deviation (tau)
(equation 14)
| python | train |
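
The tau model is piecewise: constant tau1 below M6.5, constant tau2 above M7.0, and a linear blend in between. A standalone version of that interpolation, with illustrative coefficients (the real values live in the GSIM's coefficient table):

```python
def get_tau(mag, tau1=0.28, tau2=0.25):
    """Magnitude-dependent inter-event standard deviation (illustrative tau1/tau2)."""
    if mag < 6.5:
        return tau1
    elif mag < 7.0:
        # Linear interpolation between tau1 at M6.5 and tau2 at M7.0.
        return tau1 + (tau2 - tau1) * ((mag - 6.5) / 0.5)
    return tau2

print(get_tau(6.0), get_tau(6.75), get_tau(7.5))  # 0.28, 0.265, 0.25
```
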
LudovicRousseau/pyscard | smartcard/ExclusiveConnectCardConnection.py | https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/ExclusiveConnectCardConnection.py#L41-L72 | def connect(self, protocol=None, mode=None, disposition=None):
'''Disconnect and reconnect in exclusive mode PCSCCardconnections.'''
CardConnectionDecorator.connect(self, protocol, mode, disposition)
component = self.component
while True:
if isinstance(component,
smartcard.pcsc.PCSCCardConnection.PCSCCardConnection):
pcscprotocol = PCSCCardConnection.translateprotocolmask(
protocol)
if 0 == pcscprotocol:
pcscprotocol = component.getProtocol()
if component.hcard is not None:
hresult = SCardDisconnect(component.hcard,
SCARD_LEAVE_CARD)
if hresult != 0:
raise CardConnectionException(
'Failed to disconnect: ' +
SCardGetErrorMessage(hresult))
hresult, component.hcard, dwActiveProtocol = SCardConnect(
component.hcontext, str(component.reader),
SCARD_SHARE_EXCLUSIVE, pcscprotocol)
if hresult != 0:
raise CardConnectionException(
'Failed to connect with SCARD_SHARE_EXCLUSIVE' +
SCardGetErrorMessage(hresult))
# print('reconnected exclusive')
break
if hasattr(component, 'component'):
component = component.component
else:
break
| Disconnect and reconnect in exclusive mode PCSCCardconnections.
| python | train |
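
In pyscard this decorator is layered over an existing card connection; the overridden `connect()` then re-opens the underlying PC/SC handle with `SCARD_SHARE_EXCLUSIVE`. A hedged usage sketch (assumes pyscard is installed and a reader with a card is attached):

```python
from smartcard.System import readers
from smartcard.ExclusiveConnectCardConnection import ExclusiveConnectCardConnection

reader = readers()[0]                     # first available PC/SC reader
connection = reader.createConnection()    # plain shared connection
exclusive = ExclusiveConnectCardConnection(connection)
exclusive.connect()                       # reconnects in exclusive mode
print(exclusive.getATR())
```
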
PythonCharmers/python-future | src/future/builtins/newnext.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/builtins/newnext.py#L43-L67 | def newnext(iterator, default=_SENTINEL):
"""
next(iterator[, default])
Return the next item from the iterator. If default is given and the iterator
is exhausted, it is returned instead of raising StopIteration.
"""
# args = []
# if default is not _SENTINEL:
# args.append(default)
try:
try:
return iterator.__next__()
except AttributeError:
try:
return iterator.next()
except AttributeError:
raise TypeError("'{0}' object is not an iterator".format(
iterator.__class__.__name__))
except StopIteration as e:
if default is _SENTINEL:
raise e
else:
return default
| next(iterator[, default])
Return the next item from the iterator. If default is given and the iterator
is exhausted, it is returned instead of raising StopIteration.
| python | train |
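
`newnext` is the helper behind the `next()` that `future.builtins` provides, and its documented contract matches the built-in: a supplied default suppresses `StopIteration`. A quick illustration using the same semantics:

```python
it = iter([10, 20])
print(next(it))          # 10
print(next(it))          # 20
print(next(it, "done"))  # "done" -- iterator exhausted, default returned
# next(it) with no default would raise StopIteration here.
```
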
ibis-project/ibis | ibis/expr/api.py | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L1350-L1363 | def _integer_to_interval(arg, unit='s'):
"""
Convert integer interval with the same inner type
Parameters
----------
unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
Returns
-------
interval : interval value expression
"""
op = ops.IntervalFromInteger(arg, unit)
return op.to_expr()
| Convert integer interval with the same inner type
Parameters
----------
unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'}
Returns
-------
interval : interval value expression
| python | train |
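
The helper is a thin expression builder: it wraps its argument in an `IntervalFromInteger` operation node and returns that node as an expression. A toy stand-in (not ibis' actual classes) showing the op-to-expression pattern:

```python
class IntervalFromInteger:
    """Toy operation node; ibis' real node also validates the unit."""
    def __init__(self, arg, unit):
        self.arg, self.unit = arg, unit

    def to_expr(self):
        # ibis wraps the op in a typed expression; here we just return
        # a readable description of the interval.
        return "interval({} {})".format(self.arg, self.unit)

def integer_to_interval(arg, unit='s'):
    return IntervalFromInteger(arg, unit).to_expr()

print(integer_to_interval(90, unit='s'))  # interval(90 s)
```
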
materialsproject/pymatgen | pymatgen/io/vasp/outputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/outputs.py#L2090-L2207 | def read_igpar(self):
"""
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
"""
# variables to be filled
self.er_ev = {} # will be dict (Spin.up/down) of array(3*float)
self.er_bp = {} # will be dics (Spin.up/down) of array(3*float)
self.er_ev_tot = None # will be array(3*float)
self.er_bp_tot = None # will be array(3*float)
self.p_elec = None
self.p_ion = None
try:
search = []
# Nonspin cases
def er_ev(results, match):
results.er_ev[Spin.up] = np.array(map(float,
match.groups()[1:4])) / 2
results.er_ev[Spin.down] = results.er_ev[Spin.up]
results.context = 2
search.append([r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, er_ev])
def er_bp(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)]) / 2
results.er_bp[Spin.down] = results.er_bp[Spin.up]
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == 2, er_bp])
# Spin cases
def er_ev_up(results, match):
results.er_ev[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)])
results.context = Spin.up
search.append([r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_up])
def er_bp_up(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results,
line: results.context == Spin.up, er_bp_up])
def er_ev_dn(results, match):
results.er_ev[Spin.down] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
results.context = Spin.down
search.append([r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_dn])
def er_bp_dn(results, match):
results.er_bp[Spin.down] = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results,
line: results.context == Spin.down, er_bp_dn])
# Always present spin/non-spin
def p_elc(results, match):
results.p_elc = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_elc])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^.*ionic dipole moment: "
r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_ion])
self.context = None
self.er_ev = {Spin.up: None, Spin.down: None}
self.er_bp = {Spin.up: None, Spin.down: None}
micro_pyawk(self.filename, search, self)
if self.er_ev[Spin.up] is not None and \
self.er_ev[Spin.down] is not None:
self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]
if self.er_bp[Spin.up] is not None and \
self.er_bp[Spin.down] is not None:
self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
except:
self.er_ev_tot = None
self.er_bp_tot = None
raise Exception("IGPAR OUTCAR could not be parsed.")
| Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
| python | train |
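
`read_igpar` is a method of pymatgen's `Outcar` parser and populates attributes rather than returning values. A hedged usage sketch (requires pymatgen and an OUTCAR produced with the LBERRY/IGPAR tags enabled):

```python
from pymatgen.io.vasp.outputs import Outcar

outcar = Outcar("OUTCAR")   # path to a VASP OUTCAR from an IGPAR run
outcar.read_igpar()         # raises if the IGPAR blocks cannot be parsed

# Spin-summed electronic/ionic dipole moments and e<r> terms set by the call.
print(outcar.p_elec, outcar.p_ion)
print(outcar.er_ev_tot, outcar.er_bp_tot)
```
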
pandas-dev/pandas | pandas/core/generic.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L274-L278 | def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
| Return an axes dictionary for myself.
| python | train |
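
`_construct_axes_dict` is a private NDFrame helper; for a DataFrame it simply maps each axis name to the corresponding Index object. The equivalent result built from public attributes:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])

# What _construct_axes_dict() yields for a DataFrame, using public attributes.
axes = {"index": df.index, "columns": df.columns}
print(axes)
```
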
deviantony/valigator | valigator/scheduler.py | https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/scheduler.py#L11-L38 | def validate_backup(configuration, backup_data):
"""Celery task.
It will extract the backup archive into a unique folder
in the temporary directory specified in the configuration.
Once extracted, a Docker container will be started and will
start a restoration procedure. The worker will wait for the
container to exit and retrieve its return code.
A notification is sent if the return code is != 0.
If the return code == 0, the container will be removed.
Lastly, it will remove the temporary workdir.
"""
extract_archive(backup_data['archive_path'],
backup_data['workdir'])
docker_client = Client(configuration['docker']['url'])
container = run_container(docker_client, backup_data)
return_code = docker_client.wait(container)
print('Container return code: {}'.format(return_code))
if return_code != 0:
notifier = MailNotifier(configuration['mail'])
report = {'archive': backup_data['archive_path'],
'image': backup_data['image'],
'container_id': container.get('Id')}
notifier.send_report(report)
else:
docker_client.remove_container(container)
remove_file(backup_data['workdir'])
| Celery task.
It will extract the backup archive into a unique folder
in the temporary directory specified in the configuration.
Once extracted, a Docker container will be started and will
start a restoration procedure. The worker will wait for the
container to exit and retrieve its return code.
A notification is sent if the return code is != 0.
If the return code == 0, the container will be removed.
Lastly, it will remove the temporary workdir.
| python | train |
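
The task expects two plain dicts: a configuration with `docker.url` and `mail` sections, and a per-backup payload with the archive path, a scratch workdir, and the Docker image used for the restore. A hedged sketch of those inputs (field names taken from the code above; the mail keys and image name are placeholders, not valigator's documented schema):

```python
configuration = {
    'docker': {'url': 'unix://var/run/docker.sock'},
    'mail': {'smtp_host': 'localhost', 'recipient': 'ops@example.com'},  # placeholder keys
}

backup_data = {
    'archive_path': '/backups/db-2019-01-01.tar.gz',
    'workdir': '/tmp/valigator/db-2019-01-01',
    'image': 'valigator/postgres-restore',  # hypothetical image name
}

# validate_backup(configuration, backup_data) would extract the archive,
# run the container, and mail a report if the container exits non-zero.
```
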
pkgw/pwkit | pwkit/io.py | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L736-L754 | def read_fits_bintable (self, hdu=1, drop_nonscalar_ok=True, **kwargs):
"""Open as a FITS file, read in a binary table, and return it as a
:class:`pandas.DataFrame`, converted with
:func:`pwkit.numutil.fits_recarray_to_data_frame`. The *hdu* argument
specifies which HDU to read, with its default 1 indicating the first
FITS extension. The *drop_nonscalar_ok* argument specifies if
non-scalar table values (which are inexpressible in
:class:`pandas.DataFrame`s) should be silently ignored (``True``) or
cause a :exc:`ValueError` to be raised (``False``). Other **kwargs**
are passed to :func:`astropy.io.fits.open`, (see
:meth:`Path.read_fits`) although the open mode is hardcoded to be
``"readonly"``.
"""
from astropy.io import fits
from .numutil import fits_recarray_to_data_frame as frtdf
with fits.open (text_type (self), mode='readonly', **kwargs) as hdulist:
return frtdf (hdulist[hdu].data, drop_nonscalar_ok=drop_nonscalar_ok)
| Open as a FITS file, read in a binary table, and return it as a
:class:`pandas.DataFrame`, converted with
:func:`pwkit.numutil.fits_recarray_to_data_frame`. The *hdu* argument
specifies which HDU to read, with its default 1 indicating the first
FITS extension. The *drop_nonscalar_ok* argument specifies if
non-scalar table values (which are inexpressible in
:class:`pandas.DataFrame`s) should be silently ignored (``True``) or
cause a :exc:`ValueError` to be raised (``False``). Other **kwargs**
are passed to :func:`astropy.io.fits.open`, (see
:meth:`Path.read_fits`) although the open mode is hardcoded to be
``"readonly"``.
| python | train |
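
`read_fits_bintable` lives on pwkit's `Path` class, so typical use is path-object style. A hedged usage sketch (assumes pwkit, astropy, and pandas are installed; `events.fits` is a hypothetical file with a binary table in HDU 1):

```python
from pwkit.io import Path

# "events.fits" is a stand-in file name; HDU 1 is the first FITS extension.
df = Path("events.fits").read_fits_bintable(hdu=1, drop_nonscalar_ok=True)
print(df.head())
```
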
spacetelescope/acstools | acstools/acs_destripe_plus.py | https://github.com/spacetelescope/acstools/blob/bbf8dd080cefcbf88529ec87c420f9e1b8002554/acstools/acs_destripe_plus.py#L111-L498 | def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15,
sigrej=2.0, lower=None, upper=None, binwidth=0.3,
scimask1=None, scimask2=None,
dqbits=None, rpt_clean=0, atol=0.01,
cte_correct=True, clobber=False, verbose=True):
r"""Calibrate post-SM4 ACS/WFC exposure(s) and use
standalone :ref:`acsdestripe`.
This takes a RAW image and generates a FLT file containing
its calibrated and destriped counterpart.
If CTE correction is performed, FLC will also be present.
Parameters
----------
inputfile : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*raw.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product of ``acs_destripe``.
This only affects the intermediate output file that will
be automatically renamed to ``*blv_tmp.fits`` during the processing.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's `imagestats` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.3)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
scimask1 : str or list of str
Mask images for *calibrated* ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
scimask2 : str or list of str
Mask images for *calibrated* ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `scimask1` and `scimask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
cte_correct : bool
Perform CTE correction.
verbose : bool
Print informational messages. Default = True.
Raises
------
ImportError
``stsci.tools`` not found.
IOError
Input file does not exist.
ValueError
Invalid header values or CALACS version.
"""
# Optional package dependencies
from stsci.tools import parseinput
try:
from stsci.tools.bitmask import interpret_bit_flags
except ImportError:
from stsci.tools.bitmask import (
interpret_bits_value as interpret_bit_flags
)
# process input file(s) and if we have multiple input files - recursively
# call acs_destripe_plus for each input image:
flist = parseinput.parseinput(inputfile)[0]
if isinstance(scimask1, str):
mlist1 = parseinput.parseinput(scimask1)[0]
elif isinstance(scimask1, np.ndarray):
mlist1 = [scimask1.copy()]
elif scimask1 is None:
mlist1 = []
elif isinstance(scimask1, list):
mlist1 = []
for m in scimask1:
if isinstance(m, np.ndarray):
mlist1.append(m.copy())
elif isinstance(m, str):
mlist1 += parseinput.parseinput(m)[0]
else:
raise TypeError("'scimask1' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'scimask1' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
if isinstance(scimask2, str):
mlist2 = parseinput.parseinput(scimask2)[0]
elif isinstance(scimask2, np.ndarray):
mlist2 = [scimask2.copy()]
elif scimask2 is None:
mlist2 = []
elif isinstance(scimask2, list):
mlist2 = []
for m in scimask2:
if isinstance(m, np.ndarray):
mlist2.append(m.copy())
elif isinstance(m, str):
mlist2 += parseinput.parseinput(m)[0]
else:
raise TypeError("'scimask2' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'scimask2' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
n_input = len(flist)
n_mask1 = len(mlist1)
n_mask2 = len(mlist2)
if n_input == 0:
raise ValueError(
'No input file(s) provided or the file(s) do not exist')
if n_mask1 == 0:
mlist1 = [None] * n_input
elif n_mask1 != n_input:
raise ValueError('Insufficient masks for [SCI,1]')
if n_mask2 == 0:
mlist2 = [None] * n_input
elif n_mask2 != n_input:
raise ValueError('Insufficient masks for [SCI,2]')
if n_input > 1:
for img, mf1, mf2 in zip(flist, mlist1, mlist2):
destripe_plus(
inputfile=img, suffix=suffix, stat=stat,
lower=lower, upper=upper, binwidth=binwidth,
maxiter=maxiter, sigrej=sigrej,
scimask1=scimask1, scimask2=scimask2, dqbits=dqbits,
cte_correct=cte_correct, clobber=clobber, verbose=verbose
)
return
inputfile = flist[0]
scimask1 = mlist1[0]
scimask2 = mlist2[0]
# verify that the RAW image exists in cwd
cwddir = os.getcwd()
if not os.path.exists(os.path.join(cwddir, inputfile)):
raise IOError("{0} does not exist.".format(inputfile))
# get image's primary header:
header = fits.getheader(inputfile)
# verify masks defined (or not) simultaneously:
if (header['CCDAMP'] == 'ABCD' and
((scimask1 is not None and scimask2 is None) or
(scimask1 is None and scimask2 is not None))):
raise ValueError("Both 'scimask1' and 'scimask2' must be specified "
"or not specified together.")
calacs_str = subprocess.check_output(['calacs.e', '--version']).split()[0]
calacs_ver = [int(x) for x in calacs_str.decode().split('.')]
if calacs_ver < [8, 3, 1]:
raise ValueError('CALACS {0} is incomptible. '
'Must be 8.3.1 or later.'.format(calacs_str))
# check date for post-SM4 and if supported subarray or full frame
is_subarray = False
ctecorr = header['PCTECORR']
aperture = header['APERTURE']
detector = header['DETECTOR']
date_obs = Time(header['DATE-OBS'])
# intermediate filenames
blvtmp_name = inputfile.replace('raw', 'blv_tmp')
blctmp_name = inputfile.replace('raw', 'blc_tmp')
# output filenames
tra_name = inputfile.replace('_raw.fits', '.tra')
flt_name = inputfile.replace('raw', 'flt')
flc_name = inputfile.replace('raw', 'flc')
if detector != 'WFC':
raise ValueError("{0} is not a WFC image, please check the 'DETECTOR'"
" keyword.".format(inputfile))
if date_obs < SM4_DATE:
raise ValueError(
"{0} is a pre-SM4 image.".format(inputfile))
if header['SUBARRAY'] and cte_correct:
if aperture in SUBARRAY_LIST:
is_subarray = True
else:
LOG.warning('Using non-supported subarray, '
'turning CTE correction off')
cte_correct = False
# delete files from previous CALACS runs
if clobber:
for tmpfilename in [blvtmp_name, blctmp_name, flt_name, flc_name,
tra_name]:
if os.path.exists(tmpfilename):
os.remove(tmpfilename)
# run ACSCCD on RAW
acsccd.acsccd(inputfile)
# modify user mask with DQ masks if requested
dqbits = interpret_bit_flags(dqbits)
if dqbits is not None:
# save 'tra' file in memory to trick the log file
# not to save first acs2d log as this is done only
# for the purpose of obtaining DQ masks.
# WISH: it would have been nice is there was an easy way of obtaining
# just the DQ masks as if data were calibrated but without
# having to recalibrate them with acs2d.
if os.path.isfile(tra_name):
with open(tra_name) as fh:
tra_lines = fh.readlines()
else:
tra_lines = None
# apply flats, etc.
acs2d.acs2d(blvtmp_name, verbose=False, quiet=True)
# extract DQ arrays from the FLT image:
dq1, dq2 = _read_DQ_arrays(flt_name)
mask1 = _get_mask(scimask1, 1)
scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits)
mask2 = _get_mask(scimask2, 2)
if dq2 is not None:
scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits)
elif mask2 is None:
scimask2 = None
# reconstruct trailer file:
if tra_lines is not None:
with open(tra_name, mode='w') as fh:
fh.writelines(tra_lines)
# delete temporary FLT image:
if os.path.isfile(flt_name):
os.remove(flt_name)
# execute destriping (post-SM4 data only)
acs_destripe.clean(
blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej,
lower=lower, upper=upper, binwidth=binwidth,
mask1=scimask1, mask2=scimask2, dqbits=dqbits,
rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose)
blvtmpsfx = 'blv_tmp_{0}'.format(suffix)
os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name)
# update subarray header
if is_subarray and cte_correct:
fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM')
ctecorr = 'PERFORM'
# perform CTE correction on destriped image
if cte_correct:
if ctecorr == 'PERFORM':
acscte.acscte(blvtmp_name)
else:
LOG.warning(
"PCTECORR={0}, cannot run CTE correction".format(ctecorr))
cte_correct = False
# run ACS2D to get FLT and FLC images
acs2d.acs2d(blvtmp_name)
if cte_correct:
acs2d.acs2d(blctmp_name)
# delete intermediate files
os.remove(blvtmp_name)
if cte_correct and os.path.isfile(blctmp_name):
os.remove(blctmp_name)
info_str = 'Done.\nFLT: {0}\n'.format(flt_name)
if cte_correct:
info_str += 'FLC: {0}\n'.format(flc_name)
LOG.info(info_str)
| r"""Calibrate post-SM4 ACS/WFC exposure(s) and use
standalone :ref:`acsdestripe`.
This takes a RAW image and generates a FLT file containing
its calibrated and destriped counterpart.
If CTE correction is performed, FLC will also be present.
Parameters
----------
inputfile : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*raw.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product of ``acs_destripe``.
This only affects the intermediate output file that will
be automatically renamed to ``*blv_tmp.fits`` during the processing.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's `imagestats` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameter sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
binwidth : float (Default = 0.1)
Histogram's bin width, in sigma units, used to sample the
distribution of pixel brightness values in order to compute the
background statistics. This parameter is applicable *only* to *stat*
parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
scimask1 : str or list of str
Mask images for *calibrated* ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
scimask2 : str or list of str
Mask images for *calibrated* ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `scimask1` and `scimask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
cte_correct : bool
Perform CTE correction.
verbose : bool
Print informational messages. Default = True.
Raises
------
ImportError
``stsci.tools`` not found.
IOError
Input file does not exist.
ValueError
Invalid header values or CALACS version. | [
"r",
"Calibrate",
"post",
"-",
"SM4",
"ACS",
"/",
"WFC",
"exposure",
"(",
"s",
")",
"and",
"use",
"standalone",
":",
"ref",
":",
"acsdestripe",
"."
]
| python | train |
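A minimal call sketch for the calibration routine documented in the record above. The import path and function name are assumptions (the code resembles acstools' destripe_plus, but this record does not show its module), and all file names are hypothetical; the keyword arguments simply mirror the parameters described in the docstring.

# Illustrative only: import path, function name, and file names are assumed.
from acstools import acs_destripe_plus

acs_destripe_plus.destripe_plus(
    'jabc01abq_raw.fits',       # hypothetical post-SM4 ACS/WFC RAW exposure
    suffix='strp',
    stat='pmode1',              # SEXTRACTOR-like mode estimate, per the docstring
    maxiter=15,
    sigrej=2.0,
    scimask1='mask_sci1.fits',  # hypothetical user mask for SCI,1
    scimask2='mask_sci2.fits',  # hypothetical user mask for SCI,2 (not used for subarrays)
    dqbits='16,64',             # treat DQ flags 16 and 64 as "good" during de-striping
    cte_correct=True,           # also produce an FLC product
    clobber=False,
    verbose=True,
)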
estnltk/estnltk | estnltk/text.py | https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L937-L941 | def timex_spans(self):
"""The list of spans of ``timexes`` layer elements."""
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.spans(TIMEXES) | [
"def",
"timex_spans",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_tagged",
"(",
"TIMEXES",
")",
":",
"self",
".",
"tag_timexes",
"(",
")",
"return",
"self",
".",
"spans",
"(",
"TIMEXES",
")"
]
| The list of spans of ``timexes`` layer elements. | [
"The",
"list",
"of",
"spans",
"of",
"timexes",
"layer",
"elements",
"."
]
| python | train |
divio/python-mautic | mautic/contacts.py | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/contacts.py#L17-L26 | def get_owners(self):
"""
Get a list of users available as contact owners
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/owners'.format(url=self.endpoint_url)
)
return self.process_response(response) | [
"def",
"get_owners",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_client",
".",
"session",
".",
"get",
"(",
"'{url}/list/owners'",
".",
"format",
"(",
"url",
"=",
"self",
".",
"endpoint_url",
")",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
")"
]
| Get a list of users available as contact owners
:return: dict|str | [
"Get",
"a",
"list",
"of",
"users",
"available",
"as",
"contact",
"owners"
]
| python | train |
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L853-L865 | def logical_chassis_fwdl_status_output_cluster_fwdl_entries_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
rbridge_id = ET.SubElement(cluster_fwdl_entries, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"logical_chassis_fwdl_status_output_cluster_fwdl_entries_rbridge_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"logical_chassis_fwdl_status",
"=",
"ET",
".",
"Element",
"(",
"\"logical_chassis_fwdl_status\"",
")",
"config",
"=",
"logical_chassis_fwdl_status",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"logical_chassis_fwdl_status",
",",
"\"output\"",
")",
"cluster_fwdl_entries",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"cluster-fwdl-entries\"",
")",
"rbridge_id",
"=",
"ET",
".",
"SubElement",
"(",
"cluster_fwdl_entries",
",",
"\"rbridge-id\"",
")",
"rbridge_id",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'rbridge_id'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| Auto Generated Code | [
"Auto",
"Generated",
"Code"
]
| python | train |
byt3bl33d3r/CrackMapExec | cme/protocols/smb/db_navigator.py | https://github.com/byt3bl33d3r/CrackMapExec/blob/333f1c4e06884e85b2776459963ef85d182aba8e/cme/protocols/smb/db_navigator.py#L255-L262 | def complete_hosts(self, text, line, begidx, endidx):
"Tab-complete 'creds' commands."
commands = ["add", "remove", "dc"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in commands if s.startswith(mline)] | [
"def",
"complete_hosts",
"(",
"self",
",",
"text",
",",
"line",
",",
"begidx",
",",
"endidx",
")",
":",
"commands",
"=",
"[",
"\"add\"",
",",
"\"remove\"",
",",
"\"dc\"",
"]",
"mline",
"=",
"line",
".",
"partition",
"(",
"' '",
")",
"[",
"2",
"]",
"offs",
"=",
"len",
"(",
"mline",
")",
"-",
"len",
"(",
"text",
")",
"return",
"[",
"s",
"[",
"offs",
":",
"]",
"for",
"s",
"in",
"commands",
"if",
"s",
".",
"startswith",
"(",
"mline",
")",
"]"
]
| Tab-complete 'creds' commands. | [
"Tab",
"-",
"complete",
"creds",
"commands",
"."
]
| python | train |
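The completer in the record above follows the usual cmd.Cmd convention of returning candidates relative to the word being completed. A self-contained sketch of the same prefix-matching logic, using the command list from the source:

def complete(text, line, commands=("add", "remove", "dc")):
    # Everything after the first space is the token being completed.
    mline = line.partition(' ')[2]
    # Return suffixes relative to 'text', as readline/cmd.Cmd expects.
    offs = len(mline) - len(text)
    return [s[offs:] for s in commands if s.startswith(mline)]

print(complete("re", "hosts re"))   # -> ['remove']
print(complete("", "hosts "))       # -> ['add', 'remove', 'dc']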
pandas-dev/pandas | pandas/core/internals/blocks.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L224-L234 | def make_block_same_class(self, values, placement=None, ndim=None,
dtype=None):
""" Wrap given values in a block of same type as self. """
if dtype is not None:
# issue 19431 fastparquet is passing this
warnings.warn("dtype argument is deprecated, will be removed "
"in a future release.", DeprecationWarning)
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, ndim=ndim,
klass=self.__class__, dtype=dtype) | [
"def",
"make_block_same_class",
"(",
"self",
",",
"values",
",",
"placement",
"=",
"None",
",",
"ndim",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"dtype",
"is",
"not",
"None",
":",
"# issue 19431 fastparquet is passing this",
"warnings",
".",
"warn",
"(",
"\"dtype argument is deprecated, will be removed \"",
"\"in a future release.\"",
",",
"DeprecationWarning",
")",
"if",
"placement",
"is",
"None",
":",
"placement",
"=",
"self",
".",
"mgr_locs",
"return",
"make_block",
"(",
"values",
",",
"placement",
"=",
"placement",
",",
"ndim",
"=",
"ndim",
",",
"klass",
"=",
"self",
".",
"__class__",
",",
"dtype",
"=",
"dtype",
")"
]
| Wrap given values in a block of same type as self. | [
"Wrap",
"given",
"values",
"in",
"a",
"block",
"of",
"same",
"type",
"as",
"self",
"."
]
| python | train |
PMBio/limix-backup | limix/core/old/gp/gp_base.py | https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/core/old/gp/gp_base.py#L120-L142 | def checkGradient(self,h=1e-6,verbose=True):
""" utility function to check the gradient of the gp """
grad_an = self.LMLgrad()
grad_num = {}
params0 = self.params.copy()
for key in list(self.params.keys()):
paramsL = params0.copy()
paramsR = params0.copy()
grad_num[key] = SP.zeros_like(self.params[key])
e = SP.zeros(self.params[key].shape[0])
for i in range(self.params[key].shape[0]):
e[i] = 1
paramsL[key]=params0[key]-h*e
paramsR[key]=params0[key]+h*e
lml_L = self.LML(paramsL)
lml_R = self.LML(paramsR)
grad_num[key][i] = (lml_R-lml_L)/(2*h)
e[i] = 0
if verbose:
print(('%s:'%key))
print((abs(grad_an[key]-grad_num[key])))
print('')
self.setParams(params0) | [
"def",
"checkGradient",
"(",
"self",
",",
"h",
"=",
"1e-6",
",",
"verbose",
"=",
"True",
")",
":",
"grad_an",
"=",
"self",
".",
"LMLgrad",
"(",
")",
"grad_num",
"=",
"{",
"}",
"params0",
"=",
"self",
".",
"params",
".",
"copy",
"(",
")",
"for",
"key",
"in",
"list",
"(",
"self",
".",
"params",
".",
"keys",
"(",
")",
")",
":",
"paramsL",
"=",
"params0",
".",
"copy",
"(",
")",
"paramsR",
"=",
"params0",
".",
"copy",
"(",
")",
"grad_num",
"[",
"key",
"]",
"=",
"SP",
".",
"zeros_like",
"(",
"self",
".",
"params",
"[",
"key",
"]",
")",
"e",
"=",
"SP",
".",
"zeros",
"(",
"self",
".",
"params",
"[",
"key",
"]",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"params",
"[",
"key",
"]",
".",
"shape",
"[",
"0",
"]",
")",
":",
"e",
"[",
"i",
"]",
"=",
"1",
"paramsL",
"[",
"key",
"]",
"=",
"params0",
"[",
"key",
"]",
"-",
"h",
"*",
"e",
"paramsR",
"[",
"key",
"]",
"=",
"params0",
"[",
"key",
"]",
"+",
"h",
"*",
"e",
"lml_L",
"=",
"self",
".",
"LML",
"(",
"paramsL",
")",
"lml_R",
"=",
"self",
".",
"LML",
"(",
"paramsR",
")",
"grad_num",
"[",
"key",
"]",
"[",
"i",
"]",
"=",
"(",
"lml_R",
"-",
"lml_L",
")",
"/",
"(",
"2",
"*",
"h",
")",
"e",
"[",
"i",
"]",
"=",
"0",
"if",
"verbose",
":",
"print",
"(",
"(",
"'%s:'",
"%",
"key",
")",
")",
"print",
"(",
"(",
"abs",
"(",
"grad_an",
"[",
"key",
"]",
"-",
"grad_num",
"[",
"key",
"]",
")",
")",
")",
"print",
"(",
"''",
")",
"self",
".",
"setParams",
"(",
"params0",
")"
]
| utility function to check the gradient of the gp | [
"utility",
"function",
"to",
"check",
"the",
"gradient",
"of",
"the",
"gp"
]
| python | train |
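The method above validates analytic gradients with central finite differences, one parameter block at a time. A self-contained sketch of the same technique on a toy objective (not the limix API): perturb one coordinate at a time by +/- h and compare (f(x + h*e_i) - f(x - h*e_i)) / (2h) against the analytic gradient.

import numpy as np

def f(x):
    return np.sum(x ** 3)        # toy objective

def grad_f(x):
    return 3.0 * x ** 2          # its analytic gradient

def check_gradient(x, h=1e-6):
    grad_an = grad_f(x)
    grad_num = np.zeros_like(x)
    e = np.zeros_like(x)
    for i in range(x.size):
        e[i] = 1.0
        grad_num[i] = (f(x + h * e) - f(x - h * e)) / (2.0 * h)
        e[i] = 0.0
    return np.abs(grad_an - grad_num)

print(check_gradient(np.array([0.5, -1.0, 2.0])))   # differences around 1e-9 or smaller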
jaraco/jaraco.itertools | jaraco/itertools.py | https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L856-L877 | def partition_items(count, bin_size):
"""
Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2]
"""
num_bins = int(math.ceil(count / float(bin_size)))
bins = [0] * num_bins
for i in range(count):
bins[i % num_bins] += 1
return bins | [
"def",
"partition_items",
"(",
"count",
",",
"bin_size",
")",
":",
"num_bins",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"count",
"/",
"float",
"(",
"bin_size",
")",
")",
")",
"bins",
"=",
"[",
"0",
"]",
"*",
"num_bins",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"bins",
"[",
"i",
"%",
"num_bins",
"]",
"+=",
"1",
"return",
"bins"
]
| Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2] | [
"Given",
"the",
"total",
"number",
"of",
"items",
"determine",
"the",
"number",
"of",
"items",
"that",
"can",
"be",
"added",
"to",
"each",
"bin",
"with",
"a",
"limit",
"on",
"the",
"bin",
"size",
"."
]
| python | test |
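The round-robin loop above spreads the remainder across the leading bins, which is why 10 items in bins of 3 come out as [3, 3, 2, 2] rather than the greedy [3, 3, 3, 1]. A standalone copy that reproduces the doctest results:

import math

def partition_items(count, bin_size):
    # Smallest number of bins such that no bin exceeds bin_size.
    num_bins = int(math.ceil(count / float(bin_size)))
    bins = [0] * num_bins
    # Deal items one at a time, round-robin, so bin sizes stay balanced.
    for i in range(count):
        bins[i % num_bins] += 1
    return bins

assert partition_items(11, 3) == [3, 3, 3, 2]
assert partition_items(10, 3) == [3, 3, 2, 2]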
PmagPy/PmagPy | programs/conversion_scripts2/pmd_magic2.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/pmd_magic2.py#L7-L276 | def main(command_line=True, **kwargs):
"""
NAME
pmd_magic.py
DESCRIPTION
converts PMD (Enkin) format files to magic_measurements format files
SYNTAX
pmd_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt
-spc NUM : specify number of characters to designate a specimen, default = 1
-loc LOCNAME : specify location/study name
-A: don't average replicate measurements
-ncn NCON: specify naming convention
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
-lat: Latitude of site (if no value given assumes 0)
-lon: Longitude of site (if no value given assumes 0)
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
NB: all others you will have to customize yourself
or e-mail [email protected] for help.
INPUT
PMD format files
"""
# initialize some stuff
noave=0
inst=""
samp_con,Z='1',""
missing=1
demag="N"
er_location_name="unknown"
citation='This study'
args=sys.argv
meth_code="LP-NO"
specnum=-1
MagRecs=[]
version_num=pmag.get_version()
Samps=[] # keeps track of sample orientations
DIspec=[]
MagFiles=[]
user=""
mag_file=""
dir_path='.'
ErSamps=[]
SampOuts=[]
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind=args.index("-F")
meas_file = args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file= args[ind+1]
if "-spc" in args:
ind = args.index("-spc")
specnum = int(args[ind+1])
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-A" in args: noave=1
if "-mcd" in args:
ind=args.index("-mcd")
meth_code=args[ind+1]
if "-lat" in args:
ind=args.index("-lat")
site_lat=args[ind+1]
if "-lon" in args:
ind=args.index("-lon")
site_lon=args[ind+1]
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
spec_file = kwargs.get('spec_file', 'er_specimens.txt')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt')
site_lat = kwargs.get('site_lat', 0)
site_lon = kwargs.get('site_lon', 0)
specnum = kwargs.get('specnum', 0)
samp_con = kwargs.get('samp_con', '1')
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
print(samp_con)
# format variables
mag_file = os.path.join(input_dir_path,mag_file)
meas_file = os.path.join(output_dir_path,meas_file)
spec_file = os.path.join(output_dir_path,spec_file)
samp_file = os.path.join(output_dir_path,samp_file)
site_file = os.path.join(output_dir_path,site_file)
if specnum!=0:specnum=-specnum
if "4" in samp_con:
if "-" not in samp_con:
print("naming convention option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="7"
# parse data
data=open(mag_file,'r').readlines() # read in data from file
comment=data[0]
line=data[1].strip()
line=line.replace("=","= ") # make finding orientations easier
rec=line.split() # read in sample orientation, etc.
er_specimen_name=rec[0]
ErSpecRec,ErSampRec,ErSiteRec={},{},{} # make a sample record
if specnum!=0:
er_sample_name=rec[0][:specnum]
else:
er_sample_name=rec[0]
if len(ErSamps)>0: # need to copy existing
for samp in ErSamps:
if samp['er_sample_name']==er_sample_name:
ErSampRec=samp # we'll ammend this one
else:
SampOuts.append(samp) # keep all the others
if int(samp_con)<6:
er_site_name=pmag.parse_site(er_sample_name,samp_con,Z)
else:
if 'er_site_name' in list(ErSampRec.keys()):er_site_name=ErSampREc['er_site_name']
if 'er_location_name' in list(ErSampRec.keys()):er_location_name=ErSampREc['er_location_name']
az_ind=rec.index('a=')+1
ErSampRec['er_sample_name']=er_sample_name
ErSampRec['er_sample_description']=comment
ErSampRec['sample_azimuth']=rec[az_ind]
dip_ind=rec.index('b=')+1
dip=-float(rec[dip_ind])
ErSampRec['sample_dip']='%7.1f'%(dip)
strike_ind=rec.index('s=')+1
ErSampRec['sample_bed_dip_direction']='%7.1f'%(float(rec[strike_ind])+90.)
bd_ind=rec.index('d=')+1
ErSampRec['sample_bed_dip']=rec[bd_ind]
v_ind=rec.index('v=')+1
vol=rec[v_ind][:-3]
date=rec[-2]
time=rec[-1]
ErSampRec['magic_method_codes']=meth_code
if 'er_location_name' not in list(ErSampRec.keys()):ErSampRec['er_location_name']=er_location_name
if 'er_site_name' not in list(ErSampRec.keys()):ErSampRec['er_site_name']=er_site_name
if 'er_citation_names' not in list(ErSampRec.keys()):ErSampRec['er_citation_names']='This study'
if 'magic_method_codes' not in list(ErSampRec.keys()):ErSampRec['magic_method_codes']='SO-NO'
ErSpecRec['er_specimen_name'] = er_specimen_name
ErSpecRec['er_sample_name'] = er_sample_name
ErSpecRec['er_site_name'] = er_site_name
ErSpecRec['er_location_name'] = er_location_name
ErSpecRec['er_citation_names']='This study'
ErSiteRec['er_site_name'] = er_site_name
ErSiteRec['er_location_name'] = er_location_name
ErSiteRec['er_citation_names']='This study'
ErSiteRec['site_lat'] = site_lat
ErSiteRec['site_lon']= site_lon
SpecOuts.append(ErSpecRec)
SampOuts.append(ErSampRec)
SiteOuts.append(ErSiteRec)
for k in range(3,len(data)): # read in data
line=data[k]
rec=line.split()
if len(rec)>1: # skip blank lines at bottom
MagRec={}
MagRec['measurement_description']='Date: '+date+' '+time
MagRec["er_citation_names"]="This study"
MagRec['er_location_name']=er_location_name
MagRec['er_site_name']=er_site_name
MagRec['er_sample_name']=er_sample_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["er_specimen_name"]=er_specimen_name
if rec[0]=='NRM':
meas_type="LT-NO"
elif rec[0][0]=='M' or rec[0][0]=='H':
meas_type="LT-AF-Z"
elif rec[0][0]=='T':
meas_type="LT-T-Z"
else:
print("measurement type unknown")
return False, "measurement type unknown"
X=[float(rec[1]),float(rec[2]),float(rec[3])]
Vec=pmag.cart2dir(X)
MagRec["measurement_magn_moment"]='%10.3e'% (Vec[2]) # Am^2
MagRec["measurement_magn_volume"]=rec[4] # A/m
MagRec["measurement_dec"]='%7.1f'%(Vec[0])
MagRec["measurement_inc"]='%7.1f'%(Vec[1])
MagRec["treatment_ac_field"]='0'
if meas_type!='LT-NO':
treat=float(rec[0][1:])
else:
treat=0
if meas_type=="LT-AF-Z":
MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
elif meas_type=="LT-T-Z":
MagRec["treatment_temp"]='%8.3e' % (treat+273.) # temp in kelvin
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
pmag.magic_write(samp_file,SpecOuts,'er_specimens')
pmag.magic_write(samp_file,SampOuts,'er_samples')
pmag.magic_write(samp_file,SiteOuts,'er_sites')
return True, meas_file | [
"def",
"main",
"(",
"command_line",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# initialize some stuff",
"noave",
"=",
"0",
"inst",
"=",
"\"\"",
"samp_con",
",",
"Z",
"=",
"'1'",
",",
"\"\"",
"missing",
"=",
"1",
"demag",
"=",
"\"N\"",
"er_location_name",
"=",
"\"unknown\"",
"citation",
"=",
"'This study'",
"args",
"=",
"sys",
".",
"argv",
"meth_code",
"=",
"\"LP-NO\"",
"specnum",
"=",
"-",
"1",
"MagRecs",
"=",
"[",
"]",
"version_num",
"=",
"pmag",
".",
"get_version",
"(",
")",
"Samps",
"=",
"[",
"]",
"# keeps track of sample orientations",
"DIspec",
"=",
"[",
"]",
"MagFiles",
"=",
"[",
"]",
"user",
"=",
"\"\"",
"mag_file",
"=",
"\"\"",
"dir_path",
"=",
"'.'",
"ErSamps",
"=",
"[",
"]",
"SampOuts",
"=",
"[",
"]",
"samp_file",
"=",
"'er_samples.txt'",
"meas_file",
"=",
"'magic_measurements.txt'",
"#",
"# get command line arguments",
"#",
"if",
"command_line",
":",
"if",
"'-WD'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-WD'",
")",
"dir_path",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-ID'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-ID'",
")",
"input_dir_path",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"else",
":",
"input_dir_path",
"=",
"dir_path",
"output_dir_path",
"=",
"dir_path",
"if",
"\"-h\"",
"in",
"args",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"return",
"False",
"if",
"'-F'",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-F\"",
")",
"meas_file",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-Fsa'",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-Fsa\"",
")",
"samp_file",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"#try:",
"# open(samp_file,'r')",
"# ErSamps,file_type=pmag.magic_read(samp_file)",
"# print 'sample information will be appended to ', samp_file ",
"#except:",
"# print samp_file,' not found: sample information will be stored in new er_samples.txt file'",
"# samp_file = output_dir_path+'/er_samples.txt'",
"if",
"'-f'",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-f\"",
")",
"mag_file",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"\"-spc\"",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-spc\"",
")",
"specnum",
"=",
"int",
"(",
"args",
"[",
"ind",
"+",
"1",
"]",
")",
"if",
"\"-ncn\"",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-ncn\"",
")",
"samp_con",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"\"-loc\"",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-loc\"",
")",
"er_location_name",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"\"-A\"",
"in",
"args",
":",
"noave",
"=",
"1",
"if",
"\"-mcd\"",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-mcd\"",
")",
"meth_code",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"\"-lat\"",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-lat\"",
")",
"site_lat",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"\"-lon\"",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-lon\"",
")",
"site_lon",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"not",
"command_line",
":",
"dir_path",
"=",
"kwargs",
".",
"get",
"(",
"'dir_path'",
",",
"'.'",
")",
"input_dir_path",
"=",
"kwargs",
".",
"get",
"(",
"'input_dir_path'",
",",
"dir_path",
")",
"output_dir_path",
"=",
"dir_path",
"meas_file",
"=",
"kwargs",
".",
"get",
"(",
"'meas_file'",
",",
"'magic_measurements.txt'",
")",
"mag_file",
"=",
"kwargs",
".",
"get",
"(",
"'mag_file'",
")",
"spec_file",
"=",
"kwargs",
".",
"get",
"(",
"'spec_file'",
",",
"'er_specimens.txt'",
")",
"samp_file",
"=",
"kwargs",
".",
"get",
"(",
"'samp_file'",
",",
"'er_samples.txt'",
")",
"site_file",
"=",
"kwargs",
".",
"get",
"(",
"'site_file'",
",",
"'er_sites.txt'",
")",
"site_lat",
"=",
"kwargs",
".",
"get",
"(",
"'site_lat'",
",",
"0",
")",
"site_lon",
"=",
"kwargs",
".",
"get",
"(",
"'site_lon'",
",",
"0",
")",
"specnum",
"=",
"kwargs",
".",
"get",
"(",
"'specnum'",
",",
"0",
")",
"samp_con",
"=",
"kwargs",
".",
"get",
"(",
"'samp_con'",
",",
"'1'",
")",
"er_location_name",
"=",
"kwargs",
".",
"get",
"(",
"'er_location_name'",
",",
"''",
")",
"noave",
"=",
"kwargs",
".",
"get",
"(",
"'noave'",
",",
"0",
")",
"# default (0) means DO average",
"meth_code",
"=",
"kwargs",
".",
"get",
"(",
"'meth_code'",
",",
"\"LP-NO\"",
")",
"print",
"(",
"samp_con",
")",
"# format variables",
"mag_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir_path",
",",
"mag_file",
")",
"meas_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir_path",
",",
"meas_file",
")",
"spec_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir_path",
",",
"spec_file",
")",
"samp_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir_path",
",",
"samp_file",
")",
"site_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir_path",
",",
"site_file",
")",
"if",
"specnum",
"!=",
"0",
":",
"specnum",
"=",
"-",
"specnum",
"if",
"\"4\"",
"in",
"samp_con",
":",
"if",
"\"-\"",
"not",
"in",
"samp_con",
":",
"print",
"(",
"\"naming convention option [4] must be in form 4-Z where Z is an integer\"",
")",
"return",
"False",
",",
"\"naming convention option [4] must be in form 4-Z where Z is an integer\"",
"else",
":",
"Z",
"=",
"samp_con",
".",
"split",
"(",
"\"-\"",
")",
"[",
"1",
"]",
"samp_con",
"=",
"\"4\"",
"if",
"\"7\"",
"in",
"samp_con",
":",
"if",
"\"-\"",
"not",
"in",
"samp_con",
":",
"print",
"(",
"\"option [7] must be in form 7-Z where Z is an integer\"",
")",
"return",
"False",
",",
"\"naming convention option [7] must be in form 7-Z where Z is an integer\"",
"else",
":",
"Z",
"=",
"samp_con",
".",
"split",
"(",
"\"-\"",
")",
"[",
"1",
"]",
"samp_con",
"=",
"\"7\"",
"# parse data",
"data",
"=",
"open",
"(",
"mag_file",
",",
"'r'",
")",
".",
"readlines",
"(",
")",
"# read in data from file",
"comment",
"=",
"data",
"[",
"0",
"]",
"line",
"=",
"data",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"line",
"=",
"line",
".",
"replace",
"(",
"\"=\"",
",",
"\"= \"",
")",
"# make finding orientations easier",
"rec",
"=",
"line",
".",
"split",
"(",
")",
"# read in sample orientation, etc.",
"er_specimen_name",
"=",
"rec",
"[",
"0",
"]",
"ErSpecRec",
",",
"ErSampRec",
",",
"ErSiteRec",
"=",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
"# make a sample record",
"if",
"specnum",
"!=",
"0",
":",
"er_sample_name",
"=",
"rec",
"[",
"0",
"]",
"[",
":",
"specnum",
"]",
"else",
":",
"er_sample_name",
"=",
"rec",
"[",
"0",
"]",
"if",
"len",
"(",
"ErSamps",
")",
">",
"0",
":",
"# need to copy existing",
"for",
"samp",
"in",
"ErSamps",
":",
"if",
"samp",
"[",
"'er_sample_name'",
"]",
"==",
"er_sample_name",
":",
"ErSampRec",
"=",
"samp",
"# we'll ammend this one",
"else",
":",
"SampOuts",
".",
"append",
"(",
"samp",
")",
"# keep all the others",
"if",
"int",
"(",
"samp_con",
")",
"<",
"6",
":",
"er_site_name",
"=",
"pmag",
".",
"parse_site",
"(",
"er_sample_name",
",",
"samp_con",
",",
"Z",
")",
"else",
":",
"if",
"'er_site_name'",
"in",
"list",
"(",
"ErSampRec",
".",
"keys",
"(",
")",
")",
":",
"er_site_name",
"=",
"ErSampREc",
"[",
"'er_site_name'",
"]",
"if",
"'er_location_name'",
"in",
"list",
"(",
"ErSampRec",
".",
"keys",
"(",
")",
")",
":",
"er_location_name",
"=",
"ErSampREc",
"[",
"'er_location_name'",
"]",
"az_ind",
"=",
"rec",
".",
"index",
"(",
"'a='",
")",
"+",
"1",
"ErSampRec",
"[",
"'er_sample_name'",
"]",
"=",
"er_sample_name",
"ErSampRec",
"[",
"'er_sample_description'",
"]",
"=",
"comment",
"ErSampRec",
"[",
"'sample_azimuth'",
"]",
"=",
"rec",
"[",
"az_ind",
"]",
"dip_ind",
"=",
"rec",
".",
"index",
"(",
"'b='",
")",
"+",
"1",
"dip",
"=",
"-",
"float",
"(",
"rec",
"[",
"dip_ind",
"]",
")",
"ErSampRec",
"[",
"'sample_dip'",
"]",
"=",
"'%7.1f'",
"%",
"(",
"dip",
")",
"strike_ind",
"=",
"rec",
".",
"index",
"(",
"'s='",
")",
"+",
"1",
"ErSampRec",
"[",
"'sample_bed_dip_direction'",
"]",
"=",
"'%7.1f'",
"%",
"(",
"float",
"(",
"rec",
"[",
"strike_ind",
"]",
")",
"+",
"90.",
")",
"bd_ind",
"=",
"rec",
".",
"index",
"(",
"'d='",
")",
"+",
"1",
"ErSampRec",
"[",
"'sample_bed_dip'",
"]",
"=",
"rec",
"[",
"bd_ind",
"]",
"v_ind",
"=",
"rec",
".",
"index",
"(",
"'v='",
")",
"+",
"1",
"vol",
"=",
"rec",
"[",
"v_ind",
"]",
"[",
":",
"-",
"3",
"]",
"date",
"=",
"rec",
"[",
"-",
"2",
"]",
"time",
"=",
"rec",
"[",
"-",
"1",
"]",
"ErSampRec",
"[",
"'magic_method_codes'",
"]",
"=",
"meth_code",
"if",
"'er_location_name'",
"not",
"in",
"list",
"(",
"ErSampRec",
".",
"keys",
"(",
")",
")",
":",
"ErSampRec",
"[",
"'er_location_name'",
"]",
"=",
"er_location_name",
"if",
"'er_site_name'",
"not",
"in",
"list",
"(",
"ErSampRec",
".",
"keys",
"(",
")",
")",
":",
"ErSampRec",
"[",
"'er_site_name'",
"]",
"=",
"er_site_name",
"if",
"'er_citation_names'",
"not",
"in",
"list",
"(",
"ErSampRec",
".",
"keys",
"(",
")",
")",
":",
"ErSampRec",
"[",
"'er_citation_names'",
"]",
"=",
"'This study'",
"if",
"'magic_method_codes'",
"not",
"in",
"list",
"(",
"ErSampRec",
".",
"keys",
"(",
")",
")",
":",
"ErSampRec",
"[",
"'magic_method_codes'",
"]",
"=",
"'SO-NO'",
"ErSpecRec",
"[",
"'er_specimen_name'",
"]",
"=",
"er_specimen_name",
"ErSpecRec",
"[",
"'er_sample_name'",
"]",
"=",
"er_sample_name",
"ErSpecRec",
"[",
"'er_site_name'",
"]",
"=",
"er_site_name",
"ErSpecRec",
"[",
"'er_location_name'",
"]",
"=",
"er_location_name",
"ErSpecRec",
"[",
"'er_citation_names'",
"]",
"=",
"'This study'",
"ErSiteRec",
"[",
"'er_site_name'",
"]",
"=",
"er_site_name",
"ErSiteRec",
"[",
"'er_location_name'",
"]",
"=",
"er_location_name",
"ErSiteRec",
"[",
"'er_citation_names'",
"]",
"=",
"'This study'",
"ErSiteRec",
"[",
"'site_lat'",
"]",
"=",
"site_lat",
"ErSiteRec",
"[",
"'site_lon'",
"]",
"=",
"site_lon",
"SpecOuts",
".",
"append",
"(",
"ErSpecRec",
")",
"SampOuts",
".",
"append",
"(",
"ErSampRec",
")",
"SiteOuts",
".",
"append",
"(",
"ErSiteRec",
")",
"for",
"k",
"in",
"range",
"(",
"3",
",",
"len",
"(",
"data",
")",
")",
":",
"# read in data",
"line",
"=",
"data",
"[",
"k",
"]",
"rec",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"rec",
")",
">",
"1",
":",
"# skip blank lines at bottom ",
"MagRec",
"=",
"{",
"}",
"MagRec",
"[",
"'measurement_description'",
"]",
"=",
"'Date: '",
"+",
"date",
"+",
"' '",
"+",
"time",
"MagRec",
"[",
"\"er_citation_names\"",
"]",
"=",
"\"This study\"",
"MagRec",
"[",
"'er_location_name'",
"]",
"=",
"er_location_name",
"MagRec",
"[",
"'er_site_name'",
"]",
"=",
"er_site_name",
"MagRec",
"[",
"'er_sample_name'",
"]",
"=",
"er_sample_name",
"MagRec",
"[",
"'magic_software_packages'",
"]",
"=",
"version_num",
"MagRec",
"[",
"\"treatment_temp\"",
"]",
"=",
"'%8.3e'",
"%",
"(",
"273",
")",
"# room temp in kelvin",
"MagRec",
"[",
"\"measurement_temp\"",
"]",
"=",
"'%8.3e'",
"%",
"(",
"273",
")",
"# room temp in kelvin",
"MagRec",
"[",
"\"measurement_flag\"",
"]",
"=",
"'g'",
"MagRec",
"[",
"\"measurement_standard\"",
"]",
"=",
"'u'",
"MagRec",
"[",
"\"measurement_number\"",
"]",
"=",
"'1'",
"MagRec",
"[",
"\"er_specimen_name\"",
"]",
"=",
"er_specimen_name",
"if",
"rec",
"[",
"0",
"]",
"==",
"'NRM'",
":",
"meas_type",
"=",
"\"LT-NO\"",
"elif",
"rec",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'M'",
"or",
"rec",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'H'",
":",
"meas_type",
"=",
"\"LT-AF-Z\"",
"elif",
"rec",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'T'",
":",
"meas_type",
"=",
"\"LT-T-Z\"",
"else",
":",
"print",
"(",
"\"measurement type unknown\"",
")",
"return",
"False",
",",
"\"measurement type unknown\"",
"X",
"=",
"[",
"float",
"(",
"rec",
"[",
"1",
"]",
")",
",",
"float",
"(",
"rec",
"[",
"2",
"]",
")",
",",
"float",
"(",
"rec",
"[",
"3",
"]",
")",
"]",
"Vec",
"=",
"pmag",
".",
"cart2dir",
"(",
"X",
")",
"MagRec",
"[",
"\"measurement_magn_moment\"",
"]",
"=",
"'%10.3e'",
"%",
"(",
"Vec",
"[",
"2",
"]",
")",
"# Am^2 ",
"MagRec",
"[",
"\"measurement_magn_volume\"",
"]",
"=",
"rec",
"[",
"4",
"]",
"# A/m ",
"MagRec",
"[",
"\"measurement_dec\"",
"]",
"=",
"'%7.1f'",
"%",
"(",
"Vec",
"[",
"0",
"]",
")",
"MagRec",
"[",
"\"measurement_inc\"",
"]",
"=",
"'%7.1f'",
"%",
"(",
"Vec",
"[",
"1",
"]",
")",
"MagRec",
"[",
"\"treatment_ac_field\"",
"]",
"=",
"'0'",
"if",
"meas_type",
"!=",
"'LT-NO'",
":",
"treat",
"=",
"float",
"(",
"rec",
"[",
"0",
"]",
"[",
"1",
":",
"]",
")",
"else",
":",
"treat",
"=",
"0",
"if",
"meas_type",
"==",
"\"LT-AF-Z\"",
":",
"MagRec",
"[",
"\"treatment_ac_field\"",
"]",
"=",
"'%8.3e'",
"%",
"(",
"treat",
"*",
"1e-3",
")",
"# convert from mT to tesla",
"elif",
"meas_type",
"==",
"\"LT-T-Z\"",
":",
"MagRec",
"[",
"\"treatment_temp\"",
"]",
"=",
"'%8.3e'",
"%",
"(",
"treat",
"+",
"273.",
")",
"# temp in kelvin",
"MagRec",
"[",
"'magic_method_codes'",
"]",
"=",
"meas_type",
"MagRecs",
".",
"append",
"(",
"MagRec",
")",
"MagOuts",
"=",
"pmag",
".",
"measurements_methods",
"(",
"MagRecs",
",",
"noave",
")",
"pmag",
".",
"magic_write",
"(",
"meas_file",
",",
"MagOuts",
",",
"'magic_measurements'",
")",
"print",
"(",
"\"results put in \"",
",",
"meas_file",
")",
"pmag",
".",
"magic_write",
"(",
"samp_file",
",",
"SpecOuts",
",",
"'er_specimens'",
")",
"pmag",
".",
"magic_write",
"(",
"samp_file",
",",
"SampOuts",
",",
"'er_samples'",
")",
"pmag",
".",
"magic_write",
"(",
"samp_file",
",",
"SiteOuts",
",",
"'er_sites'",
")",
"return",
"True",
",",
"meas_file"
]
| NAME
pmd_magic.py
DESCRIPTION
converts PMD (Enkin) format files to magic_measurements format files
SYNTAX
pmd_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt
-spc NUM : specify number of characters to designate a specimen, default = 1
-loc LOCNAME : specify location/study name
-A: don't average replicate measurements
-ncn NCON: specify naming convention
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
-lat: Latitude of site (if no value given assumes 0)
-lon: Longitude of site (if no value given assumes 0)
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
NB: all others you will have to customize yourself
or e-mail [email protected] for help.
INPUT
PMD format files | [
"NAME",
"pmd_magic",
".",
"py",
"DESCRIPTION",
"converts",
"PMD",
"(",
"Enkin",
")",
"format",
"files",
"to",
"magic_measurements",
"format",
"files"
]
| python | train |
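A hedged sketch of how the converter above is meant to be invoked, using the keyword form it reads when command_line=False. The module import follows this record's file path; the file names, coordinates, and location name are hypothetical, and the keyword names are the ones the code itself pops from kwargs.

# CLI equivalent (options as listed in the docstring):
#   pmd_magic.py -f ss0101a.pmd -F magic_measurements.txt -ncn 2 -spc 1 -loc "My Study" -lat 45 -lon 13
from programs.conversion_scripts2 import pmd_magic2   # import path assumed from the record's file path

ok, meas_file = pmd_magic2.main(
    command_line=False,
    input_dir_path='data',             # hypothetical directory holding the PMD file
    mag_file='ss0101a.pmd',            # hypothetical PMD (Enkin) input file
    meas_file='magic_measurements.txt',
    samp_con='2',                      # naming convention [2]: XXXX-YY
    specnum=1,                         # one trailing character designates the specimen
    site_lat=45.0,
    site_lon=13.0,
    er_location_name='My Study',
)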
WebarchivCZ/WA-KAT | src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py | https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py#L144-L157 | def set_kw_typeahead_input(cls):
"""
Map the typeahead input to remote dataset.
"""
# get reference to parent element
parent_id = cls.intput_el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = cls.intput_el.parent.parent.id
window.make_keyword_typeahead_tag(
"#" + parent_id,
join(settings.API_PATH, "kw_list.json"),
cls.on_select_callback,
) | [
"def",
"set_kw_typeahead_input",
"(",
"cls",
")",
":",
"# get reference to parent element",
"parent_id",
"=",
"cls",
".",
"intput_el",
".",
"parent",
".",
"id",
"if",
"\"typeahead\"",
"not",
"in",
"parent_id",
".",
"lower",
"(",
")",
":",
"parent_id",
"=",
"cls",
".",
"intput_el",
".",
"parent",
".",
"parent",
".",
"id",
"window",
".",
"make_keyword_typeahead_tag",
"(",
"\"#\"",
"+",
"parent_id",
",",
"join",
"(",
"settings",
".",
"API_PATH",
",",
"\"kw_list.json\"",
")",
",",
"cls",
".",
"on_select_callback",
",",
")"
]
| Map the typeahead input to remote dataset. | [
"Map",
"the",
"typeahead",
"input",
"to",
"remote",
"dataset",
"."
]
| python | train |
pantsbuild/pants | contrib/python/src/python/pants/contrib/python/checks/checker/common.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/python/src/python/pants/contrib/python/checks/checker/common.py#L218-L227 | def line_range(self, line_number):
"""Return a slice for the given line number"""
if line_number <= 0 or line_number > len(self.lines):
raise IndexError('NOTE: Python file line numbers are offset by 1.')
if line_number not in self.logical_lines:
return slice(line_number, line_number + 1)
else:
start, stop, _ = self.logical_lines[line_number]
return slice(start, stop) | [
"def",
"line_range",
"(",
"self",
",",
"line_number",
")",
":",
"if",
"line_number",
"<=",
"0",
"or",
"line_number",
">",
"len",
"(",
"self",
".",
"lines",
")",
":",
"raise",
"IndexError",
"(",
"'NOTE: Python file line numbers are offset by 1.'",
")",
"if",
"line_number",
"not",
"in",
"self",
".",
"logical_lines",
":",
"return",
"slice",
"(",
"line_number",
",",
"line_number",
"+",
"1",
")",
"else",
":",
"start",
",",
"stop",
",",
"_",
"=",
"self",
".",
"logical_lines",
"[",
"line_number",
"]",
"return",
"slice",
"(",
"start",
",",
"stop",
")"
]
| Return a slice for the given line number | [
"Return",
"a",
"slice",
"for",
"the",
"given",
"line",
"number"
]
| python | train |
opendatateam/udata | udata/forms/fields.py | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L55-L59 | def pre_validate(self, form):
'''Calls preprocessors before pre_validation'''
for preprocessor in self._preprocessors:
preprocessor(form, self)
super(FieldHelper, self).pre_validate(form) | [
"def",
"pre_validate",
"(",
"self",
",",
"form",
")",
":",
"for",
"preprocessor",
"in",
"self",
".",
"_preprocessors",
":",
"preprocessor",
"(",
"form",
",",
"self",
")",
"super",
"(",
"FieldHelper",
",",
"self",
")",
".",
"pre_validate",
"(",
"form",
")"
]
| Calls preprocessors before pre_validation | [
"Calls",
"preprocessors",
"before",
"pre_validation"
]
| python | train |
JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/widgets/reftrackwidget.py | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/reftrackwidget.py#L402-L411 | def replace(self, ):
"""Replace the current reftrack
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.replace(tfi) | [
"def",
"replace",
"(",
"self",
",",
")",
":",
"tfi",
"=",
"self",
".",
"get_taskfileinfo_selection",
"(",
")",
"if",
"tfi",
":",
"self",
".",
"reftrack",
".",
"replace",
"(",
"tfi",
")"
]
| Replace the current reftrack
:returns: None
:rtype: None
:raises: None | [
"Replace",
"the",
"current",
"reftrack"
]
| python | train |
neurosynth/neurosynth | neurosynth/analysis/reduce.py | https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/analysis/reduce.py#L176-L323 | def run_lda(abstracts, n_topics=50, n_words=31, n_iters=1000, alpha=None,
beta=0.001):
""" Perform topic modeling using Latent Dirichlet Allocation with the
Java toolbox MALLET.
Args:
abstracts: A pandas DataFrame with two columns ('pmid' and 'abstract')
containing article abstracts.
n_topics: Number of topics to generate. Default=50.
n_words: Number of top words to return for each topic. Default=31,
based on Poldrack et al. (2012).
n_iters: Number of iterations to run in training topic model.
Default=1000.
alpha: The Dirichlet prior on the per-document topic
distributions.
Default: 50 / n_topics, based on Poldrack et al. (2012).
beta: The Dirichlet prior on the per-topic word distribution.
Default: 0.001, based on Poldrack et al. (2012).
Returns:
weights_df: A pandas DataFrame derived from the MALLET
output-doc-topics output file. Contains the weight assigned
to each article for each topic, which can be used to select
articles for topic-based meta-analyses (accepted threshold
from Poldrack article is 0.001). [n_topics]+1 columns:
'pmid' is the first column and the following columns are
the topic names. The names of the topics match the names
in df (e.g., topic_000).
keys_df: A pandas DataFrame derived from the MALLET
output-topic-keys output file. Contains the top [n_words]
words for each topic, which can act as a summary of the
topic's content. Two columns: 'topic' and 'terms'. The
names of the topics match the names in weights (e.g.,
topic_000).
"""
if abstracts.index.name != 'pmid':
abstracts.index = abstracts['pmid']
resdir = os.path.abspath(get_resource_path())
tempdir = os.path.join(resdir, 'topic_models')
absdir = os.path.join(tempdir, 'abstracts')
if not os.path.isdir(tempdir):
os.mkdir(tempdir)
if alpha is None:
alpha = 50. / n_topics
# Check for presence of abstract files and convert if necessary
if not os.path.isdir(absdir):
print('Abstracts folder not found. Creating abstract files...')
os.mkdir(absdir)
for pmid in abstracts.index.values:
abstract = abstracts.loc[pmid]['abstract']
with open(os.path.join(absdir, str(pmid) + '.txt'), 'w') as fo:
fo.write(abstract)
# Run MALLET topic modeling
print('Generating topics...')
mallet_bin = join(dirname(dirname(__file__)),
'resources/mallet/bin/mallet')
import_str = ('{mallet} import-dir '
'--input {absdir} '
'--output {outdir}/topic-input.mallet '
'--keep-sequence '
'--remove-stopwords').format(mallet=mallet_bin,
absdir=absdir,
outdir=tempdir)
train_str = ('{mallet} train-topics '
'--input {out}/topic-input.mallet '
'--num-topics {n_topics} '
'--num-top-words {n_words} '
'--output-topic-keys {out}/topic_keys.txt '
'--output-doc-topics {out}/doc_topics.txt '
'--num-iterations {n_iters} '
'--output-model {out}/saved_model.mallet '
'--random-seed 1 '
'--alpha {alpha} '
'--beta {beta}').format(mallet=mallet_bin, out=tempdir,
n_topics=n_topics, n_words=n_words,
n_iters=n_iters,
alpha=alpha, beta=beta)
subprocess.call(import_str, shell=True)
subprocess.call(train_str, shell=True)
# Read in and convert doc_topics and topic_keys.
def clean_str(string):
return os.path.basename(os.path.splitext(string)[0])
def get_sort(lst):
return [i[0] for i in sorted(enumerate(lst), key=lambda x:x[1])]
topic_names = ['topic_{0:03d}'.format(i) for i in range(n_topics)]
# doc_topics: Topic weights for each paper.
# The conversion here is pretty ugly at the moment.
# First row should be dropped. First column is row number and can be used
# as the index.
# Second column is 'file: /full/path/to/pmid.txt' <-- Parse to get pmid.
# After that, odd columns are topic numbers and even columns are the
# weights for the topics in the preceding column. These columns are sorted
# on an individual pmid basis by the weights.
n_cols = (2 * n_topics) + 1
dt_df = pd.read_csv(os.path.join(tempdir, 'doc_topics.txt'),
delimiter='\t', skiprows=1, header=None, index_col=0)
dt_df = dt_df[dt_df.columns[:n_cols]]
# Get pmids from filenames
dt_df[1] = dt_df[1].apply(clean_str)
# Put weights (even cols) and topics (odd cols) into separate dfs.
weights_df = dt_df[dt_df.columns[2::2]]
weights_df.index = dt_df[1]
weights_df.columns = range(n_topics)
topics_df = dt_df[dt_df.columns[1::2]]
topics_df.index = dt_df[1]
topics_df.columns = range(n_topics)
# Sort columns in weights_df separately for each row using topics_df.
sorters_df = topics_df.apply(get_sort, axis=1)
weights = weights_df.as_matrix()
sorters = sorters_df.as_matrix()
# there has to be a better way to do this.
for i in range(sorters.shape[0]):
weights[i, :] = weights[i, sorters[i, :]]
# Define topic names (e.g., topic_000)
index = dt_df[1]
weights_df = pd.DataFrame(columns=topic_names, data=weights, index=index)
weights_df.index.name = 'pmid'
# topic_keys: Top [n_words] words for each topic.
keys_df = pd.read_csv(os.path.join(tempdir, 'topic_keys.txt'),
delimiter='\t', header=None, index_col=0)
# Second column is a list of the terms.
keys_df = keys_df[[2]]
keys_df.rename(columns={2: 'terms'}, inplace=True)
keys_df.index = topic_names
keys_df.index.name = 'topic'
# Remove all temporary files (abstract files, model, and outputs).
shutil.rmtree(tempdir)
# Return article topic weights and topic keys.
return weights_df, keys_df | [
"def",
"run_lda",
"(",
"abstracts",
",",
"n_topics",
"=",
"50",
",",
"n_words",
"=",
"31",
",",
"n_iters",
"=",
"1000",
",",
"alpha",
"=",
"None",
",",
"beta",
"=",
"0.001",
")",
":",
"if",
"abstracts",
".",
"index",
".",
"name",
"!=",
"'pmid'",
":",
"abstracts",
".",
"index",
"=",
"abstracts",
"[",
"'pmid'",
"]",
"resdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"get_resource_path",
"(",
")",
")",
"tempdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"resdir",
",",
"'topic_models'",
")",
"absdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'abstracts'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tempdir",
")",
":",
"os",
".",
"mkdir",
"(",
"tempdir",
")",
"if",
"alpha",
"is",
"None",
":",
"alpha",
"=",
"50.",
"/",
"n_topics",
"# Check for presence of abstract files and convert if necessary",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"absdir",
")",
":",
"print",
"(",
"'Abstracts folder not found. Creating abstract files...'",
")",
"os",
".",
"mkdir",
"(",
"absdir",
")",
"for",
"pmid",
"in",
"abstracts",
".",
"index",
".",
"values",
":",
"abstract",
"=",
"abstracts",
".",
"loc",
"[",
"pmid",
"]",
"[",
"'abstract'",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"absdir",
",",
"str",
"(",
"pmid",
")",
"+",
"'.txt'",
")",
",",
"'w'",
")",
"as",
"fo",
":",
"fo",
".",
"write",
"(",
"abstract",
")",
"# Run MALLET topic modeling",
"print",
"(",
"'Generating topics...'",
")",
"mallet_bin",
"=",
"join",
"(",
"dirname",
"(",
"dirname",
"(",
"__file__",
")",
")",
",",
"'resources/mallet/bin/mallet'",
")",
"import_str",
"=",
"(",
"'{mallet} import-dir '",
"'--input {absdir} '",
"'--output {outdir}/topic-input.mallet '",
"'--keep-sequence '",
"'--remove-stopwords'",
")",
".",
"format",
"(",
"mallet",
"=",
"mallet_bin",
",",
"absdir",
"=",
"absdir",
",",
"outdir",
"=",
"tempdir",
")",
"train_str",
"=",
"(",
"'{mallet} train-topics '",
"'--input {out}/topic-input.mallet '",
"'--num-topics {n_topics} '",
"'--num-top-words {n_words} '",
"'--output-topic-keys {out}/topic_keys.txt '",
"'--output-doc-topics {out}/doc_topics.txt '",
"'--num-iterations {n_iters} '",
"'--output-model {out}/saved_model.mallet '",
"'--random-seed 1 '",
"'--alpha {alpha} '",
"'--beta {beta}'",
")",
".",
"format",
"(",
"mallet",
"=",
"mallet_bin",
",",
"out",
"=",
"tempdir",
",",
"n_topics",
"=",
"n_topics",
",",
"n_words",
"=",
"n_words",
",",
"n_iters",
"=",
"n_iters",
",",
"alpha",
"=",
"alpha",
",",
"beta",
"=",
"beta",
")",
"subprocess",
".",
"call",
"(",
"import_str",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"call",
"(",
"train_str",
",",
"shell",
"=",
"True",
")",
"# Read in and convert doc_topics and topic_keys.",
"def",
"clean_str",
"(",
"string",
")",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"string",
")",
"[",
"0",
"]",
")",
"def",
"get_sort",
"(",
"lst",
")",
":",
"return",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"sorted",
"(",
"enumerate",
"(",
"lst",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
"]",
"topic_names",
"=",
"[",
"'topic_{0:03d}'",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"n_topics",
")",
"]",
"# doc_topics: Topic weights for each paper.",
"# The conversion here is pretty ugly at the moment.",
"# First row should be dropped. First column is row number and can be used",
"# as the index.",
"# Second column is 'file: /full/path/to/pmid.txt' <-- Parse to get pmid.",
"# After that, odd columns are topic numbers and even columns are the",
"# weights for the topics in the preceding column. These columns are sorted",
"# on an individual pmid basis by the weights.",
"n_cols",
"=",
"(",
"2",
"*",
"n_topics",
")",
"+",
"1",
"dt_df",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'doc_topics.txt'",
")",
",",
"delimiter",
"=",
"'\\t'",
",",
"skiprows",
"=",
"1",
",",
"header",
"=",
"None",
",",
"index_col",
"=",
"0",
")",
"dt_df",
"=",
"dt_df",
"[",
"dt_df",
".",
"columns",
"[",
":",
"n_cols",
"]",
"]",
"# Get pmids from filenames",
"dt_df",
"[",
"1",
"]",
"=",
"dt_df",
"[",
"1",
"]",
".",
"apply",
"(",
"clean_str",
")",
"# Put weights (even cols) and topics (odd cols) into separate dfs.",
"weights_df",
"=",
"dt_df",
"[",
"dt_df",
".",
"columns",
"[",
"2",
":",
":",
"2",
"]",
"]",
"weights_df",
".",
"index",
"=",
"dt_df",
"[",
"1",
"]",
"weights_df",
".",
"columns",
"=",
"range",
"(",
"n_topics",
")",
"topics_df",
"=",
"dt_df",
"[",
"dt_df",
".",
"columns",
"[",
"1",
":",
":",
"2",
"]",
"]",
"topics_df",
".",
"index",
"=",
"dt_df",
"[",
"1",
"]",
"topics_df",
".",
"columns",
"=",
"range",
"(",
"n_topics",
")",
"# Sort columns in weights_df separately for each row using topics_df.",
"sorters_df",
"=",
"topics_df",
".",
"apply",
"(",
"get_sort",
",",
"axis",
"=",
"1",
")",
"weights",
"=",
"weights_df",
".",
"as_matrix",
"(",
")",
"sorters",
"=",
"sorters_df",
".",
"as_matrix",
"(",
")",
"# there has to be a better way to do this.",
"for",
"i",
"in",
"range",
"(",
"sorters",
".",
"shape",
"[",
"0",
"]",
")",
":",
"weights",
"[",
"i",
",",
":",
"]",
"=",
"weights",
"[",
"i",
",",
"sorters",
"[",
"i",
",",
":",
"]",
"]",
"# Define topic names (e.g., topic_000)",
"index",
"=",
"dt_df",
"[",
"1",
"]",
"weights_df",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"topic_names",
",",
"data",
"=",
"weights",
",",
"index",
"=",
"index",
")",
"weights_df",
".",
"index",
".",
"name",
"=",
"'pmid'",
"# topic_keys: Top [n_words] words for each topic.",
"keys_df",
"=",
"pd",
".",
"read_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'topic_keys.txt'",
")",
",",
"delimiter",
"=",
"'\\t'",
",",
"header",
"=",
"None",
",",
"index_col",
"=",
"0",
")",
"# Second column is a list of the terms.",
"keys_df",
"=",
"keys_df",
"[",
"[",
"2",
"]",
"]",
"keys_df",
".",
"rename",
"(",
"columns",
"=",
"{",
"2",
":",
"'terms'",
"}",
",",
"inplace",
"=",
"True",
")",
"keys_df",
".",
"index",
"=",
"topic_names",
"keys_df",
".",
"index",
".",
"name",
"=",
"'topic'",
"# Remove all temporary files (abstract files, model, and outputs).",
"shutil",
".",
"rmtree",
"(",
"tempdir",
")",
"# Return article topic weights and topic keys.",
"return",
"weights_df",
",",
"keys_df"
]
| Perform topic modeling using Latent Dirichlet Allocation with the
Java toolbox MALLET.
Args:
abstracts: A pandas DataFrame with two columns ('pmid' and 'abstract')
containing article abstracts.
n_topics: Number of topics to generate. Default=50.
n_words: Number of top words to return for each topic. Default=31,
based on Poldrack et al. (2012).
n_iters: Number of iterations to run in training topic model.
Default=1000.
alpha: The Dirichlet prior on the per-document topic
distributions.
Default: 50 / n_topics, based on Poldrack et al. (2012).
beta: The Dirichlet prior on the per-topic word distribution.
Default: 0.001, based on Poldrack et al. (2012).
Returns:
weights_df: A pandas DataFrame derived from the MALLET
output-doc-topics output file. Contains the weight assigned
to each article for each topic, which can be used to select
articles for topic-based meta-analyses (accepted threshold
from Poldrack article is 0.001). [n_topics]+1 columns:
'pmid' is the first column and the following columns are
the topic names. The names of the topics match the names
in df (e.g., topic_000).
keys_df: A pandas DataFrame derived from the MALLET
output-topic-keys output file. Contains the top [n_words]
words for each topic, which can act as a summary of the
topic's content. Two columns: 'topic' and 'terms'. The
names of the topics match the names in weights (e.g.,
topic_000). | [
"Perform",
"topic",
"modeling",
"using",
"Latent",
"Dirichlet",
"Allocation",
"with",
"the",
"Java",
"toolbox",
"MALLET",
"."
]
| python | test |
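A minimal call sketch for the topic-modeling helper above. It assumes MALLET is present under the package's resources directory (the function shells out to it) and uses a tiny hypothetical abstracts table; the column layout follows the docstring, and the import path follows this record's file path.

import pandas as pd
from neurosynth.analysis import reduce as ns_reduce   # import path taken from the record's file path

# Hypothetical two-article corpus with the documented 'pmid' and 'abstract' columns.
abstracts = pd.DataFrame({
    'pmid': [111111, 222222],
    'abstract': [
        'Functional MRI of working memory in prefrontal cortex ...',
        'Resting-state connectivity of the default mode network ...',
    ],
})

weights_df, keys_df = ns_reduce.run_lda(abstracts, n_topics=5, n_words=10, n_iters=100)
# weights_df: per-article topic weights, index 'pmid', columns topic_000 ... topic_004
# keys_df:    top words per topic, index 'topic', single 'terms' column
print(keys_df.head())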
senaite/senaite.core | bika/lims/adapters/identifiers.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/adapters/identifiers.py#L39-L45 | def IdentifiersIndexer(instance):
"""Return a list of unique Identifier strings
This populates the Identifiers Keyword index, but with some
replacements to prevent the word-splitter etc from taking effect.
"""
identifiers = instance.Schema()['Identifiers'].get(instance)
return [safe_unicode(i['Identifier']) for i in identifiers] | [
"def",
"IdentifiersIndexer",
"(",
"instance",
")",
":",
"identifiers",
"=",
"instance",
".",
"Schema",
"(",
")",
"[",
"'Identifiers'",
"]",
".",
"get",
"(",
"instance",
")",
"return",
"[",
"safe_unicode",
"(",
"i",
"[",
"'Identifier'",
"]",
")",
"for",
"i",
"in",
"identifiers",
"]"
]
| Return a list of unique Identifier strings
This populates the Identifiers Keyword index, but with some
replacements to prevent the word-splitter etc from taking effect. | [
"Return",
"a",
"list",
"of",
"unique",
"Identifier",
"strings",
"This",
"populates",
"the",
"Identifiers",
"Keyword",
"index",
"but",
"with",
"some",
"replacements",
"to",
"prevent",
"the",
"word",
"-",
"splitter",
"etc",
"from",
"taking",
"effect",
"."
]
| python | train |
Fantomas42/django-blog-zinnia | zinnia/sitemaps.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/sitemaps.py#L61-L69 | def get_queryset(self):
"""
Build a queryset of items with published entries and annotated
with the number of entries and the latest modification date.
"""
return self.model.published.annotate(
count_entries_published=Count('entries')).annotate(
last_update=Max('entries__last_update')).order_by(
'-count_entries_published', '-last_update', '-pk') | [
"def",
"get_queryset",
"(",
"self",
")",
":",
"return",
"self",
".",
"model",
".",
"published",
".",
"annotate",
"(",
"count_entries_published",
"=",
"Count",
"(",
"'entries'",
")",
")",
".",
"annotate",
"(",
"last_update",
"=",
"Max",
"(",
"'entries__last_update'",
")",
")",
".",
"order_by",
"(",
"'-count_entries_published'",
",",
"'-last_update'",
",",
"'-pk'",
")"
]
| Build a queryset of items with published entries and annotated
with the number of entries and the latest modification date. | [
"Build",
"a",
"queryset",
"of",
"items",
"with",
"published",
"entries",
"and",
"annotated",
"with",
"the",
"number",
"of",
"entries",
"and",
"the",
"latest",
"modification",
"date",
"."
]
| python | train |
miLibris/flask-rest-jsonapi | flask_rest_jsonapi/data_layers/alchemy.py | https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/data_layers/alchemy.py#L197-L254 | def create_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Create a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return boolean: True if relationship have changed else False
"""
self.before_create_relationship(json_data, relationship_field, related_id_field, view_kwargs)
obj = self.get_object(view_kwargs)
if obj is None:
url_field = getattr(self, 'url_field', 'id')
filter_value = view_kwargs[url_field]
raise ObjectNotFound('{}: {} not found'.format(self.model.__name__, filter_value),
source={'parameter': url_field})
if not hasattr(obj, relationship_field):
raise RelationNotFound("{} has no attribute {}".format(obj.__class__.__name__, relationship_field))
related_model = getattr(obj.__class__, relationship_field).property.mapper.class_
updated = False
if isinstance(json_data['data'], list):
obj_ids = {str(getattr(obj__, related_id_field)) for obj__ in getattr(obj, relationship_field)}
for obj_ in json_data['data']:
if obj_['id'] not in obj_ids:
getattr(obj,
relationship_field).append(self.get_related_object(related_model, related_id_field, obj_))
updated = True
else:
related_object = None
if json_data['data'] is not None:
related_object = self.get_related_object(related_model, related_id_field, json_data['data'])
obj_id = getattr(getattr(obj, relationship_field), related_id_field, None)
new_obj_id = getattr(related_object, related_id_field, None)
if obj_id != new_obj_id:
setattr(obj, relationship_field, related_object)
updated = True
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Create relationship error: " + str(e))
self.after_create_relationship(obj, updated, json_data, relationship_field, related_id_field, view_kwargs)
return obj, updated | [
"def",
"create_relationship",
"(",
"self",
",",
"json_data",
",",
"relationship_field",
",",
"related_id_field",
",",
"view_kwargs",
")",
":",
"self",
".",
"before_create_relationship",
"(",
"json_data",
",",
"relationship_field",
",",
"related_id_field",
",",
"view_kwargs",
")",
"obj",
"=",
"self",
".",
"get_object",
"(",
"view_kwargs",
")",
"if",
"obj",
"is",
"None",
":",
"url_field",
"=",
"getattr",
"(",
"self",
",",
"'url_field'",
",",
"'id'",
")",
"filter_value",
"=",
"view_kwargs",
"[",
"url_field",
"]",
"raise",
"ObjectNotFound",
"(",
"'{}: {} not found'",
".",
"format",
"(",
"self",
".",
"model",
".",
"__name__",
",",
"filter_value",
")",
",",
"source",
"=",
"{",
"'parameter'",
":",
"url_field",
"}",
")",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"relationship_field",
")",
":",
"raise",
"RelationNotFound",
"(",
"\"{} has no attribute {}\"",
".",
"format",
"(",
"obj",
".",
"__class__",
".",
"__name__",
",",
"relationship_field",
")",
")",
"related_model",
"=",
"getattr",
"(",
"obj",
".",
"__class__",
",",
"relationship_field",
")",
".",
"property",
".",
"mapper",
".",
"class_",
"updated",
"=",
"False",
"if",
"isinstance",
"(",
"json_data",
"[",
"'data'",
"]",
",",
"list",
")",
":",
"obj_ids",
"=",
"{",
"str",
"(",
"getattr",
"(",
"obj__",
",",
"related_id_field",
")",
")",
"for",
"obj__",
"in",
"getattr",
"(",
"obj",
",",
"relationship_field",
")",
"}",
"for",
"obj_",
"in",
"json_data",
"[",
"'data'",
"]",
":",
"if",
"obj_",
"[",
"'id'",
"]",
"not",
"in",
"obj_ids",
":",
"getattr",
"(",
"obj",
",",
"relationship_field",
")",
".",
"append",
"(",
"self",
".",
"get_related_object",
"(",
"related_model",
",",
"related_id_field",
",",
"obj_",
")",
")",
"updated",
"=",
"True",
"else",
":",
"related_object",
"=",
"None",
"if",
"json_data",
"[",
"'data'",
"]",
"is",
"not",
"None",
":",
"related_object",
"=",
"self",
".",
"get_related_object",
"(",
"related_model",
",",
"related_id_field",
",",
"json_data",
"[",
"'data'",
"]",
")",
"obj_id",
"=",
"getattr",
"(",
"getattr",
"(",
"obj",
",",
"relationship_field",
")",
",",
"related_id_field",
",",
"None",
")",
"new_obj_id",
"=",
"getattr",
"(",
"related_object",
",",
"related_id_field",
",",
"None",
")",
"if",
"obj_id",
"!=",
"new_obj_id",
":",
"setattr",
"(",
"obj",
",",
"relationship_field",
",",
"related_object",
")",
"updated",
"=",
"True",
"try",
":",
"self",
".",
"session",
".",
"commit",
"(",
")",
"except",
"JsonApiException",
"as",
"e",
":",
"self",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"session",
".",
"rollback",
"(",
")",
"raise",
"JsonApiException",
"(",
"\"Create relationship error: \"",
"+",
"str",
"(",
"e",
")",
")",
"self",
".",
"after_create_relationship",
"(",
"obj",
",",
"updated",
",",
"json_data",
",",
"relationship_field",
",",
"related_id_field",
",",
"view_kwargs",
")",
"return",
"obj",
",",
"updated"
]
| Create a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return boolean: True if relationship has changed else False | [
"Create",
"a",
"relationship"
]
| python | train |
josiah-wolf-oberholtzer/uqbar | uqbar/io/__init__.py | https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/io/__init__.py#L146-L186 | def write(
contents: str,
path: Union[str, pathlib.Path],
verbose: bool = False,
logger_func=None,
) -> bool:
"""
Writes ``contents`` to ``path``.
Checks if ``path`` already exists and only writes out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
:param verbose: whether to print output
"""
print_func = logger_func or print
path = pathlib.Path(path)
if path.exists():
with path.open("r") as file_pointer:
old_contents = file_pointer.read()
if old_contents == contents:
if verbose:
print_func("preserved {}".format(path))
return False
else:
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("rewrote {}".format(path))
return True
elif not path.exists():
if not path.parent.exists():
path.parent.mkdir(parents=True)
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("wrote {}".format(path))
return True | [
"def",
"write",
"(",
"contents",
":",
"str",
",",
"path",
":",
"Union",
"[",
"str",
",",
"pathlib",
".",
"Path",
"]",
",",
"verbose",
":",
"bool",
"=",
"False",
",",
"logger_func",
"=",
"None",
",",
")",
"->",
"bool",
":",
"print_func",
"=",
"logger_func",
"or",
"print",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"path",
")",
"if",
"path",
".",
"exists",
"(",
")",
":",
"with",
"path",
".",
"open",
"(",
"\"r\"",
")",
"as",
"file_pointer",
":",
"old_contents",
"=",
"file_pointer",
".",
"read",
"(",
")",
"if",
"old_contents",
"==",
"contents",
":",
"if",
"verbose",
":",
"print_func",
"(",
"\"preserved {}\"",
".",
"format",
"(",
"path",
")",
")",
"return",
"False",
"else",
":",
"with",
"path",
".",
"open",
"(",
"\"w\"",
")",
"as",
"file_pointer",
":",
"file_pointer",
".",
"write",
"(",
"contents",
")",
"if",
"verbose",
":",
"print_func",
"(",
"\"rewrote {}\"",
".",
"format",
"(",
"path",
")",
")",
"return",
"True",
"elif",
"not",
"path",
".",
"exists",
"(",
")",
":",
"if",
"not",
"path",
".",
"parent",
".",
"exists",
"(",
")",
":",
"path",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
")",
"with",
"path",
".",
"open",
"(",
"\"w\"",
")",
"as",
"file_pointer",
":",
"file_pointer",
".",
"write",
"(",
"contents",
")",
"if",
"verbose",
":",
"print_func",
"(",
"\"wrote {}\"",
".",
"format",
"(",
"path",
")",
")",
"return",
"True"
]
| Writes ``contents`` to ``path``.
Checks if ``path`` already exists and only writes out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
:param verbose: whether to print output | [
"Writes",
"contents",
"to",
"path",
"."
]
| python | train |
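A short usage sketch of the idempotent write() helper above; the file path is an arbitrary example, and the second call exercises the "preserved" branch shown in the record.

    import pathlib
    from uqbar.io import write

    target = pathlib.Path('build/notes.txt')   # arbitrary example path
    write('hello\n', target, verbose=True)     # first call prints 'wrote build/notes.txt'
    write('hello\n', target, verbose=True)     # unchanged contents print 'preserved build/notes.txt'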
ampl/amplpy | amplpy/entity.py | https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/entity.py#L177-L205 | def setValues(self, data):
"""
Set the values of this entity to the corresponding values of a
DataFrame indexed over the same sets (or a subset).
This function assigns the values in the first data column of
the passed dataframe to the entity the function is called from.
In particular, the statement:
.. code-block:: python
x.setValues(y.getValues())
is semantically equivalent to the AMPL statement:
.. code-block:: ampl
let {s in S} x[s] := y[s];
Args:
data: The data to set the entity to.
"""
if isinstance(data, DataFrame):
self._impl.setValuesDf(data._impl)
else:
if pd is not None and isinstance(data, (pd.DataFrame, pd.Series)):
df = DataFrame.fromPandas(data)
self._impl.setValuesDf(df._impl)
return
raise TypeError | [
"def",
"setValues",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"DataFrame",
")",
":",
"self",
".",
"_impl",
".",
"setValuesDf",
"(",
"data",
".",
"_impl",
")",
"else",
":",
"if",
"pd",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"data",
",",
"(",
"pd",
".",
"DataFrame",
",",
"pd",
".",
"Series",
")",
")",
":",
"df",
"=",
"DataFrame",
".",
"fromPandas",
"(",
"data",
")",
"self",
".",
"_impl",
".",
"setValuesDf",
"(",
"df",
".",
"_impl",
")",
"return",
"raise",
"TypeError"
]
Set the values of this entity to the corresponding values of a
DataFrame indexed over the same sets (or a subset).
This function assigns the values in the first data column of
the passed dataframe to the entity the function is called from.
In particular, the statement:
.. code-block:: python
x.setValues(y.getValues())
is semantically equivalent to the AMPL statement:
.. code-block:: ampl
let {s in S} x[s] := y[s];
Args:
data: The data to set the entity to. | [
"Set",
"the",
"values",
"of",
"this",
"entiy",
"to",
"the",
"correponding",
"values",
"of",
"a",
"DataFrame",
"indexed",
"over",
"the",
"same",
"sets",
"(",
"or",
"a",
"subset",
")",
".",
"This",
"function",
"assigns",
"the",
"values",
"in",
"the",
"first",
"data",
"column",
"of",
"the",
"passed",
"dataframe",
"to",
"the",
"entity",
"the",
"function",
"is",
"called",
"from",
".",
"In",
"particular",
"the",
"statement",
":"
]
| python | train |
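A hedged sketch of the copy-values pattern the setValues docstring above describes. The model file and variable names are assumptions; read(), getVariable() and getValues() are standard amplpy calls, but the exact model is left abstract.

    from amplpy import AMPL

    ampl = AMPL()
    ampl.read('model.mod')            # assumed model declaring var x{S} and var y{S}
    x = ampl.getVariable('x')
    y = ampl.getVariable('y')
    # Same effect as the AMPL statement: let {s in S} x[s] := y[s];
    x.setValues(y.getValues())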
ontio/ontology-python-sdk | ontology/smart_contract/neo_contract/oep4.py | https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/neo_contract/oep4.py#L89-L102 | def get_total_supply(self) -> int:
"""
This interface is used to call the TotalSupply method in oep4
that returns the total supply of the oep4 token.
:return: the total supply of the oep4 token.
"""
func = InvokeFunction('totalSupply')
response = self.__sdk.get_network().send_neo_vm_transaction_pre_exec(self.__hex_contract_address, None, func)
try:
total_supply = ContractDataParser.to_int(response['Result'])
except SDKException:
total_supply = 0
return total_supply | [
"def",
"get_total_supply",
"(",
"self",
")",
"->",
"int",
":",
"func",
"=",
"InvokeFunction",
"(",
"'totalSupply'",
")",
"response",
"=",
"self",
".",
"__sdk",
".",
"get_network",
"(",
")",
".",
"send_neo_vm_transaction_pre_exec",
"(",
"self",
".",
"__hex_contract_address",
",",
"None",
",",
"func",
")",
"try",
":",
"total_supply",
"=",
"ContractDataParser",
".",
"to_int",
"(",
"response",
"[",
"'Result'",
"]",
")",
"except",
"SDKException",
":",
"total_supply",
"=",
"0",
"return",
"total_supply"
]
| This interface is used to call the TotalSupply method in oep4
that returns the total supply of the oep4 token.
:return: the total supply of the oep4 token. | [
"This",
"interface",
"is",
"used",
"to",
"call",
"the",
"TotalSupply",
"method",
"in",
"ope4",
"that",
"return",
"the",
"total",
"supply",
"of",
"the",
"oep4",
"token",
"."
]
| python | train |
kejbaly2/metrique | metrique/sqlalchemy.py | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L849-L863 | def index_list(self):
'''
List all cube indexes
:param collection: cube name
:param owner: username of cube owner
'''
logger.info('Listing indexes')
_ix = {}
_i = self.inspector
for tbl in _i.get_table_names():
_ix.setdefault(tbl, [])
for ix in _i.get_indexes(tbl):
_ix[tbl].append(ix)
return _ix | [
"def",
"index_list",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Listing indexes'",
")",
"_ix",
"=",
"{",
"}",
"_i",
"=",
"self",
".",
"inspector",
"for",
"tbl",
"in",
"_i",
".",
"get_table_names",
"(",
")",
":",
"_ix",
".",
"setdefault",
"(",
"tbl",
",",
"[",
"]",
")",
"for",
"ix",
"in",
"_i",
".",
"get_indexes",
"(",
"tbl",
")",
":",
"_ix",
"[",
"tbl",
"]",
".",
"append",
"(",
"ix",
")",
"return",
"_ix"
]
| List all cube indexes
:param collection: cube name
:param owner: username of cube owner | [
"List",
"all",
"cube",
"indexes"
]
| python | train |
quantumlib/Cirq | cirq/devices/noise_model.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/devices/noise_model.py#L94-L111 | def noisy_operation(self, operation: 'cirq.Operation') -> 'cirq.OP_TREE':
"""Adds noise to an individual operation.
Args:
operation: The operation to make noisy.
Returns:
An OP_TREE corresponding to the noisy operations implementing the
noisy version of the given operation.
"""
if not hasattr(self.noisy_moments, '_not_overridden'):
return self.noisy_moments([ops.Moment([operation])],
operation.qubits)
if not hasattr(self.noisy_moment, '_not_overridden'):
return self.noisy_moment(ops.Moment([operation]), operation.qubits)
assert False, 'Should be unreachable.' | [
"def",
"noisy_operation",
"(",
"self",
",",
"operation",
":",
"'cirq.Operation'",
")",
"->",
"'cirq.OP_TREE'",
":",
"if",
"not",
"hasattr",
"(",
"self",
".",
"noisy_moments",
",",
"'_not_overridden'",
")",
":",
"return",
"self",
".",
"noisy_moments",
"(",
"[",
"ops",
".",
"Moment",
"(",
"[",
"operation",
"]",
")",
"]",
",",
"operation",
".",
"qubits",
")",
"if",
"not",
"hasattr",
"(",
"self",
".",
"noisy_moment",
",",
"'_not_overridden'",
")",
":",
"return",
"self",
".",
"noisy_moment",
"(",
"ops",
".",
"Moment",
"(",
"[",
"operation",
"]",
")",
",",
"operation",
".",
"qubits",
")",
"assert",
"False",
",",
"'Should be unreachable.'"
]
| Adds noise to an individual operation.
Args:
operation: The operation to make noisy.
Returns:
An OP_TREE corresponding to the noisy operations implementing the
noisy version of the given operation. | [
"Adds",
"noise",
"to",
"an",
"individual",
"operation",
"."
]
| python | train |
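A hedged sketch of how a concrete noise model reaches the dispatch above: the subclass overrides noisy_moment, so noisy_operation wraps the single operation in a Moment and forwards it. The channel, probability and qubit are illustrative choices.

    import cirq

    class BitFlipNoise(cirq.devices.NoiseModel):
        # Appends a small bit-flip channel after every moment (illustrative).
        def noisy_moment(self, moment, system_qubits):
            flips = cirq.Moment([cirq.bit_flip(0.01).on(q) for q in system_qubits])
            return [moment, flips]

    q = cirq.LineQubit(0)
    noisy_ops = BitFlipNoise().noisy_operation(cirq.X(q))  # falls through to noisy_moment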
255BITS/hyperchamber | hyperchamber/selector.py | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L29-L32 | def set(self, key, value):
"""Sets a hyperparameter. Can be used to set an array of hyperparameters."""
self.store[key]=value
return self.store | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"store",
"[",
"key",
"]",
"=",
"value",
"return",
"self",
".",
"store"
]
| Sets a hyperparameter. Can be used to set an array of hyperparameters. | [
"Sets",
"a",
"hyperparameter",
".",
"Can",
"be",
"used",
"to",
"set",
"an",
"array",
"of",
"hyperparameters",
"."
]
| python | train |
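A tiny illustration of the set() method above. The record shows only the method, so 'selector' below stands in for an instance of the class that defines it (an assumption); keys and values are arbitrary.

    # 'selector' is an assumed instance of the class carrying set() and its store dict.
    selector.set('learning_rate', 0.001)           # single value
    store = selector.set('batch_size', [32, 64])   # an array of candidate values
    # Each call writes into the internal store dict and returns it.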
Valuehorizon/valuehorizon-companies | companies/models.py | https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L242-L249 | def get_immediate_children_ownership(self):
"""
Return all direct subsidiaries of this company AS OWNERSHIP OBJECTS.
Excludes subsidiaries of subsidiaries.
"""
ownership = Ownership.objects.filter(parent=self).select_related('child', 'child__country')
return ownership | [
"def",
"get_immediate_children_ownership",
"(",
"self",
")",
":",
"ownership",
"=",
"Ownership",
".",
"objects",
".",
"filter",
"(",
"parent",
"=",
"self",
")",
".",
"select_related",
"(",
"'child'",
",",
"'child__country'",
")",
"return",
"ownership"
]
| Return all direct subsidiaries of this company AS OWNERSHIP OBJECTS.
Excludes subsidiaries of subsidiaries. | [
"Return",
"all",
"direct",
"subsidiaries",
"of",
"this",
"company",
"AS",
"OWNERSHIP",
"OBJECTS",
".",
"Excludes",
"subsidiaries",
"of",
"subsidiaries",
"."
]
| python | train |
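A hedged Django shell sketch of consuming the ownership queryset above; the Company model name and its lookup are assumptions, while the parent/child and child country attributes follow the select_related() call in the record.

    parent = Company.objects.get(name='Acme Holdings')   # assumed model and lookup
    for ownership in parent.get_immediate_children_ownership():
        # Each row is an Ownership object linking this parent to one child company.
        print(ownership.child.name, ownership.child.country)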
wonambi-python/wonambi | wonambi/widgets/notes.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L878-L908 | def find_row(self, ev_start, ev_end):
"""Highlight event row in table from start and end time.
Parameters
----------
ev_start : float
start time, in seconds from record start
ev_end : float
end time, in seconds from record start
Returns
-------
int
index of event row in idx_annot_list QTableWidget
"""
all_starts = self.idx_annot_list.property('start')
all_ends = self.idx_annot_list.property('end')
for i, (start, end) in enumerate(zip(all_starts, all_ends)):
if start == ev_start and end == ev_end:
return i
for i, start in enumerate(all_starts):
if start == ev_start:
return i
for i, end in enumerate(all_ends):
if end == ev_end:
return i
raise ValueError | [
"def",
"find_row",
"(",
"self",
",",
"ev_start",
",",
"ev_end",
")",
":",
"all_starts",
"=",
"self",
".",
"idx_annot_list",
".",
"property",
"(",
"'start'",
")",
"all_ends",
"=",
"self",
".",
"idx_annot_list",
".",
"property",
"(",
"'end'",
")",
"for",
"i",
",",
"(",
"start",
",",
"end",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"all_starts",
",",
"all_ends",
")",
")",
":",
"if",
"start",
"==",
"ev_start",
"and",
"end",
"==",
"ev_end",
":",
"return",
"i",
"for",
"i",
",",
"start",
"in",
"enumerate",
"(",
"all_starts",
")",
":",
"if",
"start",
"==",
"ev_start",
":",
"return",
"i",
"for",
"i",
",",
"end",
"in",
"enumerate",
"(",
"all_ends",
")",
":",
"if",
"end",
"==",
"ev_end",
":",
"return",
"i",
"raise",
"ValueError"
]
| Highlight event row in table from start and end time.
Parameters
----------
ev_start : float
start time, in seconds from record start
ev_end : float
end time, in seconds from record start
Returns
-------
int
index of event row in idx_annot_list QTableWidget | [
"Highlight",
"event",
"row",
"in",
"table",
"from",
"start",
"and",
"end",
"time",
".",
"Parameters",
"----------",
"ev_start",
":",
"float",
"start",
"time",
"in",
"seconds",
"from",
"record",
"start",
"ev_end",
":",
"float",
"end",
"time",
"in",
"seconds",
"from",
"record",
"start",
"Returns",
"-------",
"int",
"index",
"of",
"event",
"row",
"in",
"idx_annot_list",
"QTableWidget"
]
| python | train |
creare-com/pydem | pydem/reader/my_types.py | https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/reader/my_types.py#L150-L159 | def grid_coords_from_corners(upper_left_corner, lower_right_corner, size):
''' Points are the outer edges of the UL and LR pixels. Size is rows, columns.
GC projection type is taken from Points. '''
assert upper_left_corner.wkt == lower_right_corner.wkt
geotransform = np.array([upper_left_corner.lon, -(upper_left_corner.lon - lower_right_corner.lon) / float(size[1]), 0,
upper_left_corner.lat, 0, -(upper_left_corner.lat - lower_right_corner.lat) / float(size[0])])
return GridCoordinates(geotransform=geotransform,
wkt=upper_left_corner.wkt,
y_size=size[0],
x_size=size[1]) | [
"def",
"grid_coords_from_corners",
"(",
"upper_left_corner",
",",
"lower_right_corner",
",",
"size",
")",
":",
"assert",
"upper_left_corner",
".",
"wkt",
"==",
"lower_right_corner",
".",
"wkt",
"geotransform",
"=",
"np",
".",
"array",
"(",
"[",
"upper_left_corner",
".",
"lon",
",",
"-",
"(",
"upper_left_corner",
".",
"lon",
"-",
"lower_right_corner",
".",
"lon",
")",
"/",
"float",
"(",
"size",
"[",
"1",
"]",
")",
",",
"0",
",",
"upper_left_corner",
".",
"lat",
",",
"0",
",",
"-",
"(",
"upper_left_corner",
".",
"lat",
"-",
"lower_right_corner",
".",
"lat",
")",
"/",
"float",
"(",
"size",
"[",
"0",
"]",
")",
"]",
")",
"return",
"GridCoordinates",
"(",
"geotransform",
"=",
"geotransform",
",",
"wkt",
"=",
"upper_left_corner",
".",
"wkt",
",",
"y_size",
"=",
"size",
"[",
"0",
"]",
",",
"x_size",
"=",
"size",
"[",
"1",
"]",
")"
]
| Points are the outer edges of the UL and LR pixels. Size is rows, columns.
GC projection type is taken from Points. | [
"Points",
"are",
"the",
"outer",
"edges",
"of",
"the",
"UL",
"and",
"LR",
"pixels",
".",
"Size",
"is",
"rows",
"columns",
".",
"GC",
"projection",
"type",
"is",
"taken",
"from",
"Points",
"."
]
| python | train |
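A sketch of calling grid_coords_from_corners() above. The corner arguments only need lon, lat and wkt attributes, so a namedtuple stands in for whatever point type the module really uses (an assumption), and the WKT string and coordinates are illustrative.

    from collections import namedtuple
    from pydem.reader.my_types import grid_coords_from_corners

    Corner = namedtuple('Corner', ['lon', 'lat', 'wkt'])     # stand-in point type
    wkt = 'GEOGCS[...]'                                      # illustrative projection WKT
    ul = Corner(lon=-105.0, lat=40.0, wkt=wkt)               # outer edge of the UL pixel
    lr = Corner(lon=-104.0, lat=39.0, wkt=wkt)               # outer edge of the LR pixel
    gc = grid_coords_from_corners(ul, lr, size=(1200, 1200))  # size is (rows, columns)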
astropy/photutils | photutils/segmentation/properties.py | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L931-L945 | def area(self):
"""
The total unmasked area of the source segment in units of
pixels**2.
Note that the source area may be smaller than its segment area
if a mask is input to `SourceProperties` or `source_properties`,
or if the ``data`` within the segment contains invalid values
(e.g. NaN or infs).
"""
if self._is_completely_masked:
return np.nan * u.pix**2
else:
return len(self.values) * u.pix**2 | [
"def",
"area",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_completely_masked",
":",
"return",
"np",
".",
"nan",
"*",
"u",
".",
"pix",
"**",
"2",
"else",
":",
"return",
"len",
"(",
"self",
".",
"values",
")",
"*",
"u",
".",
"pix",
"**",
"2"
]
| The total unmasked area of the source segment in units of
pixels**2.
Note that the source area may be smaller than its segment area
if a mask is input to `SourceProperties` or `source_properties`,
or if the ``data`` within the segment contains invalid values
(e.g. NaN or infs). | [
"The",
"total",
"unmasked",
"area",
"of",
"the",
"source",
"segment",
"in",
"units",
"of",
"pixels",
"**",
"2",
"."
]
| python | train |
ValvePython/steam | steam/steamid.py | https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/steamid.py#L145-L171 | def as_steam3(self):
"""
:return: steam3 format (e.g ``[U:1:1234]``)
:rtype: :class:`str`
"""
typechar = str(ETypeChar(self.type))
instance = None
if self.type in (EType.AnonGameServer, EType.Multiseat):
instance = self.instance
elif self.type == EType.Individual:
if self.instance != 1:
instance = self.instance
elif self.type == EType.Chat:
if self.instance & EInstanceFlag.Clan:
typechar = 'c'
elif self.instance & EInstanceFlag.Lobby:
typechar = 'L'
else:
typechar = 'T'
parts = [typechar, int(self.universe), self.id]
if instance is not None:
parts.append(instance)
return '[%s]' % (':'.join(map(str, parts))) | [
"def",
"as_steam3",
"(",
"self",
")",
":",
"typechar",
"=",
"str",
"(",
"ETypeChar",
"(",
"self",
".",
"type",
")",
")",
"instance",
"=",
"None",
"if",
"self",
".",
"type",
"in",
"(",
"EType",
".",
"AnonGameServer",
",",
"EType",
".",
"Multiseat",
")",
":",
"instance",
"=",
"self",
".",
"instance",
"elif",
"self",
".",
"type",
"==",
"EType",
".",
"Individual",
":",
"if",
"self",
".",
"instance",
"!=",
"1",
":",
"instance",
"=",
"self",
".",
"instance",
"elif",
"self",
".",
"type",
"==",
"EType",
".",
"Chat",
":",
"if",
"self",
".",
"instance",
"&",
"EInstanceFlag",
".",
"Clan",
":",
"typechar",
"=",
"'c'",
"elif",
"self",
".",
"instance",
"&",
"EInstanceFlag",
".",
"Lobby",
":",
"typechar",
"=",
"'L'",
"else",
":",
"typechar",
"=",
"'T'",
"parts",
"=",
"[",
"typechar",
",",
"int",
"(",
"self",
".",
"universe",
")",
",",
"self",
".",
"id",
"]",
"if",
"instance",
"is",
"not",
"None",
":",
"parts",
".",
"append",
"(",
"instance",
")",
"return",
"'[%s]'",
"%",
"(",
"':'",
".",
"join",
"(",
"map",
"(",
"str",
",",
"parts",
")",
")",
")"
]
| :return: steam3 format (e.g ``[U:1:1234]``)
:rtype: :class:`str` | [
":",
"return",
":",
"steam3",
"format",
"(",
"e",
".",
"g",
"[",
"U",
":",
"1",
":",
"1234",
"]",
")",
":",
"rtype",
":",
":",
"class",
":",
"str"
]
| python | train |
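A brief example of the steam3 rendering above, using Valve's well-known demo account. The record strips decorators, so whether as_steam3 is wrapped as a property is not visible here; property access is assumed below.

    from steam.steamid import SteamID

    sid = SteamID(76561197960287930)   # Valve's documented example SteamID64
    print(sid.as_steam3)               # -> '[U:1:22202]'; if it is a plain method, call sid.as_steam3()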
collectiveacuity/labPack | labpack/storage/aws/s3.py | https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/aws/s3.py#L1873-L1991 | def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''):
'''
a method to list keys in the collection
:param prefix: string with prefix value to filter results
:param delimiter: string with value results must not contain (after prefix)
:param filter_function: (positional arguments) function used to filter results
:param max_results: integer with maximum number of results to return
:param previous_key: string with key in collection to begin search after
:return: list of key strings
NOTE: each key string can be divided into one or more segments
based upon the / characters which occur in the key string as
well as its file extension type. if the key string represents
a file path, then each directory in the path, the file name
and the file extension are all separate indexed values.
eg. lab/unittests/1473719695.2165067.json is indexed:
[ 'lab', 'unittests', '1473719695.2165067', '.json' ]
it is possible to filter the records in the collection according
to one or more of these path segments using a filter_function.
NOTE: the filter_function must be able to accept an array of positional
arguments and return a value that can evaluate to true or false.
while searching the records, list produces an array of strings
which represent the directory structure in relative path of each
key string. if a filter_function is provided, this list of strings
is fed to the filter function. if the function evaluates this input
and returns a true value the file will be included in the list
results.
'''
title = '%s.list' % self.__class__.__name__
# validate input
input_fields = {
'prefix': prefix,
'delimiter': delimiter,
'max_results': max_results,
'record_key': previous_key
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct default response
results_list = []
# handle filter function filter
if filter_function:
# validate filter function
try:
path_segments = [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
filter_function(*path_segments)
except:
err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__)
raise TypeError('%s must accept positional arguments.' % err_msg)
# construct keyword arguments
list_kwargs = {
'bucket_name': self.bucket_name,
'prefix': prefix,
'delimiter': delimiter
}
# determine starting key
starting_key = '1'
if previous_key:
previous_kwargs = {}
previous_kwargs.update(**list_kwargs)
previous_kwargs['max_results'] = 1
previous_kwargs['starting_key'] = previous_key
search_list, next_key = self.s3.list_records(**list_kwargs)
list_kwargs['starting_key'] = next_key
# iterate filter over collection
import os
while starting_key:
search_list, starting_key = self.s3.list_records(**list_kwargs)
for record in search_list:
record_key = record['key']
path_segments = record_key.split(os.sep)
if filter_function(*path_segments):
results_list.append(record_key)
if len(results_list) == max_results:
return results_list
# handle other filters
else:
# construct keyword arguments
list_kwargs = {
'bucket_name': self.bucket_name,
'prefix': prefix,
'delimiter': delimiter,
'max_results': max_results
}
# determine starting key
if previous_key:
previous_kwargs = {}
previous_kwargs.update(**list_kwargs)
previous_kwargs['max_results'] = 1
previous_kwargs['starting_key'] = previous_key
search_list, starting_key = self.s3.list_records(**list_kwargs)
list_kwargs['starting_key'] = starting_key
# retrieve results
search_list, starting_key = self.s3.list_records(**list_kwargs)
# construct result list
for record in search_list:
results_list.append(record['key'])
return results_list | [
"def",
"list",
"(",
"self",
",",
"prefix",
"=",
"''",
",",
"delimiter",
"=",
"''",
",",
"filter_function",
"=",
"None",
",",
"max_results",
"=",
"1",
",",
"previous_key",
"=",
"''",
")",
":",
"title",
"=",
"'%s.list'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate input",
"input_fields",
"=",
"{",
"'prefix'",
":",
"prefix",
",",
"'delimiter'",
":",
"delimiter",
",",
"'max_results'",
":",
"max_results",
",",
"'record_key'",
":",
"previous_key",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"if",
"value",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# construct default response",
"results_list",
"=",
"[",
"]",
"# handle filter function filter",
"if",
"filter_function",
":",
"# validate filter function",
"try",
":",
"path_segments",
"=",
"[",
"'lab'",
",",
"'unittests'",
",",
"'1473719695.2165067'",
",",
"'.json'",
"]",
"filter_function",
"(",
"*",
"path_segments",
")",
"except",
":",
"err_msg",
"=",
"'%s(filter_function=%s)'",
"%",
"(",
"title",
",",
"filter_function",
".",
"__class__",
".",
"__name__",
")",
"raise",
"TypeError",
"(",
"'%s must accept positional arguments.'",
"%",
"err_msg",
")",
"# construct keyword arguments",
"list_kwargs",
"=",
"{",
"'bucket_name'",
":",
"self",
".",
"bucket_name",
",",
"'prefix'",
":",
"prefix",
",",
"'delimiter'",
":",
"delimiter",
"}",
"# determine starting key",
"starting_key",
"=",
"'1'",
"if",
"previous_key",
":",
"previous_kwargs",
"=",
"{",
"}",
"previous_kwargs",
".",
"update",
"(",
"*",
"*",
"list_kwargs",
")",
"previous_kwargs",
"[",
"'max_results'",
"]",
"=",
"1",
"previous_kwargs",
"[",
"'starting_key'",
"]",
"=",
"previous_key",
"search_list",
",",
"next_key",
"=",
"self",
".",
"s3",
".",
"list_records",
"(",
"*",
"*",
"list_kwargs",
")",
"list_kwargs",
"[",
"'starting_key'",
"]",
"=",
"next_key",
"# iterate filter over collection",
"import",
"os",
"while",
"starting_key",
":",
"search_list",
",",
"starting_key",
"=",
"self",
".",
"s3",
".",
"list_records",
"(",
"*",
"*",
"list_kwargs",
")",
"for",
"record",
"in",
"search_list",
":",
"record_key",
"=",
"record",
"[",
"'key'",
"]",
"path_segments",
"=",
"record_key",
".",
"split",
"(",
"os",
".",
"sep",
")",
"if",
"filter_function",
"(",
"*",
"path_segments",
")",
":",
"results_list",
".",
"append",
"(",
"record_key",
")",
"if",
"len",
"(",
"results_list",
")",
"==",
"max_results",
":",
"return",
"results_list",
"# handle other filters",
"else",
":",
"# construct keyword arguments",
"list_kwargs",
"=",
"{",
"'bucket_name'",
":",
"self",
".",
"bucket_name",
",",
"'prefix'",
":",
"prefix",
",",
"'delimiter'",
":",
"delimiter",
",",
"'max_results'",
":",
"max_results",
"}",
"# determine starting key",
"if",
"previous_key",
":",
"previous_kwargs",
"=",
"{",
"}",
"previous_kwargs",
".",
"update",
"(",
"*",
"*",
"list_kwargs",
")",
"previous_kwargs",
"[",
"'max_results'",
"]",
"=",
"1",
"previous_kwargs",
"[",
"'starting_key'",
"]",
"=",
"previous_key",
"search_list",
",",
"starting_key",
"=",
"self",
".",
"s3",
".",
"list_records",
"(",
"*",
"*",
"list_kwargs",
")",
"list_kwargs",
"[",
"'starting_key'",
"]",
"=",
"starting_key",
"# retrieve results ",
"search_list",
",",
"starting_key",
"=",
"self",
".",
"s3",
".",
"list_records",
"(",
"*",
"*",
"list_kwargs",
")",
"# construct result list",
"for",
"record",
"in",
"search_list",
":",
"results_list",
".",
"append",
"(",
"record",
"[",
"'key'",
"]",
")",
"return",
"results_list"
]
| a method to list keys in the collection
:param prefix: string with prefix value to filter results
:param delimiter: string with value results must not contain (after prefix)
:param filter_function: (positional arguments) function used to filter results
:param max_results: integer with maximum number of results to return
:param previous_key: string with key in collection to begin search after
:return: list of key strings
NOTE: each key string can be divided into one or more segments
based upon the / characters which occur in the key string as
well as its file extension type. if the key string represents
a file path, then each directory in the path, the file name
and the file extension are all separate indexed values.
eg. lab/unittests/1473719695.2165067.json is indexed:
[ 'lab', 'unittests', '1473719695.2165067', '.json' ]
it is possible to filter the records in the collection according
to one or more of these path segments using a filter_function.
NOTE: the filter_function must be able to accept an array of positional
arguments and return a value that can evaluate to true or false.
while searching the records, list produces an array of strings
which represent the directory structure in relative path of each
key string. if a filter_function is provided, this list of strings
is fed to the filter function. if the function evaluates this input
and returns a true value the file will be included in the list
results. | [
"a",
"method",
"to",
"list",
"keys",
"in",
"the",
"collection"
]
| python | train |
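A hedged sketch of the filter_function contract described above: the callable receives the path segments of a key as positional arguments and returns a truthy value to keep that key. Construction of the collection client is assumed; the segment layout mirrors the docstring's example.

    # 'collection' is an assumed, already-constructed s3 collection client.
    def only_unittest_json(*segments):
        # segments look like ('lab', 'unittests', '1473719695.2165067', '.json')
        return 'unittests' in segments and segments[-1] == '.json'

    matching_keys = collection.list(prefix='lab/',
                                    filter_function=only_unittest_json,
                                    max_results=10)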
volfpeter/graphscraper | src/graphscraper/base.py | https://github.com/volfpeter/graphscraper/blob/11d407509956a282ee25190ed6491a162fc0fe7f/src/graphscraper/base.py#L330-L387 | def get_node_by_name(self, node_name: str,
can_validate_and_load: bool = False,
external_id: Optional[str] = None) -> Optional[Node]:
"""
Returns the node with the given name if it exists either in the graph
or in its database cache or `None` otherwise.
Arguments:
node_name (str): The name of the node to return.
can_validate_and_load (bool): Whether `self._graph.get_authentic_node_name(node_name)`
can be called to validate the node name and add the node
to the graph if the node name is valid.
external_id (Optional[str]): An optional external ID that is used only if there no node
with the given name in the graph or in the cache and
`can_validate_and_load` is `True`.
Returns:
The node with the given name if it exists either in the graph
or in its database cache, `None` otherwise.
"""
node: Node = self._node_name_map.get(node_name)
if node is not None:
return node
db_node: DBNode = self._graph.database.Node.find_by_name(node_name)
if db_node is None:
if can_validate_and_load:
node_name = self._graph.get_authentic_node_name(node_name)
if node_name is not None:
node = self._node_name_map.get(node_name)
if node is not None:
return node
db_node = self._graph.database.Node.find_by_name(node_name)
if db_node is None:
self._internal_add_node(node_name=node_name,
external_id=external_id,
are_neighbors_cached=False,
add_to_cache=True)
else:
self._internal_add_node(node_name=db_node.name,
external_id=db_node.external_id,
are_neighbors_cached=db_node.are_neighbors_cached,
add_to_cache=False)
else:
return None
else:
self._internal_add_node(node_name=db_node.name,
external_id=db_node.external_id,
are_neighbors_cached=db_node.are_neighbors_cached,
add_to_cache=False)
node = self._node_name_map.get(node_name)
# Trying to load the cached neighbors of the created node from the database could
# cause a very-very-very deep recursion, so don't even think about doing it here.
return node | [
"def",
"get_node_by_name",
"(",
"self",
",",
"node_name",
":",
"str",
",",
"can_validate_and_load",
":",
"bool",
"=",
"False",
",",
"external_id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Optional",
"[",
"Node",
"]",
":",
"node",
":",
"Node",
"=",
"self",
".",
"_node_name_map",
".",
"get",
"(",
"node_name",
")",
"if",
"node",
"is",
"not",
"None",
":",
"return",
"node",
"db_node",
":",
"DBNode",
"=",
"self",
".",
"_graph",
".",
"database",
".",
"Node",
".",
"find_by_name",
"(",
"node_name",
")",
"if",
"db_node",
"is",
"None",
":",
"if",
"can_validate_and_load",
":",
"node_name",
"=",
"self",
".",
"_graph",
".",
"get_authentic_node_name",
"(",
"node_name",
")",
"if",
"node_name",
"is",
"not",
"None",
":",
"node",
"=",
"self",
".",
"_node_name_map",
".",
"get",
"(",
"node_name",
")",
"if",
"node",
"is",
"not",
"None",
":",
"return",
"node",
"db_node",
"=",
"self",
".",
"_graph",
".",
"database",
".",
"Node",
".",
"find_by_name",
"(",
"node_name",
")",
"if",
"db_node",
"is",
"None",
":",
"self",
".",
"_internal_add_node",
"(",
"node_name",
"=",
"node_name",
",",
"external_id",
"=",
"external_id",
",",
"are_neighbors_cached",
"=",
"False",
",",
"add_to_cache",
"=",
"True",
")",
"else",
":",
"self",
".",
"_internal_add_node",
"(",
"node_name",
"=",
"db_node",
".",
"name",
",",
"external_id",
"=",
"db_node",
".",
"external_id",
",",
"are_neighbors_cached",
"=",
"db_node",
".",
"are_neighbors_cached",
",",
"add_to_cache",
"=",
"False",
")",
"else",
":",
"return",
"None",
"else",
":",
"self",
".",
"_internal_add_node",
"(",
"node_name",
"=",
"db_node",
".",
"name",
",",
"external_id",
"=",
"db_node",
".",
"external_id",
",",
"are_neighbors_cached",
"=",
"db_node",
".",
"are_neighbors_cached",
",",
"add_to_cache",
"=",
"False",
")",
"node",
"=",
"self",
".",
"_node_name_map",
".",
"get",
"(",
"node_name",
")",
"# Trying to load the cached neighbors of the created node from the database could\r",
"# cause a very-very-very deep recursion, so don't even think about doing it here.\r",
"return",
"node"
]
| Returns the node with the given name if it exists either in the graph
or in its database cache or `None` otherwise.
Arguments:
node_name (str): The name of the node to return.
can_validate_and_load (bool): Whether `self._graph.get_authentic_node_name(node_name)`
can be called to validate the node name and add the node
to the graph if the node name is valid.
external_id (Optional[str]): An optional external ID that is used only if there is no node
with the given name in the graph or in the cache and
`can_validate_and_load` is `True`.
Returns:
The node with the given name if it exists either in the graph
or in its database cache, `None` otherwise. | [
"Returns",
"the",
"node",
"with",
"the",
"given",
"name",
"if",
"it",
"exists",
"either",
"in",
"the",
"graph",
"or",
"in",
"its",
"database",
"cache",
"or",
"None",
"otherwise",
".",
"Arguments",
":",
"node_name",
"(",
"str",
")",
":",
"The",
"name",
"of",
"the",
"node",
"to",
"return",
".",
"can_validate_and_load",
"(",
"bool",
")",
":",
"Whether",
"self",
".",
"_graph",
".",
"get_authentic_node_name",
"(",
"node_name",
")",
"can",
"be",
"called",
"to",
"validate",
"the",
"node",
"name",
"and",
"add",
"the",
"node",
"to",
"the",
"graph",
"if",
"the",
"node",
"name",
"is",
"valid",
".",
"external_id",
"(",
"Optional",
"[",
"str",
"]",
")",
":",
"An",
"optional",
"external",
"ID",
"that",
"is",
"used",
"only",
"if",
"there",
"no",
"node",
"with",
"the",
"given",
"name",
"in",
"the",
"graph",
"or",
"in",
"the",
"cache",
"and",
"can_validate_and_load",
"is",
"True",
".",
"Returns",
":",
"The",
"node",
"with",
"the",
"given",
"name",
"if",
"it",
"exists",
"either",
"in",
"the",
"graph",
"or",
"in",
"its",
"database",
"cache",
"None",
"otherwise",
"."
]
| python | train |
BD2KGenomics/protect | src/protect/mutation_calling/somaticsniper.py | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L67-L123 | def run_somaticsniper(job, tumor_bam, normal_bam, univ_options, somaticsniper_options, split=True):
"""
Run the SomaticSniper subgraph on the DNA bams. Optionally split the results into
per-chromosome vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running SomaticSniper
on every chromosome
perchrom_somaticsniper:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: toil.fileStore.FileID|dict
"""
# Get a list of chromosomes to handle
if somaticsniper_options['chromosomes']:
chromosomes = somaticsniper_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, somaticsniper_options['genome_fai'])
perchrom_somaticsniper = defaultdict()
snipe = job.wrapJobFn(run_somaticsniper_full, tumor_bam, normal_bam, univ_options,
somaticsniper_options,
disk=PromisedRequirement(sniper_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
pileup = job.wrapJobFn(run_pileup, tumor_bam, univ_options, somaticsniper_options,
disk=PromisedRequirement(pileup_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
filtersnipes = job.wrapJobFn(filter_somaticsniper, tumor_bam, snipe.rv(), pileup.rv(),
univ_options, somaticsniper_options,
disk=PromisedRequirement(sniper_filter_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
somaticsniper_options['genome_fasta']),
memory='6G')
job.addChild(snipe)
job.addChild(pileup)
snipe.addChild(filtersnipes)
pileup.addChild(filtersnipes)
if split:
unmerge_snipes = job.wrapJobFn(unmerge, filtersnipes.rv(), 'somaticsniper', chromosomes,
somaticsniper_options, univ_options)
filtersnipes.addChild(unmerge_snipes)
return unmerge_snipes.rv()
else:
return filtersnipes.rv() | [
"def",
"run_somaticsniper",
"(",
"job",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"split",
"=",
"True",
")",
":",
"# Get a list of chromosomes to handle",
"if",
"somaticsniper_options",
"[",
"'chromosomes'",
"]",
":",
"chromosomes",
"=",
"somaticsniper_options",
"[",
"'chromosomes'",
"]",
"else",
":",
"chromosomes",
"=",
"sample_chromosomes",
"(",
"job",
",",
"somaticsniper_options",
"[",
"'genome_fai'",
"]",
")",
"perchrom_somaticsniper",
"=",
"defaultdict",
"(",
")",
"snipe",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_somaticsniper_full",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"sniper_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"normal_bam",
"[",
"'normal_dna_fix_pg_sorted.bam'",
"]",
",",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"pileup",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_pileup",
",",
"tumor_bam",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"pileup_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"filtersnipes",
"=",
"job",
".",
"wrapJobFn",
"(",
"filter_somaticsniper",
",",
"tumor_bam",
",",
"snipe",
".",
"rv",
"(",
")",
",",
"pileup",
".",
"rv",
"(",
")",
",",
"univ_options",
",",
"somaticsniper_options",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"sniper_filter_disk",
",",
"tumor_bam",
"[",
"'tumor_dna_fix_pg_sorted.bam'",
"]",
",",
"somaticsniper_options",
"[",
"'genome_fasta'",
"]",
")",
",",
"memory",
"=",
"'6G'",
")",
"job",
".",
"addChild",
"(",
"snipe",
")",
"job",
".",
"addChild",
"(",
"pileup",
")",
"snipe",
".",
"addChild",
"(",
"filtersnipes",
")",
"pileup",
".",
"addChild",
"(",
"filtersnipes",
")",
"if",
"split",
":",
"unmerge_snipes",
"=",
"job",
".",
"wrapJobFn",
"(",
"unmerge",
",",
"filtersnipes",
".",
"rv",
"(",
")",
",",
"'somaticsniper'",
",",
"chromosomes",
",",
"somaticsniper_options",
",",
"univ_options",
")",
"filtersnipes",
".",
"addChild",
"(",
"unmerge_snipes",
")",
"return",
"unmerge_snipes",
".",
"rv",
"(",
")",
"else",
":",
"return",
"filtersnipes",
".",
"rv",
"(",
")"
]
| Run the SomaticSniper subgraph on the DNA bams. Optionally split the results into
per-chromosome vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running SomaticSniper
on every chromosome
perchrom_somaticsniper:
|- 'chr1': fsID
|- 'chr2' fsID
|
|-...
|
+- 'chrM': fsID
:rtype: toil.fileStore.FileID|dict | [
"Run",
"the",
"SomaticSniper",
"subgraph",
"on",
"the",
"DNA",
"bams",
".",
"Optionally",
"split",
"the",
"results",
"into",
"per",
"-",
"chromosome",
"vcfs",
"."
]
| python | train |
mlperf/training | reinforcement/tensorflow/minigo/ml_perf/reference_implementation.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/ml_perf/reference_implementation.py#L417-L447 | def main(unused_argv):
"""Run the reinforcement learning loop."""
print('Wiping dir %s' % FLAGS.base_dir, flush=True)
shutil.rmtree(FLAGS.base_dir, ignore_errors=True)
dirs = [fsdb.models_dir(), fsdb.selfplay_dir(), fsdb.holdout_dir(),
fsdb.eval_dir(), fsdb.golden_chunk_dir(), fsdb.working_dir()]
for d in dirs:
ensure_dir_exists(d);
# Copy the flag files so there's no chance of them getting accidentally
# overwritten while the RL loop is running.
flags_dir = os.path.join(FLAGS.base_dir, 'flags')
shutil.copytree(FLAGS.flags_dir, flags_dir)
FLAGS.flags_dir = flags_dir
# Copy the target model to the models directory so we can find it easily.
shutil.copy(FLAGS.target_path, os.path.join(fsdb.models_dir(), 'target.pb'))
logging.getLogger().addHandler(
logging.FileHandler(os.path.join(FLAGS.base_dir, 'rl_loop.log')))
formatter = logging.Formatter('[%(asctime)s] %(message)s',
'%Y-%m-%d %H:%M:%S')
for handler in logging.getLogger().handlers:
handler.setFormatter(formatter)
with logged_timer('Total time'):
try:
rl_loop()
finally:
asyncio.get_event_loop().close() | [
"def",
"main",
"(",
"unused_argv",
")",
":",
"print",
"(",
"'Wiping dir %s'",
"%",
"FLAGS",
".",
"base_dir",
",",
"flush",
"=",
"True",
")",
"shutil",
".",
"rmtree",
"(",
"FLAGS",
".",
"base_dir",
",",
"ignore_errors",
"=",
"True",
")",
"dirs",
"=",
"[",
"fsdb",
".",
"models_dir",
"(",
")",
",",
"fsdb",
".",
"selfplay_dir",
"(",
")",
",",
"fsdb",
".",
"holdout_dir",
"(",
")",
",",
"fsdb",
".",
"eval_dir",
"(",
")",
",",
"fsdb",
".",
"golden_chunk_dir",
"(",
")",
",",
"fsdb",
".",
"working_dir",
"(",
")",
"]",
"for",
"d",
"in",
"dirs",
":",
"ensure_dir_exists",
"(",
"d",
")",
"# Copy the flag files so there's no chance of them getting accidentally",
"# overwritten while the RL loop is running.",
"flags_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"base_dir",
",",
"'flags'",
")",
"shutil",
".",
"copytree",
"(",
"FLAGS",
".",
"flags_dir",
",",
"flags_dir",
")",
"FLAGS",
".",
"flags_dir",
"=",
"flags_dir",
"# Copy the target model to the models directory so we can find it easily.",
"shutil",
".",
"copy",
"(",
"FLAGS",
".",
"target_path",
",",
"os",
".",
"path",
".",
"join",
"(",
"fsdb",
".",
"models_dir",
"(",
")",
",",
"'target.pb'",
")",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"addHandler",
"(",
"logging",
".",
"FileHandler",
"(",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"base_dir",
",",
"'rl_loop.log'",
")",
")",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'[%(asctime)s] %(message)s'",
",",
"'%Y-%m-%d %H:%M:%S'",
")",
"for",
"handler",
"in",
"logging",
".",
"getLogger",
"(",
")",
".",
"handlers",
":",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"with",
"logged_timer",
"(",
"'Total time'",
")",
":",
"try",
":",
"rl_loop",
"(",
")",
"finally",
":",
"asyncio",
".",
"get_event_loop",
"(",
")",
".",
"close",
"(",
")"
]
| Run the reinforcement learning loop. | [
"Run",
"the",
"reinforcement",
"learning",
"loop",
"."
]
| python | train |
calmjs/calmjs.parse | src/calmjs/parse/parsers/es5.py | https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L542-L550 | def p_arguments(self, p):
"""arguments : LPAREN RPAREN
| LPAREN argument_list RPAREN
"""
if len(p) == 4:
p[0] = self.asttypes.Arguments(p[2])
else:
p[0] = self.asttypes.Arguments([])
p[0].setpos(p) | [
"def",
"p_arguments",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"4",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"Arguments",
"(",
"p",
"[",
"2",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"Arguments",
"(",
"[",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
]
| arguments : LPAREN RPAREN
| LPAREN argument_list RPAREN | [
"arguments",
":",
"LPAREN",
"RPAREN",
"|",
"LPAREN",
"argument_list",
"RPAREN"
]
| python | train |
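For context, a small hedged example of driving the ES5 parser that this arguments production belongs to; the convenience entry point below is the package's usual front door, though the record itself only shows the grammar rule.

    from calmjs.parse import es5

    program = es5(u'foo(1, bar);')   # the call expression exercises the arguments rule
    print(program)                   # prints the round-tripped source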
SeleniumHQ/selenium | py/selenium/webdriver/common/action_chains.py | https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/common/action_chains.py#L327-L343 | def send_keys(self, *keys_to_send):
"""
Sends keys to current focused element.
:Args:
- keys_to_send: The keys to send. Modifier keys constants can be found in the
'Keys' class.
"""
typing = keys_to_typing(keys_to_send)
if self._driver.w3c:
for key in typing:
self.key_down(key)
self.key_up(key)
else:
self._actions.append(lambda: self._driver.execute(
Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {'value': typing}))
return self | [
"def",
"send_keys",
"(",
"self",
",",
"*",
"keys_to_send",
")",
":",
"typing",
"=",
"keys_to_typing",
"(",
"keys_to_send",
")",
"if",
"self",
".",
"_driver",
".",
"w3c",
":",
"for",
"key",
"in",
"typing",
":",
"self",
".",
"key_down",
"(",
"key",
")",
"self",
".",
"key_up",
"(",
"key",
")",
"else",
":",
"self",
".",
"_actions",
".",
"append",
"(",
"lambda",
":",
"self",
".",
"_driver",
".",
"execute",
"(",
"Command",
".",
"SEND_KEYS_TO_ACTIVE_ELEMENT",
",",
"{",
"'value'",
":",
"typing",
"}",
")",
")",
"return",
"self"
]
| Sends keys to current focused element.
:Args:
- keys_to_send: The keys to send. Modifier keys constants can be found in the
'Keys' class. | [
"Sends",
"keys",
"to",
"current",
"focused",
"element",
"."
]
| python | train |
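A brief usage sketch for the action-chain method above; the driver construction and target page are assumptions, while the chaining and the final perform() call follow the standard ActionChains pattern.

    from selenium import webdriver
    from selenium.webdriver.common.action_chains import ActionChains
    from selenium.webdriver.common.keys import Keys

    driver = webdriver.Firefox()              # any WebDriver instance works
    driver.get('https://example.com')
    # Keys go to whichever element currently has focus.
    ActionChains(driver).send_keys('hello', Keys.TAB).perform()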
hyperledger/sawtooth-core | validator/sawtooth_validator/execution/scheduler_parallel.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/scheduler_parallel.py#L107-L121 | def prune(self, address):
'''
Remove all children (and descendants) below ADDRESS.
Arguments:
address (str): the address to be pruned
'''
try:
for step in self._walk_to_address(address):
node = step
except AddressNotInTree:
return
node.children.clear() | [
"def",
"prune",
"(",
"self",
",",
"address",
")",
":",
"try",
":",
"for",
"step",
"in",
"self",
".",
"_walk_to_address",
"(",
"address",
")",
":",
"node",
"=",
"step",
"except",
"AddressNotInTree",
":",
"return",
"node",
".",
"children",
".",
"clear",
"(",
")"
]
| Remove all children (and descendants) below ADDRESS.
Arguments:
address (str): the address to be pruned | [
"Remove",
"all",
"children",
"(",
"and",
"descendants",
")",
"below",
"ADDRESS",
"."
]
| python | train |
IBMStreams/pypi.streamsx | streamsx/spl/spl.py | https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/spl/spl.py#L1082-L1097 | def ignore(wrapped):
"""
Decorator to ignore a Python function.
If a Python callable is decorated with ``@spl.ignore``
then the function is ignored by ``spl-python-extract.py``.
Args:
wrapped: Function that will be ignored.
"""
@functools.wraps(wrapped)
def _ignore(*args, **kwargs):
return wrapped(*args, **kwargs)
_ignore._splpy_optype = _OperatorType.Ignore
_ignore._splpy_file = inspect.getsourcefile(wrapped)
return _ignore | [
"def",
"ignore",
"(",
"wrapped",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"wrapped",
")",
"def",
"_ignore",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"_ignore",
".",
"_splpy_optype",
"=",
"_OperatorType",
".",
"Ignore",
"_ignore",
".",
"_splpy_file",
"=",
"inspect",
".",
"getsourcefile",
"(",
"wrapped",
")",
"return",
"_ignore"
]
| Decorator to ignore a Python function.
If a Python callable is decorated with ``@spl.ignore``
then the function is ignored by ``spl-python-extract.py``.
Args:
wrapped: Function that will be ignored. | [
"Decorator",
"to",
"ignore",
"a",
"Python",
"function",
"."
]
| python | train |
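A small hedged example of the decorator above: a helper that should not be turned into an SPL operator during toolkit extraction. The import path mirrors the record's module location; the helper itself is illustrative.

    from streamsx.spl import spl

    @spl.ignore
    def scale(value, factor=2):
        # Plain utility used by other callables; skipped by spl-python-extract.py.
        return value * factor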
apache/spark | python/pyspark/sql/functions.py | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L908-L924 | def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
.. note:: Use whenever possible specialized functions like `year`. These benefit from a
specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format)) | [
"def",
"date_format",
"(",
"date",
",",
"format",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"date_format",
"(",
"_to_java_column",
"(",
"date",
")",
",",
"format",
")",
")"
]
| Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
.. note:: Use whenever possible specialized functions like `year`. These benefit from a
specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')] | [
"Converts",
"a",
"date",
"/",
"timestamp",
"/",
"string",
"to",
"a",
"value",
"of",
"string",
"in",
"the",
"format",
"specified",
"by",
"the",
"date",
"format",
"given",
"by",
"the",
"second",
"argument",
"."
]
| python | train |
idlesign/django-siteprefs | siteprefs/toolbox.py | https://github.com/idlesign/django-siteprefs/blob/3d6bf5e64220fe921468a36fce68e15d7947cf92/siteprefs/toolbox.py#L171-L180 | def patch_locals(depth=2):
"""Temporarily (see unpatch_locals()) replaces all module variables
considered preferences with PatchedLocal objects, so that every
variable has different hash returned by id().
"""
for name, locals_dict in traverse_local_prefs(depth):
locals_dict[name] = PatchedLocal(name, locals_dict[name])
get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL] = True | [
"def",
"patch_locals",
"(",
"depth",
"=",
"2",
")",
":",
"for",
"name",
",",
"locals_dict",
"in",
"traverse_local_prefs",
"(",
"depth",
")",
":",
"locals_dict",
"[",
"name",
"]",
"=",
"PatchedLocal",
"(",
"name",
",",
"locals_dict",
"[",
"name",
"]",
")",
"get_frame_locals",
"(",
"depth",
")",
"[",
"__PATCHED_LOCALS_SENTINEL",
"]",
"=",
"True"
]
| Temporarily (see unpatch_locals()) replaces all module variables
considered preferences with PatchedLocal objects, so that every
variable has different hash returned by id(). | [
"Temporarily",
"(",
"see",
"unpatch_locals",
"()",
")",
"replaces",
"all",
"module",
"variables",
"considered",
"preferences",
"with",
"PatchedLocal",
"objects",
"so",
"that",
"every",
"variable",
"has",
"different",
"hash",
"returned",
"by",
"id",
"()",
"."
]
| python | valid |
idlesign/srptools | srptools/context.py | https://github.com/idlesign/srptools/blob/eb08a27137d3216e41d63bbeafbac79f43881a6a/srptools/context.py#L213-L232 | def get_common_session_key_proof(self, session_key, salt, server_public, client_public):
"""M = H(H(N) XOR H(g) | H(U) | s | A | B | K)
:param bytes session_key:
:param int salt:
:param int server_public:
:param int client_public:
:rtype: bytes
"""
h = self.hash
prove = h(
h(self._prime) ^ h(self._gen),
h(self._user),
salt,
client_public,
server_public,
session_key,
as_bytes=True
)
return prove | [
"def",
"get_common_session_key_proof",
"(",
"self",
",",
"session_key",
",",
"salt",
",",
"server_public",
",",
"client_public",
")",
":",
"h",
"=",
"self",
".",
"hash",
"prove",
"=",
"h",
"(",
"h",
"(",
"self",
".",
"_prime",
")",
"^",
"h",
"(",
"self",
".",
"_gen",
")",
",",
"h",
"(",
"self",
".",
"_user",
")",
",",
"salt",
",",
"client_public",
",",
"server_public",
",",
"session_key",
",",
"as_bytes",
"=",
"True",
")",
"return",
"prove"
]
| M = H(H(N) XOR H(g) | H(U) | s | A | B | K)
:param bytes session_key:
:param int salt:
:param int server_public:
:param int client_public:
:rtype: bytes | [
"M",
"=",
"H",
"(",
"H",
"(",
"N",
")",
"XOR",
"H",
"(",
"g",
")",
"|",
"H",
"(",
"U",
")",
"|",
"s",
"|",
"A",
"|",
"B",
"|",
"K",
")"
]
| python | train |
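A minimal sketch of the M = H(H(N) XOR H(g) | H(U) | s | A | B | K) composition using hashlib; the H helper below (SHA-256, big-endian integer encoding, integer digest) is an assumption for illustration, not srptools' actual hash handling.

import hashlib

def H(*parts):
    # Assumed hash helper: ints are big-endian encoded, the digest is returned as an int.
    h = hashlib.sha256()
    for p in parts:
        if isinstance(p, int):
            p = p.to_bytes((p.bit_length() + 7) // 8 or 1, "big")
        h.update(p)
    return int(h.hexdigest(), 16)

def session_key_proof(N, g, username, salt, A, B, K):
    # M = H(H(N) xor H(g), H(username), salt, A, B, K)
    return H(H(N) ^ H(g), H(username), salt, A, B, K)

print(hex(session_key_proof(23, 5, b"alice", 1234, 17, 19, b"session-key")))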
ejeschke/ginga | ginga/ImageView.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L2488-L2503 | def panset_xy(self, data_x, data_y, no_reset=False):
"""Similar to :meth:`set_pan`, except that input pan positions
are always in data space.
"""
pan_coord = self.t_['pan_coord']
# To center on the pixel
if pan_coord == 'wcs':
image = self.get_image()
if image is None:
return
pan_x, pan_y = image.pixtoradec(data_x, data_y)
else:
pan_x, pan_y = data_x, data_y
self.set_pan(pan_x, pan_y, coord=pan_coord, no_reset=no_reset) | [
"def",
"panset_xy",
"(",
"self",
",",
"data_x",
",",
"data_y",
",",
"no_reset",
"=",
"False",
")",
":",
"pan_coord",
"=",
"self",
".",
"t_",
"[",
"'pan_coord'",
"]",
"# To center on the pixel",
"if",
"pan_coord",
"==",
"'wcs'",
":",
"image",
"=",
"self",
".",
"get_image",
"(",
")",
"if",
"image",
"is",
"None",
":",
"return",
"pan_x",
",",
"pan_y",
"=",
"image",
".",
"pixtoradec",
"(",
"data_x",
",",
"data_y",
")",
"else",
":",
"pan_x",
",",
"pan_y",
"=",
"data_x",
",",
"data_y",
"self",
".",
"set_pan",
"(",
"pan_x",
",",
"pan_y",
",",
"coord",
"=",
"pan_coord",
",",
"no_reset",
"=",
"no_reset",
")"
]
| Similar to :meth:`set_pan`, except that input pan positions
are always in data space. | [
"Similar",
"to",
":",
"meth",
":",
"set_pan",
"except",
"that",
"input",
"pan",
"positions",
"are",
"always",
"in",
"data",
"space",
"."
]
| python | train |
PyCQA/astroid | astroid/node_classes.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L333-L364 | def infer(self, context=None, **kwargs):
"""Get a generator of the inferred values.
This is the main entry point to the inference system.
.. seealso:: :ref:`inference`
If the instance has some explicit inference function set, it will be
called instead of the default interface.
:returns: The inferred values.
:rtype: iterable
"""
if context is not None:
context = context.extra_context.get(self, context)
if self._explicit_inference is not None:
# explicit_inference is not bound, give it self explicitly
try:
# pylint: disable=not-callable
return self._explicit_inference(self, context, **kwargs)
except exceptions.UseInferenceDefault:
pass
if not context:
return self._infer(context, **kwargs)
key = (self, context.lookupname, context.callcontext, context.boundnode)
if key in context.inferred:
return iter(context.inferred[key])
gen = context.cache_generator(key, self._infer(context, **kwargs))
return util.limit_inference(gen, MANAGER.max_inferable_values) | [
"def",
"infer",
"(",
"self",
",",
"context",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"context",
"is",
"not",
"None",
":",
"context",
"=",
"context",
".",
"extra_context",
".",
"get",
"(",
"self",
",",
"context",
")",
"if",
"self",
".",
"_explicit_inference",
"is",
"not",
"None",
":",
"# explicit_inference is not bound, give it self explicitly",
"try",
":",
"# pylint: disable=not-callable",
"return",
"self",
".",
"_explicit_inference",
"(",
"self",
",",
"context",
",",
"*",
"*",
"kwargs",
")",
"except",
"exceptions",
".",
"UseInferenceDefault",
":",
"pass",
"if",
"not",
"context",
":",
"return",
"self",
".",
"_infer",
"(",
"context",
",",
"*",
"*",
"kwargs",
")",
"key",
"=",
"(",
"self",
",",
"context",
".",
"lookupname",
",",
"context",
".",
"callcontext",
",",
"context",
".",
"boundnode",
")",
"if",
"key",
"in",
"context",
".",
"inferred",
":",
"return",
"iter",
"(",
"context",
".",
"inferred",
"[",
"key",
"]",
")",
"gen",
"=",
"context",
".",
"cache_generator",
"(",
"key",
",",
"self",
".",
"_infer",
"(",
"context",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"util",
".",
"limit_inference",
"(",
"gen",
",",
"MANAGER",
".",
"max_inferable_values",
")"
]
| Get a generator of the inferred values.
This is the main entry point to the inference system.
.. seealso:: :ref:`inference`
If the instance has some explicit inference function set, it will be
called instead of the default interface.
:returns: The inferred values.
:rtype: iterable | [
"Get",
"a",
"generator",
"of",
"the",
"inferred",
"values",
"."
]
| python | train |
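A generic sketch of the caching idea used here: values produced by an inference generator are stored under a composite key so that later calls with the same key replay them. The names are illustrative, not astroid's API.

def cache_generator(cache, key, gen):
    # Append values to the cache entry as the generator produces them.
    results = cache.setdefault(key, [])
    for value in gen:
        results.append(value)
        yield value

cache = {}
key = ("node", "lookupname", None, None)
values = cache_generator(cache, key, iter([1, 2, 3])) if key not in cache else iter(cache[key])
print(list(values))            # [1, 2, 3]
print(list(iter(cache[key])))  # replayed from the cache: [1, 2, 3]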
allenai/allennlp | allennlp/state_machines/trainers/expected_risk_minimization.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/trainers/expected_risk_minimization.py#L151-L166 | def _get_best_final_states(self, finished_states: List[StateType]) -> Dict[int, List[StateType]]:
"""
Returns the best finished states for each batch instance based on model scores. We return
at most ``self._max_num_decoded_sequences`` sequences per instance.
"""
batch_states: Dict[int, List[StateType]] = defaultdict(list)
for state in finished_states:
batch_states[state.batch_indices[0]].append(state)
best_states: Dict[int, List[StateType]] = {}
for batch_index, states in batch_states.items():
# The time this sort takes is pretty negligible, no particular need to optimize this
# yet. Maybe with a larger beam size...
finished_to_sort = [(-state.score[0].item(), state) for state in states]
finished_to_sort.sort(key=lambda x: x[0])
best_states[batch_index] = [state[1] for state in finished_to_sort[:self._beam_size]]
return best_states | [
"def",
"_get_best_final_states",
"(",
"self",
",",
"finished_states",
":",
"List",
"[",
"StateType",
"]",
")",
"->",
"Dict",
"[",
"int",
",",
"List",
"[",
"StateType",
"]",
"]",
":",
"batch_states",
":",
"Dict",
"[",
"int",
",",
"List",
"[",
"StateType",
"]",
"]",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"state",
"in",
"finished_states",
":",
"batch_states",
"[",
"state",
".",
"batch_indices",
"[",
"0",
"]",
"]",
".",
"append",
"(",
"state",
")",
"best_states",
":",
"Dict",
"[",
"int",
",",
"List",
"[",
"StateType",
"]",
"]",
"=",
"{",
"}",
"for",
"batch_index",
",",
"states",
"in",
"batch_states",
".",
"items",
"(",
")",
":",
"# The time this sort takes is pretty negligible, no particular need to optimize this",
"# yet. Maybe with a larger beam size...",
"finished_to_sort",
"=",
"[",
"(",
"-",
"state",
".",
"score",
"[",
"0",
"]",
".",
"item",
"(",
")",
",",
"state",
")",
"for",
"state",
"in",
"states",
"]",
"finished_to_sort",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"best_states",
"[",
"batch_index",
"]",
"=",
"[",
"state",
"[",
"1",
"]",
"for",
"state",
"in",
"finished_to_sort",
"[",
":",
"self",
".",
"_beam_size",
"]",
"]",
"return",
"best_states"
]
| Returns the best finished states for each batch instance based on model scores. We return
at most ``self._max_num_decoded_sequences`` sequences per instance. | [
"Returns",
"the",
"best",
"finished",
"states",
"for",
"each",
"batch",
"instance",
"based",
"on",
"model",
"scores",
".",
"We",
"return",
"at",
"most",
"self",
".",
"_max_num_decoded_sequences",
"number",
"of",
"sequences",
"per",
"instance",
"."
]
| python | train |
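The grouping-and-top-k step can be sketched generically; the (batch_index, score) pairs and beam size below are stand-ins for the state objects used here.

from collections import defaultdict

def best_per_batch(finished, beam_size=2):
    # Group finished entries by batch instance, then keep the highest-scoring ones.
    by_batch = defaultdict(list)
    for batch_index, score in finished:
        by_batch[batch_index].append(score)
    return {b: sorted(scores, reverse=True)[:beam_size] for b, scores in by_batch.items()}

print(best_per_batch([(0, -1.2), (0, -0.3), (0, -2.0), (1, -0.9)]))
# {0: [-0.3, -1.2], 1: [-0.9]}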
liminspace/dju-common | dju_common/validators.py | https://github.com/liminspace/dju-common/blob/c68860bb84d454a35e66275841c20f38375c2135/dju_common/validators.py#L6-L14 | def validate_email_domain(email):
""" Validates email domain by blacklist. """
try:
domain = email.split('@', 1)[1].lower().strip()
except IndexError:
return
if domain in dju_settings.DJU_EMAIL_DOMAIN_BLACK_LIST:
raise ValidationError(_(u'Email with domain "%(domain)s" is disallowed.'),
code='banned_domain', params={'domain': domain}) | [
"def",
"validate_email_domain",
"(",
"email",
")",
":",
"try",
":",
"domain",
"=",
"email",
".",
"split",
"(",
"'@'",
",",
"1",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"except",
"IndexError",
":",
"return",
"if",
"domain",
"in",
"dju_settings",
".",
"DJU_EMAIL_DOMAIN_BLACK_LIST",
":",
"raise",
"ValidationError",
"(",
"_",
"(",
"u'Email with domain \"%(domain)s\" is disallowed.'",
")",
",",
"code",
"=",
"'banned_domain'",
",",
"params",
"=",
"{",
"'domain'",
":",
"domain",
"}",
")"
]
| Validates email domain by blacklist. | [
"Validates",
"email",
"domain",
"by",
"blacklist",
"."
]
| python | train |
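A standalone sketch of the same check without the Django/dju_settings wiring; the blacklist contents are made up for illustration.

DOMAIN_BLACKLIST = {"mailinator.com", "example.org"}  # illustrative values

def is_allowed(email):
    try:
        domain = email.split("@", 1)[1].lower().strip()
    except IndexError:
        return True  # no domain part: nothing to validate, mirroring the early return above
    return domain not in DOMAIN_BLACKLIST

print(is_allowed("user@mailinator.com"))  # False
print(is_allowed("user@gmail.com"))       # True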
mcs07/ChemDataExtractor | chemdataextractor/scrape/scraper.py | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/scrape/scraper.py#L77-L90 | def run(self, url):
"""Request URL, scrape response and return an EntityList."""
url = self.process_url(url)
if not url:
return
response = self.make_request(self.http, url)
selector = self.process_response(response)
entities = []
for root in self.get_roots(selector):
entity = self.entity(root)
entity = self.process_entity(entity)
if entity:
entities.append(entity)
return EntityList(*entities) | [
"def",
"run",
"(",
"self",
",",
"url",
")",
":",
"url",
"=",
"self",
".",
"process_url",
"(",
"url",
")",
"if",
"not",
"url",
":",
"return",
"response",
"=",
"self",
".",
"make_request",
"(",
"self",
".",
"http",
",",
"url",
")",
"selector",
"=",
"self",
".",
"process_response",
"(",
"response",
")",
"entities",
"=",
"[",
"]",
"for",
"root",
"in",
"self",
".",
"get_roots",
"(",
"selector",
")",
":",
"entity",
"=",
"self",
".",
"entity",
"(",
"root",
")",
"entity",
"=",
"self",
".",
"process_entity",
"(",
"entity",
")",
"if",
"entity",
":",
"entities",
".",
"append",
"(",
"entity",
")",
"return",
"EntityList",
"(",
"*",
"entities",
")"
]
| Request URL, scrape response and return an EntityList. | [
"Request",
"URL",
"scrape",
"response",
"and",
"return",
"an",
"EntityList",
"."
]
| python | train |
kevin1024/vcrpy | vcr/stubs/__init__.py | https://github.com/kevin1024/vcrpy/blob/114fcd29b43c55896aaa6a6613bc7766f2707c8b/vcr/stubs/__init__.py#L137-L148 | def _uri(self, url):
"""Returns request absolute URI"""
if url and not url.startswith('/'):
# Then this must be a proxy request.
return url
uri = "{0}://{1}{2}{3}".format(
self._protocol,
self.real_connection.host,
self._port_postfix(),
url,
)
return uri | [
"def",
"_uri",
"(",
"self",
",",
"url",
")",
":",
"if",
"url",
"and",
"not",
"url",
".",
"startswith",
"(",
"'/'",
")",
":",
"# Then this must be a proxy request.",
"return",
"url",
"uri",
"=",
"\"{0}://{1}{2}{3}\"",
".",
"format",
"(",
"self",
".",
"_protocol",
",",
"self",
".",
"real_connection",
".",
"host",
",",
"self",
".",
"_port_postfix",
"(",
")",
",",
"url",
",",
")",
"return",
"uri"
]
| Returns request absolute URI | [
"Returns",
"request",
"absolute",
"URI"
]
| python | train |
CamDavidsonPilon/lifelines | lifelines/fitters/__init__.py | https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/__init__.py#L986-L1003 | def hazard_at_times(self, times, label=None):
"""
Return a Pandas series of the predicted hazard at specific times.
Parameters
-----------
times: iterable or float
values to return the hazard at.
label: string, optional
Rename the series returned. Useful for plotting.
Returns
--------
pd.Series
"""
label = coalesce(label, self._label)
return pd.Series(self._hazard(self._fitted_parameters_, times), index=_to_array(times), name=label) | [
"def",
"hazard_at_times",
"(",
"self",
",",
"times",
",",
"label",
"=",
"None",
")",
":",
"label",
"=",
"coalesce",
"(",
"label",
",",
"self",
".",
"_label",
")",
"return",
"pd",
".",
"Series",
"(",
"self",
".",
"_hazard",
"(",
"self",
".",
"_fitted_parameters_",
",",
"times",
")",
",",
"index",
"=",
"_to_array",
"(",
"times",
")",
",",
"name",
"=",
"label",
")"
]
| Return a Pandas series of the predicted hazard at specific times.
Parameters
-----------
times: iterable or float
values to return the hazard at.
label: string, optional
Rename the series returned. Useful for plotting.
Returns
--------
pd.Series | [
"Return",
"a",
"Pandas",
"series",
"of",
"the",
"predicted",
"hazard",
"at",
"specific",
"times",
"."
]
| python | train |
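A possible usage pattern with one of lifelines' parametric fitters (the data values are made up; treat the call pattern, not the numbers, as the point).

from lifelines import WeibullFitter

durations = [5, 6, 7, 8, 9, 10, 11, 12]
observed = [1, 1, 1, 1, 1, 0, 1, 1]

wf = WeibullFitter()
wf.fit(durations, event_observed=observed)
# Returns a pandas Series indexed by the requested times.
print(wf.hazard_at_times([1, 5, 10], label="hazard"))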
atztogo/phonopy | phonopy/interface/qe.py | https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/interface/qe.py#L457-L473 | def _parse_q2r(self, f):
"""Parse q2r output file
The format of q2r output is described at the mailing list below:
http://www.democritos.it/pipermail/pw_forum/2005-April/002408.html
http://www.democritos.it/pipermail/pw_forum/2008-September/010099.html
http://www.democritos.it/pipermail/pw_forum/2009-August/013613.html
https://www.mail-archive.com/[email protected]/msg24388.html
"""
natom, dim, epsilon, borns = self._parse_parameters(f)
fc_dct = {'fc': self._parse_fc(f, natom, dim),
'dimension': dim,
'dielectric': epsilon,
'born': borns}
return fc_dct | [
"def",
"_parse_q2r",
"(",
"self",
",",
"f",
")",
":",
"natom",
",",
"dim",
",",
"epsilon",
",",
"borns",
"=",
"self",
".",
"_parse_parameters",
"(",
"f",
")",
"fc_dct",
"=",
"{",
"'fc'",
":",
"self",
".",
"_parse_fc",
"(",
"f",
",",
"natom",
",",
"dim",
")",
",",
"'dimension'",
":",
"dim",
",",
"'dielectric'",
":",
"epsilon",
",",
"'born'",
":",
"borns",
"}",
"return",
"fc_dct"
]
| Parse q2r output file
The format of q2r output is described at the mailing list below:
http://www.democritos.it/pipermail/pw_forum/2005-April/002408.html
http://www.democritos.it/pipermail/pw_forum/2008-September/010099.html
http://www.democritos.it/pipermail/pw_forum/2009-August/013613.html
https://www.mail-archive.com/[email protected]/msg24388.html | [
"Parse",
"q2r",
"output",
"file"
]
| python | train |
ask/carrot | carrot/messaging.py | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L914-L926 | def _receive_callback(self, raw_message):
"""Internal method used when a message is received in consume mode."""
message = self.backend.message_to_python(raw_message)
if self.auto_ack and not message.acknowledged:
message.ack()
try:
decoded = message.decode()
except Exception, exc:
if not self.on_decode_error:
raise
self.on_decode_error(message, exc)
else:
self.receive(decoded, message) | [
"def",
"_receive_callback",
"(",
"self",
",",
"raw_message",
")",
":",
"message",
"=",
"self",
".",
"backend",
".",
"message_to_python",
"(",
"raw_message",
")",
"if",
"self",
".",
"auto_ack",
"and",
"not",
"message",
".",
"acknowledged",
":",
"message",
".",
"ack",
"(",
")",
"try",
":",
"decoded",
"=",
"message",
".",
"decode",
"(",
")",
"except",
"Exception",
",",
"exc",
":",
"if",
"not",
"self",
".",
"on_decode_error",
":",
"raise",
"self",
".",
"on_decode_error",
"(",
"message",
",",
"exc",
")",
"else",
":",
"self",
".",
"receive",
"(",
"decoded",
",",
"message",
")"
]
| Internal method used when a message is received in consume mode. | [
"Internal",
"method",
"used",
"when",
"a",
"message",
"is",
"received",
"in",
"consume",
"mode",
"."
]
| python | train |
GreenBuildingRegistry/yaml-config | yamlconf/config.py | https://github.com/GreenBuildingRegistry/yaml-config/blob/3d4bf4cadd07d4c3b71674077bd7cf16efb6ea10/yamlconf/config.py#L142-L147 | def keys(self, section=None):
"""Provide dict like keys method"""
if not section and self.section:
section = self.section
config = self.config.get(section, {}) if section else self.config
return config.keys() | [
"def",
"keys",
"(",
"self",
",",
"section",
"=",
"None",
")",
":",
"if",
"not",
"section",
"and",
"self",
".",
"section",
":",
"section",
"=",
"self",
".",
"section",
"config",
"=",
"self",
".",
"config",
".",
"get",
"(",
"section",
",",
"{",
"}",
")",
"if",
"section",
"else",
"self",
".",
"config",
"return",
"config",
".",
"keys",
"(",
")"
]
| Provide dict like keys method | [
"Provide",
"dict",
"like",
"keys",
"method"
]
| python | train |
webrecorder/warcio | warcio/archiveiterator.py | https://github.com/webrecorder/warcio/blob/c64c4394805e13256695f51af072c95389397ee9/warcio/archiveiterator.py#L231-L247 | def _next_record(self, next_line):
""" Use loader to parse the record from the reader stream
Supporting warc and arc records
"""
record = self.loader.parse_record_stream(self.reader,
next_line,
self.known_format,
self.no_record_parse,
self.ensure_http_headers)
self.member_info = None
# Track known format for faster parsing of other records
if not self.mixed_arc_warc:
self.known_format = record.format
return record | [
"def",
"_next_record",
"(",
"self",
",",
"next_line",
")",
":",
"record",
"=",
"self",
".",
"loader",
".",
"parse_record_stream",
"(",
"self",
".",
"reader",
",",
"next_line",
",",
"self",
".",
"known_format",
",",
"self",
".",
"no_record_parse",
",",
"self",
".",
"ensure_http_headers",
")",
"self",
".",
"member_info",
"=",
"None",
"# Track known format for faster parsing of other records",
"if",
"not",
"self",
".",
"mixed_arc_warc",
":",
"self",
".",
"known_format",
"=",
"record",
".",
"format",
"return",
"record"
]
| Use loader to parse the record from the reader stream
Supporting warc and arc records | [
"Use",
"loader",
"to",
"parse",
"the",
"record",
"from",
"the",
"reader",
"stream",
"Supporting",
"warc",
"and",
"arc",
"records"
]
| python | train |
wandb/client | wandb/vendor/prompt_toolkit/clipboard/base.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/clipboard/base.py#L46-L51 | def set_text(self, text): # Not abstract.
"""
Shortcut for setting plain text on clipboard.
"""
assert isinstance(text, six.string_types)
self.set_data(ClipboardData(text)) | [
"def",
"set_text",
"(",
"self",
",",
"text",
")",
":",
"# Not abstract.",
"assert",
"isinstance",
"(",
"text",
",",
"six",
".",
"string_types",
")",
"self",
".",
"set_data",
"(",
"ClipboardData",
"(",
"text",
")",
")"
]
| Shortcut for setting plain text on clipboard. | [
"Shortcut",
"for",
"setting",
"plain",
"text",
"on",
"clipboard",
"."
]
| python | train |
costastf/locationsharinglib | _CI/library/patch.py | https://github.com/costastf/locationsharinglib/blob/dcd74b0cdb59b951345df84987238763e50ef282/_CI/library/patch.py#L143-L156 | def xstrip(filename):
""" Make relative path out of absolute by stripping
prefixes used on Linux, OS X and Windows.
This function is critical for security.
"""
while xisabs(filename):
# strip windows drive with all slashes
if re.match(b'\\w:[\\\\/]', filename):
filename = re.sub(b'^\\w+:[\\\\/]+', b'', filename)
# strip all slashes
elif re.match(b'[\\\\/]', filename):
filename = re.sub(b'^[\\\\/]+', b'', filename)
return filename | [
"def",
"xstrip",
"(",
"filename",
")",
":",
"while",
"xisabs",
"(",
"filename",
")",
":",
"# strip windows drive with all slashes",
"if",
"re",
".",
"match",
"(",
"b'\\\\w:[\\\\\\\\/]'",
",",
"filename",
")",
":",
"filename",
"=",
"re",
".",
"sub",
"(",
"b'^\\\\w+:[\\\\\\\\/]+'",
",",
"b''",
",",
"filename",
")",
"# strip all slashes",
"elif",
"re",
".",
"match",
"(",
"b'[\\\\\\\\/]'",
",",
"filename",
")",
":",
"filename",
"=",
"re",
".",
"sub",
"(",
"b'^[\\\\\\\\/]+'",
",",
"b''",
",",
"filename",
")",
"return",
"filename"
]
| Make relative path out of absolute by stripping
prefixes used on Linux, OS X and Windows.
This function is critical for security. | [
"Make",
"relative",
"path",
"out",
"of",
"absolute",
"by",
"stripping",
"prefixes",
"used",
"on",
"Linux",
"OS",
"X",
"and",
"Windows",
"."
]
| python | train |
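The drive-stripping substitution in isolation (the path is a made-up example):

import re

path = b"C:\\temp\\x.txt"
if re.match(b'\\w:[\\\\/]', path):
    # Remove the Windows drive prefix and any following slashes.
    path = re.sub(b'^\\w+:[\\\\/]+', b'', path)
print(path)  # b'temp\\x.txt'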
sdispater/orator | orator/orm/relations/has_many.py | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/has_many.py#L14-L26 | def init_relation(self, models, relation):
"""
Initialize the relation on a set of models.
:type models: list
:type relation: str
"""
for model in models:
model.set_relation(
relation, Result(self._related.new_collection(), self, model)
)
return models | [
"def",
"init_relation",
"(",
"self",
",",
"models",
",",
"relation",
")",
":",
"for",
"model",
"in",
"models",
":",
"model",
".",
"set_relation",
"(",
"relation",
",",
"Result",
"(",
"self",
".",
"_related",
".",
"new_collection",
"(",
")",
",",
"self",
",",
"model",
")",
")",
"return",
"models"
]
| Initialize the relation on a set of models.
:type models: list
:type relation: str | [
"Initialize",
"the",
"relation",
"on",
"a",
"set",
"of",
"models",
"."
]
| python | train |
user-cont/conu | conu/backend/podman/backend.py | https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/podman/backend.py#L139-L155 | def list_images(self):
"""
List all available podman images.
:return: collection of instances of :class:`conu.PodmanImage`
"""
images = []
for image in self._list_all_podman_images():
try:
i_name, tag = parse_reference(image["names"][0])
except (IndexError, TypeError):
i_name, tag = None, None
d_im = PodmanImage(i_name, tag=tag, identifier=image["id"],
pull_policy=PodmanImagePullPolicy.NEVER)
images.append(d_im)
return images | [
"def",
"list_images",
"(",
"self",
")",
":",
"images",
"=",
"[",
"]",
"for",
"image",
"in",
"self",
".",
"_list_all_podman_images",
"(",
")",
":",
"try",
":",
"i_name",
",",
"tag",
"=",
"parse_reference",
"(",
"image",
"[",
"\"names\"",
"]",
"[",
"0",
"]",
")",
"except",
"(",
"IndexError",
",",
"TypeError",
")",
":",
"i_name",
",",
"tag",
"=",
"None",
",",
"None",
"d_im",
"=",
"PodmanImage",
"(",
"i_name",
",",
"tag",
"=",
"tag",
",",
"identifier",
"=",
"image",
"[",
"\"id\"",
"]",
",",
"pull_policy",
"=",
"PodmanImagePullPolicy",
".",
"NEVER",
")",
"images",
".",
"append",
"(",
"d_im",
")",
"return",
"images"
]
| List all available podman images.
:return: collection of instances of :class:`conu.PodmanImage` | [
"List",
"all",
"available",
"podman",
"images",
"."
]
| python | train |
Unidata/MetPy | metpy/calc/basic.py | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/basic.py#L431-L468 | def geopotential_to_height(geopot):
r"""Compute height from a given geopotential.
Parameters
----------
geopotential : `pint.Quantity`
Geopotential (array_like)
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> from metpy.constants import g, G, me, Re
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0,10000, num = 11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305888
39251.39289118 49056.54621087 58858.62446525 68657.62910064
78453.56156253 88246.42329545 98036.21574306], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
Notes
-----
Derived from definition of geopotential in [Hobbs2006]_ pg.14 Eq.1.8.
"""
# Calculate geopotential
height = (((1 / mpconsts.Re) - (geopot / (mpconsts.G * mpconsts.me))) ** -1) - mpconsts.Re
return height | [
"def",
"geopotential_to_height",
"(",
"geopot",
")",
":",
"# Calculate geopotential",
"height",
"=",
"(",
"(",
"(",
"1",
"/",
"mpconsts",
".",
"Re",
")",
"-",
"(",
"geopot",
"/",
"(",
"mpconsts",
".",
"G",
"*",
"mpconsts",
".",
"me",
")",
")",
")",
"**",
"-",
"1",
")",
"-",
"mpconsts",
".",
"Re",
"return",
"height"
]
| r"""Compute height from a given geopotential.
Parameters
----------
geopotential : `pint.Quantity`
Geopotential (array_like)
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> from metpy.constants import g, G, me, Re
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0,10000, num = 11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305888
39251.39289118 49056.54621087 58858.62446525 68657.62910064
78453.56156253 88246.42329545 98036.21574306], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
Notes
-----
Derived from definition of geopotential in [Hobbs2006]_ pg.14 Eq.1.8. | [
"r",
"Compute",
"height",
"from",
"a",
"given",
"geopotential",
"."
]
| python | train |
SwoopSearch/pyaddress | address/address.py | https://github.com/SwoopSearch/pyaddress/blob/62ebb07a6840e710d256406a8ec1d06abec0e1c4/address/address.py#L250-L279 | def preprocess_address(self, address):
"""
Takes a basic address and attempts to clean it up, extract reasonably assured bits that may throw off the
rest of the parsing, and return the cleaned address.
"""
# Run some basic cleaning
address = address.replace("# ", "#")
address = address.replace(" & ", "&")
# Clear the address of things like 'X units', which shouldn't be in an address anyway. We won't save this for now.
if re.search(r"-?-?\w+ units", address, re.IGNORECASE):
address = re.sub(r"-?-?\w+ units", "", address, flags=re.IGNORECASE)
# Sometimes buildings are put in parentheses.
# building_match = re.search(r"\(.*\)", address, re.IGNORECASE)
# if building_match:
# self.building = self._clean(building_match.group().replace('(', '').replace(')', ''))
# address = re.sub(r"\(.*\)", "", address, flags=re.IGNORECASE)
# Now let's get the apartment stuff out of the way. Using only sure match regexes, delete apartment parts from
# the address. This prevents things like "Unit" being the street name.
apartment_regexes = [r'#\w+ & \w+', '#\w+ rm \w+', "#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+',
r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*',
r'style\s\w{1,2}', r'townhouse style\s\w{1,2}']
for regex in apartment_regexes:
apartment_match = re.search(regex, address, re.IGNORECASE)
if apartment_match:
# print "Matched regex: ", regex, apartment_match.group()
self.apartment = self._clean(apartment_match.group())
address = re.sub(regex, "", address, flags=re.IGNORECASE)
# Now check for things like ", ," which throw off dstk
address = re.sub(r"\,\s*\,", ",", address)
return address | [
"def",
"preprocess_address",
"(",
"self",
",",
"address",
")",
":",
"# Run some basic cleaning",
"address",
"=",
"address",
".",
"replace",
"(",
"\"# \"",
",",
"\"#\"",
")",
"address",
"=",
"address",
".",
"replace",
"(",
"\" & \"",
",",
"\"&\"",
")",
"# Clear the address of things like 'X units', which shouldn't be in an address anyway. We won't save this for now.",
"if",
"re",
".",
"search",
"(",
"r\"-?-?\\w+ units\"",
",",
"address",
",",
"re",
".",
"IGNORECASE",
")",
":",
"address",
"=",
"re",
".",
"sub",
"(",
"r\"-?-?\\w+ units\"",
",",
"\"\"",
",",
"address",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"# Sometimes buildings are put in parantheses.",
"# building_match = re.search(r\"\\(.*\\)\", address, re.IGNORECASE)",
"# if building_match:",
"# self.building = self._clean(building_match.group().replace('(', '').replace(')', ''))",
"# address = re.sub(r\"\\(.*\\)\", \"\", address, flags=re.IGNORECASE)",
"# Now let's get the apartment stuff out of the way. Using only sure match regexes, delete apartment parts from",
"# the address. This prevents things like \"Unit\" being the street name.",
"apartment_regexes",
"=",
"[",
"r'#\\w+ & \\w+'",
",",
"'#\\w+ rm \\w+'",
",",
"\"#\\w+-\\w\"",
",",
"r'apt #{0,1}\\w+'",
",",
"r'apartment #{0,1}\\w+'",
",",
"r'#\\w+'",
",",
"r'# \\w+'",
",",
"r'rm \\w+'",
",",
"r'unit #?\\w+'",
",",
"r'units #?\\w+'",
",",
"r'- #{0,1}\\w+'",
",",
"r'no\\s?\\d+\\w*'",
",",
"r'style\\s\\w{1,2}'",
",",
"r'townhouse style\\s\\w{1,2}'",
"]",
"for",
"regex",
"in",
"apartment_regexes",
":",
"apartment_match",
"=",
"re",
".",
"search",
"(",
"regex",
",",
"address",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"apartment_match",
":",
"# print \"Matched regex: \", regex, apartment_match.group()",
"self",
".",
"apartment",
"=",
"self",
".",
"_clean",
"(",
"apartment_match",
".",
"group",
"(",
")",
")",
"address",
"=",
"re",
".",
"sub",
"(",
"regex",
",",
"\"\"",
",",
"address",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"# Now check for things like \", ,\" which throw off dstk",
"address",
"=",
"re",
".",
"sub",
"(",
"r\"\\,\\s*\\,\"",
",",
"\",\"",
",",
"address",
")",
"return",
"address"
]
| Takes a basic address and attempts to clean it up, extract reasonably assured bits that may throw off the
rest of the parsing, and return the cleaned address. | [
"Takes",
"a",
"basic",
"address",
"and",
"attempts",
"to",
"clean",
"it",
"up",
"extract",
"reasonably",
"assured",
"bits",
"that",
"may",
"throw",
"off",
"the",
"rest",
"of",
"the",
"parsing",
"and",
"return",
"the",
"cleaned",
"address",
"."
]
| python | train |
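One of the apartment regexes in isolation, showing the extract-then-delete pattern (the address is a made-up example):

import re

address = "123 Main St Apt #4B, Springfield"
match = re.search(r'apt #{0,1}\w+', address, re.IGNORECASE)
if match:
    apartment = match.group()  # 'Apt #4B'
    address = re.sub(r'apt #{0,1}\w+', "", address, flags=re.IGNORECASE)
print(apartment, "|", address)  # Apt #4B | 123 Main St , Springfield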
timothyb0912/pylogit | pylogit/base_multinomial_cm_v2.py | https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L386-L407 | def check_num_rows_of_parameter_array(param_array, correct_num_rows, title):
"""
Ensures that `param_array.shape[0]` has the correct magnitude. Raises a
helpful ValueError if otherwise.
Parameters
----------
param_array : ndarray.
correct_num_rows : int.
The int that `param_array.shape[0]` should equal.
title : str.
The 'name' of the param_array whose shape is being checked.
Results
-------
None.
"""
if param_array.shape[0] != correct_num_rows:
msg = "{}.shape[0] should equal {}, but it does not"
raise ValueError(msg.format(title, correct_num_rows))
return None | [
"def",
"check_num_rows_of_parameter_array",
"(",
"param_array",
",",
"correct_num_rows",
",",
"title",
")",
":",
"if",
"param_array",
".",
"shape",
"[",
"0",
"]",
"!=",
"correct_num_rows",
":",
"msg",
"=",
"\"{}.shape[0] should equal {}, but it does not\"",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"title",
",",
"correct_num_rows",
")",
")",
"return",
"None"
]
| Ensures that `param_array.shape[0]` has the correct magnitude. Raises a
helpful ValueError if otherwise.
Parameters
----------
param_array : ndarray.
correct_num_rows : int.
The int that `param_array.shape[0]` should equal.
title : str.
The 'name' of the param_array whose shape is being checked.
Results
-------
None. | [
"Ensures",
"that",
"param_array",
".",
"shape",
"[",
"0",
"]",
"has",
"the",
"correct",
"magnitude",
".",
"Raises",
"a",
"helpful",
"ValueError",
"if",
"otherwise",
"."
]
| python | train |
facebook/watchman | python/pywatchman/__init__.py | https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/python/pywatchman/__init__.py#L397-L434 | def _get_overlapped_result_ex_impl(pipe, olap, nbytes, millis, alertable):
""" Windows 7 and earlier does not support GetOverlappedResultEx. The
alternative is to use GetOverlappedResult and wait for read or write
operation to complete. This is done by using CreateEvent and
WaitForSingleObjectEx. CreateEvent, WaitForSingleObjectEx
and GetOverlappedResult are all part of Windows API since WindowsXP.
This is the exact same implementation that can be found in the watchman
source code (see get_overlapped_result_ex_impl in stream_win.c). This
way, maintenance should be simplified.
"""
log("Preparing to wait for maximum %dms", millis)
if millis != 0:
waitReturnCode = WaitForSingleObjectEx(olap.hEvent, millis, alertable)
if waitReturnCode == WAIT_OBJECT_0:
# Event is signaled, overlapped IO operation result should be available.
pass
elif waitReturnCode == WAIT_IO_COMPLETION:
# WaitForSingleObjectEx returns because the system added an I/O completion
# routine or an asynchronous procedure call (APC) to the thread queue.
SetLastError(WAIT_IO_COMPLETION)
pass
elif waitReturnCode == WAIT_TIMEOUT:
# We reached the maximum allowed wait time, the IO operation failed
# to complete in timely fashion.
SetLastError(WAIT_TIMEOUT)
return False
elif waitReturnCode == WAIT_FAILED:
# something went wrong calling WaitForSingleObjectEx
err = GetLastError()
log("WaitForSingleObjectEx failed: %s", _win32_strerror(err))
return False
else:
# unexpected situation deserving investigation.
err = GetLastError()
log("Unexpected error: %s", _win32_strerror(err))
return False
return GetOverlappedResult(pipe, olap, nbytes, False) | [
"def",
"_get_overlapped_result_ex_impl",
"(",
"pipe",
",",
"olap",
",",
"nbytes",
",",
"millis",
",",
"alertable",
")",
":",
"log",
"(",
"\"Preparing to wait for maximum %dms\"",
",",
"millis",
")",
"if",
"millis",
"!=",
"0",
":",
"waitReturnCode",
"=",
"WaitForSingleObjectEx",
"(",
"olap",
".",
"hEvent",
",",
"millis",
",",
"alertable",
")",
"if",
"waitReturnCode",
"==",
"WAIT_OBJECT_0",
":",
"# Event is signaled, overlapped IO operation result should be available.",
"pass",
"elif",
"waitReturnCode",
"==",
"WAIT_IO_COMPLETION",
":",
"# WaitForSingleObjectEx returnes because the system added an I/O completion",
"# routine or an asynchronous procedure call (APC) to the thread queue.",
"SetLastError",
"(",
"WAIT_IO_COMPLETION",
")",
"pass",
"elif",
"waitReturnCode",
"==",
"WAIT_TIMEOUT",
":",
"# We reached the maximum allowed wait time, the IO operation failed",
"# to complete in timely fashion.",
"SetLastError",
"(",
"WAIT_TIMEOUT",
")",
"return",
"False",
"elif",
"waitReturnCode",
"==",
"WAIT_FAILED",
":",
"# something went wrong calling WaitForSingleObjectEx",
"err",
"=",
"GetLastError",
"(",
")",
"log",
"(",
"\"WaitForSingleObjectEx failed: %s\"",
",",
"_win32_strerror",
"(",
"err",
")",
")",
"return",
"False",
"else",
":",
"# unexpected situation deserving investigation.",
"err",
"=",
"GetLastError",
"(",
")",
"log",
"(",
"\"Unexpected error: %s\"",
",",
"_win32_strerror",
"(",
"err",
")",
")",
"return",
"False",
"return",
"GetOverlappedResult",
"(",
"pipe",
",",
"olap",
",",
"nbytes",
",",
"False",
")"
]
| Windows 7 and earlier does not support GetOverlappedResultEx. The
alternative is to use GetOverlappedResult and wait for read or write
operation to complete. This is done by using CreateEvent and
WaitForSingleObjectEx. CreateEvent, WaitForSingleObjectEx
and GetOverlappedResult are all part of Windows API since WindowsXP.
This is the exact same implementation that can be found in the watchman
source code (see get_overlapped_result_ex_impl in stream_win.c). This
way, maintenance should be simplified. | [
"Windows",
"7",
"and",
"earlier",
"does",
"not",
"support",
"GetOverlappedResultEx",
".",
"The",
"alternative",
"is",
"to",
"use",
"GetOverlappedResult",
"and",
"wait",
"for",
"read",
"or",
"write",
"operation",
"to",
"complete",
".",
"This",
"is",
"done",
"be",
"using",
"CreateEvent",
"and",
"WaitForSingleObjectEx",
".",
"CreateEvent",
"WaitForSingleObjectEx",
"and",
"GetOverlappedResult",
"are",
"all",
"part",
"of",
"Windows",
"API",
"since",
"WindowsXP",
".",
"This",
"is",
"the",
"exact",
"same",
"implementation",
"that",
"can",
"be",
"found",
"in",
"the",
"watchman",
"source",
"code",
"(",
"see",
"get_overlapped_result_ex_impl",
"in",
"stream_win",
".",
"c",
")",
".",
"This",
"way",
"maintenance",
"should",
"be",
"simplified",
"."
]
| python | train |
couchbase/couchbase-python-client | couchbase/bucket.py | https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucket.py#L1159-L1167 | def replace_multi(self, keys, ttl=0, format=None,
persist_to=0, replicate_to=0):
"""Replace multiple keys. Multi variant of :meth:`replace`
.. seealso:: :meth:`replace`, :meth:`upsert_multi`, :meth:`upsert`
"""
return _Base.replace_multi(self, keys, ttl=ttl, format=format,
persist_to=persist_to,
replicate_to=replicate_to) | [
"def",
"replace_multi",
"(",
"self",
",",
"keys",
",",
"ttl",
"=",
"0",
",",
"format",
"=",
"None",
",",
"persist_to",
"=",
"0",
",",
"replicate_to",
"=",
"0",
")",
":",
"return",
"_Base",
".",
"replace_multi",
"(",
"self",
",",
"keys",
",",
"ttl",
"=",
"ttl",
",",
"format",
"=",
"format",
",",
"persist_to",
"=",
"persist_to",
",",
"replicate_to",
"=",
"replicate_to",
")"
]
| Replace multiple keys. Multi variant of :meth:`replace`
.. seealso:: :meth:`replace`, :meth:`upsert_multi`, :meth:`upsert` | [
"Replace",
"multiple",
"keys",
".",
"Multi",
"variant",
"of",
":",
"meth",
":",
"replace"
]
| python | train |
pgxcentre/geneparse | geneparse/index/impute2.py | https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/index/impute2.py#L59-L92 | def generate_index(fn, cols=None, names=None, sep=" "):
"""Build a index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
"""
# Some assertions
assert cols is not None, "'cols' was not set"
assert names is not None, "'names' was not set"
assert len(cols) == len(names)
# Getting the open function
bgzip, open_func = get_open_func(fn, return_fmt=True)
# Reading the required columns
data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
compression="gzip" if bgzip else None)
# Getting the seek information
f = open_func(fn, "rb")
data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
f.close()
# Saving the index to file
write_index(get_index_fn(fn), data)
return data | [
"def",
"generate_index",
"(",
"fn",
",",
"cols",
"=",
"None",
",",
"names",
"=",
"None",
",",
"sep",
"=",
"\" \"",
")",
":",
"# Some assertions",
"assert",
"cols",
"is",
"not",
"None",
",",
"\"'cols' was not set\"",
"assert",
"names",
"is",
"not",
"None",
",",
"\"'names' was not set\"",
"assert",
"len",
"(",
"cols",
")",
"==",
"len",
"(",
"names",
")",
"# Getting the open function",
"bgzip",
",",
"open_func",
"=",
"get_open_func",
"(",
"fn",
",",
"return_fmt",
"=",
"True",
")",
"# Reading the required columns",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"fn",
",",
"sep",
"=",
"sep",
",",
"engine",
"=",
"\"c\"",
",",
"usecols",
"=",
"cols",
",",
"names",
"=",
"names",
",",
"compression",
"=",
"\"gzip\"",
"if",
"bgzip",
"else",
"None",
")",
"# Getting the seek information",
"f",
"=",
"open_func",
"(",
"fn",
",",
"\"rb\"",
")",
"data",
"[",
"\"seek\"",
"]",
"=",
"np",
".",
"fromiter",
"(",
"_seek_generator",
"(",
"f",
")",
",",
"dtype",
"=",
"np",
".",
"uint",
")",
"[",
":",
"-",
"1",
"]",
"f",
".",
"close",
"(",
")",
"# Saving the index to file",
"write_index",
"(",
"get_index_fn",
"(",
"fn",
")",
",",
"data",
")",
"return",
"data"
]
| Build a index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index. | [
"Build",
"a",
"index",
"for",
"the",
"given",
"file",
"."
]
| python | train |
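The seek-offset idea on its own: record the starting byte offset of every line so rows can later be fetched directly with seek(). The file name in the usage comment is an assumption.

def line_offsets(path):
    # Starting byte offset of every line in the file.
    offsets = []
    with open(path, "rb") as f:
        pos = f.tell()
        for line in iter(f.readline, b""):
            offsets.append(pos)
            pos = f.tell()
    return offsets

# offsets = line_offsets("genotypes.impute2")
# with open("genotypes.impute2", "rb") as f:
#     f.seek(offsets[2])      # jump straight to the third variant line
#     print(f.readline())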
tcalmant/ipopo | pelix/ipopo/constants.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/constants.py#L245-L270 | def use_waiting_list(bundle_context):
# type: (BundleContext) -> Any
"""
Utility context to use the iPOPO waiting list safely in a "with" block.
It looks after the iPOPO waiting list service and releases its
reference when exiting the context.
:param bundle_context: The calling bundle context
:return: The iPOPO waiting list service
:raise BundleException: Service not found
"""
# Get the service and its reference
ref = bundle_context.get_service_reference(SERVICE_IPOPO_WAITING_LIST)
if ref is None:
raise BundleException("No iPOPO waiting list service available")
try:
# Give the service
yield bundle_context.get_service(ref)
finally:
try:
# Release it
bundle_context.unget_service(ref)
except BundleException:
# Service might have already been unregistered
pass | [
"def",
"use_waiting_list",
"(",
"bundle_context",
")",
":",
"# type: (BundleContext) -> Any",
"# Get the service and its reference",
"ref",
"=",
"bundle_context",
".",
"get_service_reference",
"(",
"SERVICE_IPOPO_WAITING_LIST",
")",
"if",
"ref",
"is",
"None",
":",
"raise",
"BundleException",
"(",
"\"No iPOPO waiting list service available\"",
")",
"try",
":",
"# Give the service",
"yield",
"bundle_context",
".",
"get_service",
"(",
"ref",
")",
"finally",
":",
"try",
":",
"# Release it",
"bundle_context",
".",
"unget_service",
"(",
"ref",
")",
"except",
"BundleException",
":",
"# Service might have already been unregistered",
"pass"
]
| Utility context to use the iPOPO waiting list safely in a "with" block.
It looks after the iPOPO waiting list service and releases its
reference when exiting the context.
:param bundle_context: The calling bundle context
:return: The iPOPO waiting list service
:raise BundleException: Service not found | [
"Utility",
"context",
"to",
"use",
"the",
"iPOPO",
"waiting",
"list",
"safely",
"in",
"a",
"with",
"block",
".",
"It",
"looks",
"after",
"the",
"the",
"iPOPO",
"waiting",
"list",
"service",
"and",
"releases",
"its",
"reference",
"when",
"exiting",
"the",
"context",
"."
]
| python | train |
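The same acquire/release pattern as a reusable context manager; `context` is any object exposing the three methods used above, and the error type is a stand-in for BundleException.

from contextlib import contextmanager

@contextmanager
def use_service(context, specification):
    ref = context.get_service_reference(specification)
    if ref is None:
        raise LookupError("No service found for %s" % specification)
    try:
        yield context.get_service(ref)
    finally:
        # Release the reference even if the body raised; ignore a service that
        # was unregistered in the meantime.
        try:
            context.unget_service(ref)
        except Exception:
            pass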
duniter/duniter-python-api | duniterpy/helpers.py | https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/helpers.py#L31-L42 | def xor_bytes(b1: bytes, b2: bytes) -> bytearray:
"""
Apply XOR operation on two bytes arguments
:param b1: First bytes argument
:param b2: Second bytes argument
:rtype bytearray:
"""
result = bytearray()
for i1, i2 in zip(b1, b2):
result.append(i1 ^ i2)
return result | [
"def",
"xor_bytes",
"(",
"b1",
":",
"bytes",
",",
"b2",
":",
"bytes",
")",
"->",
"bytearray",
":",
"result",
"=",
"bytearray",
"(",
")",
"for",
"i1",
",",
"i2",
"in",
"zip",
"(",
"b1",
",",
"b2",
")",
":",
"result",
".",
"append",
"(",
"i1",
"^",
"i2",
")",
"return",
"result"
]
| Apply XOR operation on two bytes arguments
:param b1: First bytes argument
:param b2: Second bytes argument
:rtype bytearray: | [
"Apply",
"XOR",
"operation",
"on",
"two",
"bytes",
"arguments"
]
| python | train |
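Quick usage check, assuming xor_bytes is imported from duniterpy.helpers (the values are arbitrary):

from duniterpy.helpers import xor_bytes

result = xor_bytes(b"\x0f\x0f", b"\xff\x00")
print(result)  # bytearray(b'\xf0\x0f')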
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L231-L267 | def getargs(obj):
"""Get the names and default values of a function's arguments"""
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = get_meth_func(obj)
elif inspect.isclass(obj) and hasattr(obj, '__init__'):
func_obj = getattr(obj, '__init__')
else:
return []
if not hasattr(func_obj, 'func_code'):
# Builtin: try to extract info from doc
args = getargsfromdoc(func_obj)
if args is not None:
return args
else:
# Example: PyQt5
return getargsfromdoc(obj)
args, _, _ = inspect.getargs(func_obj.func_code)
if not args:
return getargsfromdoc(obj)
# Supporting tuple arguments in def statement:
for i_arg, arg in enumerate(args):
if isinstance(arg, list):
args[i_arg] = "(%s)" % ", ".join(arg)
defaults = get_func_defaults(func_obj)
if defaults is not None:
for index, default in enumerate(defaults):
args[index+len(args)-len(defaults)] += '='+repr(default)
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(args) == 1:
return None
if 'self' in args:
args.remove('self')
return args | [
"def",
"getargs",
"(",
"obj",
")",
":",
"if",
"inspect",
".",
"isfunction",
"(",
"obj",
")",
"or",
"inspect",
".",
"isbuiltin",
"(",
"obj",
")",
":",
"func_obj",
"=",
"obj",
"elif",
"inspect",
".",
"ismethod",
"(",
"obj",
")",
":",
"func_obj",
"=",
"get_meth_func",
"(",
"obj",
")",
"elif",
"inspect",
".",
"isclass",
"(",
"obj",
")",
"and",
"hasattr",
"(",
"obj",
",",
"'__init__'",
")",
":",
"func_obj",
"=",
"getattr",
"(",
"obj",
",",
"'__init__'",
")",
"else",
":",
"return",
"[",
"]",
"if",
"not",
"hasattr",
"(",
"func_obj",
",",
"'func_code'",
")",
":",
"# Builtin: try to extract info from doc",
"args",
"=",
"getargsfromdoc",
"(",
"func_obj",
")",
"if",
"args",
"is",
"not",
"None",
":",
"return",
"args",
"else",
":",
"# Example: PyQt5",
"return",
"getargsfromdoc",
"(",
"obj",
")",
"args",
",",
"_",
",",
"_",
"=",
"inspect",
".",
"getargs",
"(",
"func_obj",
".",
"func_code",
")",
"if",
"not",
"args",
":",
"return",
"getargsfromdoc",
"(",
"obj",
")",
"# Supporting tuple arguments in def statement:",
"for",
"i_arg",
",",
"arg",
"in",
"enumerate",
"(",
"args",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"list",
")",
":",
"args",
"[",
"i_arg",
"]",
"=",
"\"(%s)\"",
"%",
"\", \"",
".",
"join",
"(",
"arg",
")",
"defaults",
"=",
"get_func_defaults",
"(",
"func_obj",
")",
"if",
"defaults",
"is",
"not",
"None",
":",
"for",
"index",
",",
"default",
"in",
"enumerate",
"(",
"defaults",
")",
":",
"args",
"[",
"index",
"+",
"len",
"(",
"args",
")",
"-",
"len",
"(",
"defaults",
")",
"]",
"+=",
"'='",
"+",
"repr",
"(",
"default",
")",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
")",
"or",
"inspect",
".",
"ismethod",
"(",
"obj",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"return",
"None",
"if",
"'self'",
"in",
"args",
":",
"args",
".",
"remove",
"(",
"'self'",
")",
"return",
"args"
]
| Get the names and default values of a function's arguments | [
"Get",
"the",
"names",
"and",
"default",
"values",
"of",
"a",
"function",
"s",
"arguments"
]
| python | train |
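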
awslabs/sockeye | sockeye/utils.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L909-L916 | def metric_value_is_better(new: float, old: float, metric: str) -> bool:
"""
Returns true if new value is strictly better than old for given metric.
"""
if C.METRIC_MAXIMIZE[metric]:
return new > old
else:
return new < old | [
"def",
"metric_value_is_better",
"(",
"new",
":",
"float",
",",
"old",
":",
"float",
",",
"metric",
":",
"str",
")",
"->",
"bool",
":",
"if",
"C",
".",
"METRIC_MAXIMIZE",
"[",
"metric",
"]",
":",
"return",
"new",
">",
"old",
"else",
":",
"return",
"new",
"<",
"old"
]
| Returns true if new value is strictly better than old for given metric. | [
"Returns",
"true",
"if",
"new",
"value",
"is",
"strictly",
"better",
"than",
"old",
"for",
"given",
"metric",
"."
]
| python | train |
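The comparison hinges on a maximize/minimize lookup such as C.METRIC_MAXIMIZE; a stand-in version with illustrative metric names:

METRIC_MAXIMIZE = {"bleu": True, "perplexity": False}  # illustrative stand-in

def value_is_better(new, old, metric):
    return new > old if METRIC_MAXIMIZE[metric] else new < old

print(value_is_better(30.1, 29.8, "bleu"))        # True  (higher BLEU is better)
print(value_is_better(12.0, 10.5, "perplexity"))  # False (lower perplexity is better)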
ruipgil/TrackToTrip | tracktotrip/track.py | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/track.py#L99-L115 | def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
""" In-place simplification of segments
Args:
max_dist_error (float): Maximum distance error, in meters
max_speed_error (float): Maximum speed error, in km/h
topology_only: Boolean, optional. True to keep
the topology, neglecting velocity and time
accuracy (use common Douglas-Ramen-Peucker).
False (default) to simplify segments keeping
the velocity between points.
Returns:
This track
"""
for segment in self.segments:
segment.simplify(eps, max_dist_error, max_speed_error, topology_only)
return self | [
"def",
"simplify",
"(",
"self",
",",
"eps",
",",
"max_dist_error",
",",
"max_speed_error",
",",
"topology_only",
"=",
"False",
")",
":",
"for",
"segment",
"in",
"self",
".",
"segments",
":",
"segment",
".",
"simplify",
"(",
"eps",
",",
"max_dist_error",
",",
"max_speed_error",
",",
"topology_only",
")",
"return",
"self"
]
| In-place simplification of segments
Args:
max_dist_error (float): Maximum distance error, in meters
max_speed_error (float): Maximum speed error, in km/h
topology_only: Boolean, optional. True to keep
the topology, neglecting velocity and time
accuracy (use common Douglas-Ramen-Peucker).
False (default) to simplify segments keeping
the velocity between points.
Returns:
This track | [
"In",
"-",
"place",
"simplification",
"of",
"segments"
]
| python | train |
johnwmillr/LyricsGenius | lyricsgenius/api.py | https://github.com/johnwmillr/LyricsGenius/blob/e36482f7c42235037f3b9b7013edcd54141124e3/lyricsgenius/api.py#L336-L381 | def save_artists(self, artists, filename="artist_lyrics", overwrite=False):
"""Save lyrics from multiple Artist objects as JSON object
:param artists: List of Artist objects to save lyrics from
:param filename: Name of output file (json)
:param overwrite: Overwrites preexisting file if True
"""
if isinstance(artists, Artist):
artists = [artists]
# Create a temporary directory for lyrics
start = time.time()
tmp_dir = 'tmp_lyrics'
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
count = 0
else:
count = len(os.listdir(tmp_dir))
# Check if file already exists
if os.path.isfile(filename + ".json") and not overwrite:
msg = "{f} already exists. Overwrite?\n(y/n): ".format(f=filename)
if input(msg).lower() != "y":
print("Leaving file in place. Exiting.")
os.rmdir(tmp_dir)
return
# Extract each artist's lyrics in json format
all_lyrics = {'artists': []}
for n, artist in enumerate(artists):
if isinstance(artist, Artist):
all_lyrics['artists'].append({})
f = "tmp_{n}_{a}".format(n=count + n,
a=artist.name.replace(" ", ""))
tmp_file = os.path.join(tmp_dir, f)
if self.verbose:
print(tmp_file)
all_lyrics['artists'][-1] = artist.save_lyrics(overwrite=True)
# Save all of the lyrics
with open(filename + '.json', 'w') as outfile:
json.dump(all_lyrics, outfile)
# Delete the temporary directory
shutil.rmtree(tmp_dir)
elapsed = (time.time() - start) / 60 / 60
print("Time elapsed: {t} hours".format(t=elapsed)) | [
"def",
"save_artists",
"(",
"self",
",",
"artists",
",",
"filename",
"=",
"\"artist_lyrics\"",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"artists",
",",
"Artist",
")",
":",
"artists",
"=",
"[",
"artists",
"]",
"# Create a temporary directory for lyrics",
"start",
"=",
"time",
".",
"time",
"(",
")",
"tmp_dir",
"=",
"'tmp_lyrics'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tmp_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"tmp_dir",
")",
"count",
"=",
"0",
"else",
":",
"count",
"=",
"len",
"(",
"os",
".",
"listdir",
"(",
"tmp_dir",
")",
")",
"# Check if file already exists",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
"+",
"\".json\"",
")",
"and",
"not",
"overwrite",
":",
"msg",
"=",
"\"{f} already exists. Overwrite?\\n(y/n): \"",
".",
"format",
"(",
"f",
"=",
"filename",
")",
"if",
"input",
"(",
"msg",
")",
".",
"lower",
"(",
")",
"!=",
"\"y\"",
":",
"print",
"(",
"\"Leaving file in place. Exiting.\"",
")",
"os",
".",
"rmdir",
"(",
"tmp_dir",
")",
"return",
"# Extract each artist's lyrics in json format",
"all_lyrics",
"=",
"{",
"'artists'",
":",
"[",
"]",
"}",
"for",
"n",
",",
"artist",
"in",
"enumerate",
"(",
"artists",
")",
":",
"if",
"isinstance",
"(",
"artist",
",",
"Artist",
")",
":",
"all_lyrics",
"[",
"'artists'",
"]",
".",
"append",
"(",
"{",
"}",
")",
"f",
"=",
"\"tmp_{n}_{a}\"",
".",
"format",
"(",
"n",
"=",
"count",
"+",
"n",
",",
"a",
"=",
"artist",
".",
"name",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
")",
"tmp_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"f",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"tmp_file",
")",
"all_lyrics",
"[",
"'artists'",
"]",
"[",
"-",
"1",
"]",
"=",
"artist",
".",
"save_lyrics",
"(",
"overwrite",
"=",
"True",
")",
"# Save all of the lyrics",
"with",
"open",
"(",
"filename",
"+",
"'.json'",
",",
"'w'",
")",
"as",
"outfile",
":",
"json",
".",
"dump",
"(",
"all_lyrics",
",",
"outfile",
")",
"# Delete the temporary directory",
"shutil",
".",
"rmtree",
"(",
"tmp_dir",
")",
"elapsed",
"=",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
"/",
"60",
"/",
"60",
"print",
"(",
"\"Time elapsed: {t} hours\"",
".",
"format",
"(",
"t",
"=",
"elapsed",
")",
")"
]
| Save lyrics from multiple Artist objects as JSON object
:param artists: List of Artist objects to save lyrics from
:param filename: Name of output file (json)
:param overwrite: Overwrites preexisting file if True | [
"Save",
"lyrics",
"from",
"multiple",
"Artist",
"objects",
"as",
"JSON",
"object",
":",
"param",
"artists",
":",
"List",
"of",
"Artist",
"objects",
"to",
"save",
"lyrics",
"from",
":",
"param",
"filename",
":",
"Name",
"of",
"output",
"file",
"(",
"json",
")",
":",
"param",
"overwrite",
":",
"Overwrites",
"preexisting",
"file",
"if",
"True"
]
| python | train |
pricingassistant/mongokat | mongokat/collection.py | https://github.com/pricingassistant/mongokat/blob/61eaf4bc1c4cc359c6f9592ec97b9a04d9561411/mongokat/collection.py#L225-L240 | def find_by_ids(self, _ids, projection=None, **kwargs):
"""
Does a big _id:$in query on any iterator
"""
id_list = [ObjectId(_id) for _id in _ids]
if len(_ids) == 0:
return [] # FIXME : this should be an empty cursor !
# Optimized path when only fetching the _id field.
# Be mindful this might not filter missing documents that may not have been returned, had we done the query.
if projection is not None and list(projection.keys()) == ["_id"]:
return [self({"_id": x}, fetched_fields={"_id": True}) for x in id_list]
else:
return self.find({"_id": {"$in": id_list}}, projection=projection, **kwargs) | [
"def",
"find_by_ids",
"(",
"self",
",",
"_ids",
",",
"projection",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"id_list",
"=",
"[",
"ObjectId",
"(",
"_id",
")",
"for",
"_id",
"in",
"_ids",
"]",
"if",
"len",
"(",
"_ids",
")",
"==",
"0",
":",
"return",
"[",
"]",
"# FIXME : this should be an empty cursor !",
"# Optimized path when only fetching the _id field.",
"# Be mindful this might not filter missing documents that may not have been returned, had we done the query.",
"if",
"projection",
"is",
"not",
"None",
"and",
"list",
"(",
"projection",
".",
"keys",
"(",
")",
")",
"==",
"[",
"\"_id\"",
"]",
":",
"return",
"[",
"self",
"(",
"{",
"\"_id\"",
":",
"x",
"}",
",",
"fetched_fields",
"=",
"{",
"\"_id\"",
":",
"True",
"}",
")",
"for",
"x",
"in",
"id_list",
"]",
"else",
":",
"return",
"self",
".",
"find",
"(",
"{",
"\"_id\"",
":",
"{",
"\"$in\"",
":",
"id_list",
"}",
"}",
",",
"projection",
"=",
"projection",
",",
"*",
"*",
"kwargs",
")"
]
| Does a big _id:$in query on any iterator | [
"Does",
"a",
"big",
"_id",
":",
"$in",
"query",
"on",
"any",
"iterator"
]
| python | train |
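The underlying query is a plain $in lookup; a pymongo sketch with a made-up collection and ids:

from bson import ObjectId

def find_docs_by_ids(collection, ids, projection=None):
    id_list = [ObjectId(_id) for _id in ids]
    if not id_list:
        return []
    return list(collection.find({"_id": {"$in": id_list}}, projection))

# docs = find_docs_by_ids(db.users, ["5f43a6b8e4b0c1a2b3c4d5e6"], {"name": 1})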
rapidpro/expressions | python/temba_expressions/functions/custom.py | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/functions/custom.py#L10-L26 | def field(ctx, text, index, delimiter=' '):
"""
Reference a field in a string separated by a delimiter
"""
splits = text.split(delimiter)
# remove our delimiters and whitespace
splits = [f for f in splits if f != delimiter and len(f.strip()) > 0]
index = conversions.to_integer(index, ctx)
if index < 1:
raise ValueError('Field index cannot be less than 1')
if index <= len(splits):
return splits[index-1]
else:
return '' | [
"def",
"field",
"(",
"ctx",
",",
"text",
",",
"index",
",",
"delimiter",
"=",
"' '",
")",
":",
"splits",
"=",
"text",
".",
"split",
"(",
"delimiter",
")",
"# remove our delimiters and whitespace",
"splits",
"=",
"[",
"f",
"for",
"f",
"in",
"splits",
"if",
"f",
"!=",
"delimiter",
"and",
"len",
"(",
"f",
".",
"strip",
"(",
")",
")",
">",
"0",
"]",
"index",
"=",
"conversions",
".",
"to_integer",
"(",
"index",
",",
"ctx",
")",
"if",
"index",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'Field index cannot be less than 1'",
")",
"if",
"index",
"<=",
"len",
"(",
"splits",
")",
":",
"return",
"splits",
"[",
"index",
"-",
"1",
"]",
"else",
":",
"return",
"''"
]
| Reference a field in a string separated by a delimiter | [
"Reference",
"a",
"field",
"in",
"string",
"separated",
"by",
"a",
"delimiter"
]
| python | train |
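Expected behaviour sketched without the expression context (ctx only matters for integer conversion, so it is left out here):

def field_of(text, index, delimiter=" "):
    # Standalone sketch of the same 1-based field lookup.
    parts = [p for p in text.split(delimiter) if p.strip()]
    if index < 1:
        raise ValueError("Field index cannot be less than 1")
    return parts[index - 1] if index <= len(parts) else ""

print(field_of("bee cat dog", 2))  # cat
print(field_of("a,,b,c", 3, ","))  # c  (empty fields are skipped, as above)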
iotile/coretools | iotileemulate/iotile/emulate/utilities/format_rpc.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/utilities/format_rpc.py#L6-L32 | def format_rpc(data):
"""Format an RPC call and response.
Args:
data (tuple): A tuple containing the address, rpc_id, argument and
response payloads and any error code.
Returns:
str: The formatted RPC string.
"""
address, rpc_id, args, resp, _status = data
name = rpc_name(rpc_id)
if isinstance(args, (bytes, bytearray)):
arg_str = hexlify(args)
else:
arg_str = repr(args)
if isinstance(resp, (bytes, bytearray)):
resp_str = hexlify(resp)
else:
resp_str = repr(resp)
#FIXME: Check and print status as well
return "%s called on address %d, payload=%s, response=%s" % (name, address, arg_str, resp_str) | [
"def",
"format_rpc",
"(",
"data",
")",
":",
"address",
",",
"rpc_id",
",",
"args",
",",
"resp",
",",
"_status",
"=",
"data",
"name",
"=",
"rpc_name",
"(",
"rpc_id",
")",
"if",
"isinstance",
"(",
"args",
",",
"(",
"bytes",
",",
"bytearray",
")",
")",
":",
"arg_str",
"=",
"hexlify",
"(",
"args",
")",
"else",
":",
"arg_str",
"=",
"repr",
"(",
"args",
")",
"if",
"isinstance",
"(",
"resp",
",",
"(",
"bytes",
",",
"bytearray",
")",
")",
":",
"resp_str",
"=",
"hexlify",
"(",
"resp",
")",
"else",
":",
"resp_str",
"=",
"repr",
"(",
"resp",
")",
"#FIXME: Check and print status as well",
"return",
"\"%s called on address %d, payload=%s, response=%s\"",
"%",
"(",
"name",
",",
"address",
",",
"arg_str",
",",
"resp_str",
")"
]
| Format an RPC call and response.
Args:
data (tuple): A tuple containing the address, rpc_id, argument and
response payloads and any error code.
Returns:
str: The formated RPC string. | [
"Format",
"an",
"RPC",
"call",
"and",
"response",
"."
]
| python | train |
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/nsx.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/nsx.py#L69-L96 | def set_nsxcontroller_ip(self, **kwargs):
"""
Set nsx-controller IP
Args:
IP (str): IPV4 address.
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None
"""
name = kwargs.pop('name')
ip_addr = str((kwargs.pop('ip_addr', None)))
nsxipaddress = ip_interface(unicode(ip_addr))
if nsxipaddress.version != 4:
raise ValueError('NSX Controller ip must be IPV4')
ip_args = dict(name=name, address=ip_addr)
method_name = 'nsx_controller_connection_addr_address'
method_class = self._brocade_tunnels
nsxcontroller_attr = getattr(method_class, method_name)
config = nsxcontroller_attr(**ip_args)
output = self._callback(config)
return output | [
"def",
"set_nsxcontroller_ip",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"ip_addr",
"=",
"str",
"(",
"(",
"kwargs",
".",
"pop",
"(",
"'ip_addr'",
",",
"None",
")",
")",
")",
"nsxipaddress",
"=",
"ip_interface",
"(",
"unicode",
"(",
"ip_addr",
")",
")",
"if",
"nsxipaddress",
".",
"version",
"!=",
"4",
":",
"raise",
"ValueError",
"(",
"'NSX Controller ip must be IPV4'",
")",
"ip_args",
"=",
"dict",
"(",
"name",
"=",
"name",
",",
"address",
"=",
"ip_addr",
")",
"method_name",
"=",
"'nsx_controller_connection_addr_address'",
"method_class",
"=",
"self",
".",
"_brocade_tunnels",
"nsxcontroller_attr",
"=",
"getattr",
"(",
"method_class",
",",
"method_name",
")",
"config",
"=",
"nsxcontroller_attr",
"(",
"*",
"*",
"ip_args",
")",
"output",
"=",
"self",
".",
"_callback",
"(",
"config",
")",
"return",
"output"
]
| Set nsx-controller IP
Args:
IP (str): IPV4 address.
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None | [
"Set",
"nsx",
"-",
"controller",
"IP"
]
| python | train |
manns/pyspread | pyspread/src/gui/_main_window.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_main_window.py#L614-L631 | def OnNewGpgKey(self, event):
"""New GPG key event handler.
Launches GPG choice and creation dialog
"""
if gnupg is None:
return
if genkey is None:
# gnupg is not present
self.interfaces.display_warning(
_("Python gnupg not found. No key selected."),
_("Key selection failed."))
else:
# gnupg is present
genkey() | [
"def",
"OnNewGpgKey",
"(",
"self",
",",
"event",
")",
":",
"if",
"gnupg",
"is",
"None",
":",
"return",
"if",
"genkey",
"is",
"None",
":",
"# gnupg is not present",
"self",
".",
"interfaces",
".",
"display_warning",
"(",
"_",
"(",
"\"Python gnupg not found. No key selected.\"",
")",
",",
"_",
"(",
"\"Key selection failed.\"",
")",
")",
"else",
":",
"# gnupg is present",
"genkey",
"(",
")"
]
| New GPG key event handler.
Launches GPG choice and creation dialog | [
"New",
"GPG",
"key",
"event",
"handler",
"."
]
| python | train |
iotile/coretools | iotilecore/iotile/core/utilities/workqueue_thread.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/workqueue_thread.py#L300-L314 | def stop(self, timeout=None, force=False):
"""Stop the worker thread and synchronously wait for it to finish.
Args:
timeout (float): The maximum time to wait for the thread to stop
before raising a TimeoutExpiredError. If force is True, TimeoutExpiredError
is not raised and the thread is just marked as a daemon thread
so that it does not block cleanly exiting the process.
force (bool): If true and the thread does not exit in timeout seconds
no error is raised since the thread is marked as daemon and will
be killed when the process exits.
"""
self.signal_stop()
self.wait_stopped(timeout, force) | [
"def",
"stop",
"(",
"self",
",",
"timeout",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"self",
".",
"signal_stop",
"(",
")",
"self",
".",
"wait_stopped",
"(",
"timeout",
",",
"force",
")"
]
| Stop the worker thread and synchronously wait for it to finish.
Args:
timeout (float): The maximum time to wait for the thread to stop
before raising a TimeoutExpiredError. If force is True, TimeoutExpiredError
is not raised and the thread is just marked as a daemon thread
so that it does not block cleanly exiting the process.
force (bool): If true and the thread does not exit in timeout seconds
no error is raised since the thread is marked as daemon and will
be killed when the process exits. | [
"Stop",
"the",
"worker",
"thread",
"and",
"synchronously",
"wait",
"for",
"it",
"to",
"finish",
"."
]
| python | train |
arista-eosplus/pyeapi | pyeapi/client.py | https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/client.py#L815-L842 | def connect_to(name):
"""Creates a node instance based on an entry from the config
This function will retrieve the settings for the specified connection
from the config and return a Node instance. The configuration must
be loaded prior to calling this function.
Args:
name (str): The name of the connection to load from the config. The
name argument should be the connection name (everything right of
the colon from the INI file)
Returns:
This function will return an instance of Node with the settings
from the config instance.
Raises:
AttributeError: raised if the specified configuration name is not
found in the loaded configuration
"""
kwargs = config_for(name)
if not kwargs:
raise AttributeError('connection profile not found in config')
node = connect(return_node=True, **kwargs)
return node | [
"def",
"connect_to",
"(",
"name",
")",
":",
"kwargs",
"=",
"config_for",
"(",
"name",
")",
"if",
"not",
"kwargs",
":",
"raise",
"AttributeError",
"(",
"'connection profile not found in config'",
")",
"node",
"=",
"connect",
"(",
"return_node",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"return",
"node"
]
| Creates a node instance based on an entry from the config
This function will retrieve the settings for the specified connection
from the config and return a Node instance. The configuration must
be loaded prior to calling this function.
Args:
name (str): The name of the connection to load from the config. The
name argument should be the connection name (everything right of
the colon from the INI file)
Returns:
This function will return an instance of Node with the settings
from the config instance.
Raises:
AttributeError: raised if the specified configuration name is not
found in the loaded configuration | [
"Creates",
"a",
"node",
"instance",
"based",
"on",
"an",
"entry",
"from",
"the",
"config"
]
| python | train |
googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/transaction.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/transaction.py#L229-L264 | def _pre_commit(self, transaction, *args, **kwargs):
"""Begin transaction and call the wrapped callable.
If the callable raises an exception, the transaction will be rolled
back. If not, the transaction will be "ready" for ``Commit`` (i.e.
it will have staged writes).
Args:
transaction (~.firestore_v1beta1.transaction.Transaction): A
transaction to execute the callable within.
args (Tuple[Any, ...]): The extra positional arguments to pass
along to the wrapped callable.
kwargs (Dict[str, Any]): The extra keyword arguments to pass
along to the wrapped callable.
Returns:
Any: result of the wrapped callable.
Raises:
Exception: Any failure caused by ``to_wrap``.
"""
# Force the ``transaction`` to be not "in progress".
transaction._clean_up()
transaction._begin(retry_id=self.retry_id)
# Update the stored transaction IDs.
self.current_id = transaction._id
if self.retry_id is None:
self.retry_id = self.current_id
try:
return self.to_wrap(transaction, *args, **kwargs)
except: # noqa
# NOTE: If ``rollback`` fails this will lose the information
# from the original failure.
transaction._rollback()
raise | [
"def",
"_pre_commit",
"(",
"self",
",",
"transaction",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Force the ``transaction`` to be not \"in progress\".",
"transaction",
".",
"_clean_up",
"(",
")",
"transaction",
".",
"_begin",
"(",
"retry_id",
"=",
"self",
".",
"retry_id",
")",
"# Update the stored transaction IDs.",
"self",
".",
"current_id",
"=",
"transaction",
".",
"_id",
"if",
"self",
".",
"retry_id",
"is",
"None",
":",
"self",
".",
"retry_id",
"=",
"self",
".",
"current_id",
"try",
":",
"return",
"self",
".",
"to_wrap",
"(",
"transaction",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"# noqa",
"# NOTE: If ``rollback`` fails this will lose the information",
"# from the original failure.",
"transaction",
".",
"_rollback",
"(",
")",
"raise"
]
| Begin transaction and call the wrapped callable.
If the callable raises an exception, the transaction will be rolled
back. If not, the transaction will be "ready" for ``Commit`` (i.e.
it will have staged writes).
Args:
transaction (~.firestore_v1beta1.transaction.Transaction): A
transaction to execute the callable within.
args (Tuple[Any, ...]): The extra positional arguments to pass
along to the wrapped callable.
kwargs (Dict[str, Any]): The extra keyword arguments to pass
along to the wrapped callable.
Returns:
Any: result of the wrapped callable.
Raises:
Exception: Any failure caused by ``to_wrap``. | [
"Begin",
"transaction",
"and",
"call",
"the",
"wrapped",
"callable",
"."
]
| python | train |
sdispater/eloquent | eloquent/orm/relations/morph_pivot.py | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/relations/morph_pivot.py#L25-L35 | def delete(self):
"""
Delete the pivot model record from the database.
:rtype: int
"""
query = self._get_delete_query()
query.where(self._morph_type, self._morph_class)
return query.delete() | [
"def",
"delete",
"(",
"self",
")",
":",
"query",
"=",
"self",
".",
"_get_delete_query",
"(",
")",
"query",
".",
"where",
"(",
"self",
".",
"_morph_type",
",",
"self",
".",
"_morph_class",
")",
"return",
"query",
".",
"delete",
"(",
")"
]
| Delete the pivot model record from the database.
:rtype: int | [
"Delete",
"the",
"pivot",
"model",
"record",
"from",
"the",
"database",
"."
]
| python | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L431-L446 | def areadinf(np, angfile, sca, outlet=None, wg=None, edgecontaimination=False,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Accumulate area according to Dinf flow direction"""
# -nc means do not consider edge contaimination
if edgecontaimination:
in_params = {'-nc': None}
else:
in_params = None
fname = TauDEM.func_name('areadinf')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-ang': angfile, '-o': outlet, '-wg': wg}, workingdir,
in_params,
{'-sca': sca},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"areadinf",
"(",
"np",
",",
"angfile",
",",
"sca",
",",
"outlet",
"=",
"None",
",",
"wg",
"=",
"None",
",",
"edgecontaimination",
"=",
"False",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"# -nc means do not consider edge contaimination",
"if",
"edgecontaimination",
":",
"in_params",
"=",
"{",
"'-nc'",
":",
"None",
"}",
"else",
":",
"in_params",
"=",
"None",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'areadinf'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-ang'",
":",
"angfile",
",",
"'-o'",
":",
"outlet",
",",
"'-wg'",
":",
"wg",
"}",
",",
"workingdir",
",",
"in_params",
",",
"{",
"'-sca'",
":",
"sca",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
]
| Run Accumulate area according to Dinf flow direction | [
"Run",
"Accumulate",
"area",
"according",
"to",
"Dinf",
"flow",
"direction"
]
| python | train |
quantumlib/Cirq | cirq/ops/common_gates.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/ops/common_gates.py#L1144-L1147 | def Rz(rads: Union[float, sympy.Basic]) -> ZPowGate:
"""Returns a gate with the matrix e^{-i Z rads / 2}."""
pi = sympy.pi if protocols.is_parameterized(rads) else np.pi
return ZPowGate(exponent=rads / pi, global_shift=-0.5) | [
"def",
"Rz",
"(",
"rads",
":",
"Union",
"[",
"float",
",",
"sympy",
".",
"Basic",
"]",
")",
"->",
"ZPowGate",
":",
"pi",
"=",
"sympy",
".",
"pi",
"if",
"protocols",
".",
"is_parameterized",
"(",
"rads",
")",
"else",
"np",
".",
"pi",
"return",
"ZPowGate",
"(",
"exponent",
"=",
"rads",
"/",
"pi",
",",
"global_shift",
"=",
"-",
"0.5",
")"
]
| Returns a gate with the matrix e^{-i Z rads / 2}. | [
"Returns",
"a",
"gate",
"with",
"the",
"matrix",
"e^",
"{",
"-",
"i",
"Z",
"rads",
"/",
"2",
"}",
"."
]
| python | train |
manns/pyspread | pyspread/src/gui/_grid.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1405-L1427 | def OnDeleteCols(self, event):
"""Deletes columns from all tables of the grid"""
bbox = self.grid.selection.get_bbox()
if bbox is None or bbox[1][1] is None:
# Insert rows at cursor
del_point = self.grid.actions.cursor[1]
no_cols = 1
else:
# Insert at right edge of bounding box
del_point = bbox[0][1]
no_cols = self._get_no_rowscols(bbox)[1]
with undo.group(_("Delete columns")):
self.grid.actions.delete_cols(del_point, no_cols)
self.grid.GetTable().ResetView()
# Update the default sized cell sizes
self.grid.actions.zoom()
event.Skip() | [
"def",
"OnDeleteCols",
"(",
"self",
",",
"event",
")",
":",
"bbox",
"=",
"self",
".",
"grid",
".",
"selection",
".",
"get_bbox",
"(",
")",
"if",
"bbox",
"is",
"None",
"or",
"bbox",
"[",
"1",
"]",
"[",
"1",
"]",
"is",
"None",
":",
"# Insert rows at cursor",
"del_point",
"=",
"self",
".",
"grid",
".",
"actions",
".",
"cursor",
"[",
"1",
"]",
"no_cols",
"=",
"1",
"else",
":",
"# Insert at right edge of bounding box",
"del_point",
"=",
"bbox",
"[",
"0",
"]",
"[",
"1",
"]",
"no_cols",
"=",
"self",
".",
"_get_no_rowscols",
"(",
"bbox",
")",
"[",
"1",
"]",
"with",
"undo",
".",
"group",
"(",
"_",
"(",
"\"Delete columns\"",
")",
")",
":",
"self",
".",
"grid",
".",
"actions",
".",
"delete_cols",
"(",
"del_point",
",",
"no_cols",
")",
"self",
".",
"grid",
".",
"GetTable",
"(",
")",
".",
"ResetView",
"(",
")",
"# Update the default sized cell sizes",
"self",
".",
"grid",
".",
"actions",
".",
"zoom",
"(",
")",
"event",
".",
"Skip",
"(",
")"
]
| Deletes columns from all tables of the grid | [
"Deletes",
"columns",
"from",
"all",
"tables",
"of",
"the",
"grid"
]
| python | train |
rigetti/pyquil | pyquil/operator_estimation.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/operator_estimation.py#L486-L512 | def construct_tpb_graph(experiments: TomographyExperiment):
"""
Construct a graph where an edge signifies two experiments are diagonal in a TPB.
"""
g = nx.Graph()
for expt in experiments:
assert len(expt) == 1, 'already grouped?'
expt = expt[0]
if expt not in g:
g.add_node(expt, count=1)
else:
g.nodes[expt]['count'] += 1
for expt1, expt2 in itertools.combinations(experiments, r=2):
expt1 = expt1[0]
expt2 = expt2[0]
if expt1 == expt2:
continue
max_weight_in = _max_weight_state([expt1.in_state, expt2.in_state])
max_weight_out = _max_weight_operator([expt1.out_operator, expt2.out_operator])
if max_weight_in is not None and max_weight_out is not None:
g.add_edge(expt1, expt2)
return g | [
"def",
"construct_tpb_graph",
"(",
"experiments",
":",
"TomographyExperiment",
")",
":",
"g",
"=",
"nx",
".",
"Graph",
"(",
")",
"for",
"expt",
"in",
"experiments",
":",
"assert",
"len",
"(",
"expt",
")",
"==",
"1",
",",
"'already grouped?'",
"expt",
"=",
"expt",
"[",
"0",
"]",
"if",
"expt",
"not",
"in",
"g",
":",
"g",
".",
"add_node",
"(",
"expt",
",",
"count",
"=",
"1",
")",
"else",
":",
"g",
".",
"nodes",
"[",
"expt",
"]",
"[",
"'count'",
"]",
"+=",
"1",
"for",
"expt1",
",",
"expt2",
"in",
"itertools",
".",
"combinations",
"(",
"experiments",
",",
"r",
"=",
"2",
")",
":",
"expt1",
"=",
"expt1",
"[",
"0",
"]",
"expt2",
"=",
"expt2",
"[",
"0",
"]",
"if",
"expt1",
"==",
"expt2",
":",
"continue",
"max_weight_in",
"=",
"_max_weight_state",
"(",
"[",
"expt1",
".",
"in_state",
",",
"expt2",
".",
"in_state",
"]",
")",
"max_weight_out",
"=",
"_max_weight_operator",
"(",
"[",
"expt1",
".",
"out_operator",
",",
"expt2",
".",
"out_operator",
"]",
")",
"if",
"max_weight_in",
"is",
"not",
"None",
"and",
"max_weight_out",
"is",
"not",
"None",
":",
"g",
".",
"add_edge",
"(",
"expt1",
",",
"expt2",
")",
"return",
"g"
]
| Construct a graph where an edge signifies two experiments are diagonal in a TPB. | [
"Construct",
"a",
"graph",
"where",
"an",
"edge",
"signifies",
"two",
"experiments",
"are",
"diagonal",
"in",
"a",
"TPB",
"."
]
| python | train |
brutasse/graphite-api | graphite_api/render/glyph.py | https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L511-L524 | def computeSlop(self, step, divisor):
"""Compute the slop that would result from step and divisor.
Return the slop, or None if this combination can't cover the full
range. See chooseStep() for the definition of "slop".
"""
bottom = step * math.floor(self.minValue / float(step) + EPSILON)
top = bottom + step * divisor
if top >= self.maxValue - EPSILON * step:
return max(top - self.maxValue, self.minValue - bottom)
else:
return None | [
"def",
"computeSlop",
"(",
"self",
",",
"step",
",",
"divisor",
")",
":",
"bottom",
"=",
"step",
"*",
"math",
".",
"floor",
"(",
"self",
".",
"minValue",
"/",
"float",
"(",
"step",
")",
"+",
"EPSILON",
")",
"top",
"=",
"bottom",
"+",
"step",
"*",
"divisor",
"if",
"top",
">=",
"self",
".",
"maxValue",
"-",
"EPSILON",
"*",
"step",
":",
"return",
"max",
"(",
"top",
"-",
"self",
".",
"maxValue",
",",
"self",
".",
"minValue",
"-",
"bottom",
")",
"else",
":",
"return",
"None"
]
| Compute the slop that would result from step and divisor.
Return the slop, or None if this combination can't cover the full
range. See chooseStep() for the definition of "slop". | [
"Compute",
"the",
"slop",
"that",
"would",
"result",
"from",
"step",
"and",
"divisor",
"."
]
| python | train |
Becksteinlab/GromacsWrapper | gromacs/run.py | https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L177-L190 | def mpicommand(self, *args, **kwargs):
"""Return a list of the mpi command portion of the commandline.
Only allows primitive mpi at the moment:
*mpiexec* -n *ncores* *mdrun* *mdrun-args*
(This is a primitive example for OpenMP. Override it for more
complicated cases.)
"""
if self.mpiexec is None:
raise NotImplementedError("Override mpiexec to enable the simple OpenMP launcher")
# example implementation
ncores = kwargs.pop('ncores', 8)
return [self.mpiexec, '-n', str(ncores)] | [
"def",
"mpicommand",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"mpiexec",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Override mpiexec to enable the simple OpenMP launcher\"",
")",
"# example implementation",
"ncores",
"=",
"kwargs",
".",
"pop",
"(",
"'ncores'",
",",
"8",
")",
"return",
"[",
"self",
".",
"mpiexec",
",",
"'-n'",
",",
"str",
"(",
"ncores",
")",
"]"
]
| Return a list of the mpi command portion of the commandline.
Only allows primitive mpi at the moment:
*mpiexec* -n *ncores* *mdrun* *mdrun-args*
(This is a primitive example for OpenMP. Override it for more
complicated cases.) | [
"Return",
"a",
"list",
"of",
"the",
"mpi",
"command",
"portion",
"of",
"the",
"commandline",
"."
]
| python | valid |
DistrictDataLabs/yellowbrick | yellowbrick/bestfit.py | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/bestfit.py#L49-L157 | def draw_best_fit(X, y, ax, estimator='linear', **kwargs):
"""
Uses Scikit-Learn to fit a model to X and y then uses the resulting model
to predict the curve based on the X values. This curve is drawn to the ax
(matplotlib axis) which must be passed as the third variable.
The estimator function can be one of the following:
- ``'linear'``: Uses OLS to fit the regression
- ``'quadratic'``: Uses OLS with Polynomial order 2
- ``'exponential'``: Not implemented yet
- ``'log'``: Not implemented yet
- ``'select_best'``: Selects the best fit via MSE
The remaining keyword arguments are passed to ax.plot to define and
describe the line of best fit.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
estimator : string, default: 'linear'
The name of the estimator function used to draw the best fit line.
The estimator can currently be one of linear, quadratic, exponential,
log, or select_best. The select best method uses the minimum MSE to
select the best fit line.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style and
label the line of best fit. By default, the standard line color is
used unless the color keyword argument is passed in.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
"""
# Estimators are the types of best fit lines that can be drawn.
estimators = {
LINEAR: fit_linear, # Uses OLS to fit the regression
QUADRATIC: fit_quadratic, # Uses OLS with Polynomial order 2
EXPONENTIAL: fit_exponential, # Not implemented yet
LOG: fit_log, # Not implemented yet
SELECT_BEST: fit_select_best, # Selects the best fit via MSE
}
# Check to make sure that a correct estimator value was passed in.
if estimator not in estimators:
raise YellowbrickValueError(
"'{}' not a valid type of estimator; choose from {}".format(
estimator, ", ".join(estimators.keys())
)
)
# Then collect the estimator function from the mapping.
estimator = estimators[estimator]
# Ensure that X and y are the same length
if len(X) != len(y):
raise YellowbrickValueError((
"X and y must have same length:"
" X len {} doesn't match y len {}!"
).format(len(X), len(y)))
# Ensure that X and y are np.arrays
X = np.array(X)
y = np.array(y)
# Verify that X is a two dimensional array for Scikit-Learn esitmators
# and that its dimensions are (n, 1) where n is the number of rows.
if X.ndim < 2:
X = X[:,np.newaxis] # Reshape X into the correct dimensions
if X.ndim > 2:
raise YellowbrickValueError(
"X must be a (1,) or (n,1) dimensional array not {}".format(X.shape)
)
# Verify that y is a (n,) dimensional array
if y.ndim > 1:
raise YellowbrickValueError(
"y must be a (1,) dimensional array not {}".format(y.shape)
)
# Uses the estimator to fit the data and get the model back.
model = estimator(X, y)
# Set the color if not passed in.
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['color'] = LINE_COLOR
# Get the current working axes
ax = ax or plt.gca()
# Plot line of best fit onto the axes that were passed in.
# TODO: determine if xlim or X.min(), X.max() are better params
xr = np.linspace(*ax.get_xlim(), num=100)
ax.plot(xr, model.predict(xr[:,np.newaxis]), **kwargs)
return ax | [
"def",
"draw_best_fit",
"(",
"X",
",",
"y",
",",
"ax",
",",
"estimator",
"=",
"'linear'",
",",
"*",
"*",
"kwargs",
")",
":",
"# Estimators are the types of best fit lines that can be drawn.",
"estimators",
"=",
"{",
"LINEAR",
":",
"fit_linear",
",",
"# Uses OLS to fit the regression",
"QUADRATIC",
":",
"fit_quadratic",
",",
"# Uses OLS with Polynomial order 2",
"EXPONENTIAL",
":",
"fit_exponential",
",",
"# Not implemented yet",
"LOG",
":",
"fit_log",
",",
"# Not implemented yet",
"SELECT_BEST",
":",
"fit_select_best",
",",
"# Selects the best fit via MSE",
"}",
"# Check to make sure that a correct estimator value was passed in.",
"if",
"estimator",
"not",
"in",
"estimators",
":",
"raise",
"YellowbrickValueError",
"(",
"\"'{}' not a valid type of estimator; choose from {}\"",
".",
"format",
"(",
"estimator",
",",
"\", \"",
".",
"join",
"(",
"estimators",
".",
"keys",
"(",
")",
")",
")",
")",
"# Then collect the estimator function from the mapping.",
"estimator",
"=",
"estimators",
"[",
"estimator",
"]",
"# Ensure that X and y are the same length",
"if",
"len",
"(",
"X",
")",
"!=",
"len",
"(",
"y",
")",
":",
"raise",
"YellowbrickValueError",
"(",
"(",
"\"X and y must have same length:\"",
"\" X len {} doesn't match y len {}!\"",
")",
".",
"format",
"(",
"len",
"(",
"X",
")",
",",
"len",
"(",
"y",
")",
")",
")",
"# Ensure that X and y are np.arrays",
"X",
"=",
"np",
".",
"array",
"(",
"X",
")",
"y",
"=",
"np",
".",
"array",
"(",
"y",
")",
"# Verify that X is a two dimensional array for Scikit-Learn esitmators",
"# and that its dimensions are (n, 1) where n is the number of rows.",
"if",
"X",
".",
"ndim",
"<",
"2",
":",
"X",
"=",
"X",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"# Reshape X into the correct dimensions",
"if",
"X",
".",
"ndim",
">",
"2",
":",
"raise",
"YellowbrickValueError",
"(",
"\"X must be a (1,) or (n,1) dimensional array not {}\"",
".",
"format",
"(",
"X",
".",
"shape",
")",
")",
"# Verify that y is a (n,) dimensional array",
"if",
"y",
".",
"ndim",
">",
"1",
":",
"raise",
"YellowbrickValueError",
"(",
"\"y must be a (1,) dimensional array not {}\"",
".",
"format",
"(",
"y",
".",
"shape",
")",
")",
"# Uses the estimator to fit the data and get the model back.",
"model",
"=",
"estimator",
"(",
"X",
",",
"y",
")",
"# Set the color if not passed in.",
"if",
"'c'",
"not",
"in",
"kwargs",
"and",
"'color'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'color'",
"]",
"=",
"LINE_COLOR",
"# Get the current working axes",
"ax",
"=",
"ax",
"or",
"plt",
".",
"gca",
"(",
")",
"# Plot line of best fit onto the axes that were passed in.",
"# TODO: determine if xlim or X.min(), X.max() are better params",
"xr",
"=",
"np",
".",
"linspace",
"(",
"*",
"ax",
".",
"get_xlim",
"(",
")",
",",
"num",
"=",
"100",
")",
"ax",
".",
"plot",
"(",
"xr",
",",
"model",
".",
"predict",
"(",
"xr",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"ax"
]
| Uses Scikit-Learn to fit a model to X and y then uses the resulting model
to predict the curve based on the X values. This curve is drawn to the ax
(matplotlib axis) which must be passed as the third variable.
The estimator function can be one of the following:
- ``'linear'``: Uses OLS to fit the regression
- ``'quadratic'``: Uses OLS with Polynomial order 2
- ``'exponential'``: Not implemented yet
- ``'log'``: Not implemented yet
- ``'select_best'``: Selects the best fit via MSE
The remaining keyword arguments are passed to ax.plot to define and
describe the line of best fit.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
estimator : string, default: 'linear'
The name of the estimator function used to draw the best fit line.
The estimator can currently be one of linear, quadratic, exponential,
log, or select_best. The select best method uses the minimum MSE to
select the best fit line.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style and
label the line of best fit. By default, the standard line color is
used unless the color keyword argument is passed in.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it. | [
"Uses",
"Scikit",
"-",
"Learn",
"to",
"fit",
"a",
"model",
"to",
"X",
"and",
"y",
"then",
"uses",
"the",
"resulting",
"model",
"to",
"predict",
"the",
"curve",
"based",
"on",
"the",
"X",
"values",
".",
"This",
"curve",
"is",
"drawn",
"to",
"the",
"ax",
"(",
"matplotlib",
"axis",
")",
"which",
"must",
"be",
"passed",
"as",
"the",
"third",
"variable",
"."
]
| python | train |
ssato/python-anytemplate | anytemplate/engines/pystache.py | https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/engines/pystache.py#L61-L78 | def _make_renderer(self, at_paths, at_encoding, **kwargs):
"""
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
"""
for eopt in ("file_encoding", "string_encoding"):
default = self._roptions.get(eopt, at_encoding.lower())
self._roptions[eopt] = kwargs.get(eopt, default)
pkey = "search_dirs"
paths = kwargs.get(pkey, []) + self._roptions.get(pkey, [])
if at_paths is not None:
paths = at_paths + paths
self._roptions[pkey] = paths
return pystache.renderer.Renderer(**self._roptions) | [
"def",
"_make_renderer",
"(",
"self",
",",
"at_paths",
",",
"at_encoding",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"eopt",
"in",
"(",
"\"file_encoding\"",
",",
"\"string_encoding\"",
")",
":",
"default",
"=",
"self",
".",
"_roptions",
".",
"get",
"(",
"eopt",
",",
"at_encoding",
".",
"lower",
"(",
")",
")",
"self",
".",
"_roptions",
"[",
"eopt",
"]",
"=",
"kwargs",
".",
"get",
"(",
"eopt",
",",
"default",
")",
"pkey",
"=",
"\"search_dirs\"",
"paths",
"=",
"kwargs",
".",
"get",
"(",
"pkey",
",",
"[",
"]",
")",
"+",
"self",
".",
"_roptions",
".",
"get",
"(",
"pkey",
",",
"[",
"]",
")",
"if",
"at_paths",
"is",
"not",
"None",
":",
"paths",
"=",
"at_paths",
"+",
"paths",
"self",
".",
"_roptions",
"[",
"pkey",
"]",
"=",
"paths",
"return",
"pystache",
".",
"renderer",
".",
"Renderer",
"(",
"*",
"*",
"self",
".",
"_roptions",
")"
]
| :param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled. | [
":",
"param",
"at_paths",
":",
"Template",
"search",
"paths",
":",
"param",
"at_encoding",
":",
"Template",
"encoding",
":",
"param",
"kwargs",
":",
"Keyword",
"arguments",
"passed",
"to",
"the",
"template",
"engine",
"to",
"render",
"templates",
"with",
"specific",
"features",
"enabled",
"."
]
| python | train |
obfusk/m | m.py | https://github.com/obfusk/m/blob/23ec2754abc9e945e5f01fcc64c13c833faf2e33/m.py#L1705-L1715 | def zlines(f = None, sep = "\0", osep = None, size = 8192): # {{{1
"""File iterator that uses alternative line terminators."""
if f is None: f = sys.stdin
if osep is None: osep = sep
buf = ""
while True:
chars = f.read(size)
if not chars: break
buf += chars; lines = buf.split(sep); buf = lines.pop()
for line in lines: yield line + osep
if buf: yield buf | [
"def",
"zlines",
"(",
"f",
"=",
"None",
",",
"sep",
"=",
"\"\\0\"",
",",
"osep",
"=",
"None",
",",
"size",
"=",
"8192",
")",
":",
"# {{{1",
"if",
"f",
"is",
"None",
":",
"f",
"=",
"sys",
".",
"stdin",
"if",
"osep",
"is",
"None",
":",
"osep",
"=",
"sep",
"buf",
"=",
"\"\"",
"while",
"True",
":",
"chars",
"=",
"f",
".",
"read",
"(",
"size",
")",
"if",
"not",
"chars",
":",
"break",
"buf",
"+=",
"chars",
"lines",
"=",
"buf",
".",
"split",
"(",
"sep",
")",
"buf",
"=",
"lines",
".",
"pop",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"yield",
"line",
"+",
"osep",
"if",
"buf",
":",
"yield",
"buf"
]
| File iterator that uses alternative line terminators. | [
"File",
"iterator",
"that",
"uses",
"alternative",
"line",
"terminators",
"."
]
| python | train |
f3at/feat | src/feat/models/getter.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/getter.py#L53-L65 | def source_attr(attr_name):
"""
Creates a getter that will drop the current value
and retrieve the source's attribute with specified name.
@param attr_name: the name of an attribute belonging to the source.
@type attr_name: str
"""
def source_attr(_value, context, **_params):
value = getattr(context["model"].source, attr_name)
return _attr(value)
return source_attr | [
"def",
"source_attr",
"(",
"attr_name",
")",
":",
"def",
"source_attr",
"(",
"_value",
",",
"context",
",",
"*",
"*",
"_params",
")",
":",
"value",
"=",
"getattr",
"(",
"context",
"[",
"\"model\"",
"]",
".",
"source",
",",
"attr_name",
")",
"return",
"_attr",
"(",
"value",
")",
"return",
"source_attr"
]
| Creates a getter that will drop the current value
and retrieve the source's attribute with specified name.
@param attr_name: the name of an attribute belonging to the source.
@type attr_name: str | [
"Creates",
"a",
"getter",
"that",
"will",
"drop",
"the",
"current",
"value",
"and",
"retrieve",
"the",
"source",
"s",
"attribute",
"with",
"specified",
"name",
"."
]
| python | train |
delfick/harpoon | harpoon/option_spec/image_objs.py | https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/option_spec/image_objs.py#L494-L501 | def pair(self):
"""Get the name and value for this environment variable"""
if self.set_val is not None:
return self.env_name, self.set_val
elif self.default_val is not None:
return self.env_name, os.environ.get(self.env_name, self.default_val)
else:
return self.env_name, os.environ[self.env_name] | [
"def",
"pair",
"(",
"self",
")",
":",
"if",
"self",
".",
"set_val",
"is",
"not",
"None",
":",
"return",
"self",
".",
"env_name",
",",
"self",
".",
"set_val",
"elif",
"self",
".",
"default_val",
"is",
"not",
"None",
":",
"return",
"self",
".",
"env_name",
",",
"os",
".",
"environ",
".",
"get",
"(",
"self",
".",
"env_name",
",",
"self",
".",
"default_val",
")",
"else",
":",
"return",
"self",
".",
"env_name",
",",
"os",
".",
"environ",
"[",
"self",
".",
"env_name",
"]"
]
| Get the name and value for this environment variable | [
"Get",
"the",
"name",
"and",
"value",
"for",
"this",
"environment",
"variable"
]
| python | train |
angr/angr | angr/procedures/libc/strtol.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/procedures/libc/strtol.py#L78-L149 | def _string_to_int(s, state, region, base, signed, read_length=None):
"""
reads values from s and generates the symbolic number that it would equal
the first char is either a number in the given base, or the result is 0
expression indicates whether or not it was successful
"""
# if length wasn't provided, read the maximum bytes
length = state.libc.max_strtol_len if read_length is None else read_length
# expression whether or not it was valid at all
expression, _ = strtol._char_to_val(region.load(s, 1), base)
cases = []
# to detect overflows we keep it in a larger bv and extract it at the end
num_bits = min(state.arch.bits*2, 128)
current_val = state.solver.BVV(0, num_bits)
num_bytes = state.solver.BVS("num_bytes", state.arch.bits)
constraints_num_bytes = []
conditions = []
cutoff = False
# we need all the conditions to hold except the last one to have found a value
for i in range(length):
char = region.load(s + i, 1)
condition, value = strtol._char_to_val(char, base)
# if it was the end we'll get the current val
cases.append((num_bytes == i, current_val))
# identify the constraints necessary to set num_bytes to the current value
# the current char (i.e. the terminator if this is satisfied) should not be a char,
# so `condition` should be false, plus all the previous conditions should be satisfied
case_constraints = conditions + [state.solver.Not(condition)] + [num_bytes == i]
constraints_num_bytes.append(state.solver.And(*case_constraints))
# break the loop early if no value past this is viable
if condition.is_false():
cutoff = True # ???
break
# add the value and the condition
current_val = current_val*base + value.zero_extend(num_bits-8)
conditions.append(condition)
# the last one is unterminated, let's ignore it
if not cutoff:
cases.append((num_bytes == length, current_val))
case_constraints = conditions + [num_bytes == length]
constraints_num_bytes.append(state.solver.And(*case_constraints))
# only one of the constraints need to hold
# since the constraints look like (num_bytes == 2 and the first 2 chars are valid, and the 3rd isn't)
final_constraint = state.solver.Or(*constraints_num_bytes)
if final_constraint.op == '__eq__' and final_constraint.args[0] is num_bytes and not final_constraint.args[1].symbolic:
# CONCRETE CASE
result = cases[state.solver.eval(final_constraint.args[1])][1]
num_bytes = final_constraint.args[1]
else:
# symbolic case
state.add_constraints(final_constraint)
result = state.solver.ite_cases(cases, 0)
# overflow check
max_bits = state.arch.bits-1 if signed else state.arch.bits
max_val = 2**max_bits - 1
result = state.solver.If(result < max_val, state.solver.Extract(state.arch.bits-1, 0, result),
state.solver.BVV(max_val, state.arch.bits))
return expression, result, num_bytes | [
"def",
"_string_to_int",
"(",
"s",
",",
"state",
",",
"region",
",",
"base",
",",
"signed",
",",
"read_length",
"=",
"None",
")",
":",
"# if length wasn't provided, read the maximum bytes",
"length",
"=",
"state",
".",
"libc",
".",
"max_strtol_len",
"if",
"read_length",
"is",
"None",
"else",
"read_length",
"# expression whether or not it was valid at all",
"expression",
",",
"_",
"=",
"strtol",
".",
"_char_to_val",
"(",
"region",
".",
"load",
"(",
"s",
",",
"1",
")",
",",
"base",
")",
"cases",
"=",
"[",
"]",
"# to detect overflows we keep it in a larger bv and extract it at the end",
"num_bits",
"=",
"min",
"(",
"state",
".",
"arch",
".",
"bits",
"*",
"2",
",",
"128",
")",
"current_val",
"=",
"state",
".",
"solver",
".",
"BVV",
"(",
"0",
",",
"num_bits",
")",
"num_bytes",
"=",
"state",
".",
"solver",
".",
"BVS",
"(",
"\"num_bytes\"",
",",
"state",
".",
"arch",
".",
"bits",
")",
"constraints_num_bytes",
"=",
"[",
"]",
"conditions",
"=",
"[",
"]",
"cutoff",
"=",
"False",
"# we need all the conditions to hold except the last one to have found a value",
"for",
"i",
"in",
"range",
"(",
"length",
")",
":",
"char",
"=",
"region",
".",
"load",
"(",
"s",
"+",
"i",
",",
"1",
")",
"condition",
",",
"value",
"=",
"strtol",
".",
"_char_to_val",
"(",
"char",
",",
"base",
")",
"# if it was the end we'll get the current val",
"cases",
".",
"append",
"(",
"(",
"num_bytes",
"==",
"i",
",",
"current_val",
")",
")",
"# identify the constraints necessary to set num_bytes to the current value",
"# the current char (i.e. the terminator if this is satisfied) should not be a char,",
"# so `condition` should be false, plus all the previous conditions should be satisfied",
"case_constraints",
"=",
"conditions",
"+",
"[",
"state",
".",
"solver",
".",
"Not",
"(",
"condition",
")",
"]",
"+",
"[",
"num_bytes",
"==",
"i",
"]",
"constraints_num_bytes",
".",
"append",
"(",
"state",
".",
"solver",
".",
"And",
"(",
"*",
"case_constraints",
")",
")",
"# break the loop early if no value past this is viable",
"if",
"condition",
".",
"is_false",
"(",
")",
":",
"cutoff",
"=",
"True",
"# ???",
"break",
"# add the value and the condition",
"current_val",
"=",
"current_val",
"*",
"base",
"+",
"value",
".",
"zero_extend",
"(",
"num_bits",
"-",
"8",
")",
"conditions",
".",
"append",
"(",
"condition",
")",
"# the last one is unterminated, let's ignore it",
"if",
"not",
"cutoff",
":",
"cases",
".",
"append",
"(",
"(",
"num_bytes",
"==",
"length",
",",
"current_val",
")",
")",
"case_constraints",
"=",
"conditions",
"+",
"[",
"num_bytes",
"==",
"length",
"]",
"constraints_num_bytes",
".",
"append",
"(",
"state",
".",
"solver",
".",
"And",
"(",
"*",
"case_constraints",
")",
")",
"# only one of the constraints need to hold",
"# since the constraints look like (num_bytes == 2 and the first 2 chars are valid, and the 3rd isn't)",
"final_constraint",
"=",
"state",
".",
"solver",
".",
"Or",
"(",
"*",
"constraints_num_bytes",
")",
"if",
"final_constraint",
".",
"op",
"==",
"'__eq__'",
"and",
"final_constraint",
".",
"args",
"[",
"0",
"]",
"is",
"num_bytes",
"and",
"not",
"final_constraint",
".",
"args",
"[",
"1",
"]",
".",
"symbolic",
":",
"# CONCRETE CASE",
"result",
"=",
"cases",
"[",
"state",
".",
"solver",
".",
"eval",
"(",
"final_constraint",
".",
"args",
"[",
"1",
"]",
")",
"]",
"[",
"1",
"]",
"num_bytes",
"=",
"final_constraint",
".",
"args",
"[",
"1",
"]",
"else",
":",
"# symbolic case",
"state",
".",
"add_constraints",
"(",
"final_constraint",
")",
"result",
"=",
"state",
".",
"solver",
".",
"ite_cases",
"(",
"cases",
",",
"0",
")",
"# overflow check",
"max_bits",
"=",
"state",
".",
"arch",
".",
"bits",
"-",
"1",
"if",
"signed",
"else",
"state",
".",
"arch",
".",
"bits",
"max_val",
"=",
"2",
"**",
"max_bits",
"-",
"1",
"result",
"=",
"state",
".",
"solver",
".",
"If",
"(",
"result",
"<",
"max_val",
",",
"state",
".",
"solver",
".",
"Extract",
"(",
"state",
".",
"arch",
".",
"bits",
"-",
"1",
",",
"0",
",",
"result",
")",
",",
"state",
".",
"solver",
".",
"BVV",
"(",
"max_val",
",",
"state",
".",
"arch",
".",
"bits",
")",
")",
"return",
"expression",
",",
"result",
",",
"num_bytes"
]
| reads values from s and generates the symbolic number that it would equal
the first char is either a number in the given base, or the result is 0
expression indicates whether or not it was successful | [
"reads",
"values",
"from",
"s",
"and",
"generates",
"the",
"symbolic",
"number",
"that",
"it",
"would",
"equal",
"the",
"first",
"char",
"is",
"either",
"a",
"number",
"in",
"the",
"given",
"base",
"or",
"the",
"result",
"is",
"0",
"expression",
"indicates",
"whether",
"or",
"not",
"it",
"was",
"successful"
]
| python | train |
j0ack/flask-codemirror | flask_codemirror/__init__.py | https://github.com/j0ack/flask-codemirror/blob/81ad831ff849b60bb34de5db727ad626ff3c9bdc/flask_codemirror/__init__.py#L92-L127 | def include_codemirror(self):
"""Include resources in pages"""
contents = []
# base
js = self._get_tag('codemirror.js', 'script')
css = self._get_tag('codemirror.css', 'stylesheet')
if js and css:
contents.append(js)
contents.append(css)
# languages
for language in self.languages:
url = self.__class__.LANGUAGE_REL_URL.format(language)
js = self._get_tag(url, 'script')
if js:
contents.append(js)
# theme
if self.theme:
url = self.__class__.THEME_REL_URL.format(self.theme)
css = self._get_tag(url, 'stylesheet')
if css:
contents.append(css)
# addons
if self.addons:
# add to list
for addon_type, name in self.addons:
url = self.__class__.ADDON_REL_URL.format(addon_type, name)
js = self._get_tag(url, 'script')
if js:
contents.append(js)
# if there is a css file relative to this addon
url = self.__class__.ADDON_CSS_REL_URL.format(addon_type, name)
css = self._get_tag(url, 'stylesheet', False)
if css:
contents.append(css)
# return html
return Markup('\n'.join(contents)) | [
"def",
"include_codemirror",
"(",
"self",
")",
":",
"contents",
"=",
"[",
"]",
"# base",
"js",
"=",
"self",
".",
"_get_tag",
"(",
"'codemirror.js'",
",",
"'script'",
")",
"css",
"=",
"self",
".",
"_get_tag",
"(",
"'codemirror.css'",
",",
"'stylesheet'",
")",
"if",
"js",
"and",
"css",
":",
"contents",
".",
"append",
"(",
"js",
")",
"contents",
".",
"append",
"(",
"css",
")",
"# languages",
"for",
"language",
"in",
"self",
".",
"languages",
":",
"url",
"=",
"self",
".",
"__class__",
".",
"LANGUAGE_REL_URL",
".",
"format",
"(",
"language",
")",
"js",
"=",
"self",
".",
"_get_tag",
"(",
"url",
",",
"'script'",
")",
"if",
"js",
":",
"contents",
".",
"append",
"(",
"js",
")",
"# theme",
"if",
"self",
".",
"theme",
":",
"url",
"=",
"self",
".",
"__class__",
".",
"THEME_REL_URL",
".",
"format",
"(",
"self",
".",
"theme",
")",
"css",
"=",
"self",
".",
"_get_tag",
"(",
"url",
",",
"'stylesheet'",
")",
"if",
"css",
":",
"contents",
".",
"append",
"(",
"css",
")",
"# addons",
"if",
"self",
".",
"addons",
":",
"# add to list",
"for",
"addon_type",
",",
"name",
"in",
"self",
".",
"addons",
":",
"url",
"=",
"self",
".",
"__class__",
".",
"ADDON_REL_URL",
".",
"format",
"(",
"addon_type",
",",
"name",
")",
"js",
"=",
"self",
".",
"_get_tag",
"(",
"url",
",",
"'script'",
")",
"if",
"js",
":",
"contents",
".",
"append",
"(",
"js",
")",
"# if there is a css file relative to this addon",
"url",
"=",
"self",
".",
"__class__",
".",
"ADDON_CSS_REL_URL",
".",
"format",
"(",
"addon_type",
",",
"name",
")",
"css",
"=",
"self",
".",
"_get_tag",
"(",
"url",
",",
"'stylesheet'",
",",
"False",
")",
"if",
"css",
":",
"contents",
".",
"append",
"(",
"css",
")",
"# return html",
"return",
"Markup",
"(",
"'\\n'",
".",
"join",
"(",
"contents",
")",
")"
]
| Include resources in pages | [
"Include",
"resources",
"in",
"pages"
]
| python | train |
carpyncho/feets | feets/libs/ls_fap.py | https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/libs/ls_fap.py#L207-L215 | def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau | [
"def",
"fap_davies",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"'standard'",
")",
":",
"N",
"=",
"len",
"(",
"t",
")",
"fap_s",
"=",
"fap_single",
"(",
"Z",
",",
"N",
",",
"normalization",
"=",
"normalization",
")",
"tau",
"=",
"tau_davies",
"(",
"Z",
",",
"fmax",
",",
"t",
",",
"y",
",",
"dy",
",",
"normalization",
"=",
"normalization",
")",
"return",
"fap_s",
"+",
"tau"
]
| Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008) | [
"Davies",
"upper",
"-",
"bound",
"to",
"the",
"false",
"alarm",
"probability"
]
| python | train |