repo | path | func_name | code | language | sha | url | partition |
---|---|---|---|---|---|---|---|
arkottke/pysra | pysra/propagation.py | LinearElasticCalculator.calc_stress_tf | def calc_stress_tf(self, lin, lout, damped):
"""Compute the stress transfer function.
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
of the layer.
damped : bool
If True, scale by the complex shear modulus so that the influence of
damping is included; otherwise the real shear modulus is used.
"""
tf = self.calc_strain_tf(lin, lout)
if damped:
# Scale by complex shear modulus to include the influence of
# damping
tf *= lout.layer.comp_shear_mod
else:
tf *= lout.layer.shear_mod
return tf | python | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L380-L400 | train |
arkottke/pysra | pysra/propagation.py | LinearElasticCalculator.calc_strain_tf | def calc_strain_tf(self, lin, lout):
"""Compute the strain transfer function from `lout` to
`location_in`.
The strain transfer function from the acceleration at layer `n`
(outcrop) to the mid-height of layer `m` (within) is defined as
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
of the layer.
Returns
-------
strain_tf : :class:`numpy.ndarray`
Transfer function to be applied to an acceleration FAS.
"""
# FIXME: Correct discussion for using acceleration FAS
# Strain(angFreq, z=h_m/2)
# ------------------------ =
# accel_n(angFreq)
#
# i k*_m [ A_m exp(i k*_m h_m / 2) - B_m exp(-i k*_m h_m / 2)]
# ------------------------------------------------------------
# -angFreq^2 (2 * A_n)
#
assert lout.wave_field == WaveField.within
ang_freqs = self.motion.angular_freqs
# The numerator cannot be computed using wave_at_location() because
# it is A - B.
cterm = 1j * self._wave_nums[lout.index, :] * lout.depth_within
numer = (1j * self._wave_nums[lout.index, :] *
(self._waves_a[lout.index, :] * np.exp(cterm) -
self._waves_b[lout.index, :] * np.exp(-cterm)))
denom = -ang_freqs ** 2 * self.wave_at_location(lin)
# Only compute transfer function for non-zero frequencies
mask = ~np.isclose(ang_freqs, 0)
tf = np.zeros_like(mask, dtype=complex)
# Scale into units from gravity
tf[mask] = GRAVITY * numer[mask] / denom[mask]
return tf | python | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L402-L448 | train |
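The ASCII diagram inside `calc_strain_tf` is easier to read as a single equation; the block below is only a transcription of that comment (with k*_m the complex wave number, A_m and B_m the wave amplitudes, and h_m the thickness of layer m), not an independent derivation:

```latex
\frac{\varepsilon_m(\omega,\, z = h_m/2)}{\ddot{u}_n(\omega)}
  = \frac{i\, k^{*}_{m} \left[ A_m\, e^{\,i k^{*}_{m} h_m / 2} - B_m\, e^{-i k^{*}_{m} h_m / 2} \right]}
         {-\omega^{2}\, (2 A_n)}
```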
arkottke/pysra | pysra/propagation.py | EquivalentLinearCalculator._estimate_strains | def _estimate_strains(self):
"""Compute an estimate of the strains."""
# Estimate the strain based on the PGV and shear-wave velocity
for l in self._profile:
l.reset()
l.strain = self._motion.pgv / l.initial_shear_vel | python | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L514-L519 | train |
arkottke/pysra | pysra/propagation.py | EquivalentLinearCalculator._calc_strain | def _calc_strain(self, loc_input, loc_layer, motion, *args):
"""Compute the strain used for iterations of material properties."""
strain_max = self._calc_strain_max(loc_input, loc_layer, motion, *args)
return self.strain_ratio * strain_max | python | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L557-L560 | train |
arkottke/pysra | pysra/propagation.py | EquivalentLinearCalculator._calc_strain_max | def _calc_strain_max(self, loc_input, loc_layer, motion, *args):
"""Compute the effective strain at the center of a layer."""
return motion.calc_peak(
self.calc_strain_tf(loc_input, loc_layer)) | python | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L562-L565 | train |
arkottke/pysra | pysra/propagation.py | FrequencyDependentEqlCalculator._estimate_strains | def _estimate_strains(self):
"""Estimate the strains by running an EQL site response.
This step was recommended in Section 8.3.1 of Zalachoris (2014).
"""
eql = EquivalentLinearCalculator()
eql(self._motion, self._profile, self._loc_input) | python | c72fd389d6c15203c0c00728ac00f101bae6369d | https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L606-L612 | train |
guaix-ucm/numina | numina/core/recipes.py | timeit | def timeit(method):
"""Decorator to measure the time used by the recipe"""
import datetime
@functools.wraps(method)
def timed_method(self, rinput):
time_start = datetime.datetime.utcnow()
result = method(self, rinput)
time_end = datetime.datetime.utcnow()
result.time_it(time_start, time_end)
self.logger.info('total time measured')
return result
return timed_method | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipes.py#L261-L276 | train |
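A minimal sketch of how the `timeit` decorator above is normally applied; the recipe class and its body are hypothetical, and `create_result` is assumed to build the recipe's result object:

```python
from numina.core.recipes import BaseRecipe, timeit

class ExampleRecipe(BaseRecipe):      # hypothetical recipe
    @timeit
    def run(self, rinput):
        result = self.create_result()  # assumed helper; the real recipe builds its result here
        return result

# The decorator records UTC timestamps around run(), calls result.time_it(start, end)
# on whatever run() returns, and logs 'total time measured'.
```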
guaix-ucm/numina | numina/core/recipes.py | BaseRecipe.save_intermediate_img | def save_intermediate_img(self, img, name):
"""Save intermediate FITS objects."""
if self.intermediate_results:
img.writeto(name, overwrite=True) | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipes.py#L161-L164 | train |
guaix-ucm/numina | numina/core/recipes.py | BaseRecipe.save_intermediate_array | def save_intermediate_array(self, array, name):
"""Save intermediate array object as FITS."""
if self.intermediate_results:
fits.writeto(name, array, overwrite=True) | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipes.py#L166-L169 | train |
guaix-ucm/numina | numina/core/recipes.py | BaseRecipe.build_recipe_input | def build_recipe_input(self, ob, dal):
"""Build a RecipeInput object."""
result = {}
# We have to decide if the ob input
# is a plain description (ObservingBlock)
# or if it contains the nested results (Obsres)
#
# it has to contain the tags corresponding to the observing modes...
ob_query_skip = False
ob_query_field = 'obresult'
if isinstance(ob, ObservingBlock):
import numina.types.obsresult as obtype
# We have to build an Obsres
for key, req in self.requirements().items():
if isinstance(req.type, obtype.ObservationResultType):
ob_query_field = key
ob_query_skip = True
query_option = self.query_options.get(key)
# print('req for ob is named', key, query_option)
new_or = ObservationResult()
new_or.__dict__ = ob.__dict__
obsres = req.query(dal, new_or, options=query_option)
tagger = self.mode.tagger
if tagger is not None:
self.logger.debug('Use mode tagger to fill tags in OB')
obsres.tags = tagger(obsres)
else:
obsres.tags = None
break
else:
# nothing to do
obsres = ob
else:
obsres = ob
# Get tags_names per REQ
self.logger.debug('getting query fields per REQ')
qfields = set()
for key, req in self.requirements().items():
tag_n = req.tag_names()
self.logger.debug("%s has these query fields %s", key, tag_n)
qfields.update(tag_n)
if obsres.tags is None:
self.logger.debug('running recipe tagger')
self.logger.debug('with query fields %s', qfields)
if qfields:
obsres.tags = self.obsres_extractor(obsres, qfields)
else:
obsres.tags = {}
for key, req in self.requirements().items():
try:
query_option = self.query_options.get(key)
if key == ob_query_field and ob_query_skip:
result[key] = obsres
else:
result[key] = req.query(dal, obsres, options=query_option)
except NoResultFound as notfound:
req.on_query_not_found(notfound)
return self.create_input(**result) | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipes.py#L190-L255 | train |
guaix-ucm/numina | numina/tools/subsets_of_fileinfo_from_txt.py | subsets_of_fileinfo_from_txt | def subsets_of_fileinfo_from_txt(filename):
"""Returns a dictionary with subsets of FileInfo instances from a TXT file.
Each subset of files must be preceded by a line:
@ <number> <label>
where <number> indicates the number of files in that subset,
and <label> is a label for that subset. Any additional text
beyond <label> in the same line is ignored.
Note that blank lines and lines starting with the hash symbol are
also ignored. The names of the files comprising each subset will be
obtained from the first contiguous character string in every
line (thus, the rest of the line will be discarded).
Parameters
----------
filename : string
Name of a TXT file containing a list of FITS files grouped
in different subsets by the @ symbol.
Returns
-------
dict_of_subsets_of_fileinfo : dictionary
Dictionary containing as many entries as different subsets
of files available. Keys are sequential integers starting at
zero; each value is a dictionary with the subset 'label' and
the 'list_of_fileinfo' list of FileInfo instances in that subset.
"""
# check for input file
if not os.path.isfile(filename):
raise ValueError("File " + filename + " not found!")
# read input file
with open(filename) as f:
file_content = f.read().splitlines()
# obtain the different subsets of files
dict_of_subsets_of_fileinfo = {}
label = None
sublist_of_fileinfo = []
idict = 0
ifiles = 0
nfiles = 0
sublist_finished = True
for line in file_content:
if len(line) > 0:
if line[0] != '#':
if label is None:
if line[0] == "@":
nfiles = int(line[1:].split()[0])
label = line[1:].split()[1]
sublist_of_fileinfo = []
ifiles = 0
sublist_finished = False
else:
raise ValueError("Expected @ symbol not found!")
else:
if line[0] == "@":
raise ValueError("Unexpected @ symbol found!")
tmplist = line.split()
tmpfile = tmplist[0]
if len(tmplist) > 1:
tmpinfo = tmplist[1:]
else:
tmpinfo = None
if not os.path.isfile(tmpfile):
raise ValueError("File " + tmpfile + " not found!")
sublist_of_fileinfo.append(FileInfo(tmpfile, tmpinfo))
ifiles += 1
if ifiles == nfiles:
dict_of_subsets_of_fileinfo[idict] = {}
tmpdict = dict_of_subsets_of_fileinfo[idict]
tmpdict['label'] = label
tmpdict['list_of_fileinfo'] = sublist_of_fileinfo
idict += 1
label = None
sublist_of_fileinfo = []
ifiles = 0
sublist_finished = True
if not sublist_finished:
raise ValueError("Unexpected end of sublist of files.")
return dict_of_subsets_of_fileinfo | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/tools/subsets_of_fileinfo_from_txt.py#L10-L95 | train |
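A hedged usage sketch for `subsets_of_fileinfo_from_txt`; the file names below are hypothetical and must exist on disk for the call to succeed:

```python
from numina.tools.subsets_of_fileinfo_from_txt import subsets_of_fileinfo_from_txt

# files.txt (hypothetical) would contain:
#   @ 2 bias
#   bias_001.fits
#   bias_002.fits
#   @ 1 flat
#   flat_001.fits
subsets = subsets_of_fileinfo_from_txt('files.txt')
# subsets[0]['label'] -> 'bias'
# subsets[0]['list_of_fileinfo'] -> [FileInfo('bias_001.fits', None), FileInfo('bias_002.fits', None)]
# subsets[1]['label'] -> 'flat'
```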
guaix-ucm/numina | numina/array/__init__.py | subarray_match | def subarray_match(shape, ref, sshape, sref=None):
"""Compute the slice representation of intersection of two arrays.
Given the shapes of two arrays and a reference point ref, compute the
intersection of the two arrays.
It returns a tuple of slices, that can be passed to the two \
images as indexes
:param shape: the shape of the reference array
:param ref: coordinates of the reference point in the first array system
:param sshape: the shape of the second array
:param sref: coordinates of the reference point in the \
second array system, the origin by default
:type sref: sequence or None
:return: two matching slices, corresponding to both arrays \
or a tuple of Nones if they don't match
:rtype: a tuple
Example:
>>> import numpy
>>> im = numpy.zeros((1000, 1000))
>>> sim = numpy.ones((40, 40))
>>> i,j = subarray_match(im.shape, [20, 23], sim.shape)
>>> im[i] = 2 * sim[j]
"""
# Reference point in im
ref1 = asarray(ref, dtype='int')
if sref is not None:
ref2 = asarray(sref, dtype='int')
else:
ref2 = zeros_like(ref1)
offset = ref1 - ref2
urc1 = minimum(offset + asarray(sshape) - 1, asarray(shape) - 1)
blc1 = maximum(offset, 0)
urc2 = urc1 - offset
blc2 = blc1 - offset
def valid_slice(b, u):
if b >= u + 1:
return None
else:
return slice(b, u + 1)
f = tuple(valid_slice(b, u) for b, u in zip(blc1, urc1))
s = tuple(valid_slice(b, u) for b, u in zip(blc2, urc2))
if not all(f) or not all(s):
return (None, None)
return (f, s) | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/__init__.py#L21-L74 | train |
guaix-ucm/numina | numina/array/__init__.py | rebin_scale | def rebin_scale(a, scale=1):
"""Scale an array to a new shape."""
newshape = tuple((side * scale) for side in a.shape)
return rebin(a, newshape) | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/__init__.py#L171-L176 | train |
guaix-ucm/numina | numina/array/__init__.py | rebin | def rebin(a, newshape):
"""Rebin an array to a new shape."""
slices = [slice(0, old, float(old)/new)
for old, new in zip(a.shape, newshape)]
coordinates = numpy.mgrid[slices]
# choose the biggest smaller integer index
indices = coordinates.astype('i')
return a[tuple(indices)] | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/__init__.py#L179-L187 | train |
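A short sketch of what `rebin` does with a small array; the values follow from the nearest-lower-index sampling the function performs, and the import path is assumed from the `path` column above:

```python
import numpy as np
from numina.array import rebin  # assumed import path

a = np.arange(16).reshape(4, 4)
rebin(a, (2, 2))          # downsampling keeps every other sample: [[0, 2], [8, 10]]
rebin(a[:2, :2], (4, 4))  # upsampling repeats each sample in a 2x2 block
```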
guaix-ucm/numina | numina/array/__init__.py | fixpix | def fixpix(data, mask, kind='linear'):
"""Interpolate 2D array data in rows"""
if data.shape != mask.shape:
raise ValueError
if not numpy.any(mask):
return data
x = numpy.arange(0, data.shape[0])
for row, mrow in zip(data, mask):
if numpy.any(mrow): # Interpolate if there's some pixel missing
valid = (mrow == numpy.False_)
invalid = (mrow == numpy.True_)
itp = interp1d(x[valid], row[valid], kind=kind, copy=False)
row[invalid] = itp(x[invalid]).astype(row.dtype)
return data | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/__init__.py#L190-L205 | train |
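A small usage sketch for `fixpix`; a square array is chosen deliberately, since the abscissa is built from `data.shape[0]`, and the flagged pixel is recovered by row-wise linear interpolation:

```python
import numpy as np
from numina.array import fixpix  # assumed import path

data = np.array([[1.0,   2.0, 3.0, 4.0],
                 [1.0, -99.0, 3.0, 4.0],
                 [1.0,   2.0, 3.0, 4.0],
                 [1.0,   2.0, 3.0, 4.0]])
mask = np.zeros(data.shape, dtype=bool)
mask[1, 1] = True           # flag the bad pixel
fixed = fixpix(data, mask)  # modifies data in place and returns it; fixed[1, 1] == 2.0
```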
guaix-ucm/numina | numina/array/__init__.py | fixpix2 | def fixpix2(data, mask, iterations=3, out=None):
"""Substitute pixels in mask by a bilinear least square fitting.
"""
out = out if out is not None else data.copy()
# A binary mask, regions are ones
binry = mask != 0
# Label regions in the binary mask
lblarr, labl = ndimage.label(binry)
# Structure for dilation is 8-way
stct = ndimage.generate_binary_structure(2, 2)
# Pixels in the background
back = lblarr == 0
# For each object
for idx in range(1, labl + 1):
# Pixels of the object
segm = lblarr == idx
# Pixels of the object or the background
# dilation will only touch these pixels
dilmask = numpy.logical_or(back, segm)
# Dilation 3 times
more = ndimage.binary_dilation(segm, stct,
iterations=iterations,
mask=dilmask)
# Border pixels
# Pixels in the border around the object are
# more and (not segm)
border = numpy.logical_and(more, numpy.logical_not(segm))
# Pixels in the border
xi, yi = border.nonzero()
# Bilinear leastsq calculator
calc = FitOne(xi, yi, out[xi, yi])
# Pixels in the region
xi, yi = segm.nonzero()
# Value is obtained from the fit
out[segm] = calc(xi, yi)
return out | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/__init__.py#L208-L247 | train |
guaix-ucm/numina | numina/array/__init__.py | numberarray | def numberarray(x, shape):
"""Return x if it is an array or create an array and fill it with x."""
try:
iter(x)
except TypeError:
return numpy.ones(shape) * x
else:
return x | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/__init__.py#L291-L298 | train |
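For illustration (import path assumed as above), `numberarray` broadcasts a scalar but passes through anything already iterable:

```python
import numpy as np
from numina.array import numberarray  # assumed import path

numberarray(3.0, (2, 2))               # -> array([[3., 3.], [3., 3.]])
numberarray(np.zeros((5, 5)), (2, 2))  # already iterable, returned unchanged (shape is ignored)
```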
druids/django-chamber | chamber/multidomains/auth/middleware.py | get_token | def get_token(request):
"""
Returns the token model instance associated with the given request token key.
If no user is retrieved AnonymousToken is returned.
"""
if (not request.META.get(header_name_to_django(auth_token_settings.HEADER_NAME)) and
config.CHAMBER_MULTIDOMAINS_OVERTAKER_AUTH_COOKIE_NAME):
ovetaker_auth_token = request.COOKIES.get(config.CHAMBER_MULTIDOMAINS_OVERTAKER_AUTH_COOKIE_NAME)
token = get_object_or_none(Token, key=ovetaker_auth_token, is_active=True)
if utils.get_user_from_token(token).is_authenticated():
return token
return utils.get_token(request) | python | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/multidomains/auth/middleware.py#L13-L25 | train |
druids/django-chamber | chamber/multidomains/auth/middleware.py | MultiDomainsTokenAuthenticationMiddleware.process_request | def process_request(self, request):
"""
Lazy set user and token
"""
request.token = get_token(request)
request.user = SimpleLazyObject(lambda: get_user(request))
request._dont_enforce_csrf_checks = dont_enforce_csrf_checks(request) | python | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/multidomains/auth/middleware.py#L30-L36 | train |
guaix-ucm/numina | numina/array/display/ximplotxy.py | ximplotxy_jupyter | def ximplotxy_jupyter(x, y, fmt=None, **args):
"""Auxiliary function to call ximplotxy from a jupyter notebook.
"""
using_jupyter = True
if fmt is None:
return ximplotxy(x, y, using_jupyter=using_jupyter, **args)
else:
return ximplotxy(x, y, fmt, using_jupyter=using_jupyter, **args) | python | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/ximplotxy.py#L20-L27 | train |
druids/django-chamber | chamber/utils/transaction.py | atomic | def atomic(func):
"""
Decorator helper that overrides django atomic decorator and automatically adds create revision.
"""
try:
from reversion.revisions import create_revision
return transaction.atomic(create_revision()(func))
except ImportError:
return transaction.atomic(func) | python | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/transaction.py#L14-L23 | train |
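A hedged usage sketch for the `atomic` decorator above; the function and model instances are hypothetical, only the decorator and its behaviour (a database transaction, plus one reversion revision when django-reversion is installed) come from chamber:

```python
from chamber.utils.transaction import atomic

@atomic
def transfer(source, target, amount):  # hypothetical domain function
    # Everything below runs in a single database transaction; if
    # django-reversion is installed it is also wrapped in one revision.
    source.balance -= amount
    source.save()
    target.balance += amount
    target.save()
```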
druids/django-chamber | chamber/utils/transaction.py | atomic_with_signals | def atomic_with_signals(func):
"""
Atomic decorator with transaction signals.
"""
try:
from reversion.revisions import create_revision
return transaction.atomic(create_revision()(transaction_signals(func)))
except ImportError:
return transaction.atomic(transaction_signals(func)) | python | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/transaction.py#L116-L125 | train |
gatkin/declxml | declxml.py | parse_from_file | def parse_from_file(
root_processor, # type: RootProcessor
xml_file_path, # type: Text
encoding='utf-8' # type: Text
):
# type: (...) -> Any
"""
Parse the XML file using the processor starting from the root of the document.
:param root_processor: Root processor of the XML document.
:param xml_file_path: Path to XML file to parse.
:param encoding: Encoding of the file.
:return: Parsed value.
"""
with open(xml_file_path, 'r', encoding=encoding) as xml_file:
xml_string = xml_file.read()
parsed_value = parse_from_string(root_processor, xml_string)
return parsed_value | python | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L264-L284 | train |
gatkin/declxml | declxml.py | parse_from_string | def parse_from_string(
root_processor, # type: RootProcessor
xml_string # type: Text
):
# type: (...) -> Any
"""
Parse the XML string using the processor starting from the root of the document.
:param xml_string: XML string to parse.
See also :func:`declxml.parse_from_file`
"""
if not _is_valid_root_processor(root_processor):
raise InvalidRootProcessor('Invalid root processor')
parseable_xml_string = xml_string # type: Union[Text, bytes]
if _PY2 and isinstance(xml_string, Text):
parseable_xml_string = xml_string.encode('utf-8')
root = ET.fromstring(parseable_xml_string)
_xml_namespace_strip(root)
state = _ProcessorState()
state.push_location(root_processor.element_path)
return root_processor.parse_at_root(root, state) | python | def parse_from_string(
root_processor, # type: RootProcessor
xml_string # type: Text
):
# type: (...) -> Any
"""
Parse the XML string using the processor starting from the root of the document.
:param xml_string: XML string to parse.
See also :func:`declxml.parse_from_file`
"""
if not _is_valid_root_processor(root_processor):
raise InvalidRootProcessor('Invalid root processor')
parseable_xml_string = xml_string # type: Union[Text, bytes]
if _PY2 and isinstance(xml_string, Text):
parseable_xml_string = xml_string.encode('utf-8')
root = ET.fromstring(parseable_xml_string)
_xml_namespace_strip(root)
state = _ProcessorState()
state.push_location(root_processor.element_path)
return root_processor.parse_at_root(root, state) | [
"def",
"parse_from_string",
"(",
"root_processor",
",",
"# type: RootProcessor",
"xml_string",
"# type: Text",
")",
":",
"# type: (...) -> Any",
"if",
"not",
"_is_valid_root_processor",
"(",
"root_processor",
")",
":",
"raise",
"InvalidRootProcessor",
"(",
"'Invalid root processor'",
")",
"parseable_xml_string",
"=",
"xml_string",
"# type: Union[Text, bytes]",
"if",
"_PY2",
"and",
"isinstance",
"(",
"xml_string",
",",
"Text",
")",
":",
"parseable_xml_string",
"=",
"xml_string",
".",
"encode",
"(",
"'utf-8'",
")",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"parseable_xml_string",
")",
"_xml_namespace_strip",
"(",
"root",
")",
"state",
"=",
"_ProcessorState",
"(",
")",
"state",
".",
"push_location",
"(",
"root_processor",
".",
"element_path",
")",
"return",
"root_processor",
".",
"parse_at_root",
"(",
"root",
",",
"state",
")"
] | Parse the XML string using the processor starting from the root of the document.
:param xml_string: XML string to parse.
See also :func:`declxml.parse_from_file` | [
"Parse",
"the",
"XML",
"string",
"using",
"the",
"processor",
"starting",
"from",
"the",
"root",
"of",
"the",
"document",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L287-L311 | train |
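A short sketch of ``parse_from_string``, assuming a hypothetical ``<author>`` document; the expected output shape follows from the alias/element-name rules documented for the primitive processors.

.. sourcecode:: python

    import declxml as xml

    author_processor = xml.dictionary('author', [
        xml.string('name'),
        xml.integer('born'),
    ])

    document = '<author><name>Ada Lovelace</name><born>1815</born></author>'

    author = xml.parse_from_string(author_processor, document)
    # Expected shape: {'name': 'Ada Lovelace', 'born': 1815}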
gatkin/declxml | declxml.py | serialize_to_file | def serialize_to_file(
root_processor, # type: RootProcessor
value, # type: Any
xml_file_path, # type: Text
encoding='utf-8', # type: Text
indent=None # type: Optional[Text]
):
# type: (...) -> None
"""
Serialize the value to an XML file using the root processor.
:param root_processor: Root processor of the XML document.
:param value: Value to serialize.
:param xml_file_path: Path to the XML file to which the serialized value will be written.
:param encoding: Encoding of the file.
:param indent: If specified, then the XML will be formatted with the specified indentation.
"""
serialized_value = serialize_to_string(root_processor, value, indent)
with open(xml_file_path, 'w', encoding=encoding) as xml_file:
xml_file.write(serialized_value) | python | def serialize_to_file(
root_processor, # type: RootProcessor
value, # type: Any
xml_file_path, # type: Text
encoding='utf-8', # type: Text
indent=None # type: Optional[Text]
):
# type: (...) -> None
"""
Serialize the value to an XML file using the root processor.
:param root_processor: Root processor of the XML document.
:param value: Value to serialize.
:param xml_file_path: Path to the XML file to which the serialized value will be written.
:param encoding: Encoding of the file.
:param indent: If specified, then the XML will be formatted with the specified indentation.
"""
serialized_value = serialize_to_string(root_processor, value, indent)
with open(xml_file_path, 'w', encoding=encoding) as xml_file:
xml_file.write(serialized_value) | [
"def",
"serialize_to_file",
"(",
"root_processor",
",",
"# type: RootProcessor",
"value",
",",
"# type: Any",
"xml_file_path",
",",
"# type: Text",
"encoding",
"=",
"'utf-8'",
",",
"# type: Text",
"indent",
"=",
"None",
"# type: Optional[Text]",
")",
":",
"# type: (...) -> None",
"serialized_value",
"=",
"serialize_to_string",
"(",
"root_processor",
",",
"value",
",",
"indent",
")",
"with",
"open",
"(",
"xml_file_path",
",",
"'w'",
",",
"encoding",
"=",
"encoding",
")",
"as",
"xml_file",
":",
"xml_file",
".",
"write",
"(",
"serialized_value",
")"
] | Serialize the value to an XML file using the root processor.
:param root_processor: Root processor of the XML document.
:param value: Value to serialize.
:param xml_file_path: Path to the XML file to which the serialized value will be written.
:param encoding: Encoding of the file.
:param indent: If specified, then the XML will be formatted with the specified indentation. | [
"Serialize",
"the",
"value",
"to",
"an",
"XML",
"file",
"using",
"the",
"root",
"processor",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L314-L334 | train |
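A hedged sketch of ``serialize_to_file``; the config structure and the output path are assumptions, while the argument order and the ``indent`` behavior follow the docstring above.

.. sourcecode:: python

    import declxml as xml

    config_processor = xml.dictionary('config', [
        xml.string('host'),
        xml.integer('port'),
    ])

    config = {'host': 'localhost', 'port': 8080}

    # 'config.xml' is a hypothetical output path; indent switches on pretty printing.
    xml.serialize_to_file(config_processor, config, 'config.xml', indent='    ')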
gatkin/declxml | declxml.py | serialize_to_string | def serialize_to_string(
root_processor, # type: RootProcessor
value, # type: Any
indent=None # type: Optional[Text]
):
# type: (...) -> Text
"""
Serialize the value to an XML string using the root processor.
:return: The serialized XML string.
See also :func:`declxml.serialize_to_file`
"""
if not _is_valid_root_processor(root_processor):
raise InvalidRootProcessor('Invalid root processor')
state = _ProcessorState()
state.push_location(root_processor.element_path)
root = root_processor.serialize(value, state)
state.pop_location()
# Always encode to UTF-8 because element tree does not support other
# encodings in earlier Python versions. See: https://bugs.python.org/issue1767933
serialized_value = ET.tostring(root, encoding='utf-8')
# Since element tree does not support pretty printing XML, we use minidom to do the pretty
# printing
if indent:
serialized_value = minidom.parseString(serialized_value).toprettyxml(
indent=indent, encoding='utf-8'
)
return serialized_value.decode('utf-8') | python | def serialize_to_string(
root_processor, # type: RootProcessor
value, # type: Any
indent=None # type: Optional[Text]
):
# type: (...) -> Text
"""
Serialize the value to an XML string using the root processor.
:return: The serialized XML string.
See also :func:`declxml.serialize_to_file`
"""
if not _is_valid_root_processor(root_processor):
raise InvalidRootProcessor('Invalid root processor')
state = _ProcessorState()
state.push_location(root_processor.element_path)
root = root_processor.serialize(value, state)
state.pop_location()
# Always encode to UTF-8 because element tree does not support other
# encodings in earlier Python versions. See: https://bugs.python.org/issue1767933
serialized_value = ET.tostring(root, encoding='utf-8')
# Since element tree does not support pretty printing XML, we use minidom to do the pretty
# printing
if indent:
serialized_value = minidom.parseString(serialized_value).toprettyxml(
indent=indent, encoding='utf-8'
)
return serialized_value.decode('utf-8') | [
"def",
"serialize_to_string",
"(",
"root_processor",
",",
"# type: RootProcessor",
"value",
",",
"# type: Any",
"indent",
"=",
"None",
"# type: Optional[Text]",
")",
":",
"# type: (...) -> Text",
"if",
"not",
"_is_valid_root_processor",
"(",
"root_processor",
")",
":",
"raise",
"InvalidRootProcessor",
"(",
"'Invalid root processor'",
")",
"state",
"=",
"_ProcessorState",
"(",
")",
"state",
".",
"push_location",
"(",
"root_processor",
".",
"element_path",
")",
"root",
"=",
"root_processor",
".",
"serialize",
"(",
"value",
",",
"state",
")",
"state",
".",
"pop_location",
"(",
")",
"# Always encode to UTF-8 because element tree does not support other",
"# encodings in earlier Python versions. See: https://bugs.python.org/issue1767933",
"serialized_value",
"=",
"ET",
".",
"tostring",
"(",
"root",
",",
"encoding",
"=",
"'utf-8'",
")",
"# Since element tree does not support pretty printing XML, we use minidom to do the pretty",
"# printing",
"if",
"indent",
":",
"serialized_value",
"=",
"minidom",
".",
"parseString",
"(",
"serialized_value",
")",
".",
"toprettyxml",
"(",
"indent",
"=",
"indent",
",",
"encoding",
"=",
"'utf-8'",
")",
"return",
"serialized_value",
".",
"decode",
"(",
"'utf-8'",
")"
] | Serialize the value to an XML string using the root processor.
:return: The serialized XML string.
See also :func:`declxml.serialize_to_file` | [
"Serialize",
"the",
"value",
"to",
"an",
"XML",
"string",
"using",
"the",
"root",
"processor",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L337-L371 | train |
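A sketch of ``serialize_to_string`` with and without ``indent``; the ``point`` processor is an assumed example structure.

.. sourcecode:: python

    import declxml as xml

    point_processor = xml.dictionary('point', [
        xml.floating_point('x'),
        xml.floating_point('y'),
    ])

    # Without indent the output is a compact string; with indent it is pretty
    # printed via minidom, as the comments in the record above note.
    compact = xml.serialize_to_string(point_processor, {'x': 1.5, 'y': -2.0})
    pretty = xml.serialize_to_string(point_processor, {'x': 1.5, 'y': -2.0}, indent='  ')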
gatkin/declxml | declxml.py | array | def array(
item_processor, # type: Processor
alias=None, # type: Optional[Text]
nested=None, # type: Optional[Text]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Array(item_processor, alias, nested, omit_empty)
return _processor_wrap_if_hooks(processor, hooks) | python | def array(
item_processor, # type: Processor
alias=None, # type: Optional[Text]
nested=None, # type: Optional[Text]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Array(item_processor, alias, nested, omit_empty)
return _processor_wrap_if_hooks(processor, hooks) | [
"def",
"array",
"(",
"item_processor",
",",
"# type: Processor",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"nested",
"=",
"None",
",",
"# type: Optional[Text]",
"omit_empty",
"=",
"False",
",",
"# type: bool",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> RootProcessor",
"processor",
"=",
"_Array",
"(",
"item_processor",
",",
"alias",
",",
"nested",
",",
"omit_empty",
")",
"return",
"_processor_wrap_if_hooks",
"(",
"processor",
",",
"hooks",
")"
] | Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object. | [
"Create",
"an",
"array",
"processor",
"that",
"can",
"be",
"used",
"to",
"parse",
"and",
"serialize",
"array",
"data",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L374-L430 | train |
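Two sketches mirroring the nested and embedded layouts shown in the ``array`` docstring above. The ``alias='items'`` on the embedded array is an assumed name chosen so the items have a key in the parsed dictionary; everything else follows the documented parameters.

.. sourcecode:: python

    import declxml as xml

    # Nested: items live under <nested-array>, matching the first XML snippet above.
    nested_processor = xml.dictionary('root-element', [
        xml.string('some-element'),
        xml.array(xml.integer('array-item'), nested='nested-array'),
    ])

    # Embedded: items sit directly under <root-element>, matching the second snippet.
    embedded_processor = xml.dictionary('root-element', [
        xml.string('some-element'),
        xml.array(xml.integer('array-item'), alias='items'),
    ])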
gatkin/declxml | declxml.py | boolean | def boolean(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default=False, # type: Optional[bool]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for boolean values.
:param element_name: Name of the XML element containing the value. Can also be specified
using supported XPath syntax.
:param attribute: If specified, then the value is searched for under the
attribute within the element specified by element_name. If not specified,
then the value is searched for as the contents of the element specified by
element_name.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param default: Default value to use if the element is not present. This option is only
valid if required is specified as False.
:param omit_empty: If True, then Falsey values will be omitted when serializing to XML. Note
that Falsey values are never omitted when they are elements of an array. Falsey values can
be omitted only when they are standalone elements.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
return _PrimitiveValue(
element_name,
_parse_boolean,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | python | def boolean(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default=False, # type: Optional[bool]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for boolean values.
:param element_name: Name of the XML element containing the value. Can also be specified
using supported XPath syntax.
:param attribute: If specified, then the value is searched for under the
attribute within the element specified by element_name. If not specified,
then the value is searched for as the contents of the element specified by
element_name.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param default: Default value to use if the element is not present. This option is only
valid if required is specified as False.
:param omit_empty: If True, then Falsey values will be omitted when serializing to XML. Note
that Falsey values are never omitted when they are elements of an array. Falsey values can
be omitted only when they are standalone elements.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
return _PrimitiveValue(
element_name,
_parse_boolean,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | [
"def",
"boolean",
"(",
"element_name",
",",
"# type: Text",
"attribute",
"=",
"None",
",",
"# type: Optional[Text]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"default",
"=",
"False",
",",
"# type: Optional[bool]",
"omit_empty",
"=",
"False",
",",
"# type: bool",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> Processor",
"return",
"_PrimitiveValue",
"(",
"element_name",
",",
"_parse_boolean",
",",
"attribute",
",",
"required",
",",
"alias",
",",
"default",
",",
"omit_empty",
",",
"hooks",
")"
] | Create a processor for boolean values.
:param element_name: Name of the XML element containing the value. Can also be specified
using supported XPath syntax.
:param attribute: If specified, then the value is searched for under the
attribute within the element specified by element_name. If not specified,
then the value is searched for as the contents of the element specified by
element_name.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param default: Default value to use if the element is not present. This option is only
valid if required is specified as False.
:param omit_empty: If True, then Falsey values will be omitted when serializing to XML. Note
that Falsey values are never omitted when they are elements of an array. Falsey values can
be omitted only when they are standalone elements.
:param hooks: A Hooks object.
:return: A declxml processor object. | [
"Create",
"a",
"processor",
"for",
"boolean",
"values",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L433-L473 | train |
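A sketch that reads a boolean from an attribute of a child element. The ``<feature>``/``<flag>`` names are illustrative, and the explicit ``alias`` is an assumption made so the value has a predictable key in the parsed dictionary.

.. sourcecode:: python

    import declxml as xml

    feature_processor = xml.dictionary('feature', [
        xml.string('name'),
        # Value comes from the 'enabled' attribute of the <flag> element;
        # optional, with False used when it is absent.
        xml.boolean('flag', attribute='enabled', alias='enabled',
                    required=False, default=False),
    ])

    parsed = xml.parse_from_string(
        feature_processor,
        '<feature><name>search</name><flag enabled="true"/></feature>')
    # parsed should contain {'name': 'search', 'enabled': True}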
gatkin/declxml | declxml.py | dictionary | def dictionary(
element_name, # type: Text
children, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for dictionary values.
:param element_name: Name of the XML element containing the dictionary value. Can also be
specified using supported XPath syntax.
:param children: List of declxml processor objects for processing the children
contained within the dictionary.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Dictionary(element_name, children, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | python | def dictionary(
element_name, # type: Text
children, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for dictionary values.
:param element_name: Name of the XML element containing the dictionary value. Can also be
specified using supported XPath syntax.
:param children: List of declxml processor objects for processing the children
contained within the dictionary.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Dictionary(element_name, children, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | [
"def",
"dictionary",
"(",
"element_name",
",",
"# type: Text",
"children",
",",
"# type: List[Processor]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> RootProcessor",
"processor",
"=",
"_Dictionary",
"(",
"element_name",
",",
"children",
",",
"required",
",",
"alias",
")",
"return",
"_processor_wrap_if_hooks",
"(",
"processor",
",",
"hooks",
")"
] | Create a processor for dictionary values.
:param element_name: Name of the XML element containing the dictionary value. Can also be
specified using supported XPath syntax.
:param children: List of declxml processor objects for processing the children
contained within the dictionary.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param hooks: A Hooks object.
:return: A declxml processor object. | [
"Create",
"a",
"processor",
"for",
"dictionary",
"values",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L476-L499 | train |
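Dictionaries nest by listing one dictionary processor as a child of another; the person/address structure below is an assumed example, while the nesting and key names follow the documented ``element_name``/``alias`` rules.

.. sourcecode:: python

    import declxml as xml

    address_processor = xml.dictionary('address', [
        xml.string('street'),
        xml.string('city'),
    ])

    person_processor = xml.dictionary('person', [
        xml.string('name'),
        address_processor,  # nested dictionary, keyed by its element name
    ])

    person = xml.parse_from_string(person_processor, """
    <person>
        <name>Grace Hopper</name>
        <address>
            <street>1 Navy Way</street>
            <city>Arlington</city>
        </address>
    </person>
    """)
    # person['address'] is itself a dict with 'street' and 'city' keys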
gatkin/declxml | declxml.py | floating_point | def floating_point(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default=0.0, # type: Optional[float]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for floating point values.
See also :func:`declxml.boolean`
"""
value_parser = _number_parser(float)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | python | def floating_point(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default=0.0, # type: Optional[float]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for floating point values.
See also :func:`declxml.boolean`
"""
value_parser = _number_parser(float)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | [
"def",
"floating_point",
"(",
"element_name",
",",
"# type: Text",
"attribute",
"=",
"None",
",",
"# type: Optional[Text]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"default",
"=",
"0.0",
",",
"# type: Optional[float]",
"omit_empty",
"=",
"False",
",",
"# type: bool",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> Processor",
"value_parser",
"=",
"_number_parser",
"(",
"float",
")",
"return",
"_PrimitiveValue",
"(",
"element_name",
",",
"value_parser",
",",
"attribute",
",",
"required",
",",
"alias",
",",
"default",
",",
"omit_empty",
",",
"hooks",
")"
] | Create a processor for floating point values.
See also :func:`declxml.boolean` | [
"Create",
"a",
"processor",
"for",
"floating",
"point",
"values",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L502-L527 | train |
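A sketch of an optional floating point element with a default, using the ``required``/``default`` parameters this processor shares with ``boolean``; the ``measurement`` names are assumptions.

.. sourcecode:: python

    import declxml as xml

    measurement_processor = xml.dictionary('measurement', [
        xml.floating_point('value'),
        # Optional: the default is used when the element is not present.
        xml.floating_point('uncertainty', required=False, default=0.0),
    ])

    m = xml.parse_from_string(
        measurement_processor, '<measurement><value>3.25</value></measurement>')
    # Expected: {'value': 3.25, 'uncertainty': 0.0}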
gatkin/declxml | declxml.py | integer | def integer(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default=0, # type: Optional[int]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for integer values.
See also :func:`declxml.boolean`
"""
value_parser = _number_parser(int)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | python | def integer(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default=0, # type: Optional[int]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for integer values.
See also :func:`declxml.boolean`
"""
value_parser = _number_parser(int)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | [
"def",
"integer",
"(",
"element_name",
",",
"# type: Text",
"attribute",
"=",
"None",
",",
"# type: Optional[Text]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"default",
"=",
"0",
",",
"# type: Optional[int]",
"omit_empty",
"=",
"False",
",",
"# type: bool",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> Processor",
"value_parser",
"=",
"_number_parser",
"(",
"int",
")",
"return",
"_PrimitiveValue",
"(",
"element_name",
",",
"value_parser",
",",
"attribute",
",",
"required",
",",
"alias",
",",
"default",
",",
"omit_empty",
",",
"hooks",
")"
] | Create a processor for integer values.
See also :func:`declxml.boolean` | [
"Create",
"a",
"processor",
"for",
"integer",
"values",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L530-L555 | train |
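A sketch of ``integer`` parsing, with a note on invalid input based on the ``_number_parser`` record later in this file; the ``inventory`` names are assumptions.

.. sourcecode:: python

    import declxml as xml

    inventory_processor = xml.dictionary('inventory', [
        xml.integer('count'),
    ])

    xml.parse_from_string(inventory_processor,
                          '<inventory><count>7</count></inventory>')
    # Expected: {'count': 7}

    # Non-numeric text is rejected by the parser built in _number_parser,
    # which reports an InvalidPrimitiveValue error:
    # xml.parse_from_string(inventory_processor,
    #                       '<inventory><count>seven</count></inventory>')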
gatkin/declxml | declxml.py | named_tuple | def named_tuple(
element_name, # type: Text
tuple_type, # type: Type[Tuple]
child_processors, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for namedtuple values.
:param tuple_type: The namedtuple type.
See also :func:`declxml.dictionary`
"""
converter = _named_tuple_converter(tuple_type)
processor = _Aggregate(element_name, converter, child_processors, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | python | def named_tuple(
element_name, # type: Text
tuple_type, # type: Type[Tuple]
child_processors, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for namedtuple values.
:param tuple_type: The namedtuple type.
See also :func:`declxml.dictionary`
"""
converter = _named_tuple_converter(tuple_type)
processor = _Aggregate(element_name, converter, child_processors, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | [
"def",
"named_tuple",
"(",
"element_name",
",",
"# type: Text",
"tuple_type",
",",
"# type: Type[Tuple]",
"child_processors",
",",
"# type: List[Processor]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> RootProcessor",
"converter",
"=",
"_named_tuple_converter",
"(",
"tuple_type",
")",
"processor",
"=",
"_Aggregate",
"(",
"element_name",
",",
"converter",
",",
"child_processors",
",",
"required",
",",
"alias",
")",
"return",
"_processor_wrap_if_hooks",
"(",
"processor",
",",
"hooks",
")"
] | Create a processor for namedtuple values.
:param tuple_type: The namedtuple type.
See also :func:`declxml.dictionary` | [
"Create",
"a",
"processor",
"for",
"namedtuple",
"values",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L558-L576 | train |
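A sketch of ``named_tuple`` with an assumed ``Point`` type; construction via ``tuple_type(**parsed_dict)`` matches the ``_named_tuple_converter`` record later in this file.

.. sourcecode:: python

    import collections

    import declxml as xml

    Point = collections.namedtuple('Point', ['x', 'y'])

    point_processor = xml.named_tuple('point', Point, [
        xml.floating_point('x'),
        xml.floating_point('y'),
    ])

    p = xml.parse_from_string(point_processor,
                              '<point><x>1.0</x><y>2.5</y></point>')
    # Expected: Point(x=1.0, y=2.5)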
gatkin/declxml | declxml.py | string | def string(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default='', # type: Optional[Text]
omit_empty=False, # type: bool
strip_whitespace=True, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for string values.
:param strip_whitespace: Indicates whether leading and trailing whitespace should be stripped
from parsed string values.
See also :func:`declxml.boolean`
"""
value_parser = _string_parser(strip_whitespace)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | python | def string(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default='', # type: Optional[Text]
omit_empty=False, # type: bool
strip_whitespace=True, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for string values.
:param strip_whitespace: Indicates whether leading and trailing whitespace should be stripped
from parsed string values.
See also :func:`declxml.boolean`
"""
value_parser = _string_parser(strip_whitespace)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
) | [
"def",
"string",
"(",
"element_name",
",",
"# type: Text",
"attribute",
"=",
"None",
",",
"# type: Optional[Text]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"default",
"=",
"''",
",",
"# type: Optional[Text]",
"omit_empty",
"=",
"False",
",",
"# type: bool",
"strip_whitespace",
"=",
"True",
",",
"# type: bool",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> Processor",
"value_parser",
"=",
"_string_parser",
"(",
"strip_whitespace",
")",
"return",
"_PrimitiveValue",
"(",
"element_name",
",",
"value_parser",
",",
"attribute",
",",
"required",
",",
"alias",
",",
"default",
",",
"omit_empty",
",",
"hooks",
")"
] | Create a processor for string values.
:param strip_whitespace: Indicates whether leading and trailing whitespace should be stripped
from parsed string values.
See also :func:`declxml.boolean` | [
"Create",
"a",
"processor",
"for",
"string",
"values",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L579-L608 | train |
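A sketch contrasting the default whitespace stripping with ``strip_whitespace=False``; the ``note`` structure is an assumption.

.. sourcecode:: python

    import declxml as xml

    note_processor = xml.dictionary('note', [
        xml.string('body'),  # leading/trailing whitespace stripped by default
        xml.string('raw', strip_whitespace=False, required=False),
    ])

    note = xml.parse_from_string(
        note_processor, '<note><body>  hello  </body></note>')
    # note['body'] expected to be 'hello'; the missing optional 'raw' falls back
    # to its default of ''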
gatkin/declxml | declxml.py | user_object | def user_object(
element_name, # type: Text
cls, # type: Type[Any]
child_processors, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for user objects.
:param cls: Class object with a no-argument constructor or other callable no-argument object.
See also :func:`declxml.dictionary`
"""
converter = _user_object_converter(cls)
processor = _Aggregate(element_name, converter, child_processors, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | python | def user_object(
element_name, # type: Text
cls, # type: Type[Any]
child_processors, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for user objects.
:param cls: Class object with a no-argument constructor or other callable no-argument object.
See also :func:`declxml.dictionary`
"""
converter = _user_object_converter(cls)
processor = _Aggregate(element_name, converter, child_processors, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | [
"def",
"user_object",
"(",
"element_name",
",",
"# type: Text",
"cls",
",",
"# type: Type[Any]",
"child_processors",
",",
"# type: List[Processor]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> RootProcessor",
"converter",
"=",
"_user_object_converter",
"(",
"cls",
")",
"processor",
"=",
"_Aggregate",
"(",
"element_name",
",",
"converter",
",",
"child_processors",
",",
"required",
",",
"alias",
")",
"return",
"_processor_wrap_if_hooks",
"(",
"processor",
",",
"hooks",
")"
] | Create a processor for user objects.
:param cls: Class object with a no-argument constructor or other callable no-argument object.
See also :func:`declxml.dictionary` | [
"Create",
"a",
"processor",
"for",
"user",
"objects",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L611-L629 | train |
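A sketch of ``user_object`` with an assumed ``Author`` class that has the no-argument constructor the docstring requires; the field assignment fallback matches the ``_user_object_converter`` record later in this file.

.. sourcecode:: python

    import declxml as xml

    class Author(object):
        """Plain class with a no-argument constructor."""

        def __init__(self):
            self.name = ''
            self.books_written = 0

    author_processor = xml.user_object('author', Author, [
        xml.string('name'),
        xml.integer('books-written', alias='books_written'),
    ])

    author = xml.parse_from_string(
        author_processor,
        '<author><name>Octavia Butler</name>'
        '<books-written>12</books-written></author>')
    # Expected: author.name == 'Octavia Butler', author.books_written == 12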
gatkin/declxml | declxml.py | _element_append_path | def _element_append_path(
start_element, # type: ET.Element
element_names # type: Iterable[Text]
):
# type: (...) -> ET.Element
"""
Append the list of element names as a path to the provided start element.
:return: The final element along the path.
"""
end_element = start_element
for element_name in element_names:
new_element = ET.Element(element_name)
end_element.append(new_element)
end_element = new_element
return end_element | python | def _element_append_path(
start_element, # type: ET.Element
element_names # type: Iterable[Text]
):
# type: (...) -> ET.Element
"""
Append the list of element names as a path to the provided start element.
:return: The final element along the path.
"""
end_element = start_element
for element_name in element_names:
new_element = ET.Element(element_name)
end_element.append(new_element)
end_element = new_element
return end_element | [
"def",
"_element_append_path",
"(",
"start_element",
",",
"# type: ET.Element",
"element_names",
"# type: Iterable[Text]",
")",
":",
"# type: (...) -> ET.Element",
"end_element",
"=",
"start_element",
"for",
"element_name",
"in",
"element_names",
":",
"new_element",
"=",
"ET",
".",
"Element",
"(",
"element_name",
")",
"end_element",
".",
"append",
"(",
"new_element",
")",
"end_element",
"=",
"new_element",
"return",
"end_element"
] | Append the list of element names as a path to the provided start element.
:return: The final element along the path. | [
"Append",
"the",
"list",
"of",
"element",
"names",
"as",
"a",
"path",
"to",
"the",
"provided",
"start",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1390-L1406 | train |
gatkin/declxml | declxml.py | _element_find_from_root | def _element_find_from_root(
root, # type: ET.Element
element_path # type: Text
):
# type: (...) -> Optional[ET.Element]
"""
Find the element specified by the given path starting from the root element of the document.
The first component of the element path is expected to be the name of the root element. Return
None if the element is not found.
"""
element = None
element_names = element_path.split('/')
if element_names[0] == root.tag:
if len(element_names) > 1:
element = root.find('/'.join(element_names[1:]))
else:
element = root
return element | python | def _element_find_from_root(
root, # type: ET.Element
element_path # type: Text
):
# type: (...) -> Optional[ET.Element]
"""
Find the element specified by the given path starting from the root element of the document.
The first component of the element path is expected to be the name of the root element. Return
None if the element is not found.
"""
element = None
element_names = element_path.split('/')
if element_names[0] == root.tag:
if len(element_names) > 1:
element = root.find('/'.join(element_names[1:]))
else:
element = root
return element | [
"def",
"_element_find_from_root",
"(",
"root",
",",
"# type: ET.Element",
"element_path",
"# type: Text",
")",
":",
"# type: (...) -> Optional[ET.Element]",
"element",
"=",
"None",
"element_names",
"=",
"element_path",
".",
"split",
"(",
"'/'",
")",
"if",
"element_names",
"[",
"0",
"]",
"==",
"root",
".",
"tag",
":",
"if",
"len",
"(",
"element_names",
")",
">",
"1",
":",
"element",
"=",
"root",
".",
"find",
"(",
"'/'",
".",
"join",
"(",
"element_names",
"[",
"1",
":",
"]",
")",
")",
"else",
":",
"element",
"=",
"root",
"return",
"element"
] | Find the element specified by the given path starting from the root element of the document.
The first component of the element path is expected to be the name of the root element. Return
None if the element is not found. | [
"Find",
"the",
"element",
"specified",
"by",
"the",
"given",
"path",
"starting",
"from",
"the",
"root",
"element",
"of",
"the",
"document",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1409-L1429 | train |
gatkin/declxml | declxml.py | _element_get_or_add_from_parent | def _element_get_or_add_from_parent(
parent, # type: ET.Element
element_path # type: Text
):
# type: (...) -> ET.Element
"""
Ensure all elements specified in the given path relative to the provided parent element exist.
Create new elements along the path only when needed, and return the final element specified
by the path.
"""
element_names = element_path.split('/')
# Starting from the parent, walk the element path until we find the first element in the path
# that does not exist. Create that element and all the elements following it in the path. If
# all elements along the path exist, then we will simply walk the full path to the final
# element we want to return.
existing_element = None
previous_element = parent
for i, element_name in enumerate(element_names):
existing_element = previous_element.find(element_name)
if existing_element is None:
existing_element = _element_append_path(previous_element, element_names[i:])
break
previous_element = existing_element
assert existing_element is not None
return existing_element | python | def _element_get_or_add_from_parent(
parent, # type: ET.Element
element_path # type: Text
):
# type: (...) -> ET.Element
"""
Ensure all elements specified in the given path relative to the provided parent element exist.
Create new elements along the path only when needed, and return the final element specified
by the path.
"""
element_names = element_path.split('/')
# Starting from the parent, walk the element path until we find the first element in the path
# that does not exist. Create that element and all the elements following it in the path. If
# all elements along the path exist, then we will simply walk the full path to the final
# element we want to return.
existing_element = None
previous_element = parent
for i, element_name in enumerate(element_names):
existing_element = previous_element.find(element_name)
if existing_element is None:
existing_element = _element_append_path(previous_element, element_names[i:])
break
previous_element = existing_element
assert existing_element is not None
return existing_element | [
"def",
"_element_get_or_add_from_parent",
"(",
"parent",
",",
"# type: ET.Element",
"element_path",
"# type: Text",
")",
":",
"# type: (...) -> ET.Element",
"element_names",
"=",
"element_path",
".",
"split",
"(",
"'/'",
")",
"# Starting from the parent, walk the element path until we find the first element in the path",
"# that does not exist. Create that element and all the elements following it in the path. If",
"# all elements along the path exist, then we will simply walk the full path to the final",
"# element we want to return.",
"existing_element",
"=",
"None",
"previous_element",
"=",
"parent",
"for",
"i",
",",
"element_name",
"in",
"enumerate",
"(",
"element_names",
")",
":",
"existing_element",
"=",
"previous_element",
".",
"find",
"(",
"element_name",
")",
"if",
"existing_element",
"is",
"None",
":",
"existing_element",
"=",
"_element_append_path",
"(",
"previous_element",
",",
"element_names",
"[",
"i",
":",
"]",
")",
"break",
"previous_element",
"=",
"existing_element",
"assert",
"existing_element",
"is",
"not",
"None",
"return",
"existing_element"
] | Ensure all elements specified in the given path relative to the provided parent element exist.
Create new elements along the path only when needed, and return the final element specified
by the path. | [
"Ensure",
"all",
"elements",
"specified",
"in",
"the",
"given",
"path",
"relative",
"to",
"the",
"provided",
"parent",
"element",
"exist",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1432-L1460 | train |
gatkin/declxml | declxml.py | _element_path_create_new | def _element_path_create_new(element_path):
# type: (Text) -> Tuple[ET.Element, ET.Element]
"""
Create an entirely new element path.
Return a tuple where the first item is the first element in the path, and the second item is
the final element in the path.
"""
element_names = element_path.split('/')
start_element = ET.Element(element_names[0])
end_element = _element_append_path(start_element, element_names[1:])
return start_element, end_element | python | def _element_path_create_new(element_path):
# type: (Text) -> Tuple[ET.Element, ET.Element]
"""
Create an entirely new element path.
Return a tuple where the first item is the first element in the path, and the second item is
the final element in the path.
"""
element_names = element_path.split('/')
start_element = ET.Element(element_names[0])
end_element = _element_append_path(start_element, element_names[1:])
return start_element, end_element | [
"def",
"_element_path_create_new",
"(",
"element_path",
")",
":",
"# type: (Text) -> Tuple[ET.Element, ET.Element]",
"element_names",
"=",
"element_path",
".",
"split",
"(",
"'/'",
")",
"start_element",
"=",
"ET",
".",
"Element",
"(",
"element_names",
"[",
"0",
"]",
")",
"end_element",
"=",
"_element_append_path",
"(",
"start_element",
",",
"element_names",
"[",
"1",
":",
"]",
")",
"return",
"start_element",
",",
"end_element"
] | Create an entirely new element path.
Return a tuple where the first item is the first element in the path, and the second item is
the final element in the path. | [
"Create",
"an",
"entirely",
"new",
"element",
"path",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1463-L1476 | train |
gatkin/declxml | declxml.py | _hooks_apply_after_parse | def _hooks_apply_after_parse(
hooks, # type: Optional[Hooks]
state, # type: _ProcessorState
value # type: Any
):
# type: (...) -> Any
"""Apply the after parse hook."""
if hooks and hooks.after_parse:
return hooks.after_parse(ProcessorStateView(state), value)
return value | python | def _hooks_apply_after_parse(
hooks, # type: Optional[Hooks]
state, # type: _ProcessorState
value # type: Any
):
# type: (...) -> Any
"""Apply the after parse hook."""
if hooks and hooks.after_parse:
return hooks.after_parse(ProcessorStateView(state), value)
return value | [
"def",
"_hooks_apply_after_parse",
"(",
"hooks",
",",
"# type: Optional[Hooks]",
"state",
",",
"# type: _ProcessorState",
"value",
"# type: Any",
")",
":",
"# type: (...) -> Any",
"if",
"hooks",
"and",
"hooks",
".",
"after_parse",
":",
"return",
"hooks",
".",
"after_parse",
"(",
"ProcessorStateView",
"(",
"state",
")",
",",
"value",
")",
"return",
"value"
] | Apply the after parse hook. | [
"Apply",
"the",
"after",
"parse",
"hook",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1479-L1489 | train |
gatkin/declxml | declxml.py | _hooks_apply_before_serialize | def _hooks_apply_before_serialize(
hooks, # type: Optional[Hooks]
state, # type: _ProcessorState
value # type: Any
):
# type: (...) -> Any
"""Apply the before serialize hook."""
if hooks and hooks.before_serialize:
return hooks.before_serialize(ProcessorStateView(state), value)
return value | python | def _hooks_apply_before_serialize(
hooks, # type: Optional[Hooks]
state, # type: _ProcessorState
value # type: Any
):
# type: (...) -> Any
"""Apply the before serialize hook."""
if hooks and hooks.before_serialize:
return hooks.before_serialize(ProcessorStateView(state), value)
return value | [
"def",
"_hooks_apply_before_serialize",
"(",
"hooks",
",",
"# type: Optional[Hooks]",
"state",
",",
"# type: _ProcessorState",
"value",
"# type: Any",
")",
":",
"# type: (...) -> Any",
"if",
"hooks",
"and",
"hooks",
".",
"before_serialize",
":",
"return",
"hooks",
".",
"before_serialize",
"(",
"ProcessorStateView",
"(",
"state",
")",
",",
"value",
")",
"return",
"value"
] | Apply the before serialize hook. | [
"Apply",
"the",
"before",
"serialize",
"hook",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1492-L1502 | train |
gatkin/declxml | declxml.py | _named_tuple_converter | def _named_tuple_converter(tuple_type):
# type: (Type[Tuple]) -> _AggregateConverter
"""Return an _AggregateConverter for named tuples of the given type."""
def _from_dict(dict_value):
if dict_value:
return tuple_type(**dict_value)
# Cannot construct a namedtuple value from an empty dictionary
return None
def _to_dict(value):
if value:
return value._asdict()
return {}
converter = _AggregateConverter(from_dict=_from_dict, to_dict=_to_dict)
return converter | python | def _named_tuple_converter(tuple_type):
# type: (Type[Tuple]) -> _AggregateConverter
"""Return an _AggregateConverter for named tuples of the given type."""
def _from_dict(dict_value):
if dict_value:
return tuple_type(**dict_value)
# Cannot construct a namedtuple value from an empty dictionary
return None
def _to_dict(value):
if value:
return value._asdict()
return {}
converter = _AggregateConverter(from_dict=_from_dict, to_dict=_to_dict)
return converter | [
"def",
"_named_tuple_converter",
"(",
"tuple_type",
")",
":",
"# type: (Type[Tuple]) -> _AggregateConverter",
"def",
"_from_dict",
"(",
"dict_value",
")",
":",
"if",
"dict_value",
":",
"return",
"tuple_type",
"(",
"*",
"*",
"dict_value",
")",
"# Cannot construct a namedtuple value from an empty dictionary",
"return",
"None",
"def",
"_to_dict",
"(",
"value",
")",
":",
"if",
"value",
":",
"return",
"value",
".",
"_asdict",
"(",
")",
"return",
"{",
"}",
"converter",
"=",
"_AggregateConverter",
"(",
"from_dict",
"=",
"_from_dict",
",",
"to_dict",
"=",
"_to_dict",
")",
"return",
"converter"
] | Return an _AggregateConverter for named tuples of the given type. | [
"Return",
"an",
"_AggregateConverter",
"for",
"named",
"tuples",
"of",
"the",
"given",
"type",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1511-L1528 | train |
gatkin/declxml | declxml.py | _number_parser | def _number_parser(str_to_number_func):
"""Return a function to parse numbers."""
def _parse_number_value(element_text, state):
value = None
try:
value = str_to_number_func(element_text)
except (ValueError, TypeError):
state.raise_error(InvalidPrimitiveValue,
'Invalid numeric value "{}"'.format(element_text))
return value
return _parse_number_value | python | def _number_parser(str_to_number_func):
"""Return a function to parse numbers."""
def _parse_number_value(element_text, state):
value = None
try:
value = str_to_number_func(element_text)
except (ValueError, TypeError):
state.raise_error(InvalidPrimitiveValue,
'Invalid numeric value "{}"'.format(element_text))
return value
return _parse_number_value | [
"def",
"_number_parser",
"(",
"str_to_number_func",
")",
":",
"def",
"_parse_number_value",
"(",
"element_text",
",",
"state",
")",
":",
"value",
"=",
"None",
"try",
":",
"value",
"=",
"str_to_number_func",
"(",
"element_text",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"state",
".",
"raise_error",
"(",
"InvalidPrimitiveValue",
",",
"'Invalid numeric value \"{}\"'",
".",
"format",
"(",
"element_text",
")",
")",
"return",
"value",
"return",
"_parse_number_value"
] | Return a function to parse numbers. | [
"Return",
"a",
"function",
"to",
"parse",
"numbers",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1531-L1544 | train |
gatkin/declxml | declxml.py | _parse_boolean | def _parse_boolean(element_text, state):
"""Parse the raw XML string as a boolean value."""
value = None
lowered_text = element_text.lower()
if lowered_text == 'true':
value = True
elif lowered_text == 'false':
value = False
else:
state.raise_error(InvalidPrimitiveValue, 'Invalid boolean value "{}"'.format(element_text))
return value | python | def _parse_boolean(element_text, state):
"""Parse the raw XML string as a boolean value."""
value = None
lowered_text = element_text.lower()
if lowered_text == 'true':
value = True
elif lowered_text == 'false':
value = False
else:
state.raise_error(InvalidPrimitiveValue, 'Invalid boolean value "{}"'.format(element_text))
return value | [
"def",
"_parse_boolean",
"(",
"element_text",
",",
"state",
")",
":",
"value",
"=",
"None",
"lowered_text",
"=",
"element_text",
".",
"lower",
"(",
")",
"if",
"lowered_text",
"==",
"'true'",
":",
"value",
"=",
"True",
"elif",
"lowered_text",
"==",
"'false'",
":",
"value",
"=",
"False",
"else",
":",
"state",
".",
"raise_error",
"(",
"InvalidPrimitiveValue",
",",
"'Invalid boolean value \"{}\"'",
".",
"format",
"(",
"element_text",
")",
")",
"return",
"value"
] | Parse the raw XML string as a boolean value. | [
"Parse",
"the",
"raw",
"XML",
"string",
"as",
"a",
"boolean",
"value",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1547-L1559 | train |
gatkin/declxml | declxml.py | _string_parser | def _string_parser(strip_whitespace):
"""Return a parser function for parsing string values."""
def _parse_string_value(element_text, _state):
if element_text is None:
value = ''
elif strip_whitespace:
value = element_text.strip()
else:
value = element_text
return value
return _parse_string_value | python | def _string_parser(strip_whitespace):
"""Return a parser function for parsing string values."""
def _parse_string_value(element_text, _state):
if element_text is None:
value = ''
elif strip_whitespace:
value = element_text.strip()
else:
value = element_text
return value
return _parse_string_value | [
"def",
"_string_parser",
"(",
"strip_whitespace",
")",
":",
"def",
"_parse_string_value",
"(",
"element_text",
",",
"_state",
")",
":",
"if",
"element_text",
"is",
"None",
":",
"value",
"=",
"''",
"elif",
"strip_whitespace",
":",
"value",
"=",
"element_text",
".",
"strip",
"(",
")",
"else",
":",
"value",
"=",
"element_text",
"return",
"value",
"return",
"_parse_string_value"
] | Return a parser function for parsing string values. | [
"Return",
"a",
"parser",
"function",
"for",
"parsing",
"string",
"values",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1574-L1586 | train |
gatkin/declxml | declxml.py | _user_object_converter | def _user_object_converter(cls):
# type: (Type[Any]) -> _AggregateConverter
"""Return an _AggregateConverter for a user object of the given class."""
def _from_dict(dict_value):
try:
object_value = cls(**dict_value)
except TypeError:
# Constructor does not support keyword arguments, try setting each
# field individually.
object_value = cls()
for field_name, field_value in dict_value.items():
setattr(object_value, field_name, field_value)
return object_value
def _to_dict(value):
if value:
return value.__dict__
return {}
return _AggregateConverter(from_dict=_from_dict, to_dict=_to_dict) | python | def _user_object_converter(cls):
# type: (Type[Any]) -> _AggregateConverter
"""Return an _AggregateConverter for a user object of the given class."""
def _from_dict(dict_value):
try:
object_value = cls(**dict_value)
except TypeError:
# Constructor does not support keyword arguments, try setting each
# field individually.
object_value = cls()
for field_name, field_value in dict_value.items():
setattr(object_value, field_name, field_value)
return object_value
def _to_dict(value):
if value:
return value.__dict__
return {}
return _AggregateConverter(from_dict=_from_dict, to_dict=_to_dict) | [
"def",
"_user_object_converter",
"(",
"cls",
")",
":",
"# type: (Type[Any]) -> _AggregateConverter",
"def",
"_from_dict",
"(",
"dict_value",
")",
":",
"try",
":",
"object_value",
"=",
"cls",
"(",
"*",
"*",
"dict_value",
")",
"except",
"TypeError",
":",
"# Constructor does not support keyword arguments, try setting each",
"# field individually.",
"object_value",
"=",
"cls",
"(",
")",
"for",
"field_name",
",",
"field_value",
"in",
"dict_value",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"object_value",
",",
"field_name",
",",
"field_value",
")",
"return",
"object_value",
"def",
"_to_dict",
"(",
"value",
")",
":",
"if",
"value",
":",
"return",
"value",
".",
"__dict__",
"return",
"{",
"}",
"return",
"_AggregateConverter",
"(",
"from_dict",
"=",
"_from_dict",
",",
"to_dict",
"=",
"_to_dict",
")"
] | Return an _AggregateConverter for a user object of the given class. | [
"Return",
"an",
"_AggregateConverter",
"for",
"a",
"user",
"object",
"of",
"the",
"given",
"class",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1589-L1610 | train |
gatkin/declxml | declxml.py | _xml_namespace_strip | def _xml_namespace_strip(root):
# type: (ET.Element) -> None
"""Strip the XML namespace prefix from all element tags under the given root Element."""
if '}' not in root.tag:
return # Nothing to do, no namespace present
for element in root.iter():
if '}' in element.tag:
element.tag = element.tag.split('}')[1]
else: # pragma: no cover
# We should never get here. If there is a namespace, then the namespace should be
# included in all elements.
pass | python | def _xml_namespace_strip(root):
# type: (ET.Element) -> None
"""Strip the XML namespace prefix from all element tags under the given root Element."""
if '}' not in root.tag:
return # Nothing to do, no namespace present
for element in root.iter():
if '}' in element.tag:
element.tag = element.tag.split('}')[1]
else: # pragma: no cover
# We should never get here. If there is a namespace, then the namespace should be
# included in all elements.
pass | [
"def",
"_xml_namespace_strip",
"(",
"root",
")",
":",
"# type: (ET.Element) -> None",
"if",
"'}'",
"not",
"in",
"root",
".",
"tag",
":",
"return",
"# Nothing to do, no namespace present",
"for",
"element",
"in",
"root",
".",
"iter",
"(",
")",
":",
"if",
"'}'",
"in",
"element",
".",
"tag",
":",
"element",
".",
"tag",
"=",
"element",
".",
"tag",
".",
"split",
"(",
"'}'",
")",
"[",
"1",
"]",
"else",
":",
"# pragma: no cover",
"# We should never get here. If there is a namespace, then the namespace should be",
"# included in all elements.",
"pass"
] | Strip the XML namespace prefix from all element tags under the given root Element. | [
"Strip",
"the",
"XML",
"namespace",
"prefix",
"from",
"all",
"element",
"tags",
"under",
"the",
"given",
"root",
"Element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1613-L1625 | train |
gatkin/declxml | declxml.py | _Aggregate.parse_at_element | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the provided element as an aggregate."""
parsed_dict = self._dictionary.parse_at_element(element, state)
return self._converter.from_dict(parsed_dict) | python | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the provided element as an aggregate."""
parsed_dict = self._dictionary.parse_at_element(element, state)
return self._converter.from_dict(parsed_dict) | [
"def",
"parse_at_element",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"parsed_dict",
"=",
"self",
".",
"_dictionary",
".",
"parse_at_element",
"(",
"element",
",",
"state",
")",
"return",
"self",
".",
"_converter",
".",
"from_dict",
"(",
"parsed_dict",
")"
] | Parse the provided element as an aggregate. | [
"Parse",
"the",
"provided",
"element",
"as",
"an",
"aggregate",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L678-L686 | train |
gatkin/declxml | declxml.py | _Aggregate.parse_at_root | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the root XML element as an aggregate."""
parsed_dict = self._dictionary.parse_at_root(root, state)
return self._converter.from_dict(parsed_dict) | python | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the root XML element as an aggregate."""
parsed_dict = self._dictionary.parse_at_root(root, state)
return self._converter.from_dict(parsed_dict) | [
"def",
"parse_at_root",
"(",
"self",
",",
"root",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"parsed_dict",
"=",
"self",
".",
"_dictionary",
".",
"parse_at_root",
"(",
"root",
",",
"state",
")",
"return",
"self",
".",
"_converter",
".",
"from_dict",
"(",
"parsed_dict",
")"
] | Parse the root XML element as an aggregate. | [
"Parse",
"the",
"root",
"XML",
"element",
"as",
"an",
"aggregate",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L688-L696 | train |
gatkin/declxml | declxml.py | _Aggregate.parse_from_parent | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the aggregate from the provided parent XML element."""
parsed_dict = self._dictionary.parse_from_parent(parent, state)
return self._converter.from_dict(parsed_dict) | python | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the aggregate from the provided parent XML element."""
parsed_dict = self._dictionary.parse_from_parent(parent, state)
return self._converter.from_dict(parsed_dict) | [
"def",
"parse_from_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"parsed_dict",
"=",
"self",
".",
"_dictionary",
".",
"parse_from_parent",
"(",
"parent",
",",
"state",
")",
"return",
"self",
".",
"_converter",
".",
"from_dict",
"(",
"parsed_dict",
")"
] | Parse the aggregate from the provided parent XML element. | [
"Parse",
"the",
"aggregate",
"from",
"the",
"provided",
"parent",
"XML",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L698-L706 | train |
gatkin/declxml | declxml.py | _Aggregate.serialize | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value to a new element and returns the element."""
dict_value = self._converter.to_dict(value)
return self._dictionary.serialize(dict_value, state) | python | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value to a new element and returns the element."""
dict_value = self._converter.to_dict(value)
return self._dictionary.serialize(dict_value, state) | [
"def",
"serialize",
"(",
"self",
",",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> ET.Element",
"dict_value",
"=",
"self",
".",
"_converter",
".",
"to_dict",
"(",
"value",
")",
"return",
"self",
".",
"_dictionary",
".",
"serialize",
"(",
"dict_value",
",",
"state",
")"
] | Serialize the value to a new element and return the element. | [
"Serialize",
"the",
"value",
"to",
"a",
"new",
"element",
"and",
"returns",
"the",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L708-L716 | train |
gatkin/declxml | declxml.py | _Aggregate.serialize_on_parent | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value and adds it to the parent."""
dict_value = self._converter.to_dict(value)
self._dictionary.serialize_on_parent(parent, dict_value, state) | python | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value and adds it to the parent."""
dict_value = self._converter.to_dict(value)
self._dictionary.serialize_on_parent(parent, dict_value, state) | [
"def",
"serialize_on_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> None",
"dict_value",
"=",
"self",
".",
"_converter",
".",
"to_dict",
"(",
"value",
")",
"self",
".",
"_dictionary",
".",
"serialize_on_parent",
"(",
"parent",
",",
"dict_value",
",",
"state",
")"
] | Serialize the value and add it to the parent. | [
"Serialize",
"the",
"value",
"and",
"adds",
"it",
"to",
"the",
"parent",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L718-L727 | train |
gatkin/declxml | declxml.py | _Array.parse_at_element | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the provided element as an array."""
item_iter = element.findall(self._item_processor.element_path)
return self._parse(item_iter, state) | python | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the provided element as an array."""
item_iter = element.findall(self._item_processor.element_path)
return self._parse(item_iter, state) | [
"def",
"parse_at_element",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"item_iter",
"=",
"element",
".",
"findall",
"(",
"self",
".",
"_item_processor",
".",
"element_path",
")",
"return",
"self",
".",
"_parse",
"(",
"item_iter",
",",
"state",
")"
] | Parse the provided element as an array. | [
"Parse",
"the",
"provided",
"element",
"as",
"an",
"array",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L784-L792 | train |
gatkin/declxml | declxml.py | _Array.parse_at_root | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the root XML element as an array."""
if not self._nested:
raise InvalidRootProcessor('Non-nested array "{}" cannot be root element'.format(
self.alias))
parsed_array = [] # type: List
array_element = _element_find_from_root(root, self._nested)
if array_element is not None:
parsed_array = self.parse_at_element(array_element, state)
elif self.required:
raise MissingValue('Missing required array at root: "{}"'.format(self._nested))
return parsed_array | python | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the root XML element as an array."""
if not self._nested:
raise InvalidRootProcessor('Non-nested array "{}" cannot be root element'.format(
self.alias))
parsed_array = [] # type: List
array_element = _element_find_from_root(root, self._nested)
if array_element is not None:
parsed_array = self.parse_at_element(array_element, state)
elif self.required:
raise MissingValue('Missing required array at root: "{}"'.format(self._nested))
return parsed_array | [
"def",
"parse_at_root",
"(",
"self",
",",
"root",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"if",
"not",
"self",
".",
"_nested",
":",
"raise",
"InvalidRootProcessor",
"(",
"'Non-nested array \"{}\" cannot be root element'",
".",
"format",
"(",
"self",
".",
"alias",
")",
")",
"parsed_array",
"=",
"[",
"]",
"# type: List",
"array_element",
"=",
"_element_find_from_root",
"(",
"root",
",",
"self",
".",
"_nested",
")",
"if",
"array_element",
"is",
"not",
"None",
":",
"parsed_array",
"=",
"self",
".",
"parse_at_element",
"(",
"array_element",
",",
"state",
")",
"elif",
"self",
".",
"required",
":",
"raise",
"MissingValue",
"(",
"'Missing required array at root: \"{}\"'",
".",
"format",
"(",
"self",
".",
"_nested",
")",
")",
"return",
"parsed_array"
] | Parse the root XML element as an array. | [
"Parse",
"the",
"root",
"XML",
"element",
"as",
"an",
"array",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L794-L813 | train |
gatkin/declxml | declxml.py | _Array.parse_from_parent | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the array data from the provided parent XML element."""
item_iter = parent.findall(self._item_path)
return self._parse(item_iter, state) | python | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the array data from the provided parent XML element."""
item_iter = parent.findall(self._item_path)
return self._parse(item_iter, state) | [
"def",
"parse_from_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"item_iter",
"=",
"parent",
".",
"findall",
"(",
"self",
".",
"_item_path",
")",
"return",
"self",
".",
"_parse",
"(",
"item_iter",
",",
"state",
")"
] | Parse the array data from the provided parent XML element. | [
"Parse",
"the",
"array",
"data",
"from",
"the",
"provided",
"parent",
"XML",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L815-L823 | train |
gatkin/declxml | declxml.py | _Array.serialize | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value into a new Element object and return it."""
if self._nested is None:
state.raise_error(InvalidRootProcessor,
'Cannot directly serialize a non-nested array "{}"'
.format(self.alias))
if not value and self.required:
state.raise_error(MissingValue, 'Missing required array: "{}"'.format(
self.alias))
start_element, end_element = _element_path_create_new(self._nested)
self._serialize(end_element, value, state)
return start_element | python | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value into a new Element object and return it."""
if self._nested is None:
state.raise_error(InvalidRootProcessor,
'Cannot directly serialize a non-nested array "{}"'
.format(self.alias))
if not value and self.required:
state.raise_error(MissingValue, 'Missing required array: "{}"'.format(
self.alias))
start_element, end_element = _element_path_create_new(self._nested)
self._serialize(end_element, value, state)
return start_element | [
"def",
"serialize",
"(",
"self",
",",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> ET.Element",
"if",
"self",
".",
"_nested",
"is",
"None",
":",
"state",
".",
"raise_error",
"(",
"InvalidRootProcessor",
",",
"'Cannot directly serialize a non-nested array \"{}\"'",
".",
"format",
"(",
"self",
".",
"alias",
")",
")",
"if",
"not",
"value",
"and",
"self",
".",
"required",
":",
"state",
".",
"raise_error",
"(",
"MissingValue",
",",
"'Missing required array: \"{}\"'",
".",
"format",
"(",
"self",
".",
"alias",
")",
")",
"start_element",
",",
"end_element",
"=",
"_element_path_create_new",
"(",
"self",
".",
"_nested",
")",
"self",
".",
"_serialize",
"(",
"end_element",
",",
"value",
",",
"state",
")",
"return",
"start_element"
] | Serialize the value into a new Element object and return it. | [
"Serialize",
"the",
"value",
"into",
"a",
"new",
"Element",
"object",
"and",
"return",
"it",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L825-L844 | train |
gatkin/declxml | declxml.py | _Array.serialize_on_parent | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value and append it to the parent element."""
if not value and self.required:
state.raise_error(MissingValue, 'Missing required array: "{}"'.format(
self.alias))
if not value and self.omit_empty:
return # Do nothing
if self._nested is not None:
array_parent = _element_get_or_add_from_parent(parent, self._nested)
else:
# Embedded array has all items serialized directly on the parent.
array_parent = parent
self._serialize(array_parent, value, state) | python | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value and append it to the parent element."""
if not value and self.required:
state.raise_error(MissingValue, 'Missing required array: "{}"'.format(
self.alias))
if not value and self.omit_empty:
return # Do nothing
if self._nested is not None:
array_parent = _element_get_or_add_from_parent(parent, self._nested)
else:
# Embedded array has all items serialized directly on the parent.
array_parent = parent
self._serialize(array_parent, value, state) | [
"def",
"serialize_on_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> None",
"if",
"not",
"value",
"and",
"self",
".",
"required",
":",
"state",
".",
"raise_error",
"(",
"MissingValue",
",",
"'Missing required array: \"{}\"'",
".",
"format",
"(",
"self",
".",
"alias",
")",
")",
"if",
"not",
"value",
"and",
"self",
".",
"omit_empty",
":",
"return",
"# Do nothing",
"if",
"self",
".",
"_nested",
"is",
"not",
"None",
":",
"array_parent",
"=",
"_element_get_or_add_from_parent",
"(",
"parent",
",",
"self",
".",
"_nested",
")",
"else",
":",
"# Embedded array has all items serialized directly on the parent.",
"array_parent",
"=",
"parent",
"self",
".",
"_serialize",
"(",
"array_parent",
",",
"value",
",",
"state",
")"
] | Serialize the value and append it to the parent element. | [
"Serialize",
"the",
"value",
"and",
"append",
"it",
"to",
"the",
"parent",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L846-L867 | train |
gatkin/declxml | declxml.py | _Array._parse | def _parse(
self,
item_iter, # type: Iterable[ET.Element]
state # type: _ProcessorState
):
# type: (...) -> List
"""Parse the array data using the provided iterator of XML elements."""
parsed_array = []
for i, item in enumerate(item_iter):
state.push_location(self._item_processor.element_path, i)
parsed_array.append(self._item_processor.parse_at_element(item, state))
state.pop_location()
if not parsed_array and self.required:
state.raise_error(MissingValue, 'Missing required array "{}"'.format(self.alias))
return parsed_array | python | def _parse(
self,
item_iter, # type: Iterable[ET.Element]
state # type: _ProcessorState
):
# type: (...) -> List
"""Parse the array data using the provided iterator of XML elements."""
parsed_array = []
for i, item in enumerate(item_iter):
state.push_location(self._item_processor.element_path, i)
parsed_array.append(self._item_processor.parse_at_element(item, state))
state.pop_location()
if not parsed_array and self.required:
state.raise_error(MissingValue, 'Missing required array "{}"'.format(self.alias))
return parsed_array | [
"def",
"_parse",
"(",
"self",
",",
"item_iter",
",",
"# type: Iterable[ET.Element]",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> List",
"parsed_array",
"=",
"[",
"]",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"item_iter",
")",
":",
"state",
".",
"push_location",
"(",
"self",
".",
"_item_processor",
".",
"element_path",
",",
"i",
")",
"parsed_array",
".",
"append",
"(",
"self",
".",
"_item_processor",
".",
"parse_at_element",
"(",
"item",
",",
"state",
")",
")",
"state",
".",
"pop_location",
"(",
")",
"if",
"not",
"parsed_array",
"and",
"self",
".",
"required",
":",
"state",
".",
"raise_error",
"(",
"MissingValue",
",",
"'Missing required array \"{}\"'",
".",
"format",
"(",
"self",
".",
"alias",
")",
")",
"return",
"parsed_array"
] | Parse the array data using the provided iterator of XML elements. | [
"Parse",
"the",
"array",
"data",
"using",
"the",
"provided",
"iterator",
"of",
"XML",
"elements",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L869-L886 | train |
gatkin/declxml | declxml.py | _Array._serialize | def _serialize(
self,
array_parent, # type: ET.Element
value, # type: List
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the array value and add it to the array parent element."""
if not value:
# Nothing to do. Avoid attempting to iterate over a possibly
# None value.
return
for i, item_value in enumerate(value):
state.push_location(self._item_processor.element_path, i)
item_element = self._item_processor.serialize(item_value, state)
array_parent.append(item_element)
state.pop_location() | python | def _serialize(
self,
array_parent, # type: ET.Element
value, # type: List
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the array value and add it to the array parent element."""
if not value:
# Nothing to do. Avoid attempting to iterate over a possibly
# None value.
return
for i, item_value in enumerate(value):
state.push_location(self._item_processor.element_path, i)
item_element = self._item_processor.serialize(item_value, state)
array_parent.append(item_element)
state.pop_location() | [
"def",
"_serialize",
"(",
"self",
",",
"array_parent",
",",
"# type: ET.Element",
"value",
",",
"# type: List",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> None",
"if",
"not",
"value",
":",
"# Nothing to do. Avoid attempting to iterate over a possibly",
"# None value.",
"return",
"for",
"i",
",",
"item_value",
"in",
"enumerate",
"(",
"value",
")",
":",
"state",
".",
"push_location",
"(",
"self",
".",
"_item_processor",
".",
"element_path",
",",
"i",
")",
"item_element",
"=",
"self",
".",
"_item_processor",
".",
"serialize",
"(",
"item_value",
",",
"state",
")",
"array_parent",
".",
"append",
"(",
"item_element",
")",
"state",
".",
"pop_location",
"(",
")"
] | Serialize the array value and add it to the array parent element. | [
"Serialize",
"the",
"array",
"value",
"and",
"add",
"it",
"to",
"the",
"array",
"parent",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L888-L905 | train |
gatkin/declxml | declxml.py | _Dictionary.parse_at_element | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the provided element as a dictionary."""
parsed_dict = {}
for child in self._child_processors:
state.push_location(child.element_path)
parsed_dict[child.alias] = child.parse_from_parent(element, state)
state.pop_location()
return parsed_dict | python | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the provided element as a dictionary."""
parsed_dict = {}
for child in self._child_processors:
state.push_location(child.element_path)
parsed_dict[child.alias] = child.parse_from_parent(element, state)
state.pop_location()
return parsed_dict | [
"def",
"parse_at_element",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"parsed_dict",
"=",
"{",
"}",
"for",
"child",
"in",
"self",
".",
"_child_processors",
":",
"state",
".",
"push_location",
"(",
"child",
".",
"element_path",
")",
"parsed_dict",
"[",
"child",
".",
"alias",
"]",
"=",
"child",
".",
"parse_from_parent",
"(",
"element",
",",
"state",
")",
"state",
".",
"pop_location",
"(",
")",
"return",
"parsed_dict"
] | Parse the provided element as a dictionary. | [
"Parse",
"the",
"provided",
"element",
"as",
"a",
"dictionary",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L945-L959 | train |
gatkin/declxml | declxml.py | _Dictionary.parse_at_root | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the root XML element as a dictionary."""
parsed_dict = {} # type: Dict
dict_element = _element_find_from_root(root, self.element_path)
if dict_element is not None:
parsed_dict = self.parse_at_element(dict_element, state)
elif self.required:
raise MissingValue('Missing required root aggregate "{}"'.format(self.element_path))
return parsed_dict | python | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the root XML element as a dictionary."""
parsed_dict = {} # type: Dict
dict_element = _element_find_from_root(root, self.element_path)
if dict_element is not None:
parsed_dict = self.parse_at_element(dict_element, state)
elif self.required:
raise MissingValue('Missing required root aggregate "{}"'.format(self.element_path))
return parsed_dict | [
"def",
"parse_at_root",
"(",
"self",
",",
"root",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"parsed_dict",
"=",
"{",
"}",
"# type: Dict",
"dict_element",
"=",
"_element_find_from_root",
"(",
"root",
",",
"self",
".",
"element_path",
")",
"if",
"dict_element",
"is",
"not",
"None",
":",
"parsed_dict",
"=",
"self",
".",
"parse_at_element",
"(",
"dict_element",
",",
"state",
")",
"elif",
"self",
".",
"required",
":",
"raise",
"MissingValue",
"(",
"'Missing required root aggregate \"{}\"'",
".",
"format",
"(",
"self",
".",
"element_path",
")",
")",
"return",
"parsed_dict"
] | Parse the root XML element as a dictionary. | [
"Parse",
"the",
"root",
"XML",
"element",
"as",
"a",
"dictionary",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L961-L976 | train |
gatkin/declxml | declxml.py | _Dictionary.serialize | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value to a new element and return the element."""
if not value and self.required:
state.raise_error(
MissingValue, 'Missing required aggregate "{}"'.format(self.element_path)
)
start_element, end_element = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element | python | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value to a new element and return the element."""
if not value and self.required:
state.raise_error(
MissingValue, 'Missing required aggregate "{}"'.format(self.element_path)
)
start_element, end_element = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element | [
"def",
"serialize",
"(",
"self",
",",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> ET.Element",
"if",
"not",
"value",
"and",
"self",
".",
"required",
":",
"state",
".",
"raise_error",
"(",
"MissingValue",
",",
"'Missing required aggregate \"{}\"'",
".",
"format",
"(",
"self",
".",
"element_path",
")",
")",
"start_element",
",",
"end_element",
"=",
"_element_path_create_new",
"(",
"self",
".",
"element_path",
")",
"self",
".",
"_serialize",
"(",
"end_element",
",",
"value",
",",
"state",
")",
"return",
"start_element"
] | Serialize the value to a new element and return the element. | [
"Serialize",
"the",
"value",
"to",
"a",
"new",
"element",
"and",
"return",
"the",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L996-L1010 | train |
gatkin/declxml | declxml.py | _Dictionary._serialize | def _serialize(
self,
element, # type: ET.Element
value, # type: Dict
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the dictionary and append all serialized children to the element."""
for child in self._child_processors:
state.push_location(child.element_path)
child_value = value.get(child.alias)
child.serialize_on_parent(element, child_value, state)
state.pop_location() | python | def _serialize(
self,
element, # type: ET.Element
value, # type: Dict
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the dictionary and append all serialized children to the element."""
for child in self._child_processors:
state.push_location(child.element_path)
child_value = value.get(child.alias)
child.serialize_on_parent(element, child_value, state)
state.pop_location() | [
"def",
"_serialize",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"value",
",",
"# type: Dict",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> None",
"for",
"child",
"in",
"self",
".",
"_child_processors",
":",
"state",
".",
"push_location",
"(",
"child",
".",
"element_path",
")",
"child_value",
"=",
"value",
".",
"get",
"(",
"child",
".",
"alias",
")",
"child",
".",
"serialize_on_parent",
"(",
"element",
",",
"child_value",
",",
"state",
")",
"state",
".",
"pop_location",
"(",
")"
] | Serialize the dictionary and append all serialized children to the element. | [
"Serialize",
"the",
"dictionary",
"and",
"append",
"all",
"serialized",
"children",
"to",
"the",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1030-L1042 | train |
gatkin/declxml | declxml.py | _HookedAggregate.parse_at_element | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the given element."""
xml_value = self._processor.parse_at_element(element, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value) | python | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the given element."""
xml_value = self._processor.parse_at_element(element, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value) | [
"def",
"parse_at_element",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"xml_value",
"=",
"self",
".",
"_processor",
".",
"parse_at_element",
"(",
"element",
",",
"state",
")",
"return",
"_hooks_apply_after_parse",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"xml_value",
")"
] | Parse the given element. | [
"Parse",
"the",
"given",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1078-L1086 | train |
gatkin/declxml | declxml.py | _HookedAggregate.parse_at_root | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the given element as the root of the document."""
xml_value = self._processor.parse_at_root(root, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value) | python | def parse_at_root(
self,
root, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the given element as the root of the document."""
xml_value = self._processor.parse_at_root(root, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value) | [
"def",
"parse_at_root",
"(",
"self",
",",
"root",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"xml_value",
"=",
"self",
".",
"_processor",
".",
"parse_at_root",
"(",
"root",
",",
"state",
")",
"return",
"_hooks_apply_after_parse",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"xml_value",
")"
] | Parse the given element as the root of the document. | [
"Parse",
"the",
"given",
"element",
"as",
"the",
"root",
"of",
"the",
"document",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1088-L1096 | train |
gatkin/declxml | declxml.py | _HookedAggregate.parse_from_parent | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the element from the given parent element."""
xml_value = self._processor.parse_from_parent(parent, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value) | python | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the element from the given parent element."""
xml_value = self._processor.parse_from_parent(parent, state)
return _hooks_apply_after_parse(self._hooks, state, xml_value) | [
"def",
"parse_from_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"xml_value",
"=",
"self",
".",
"_processor",
".",
"parse_from_parent",
"(",
"parent",
",",
"state",
")",
"return",
"_hooks_apply_after_parse",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"xml_value",
")"
] | Parse the element from the given parent element. | [
"Parse",
"the",
"element",
"from",
"the",
"given",
"parent",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1098-L1106 | train |
gatkin/declxml | declxml.py | _HookedAggregate.serialize | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value and returns it."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
return self._processor.serialize(xml_value, state) | python | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value and returns it."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
return self._processor.serialize(xml_value, state) | [
"def",
"serialize",
"(",
"self",
",",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> ET.Element",
"xml_value",
"=",
"_hooks_apply_before_serialize",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"value",
")",
"return",
"self",
".",
"_processor",
".",
"serialize",
"(",
"xml_value",
",",
"state",
")"
] | Serialize the value and return it. | [
"Serialize",
"the",
"value",
"and",
"returns",
"it",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1108-L1116 | train |
gatkin/declxml | declxml.py | _HookedAggregate.serialize_on_parent | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value directory on the parent."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
self._processor.serialize_on_parent(parent, xml_value, state) | python | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value directory on the parent."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
self._processor.serialize_on_parent(parent, xml_value, state) | [
"def",
"serialize_on_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> None",
"xml_value",
"=",
"_hooks_apply_before_serialize",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"value",
")",
"self",
".",
"_processor",
".",
"serialize_on_parent",
"(",
"parent",
",",
"xml_value",
",",
"state",
")"
] | Serialize the value directly on the parent. | [
"Serialize",
"the",
"value",
"directory",
"on",
"the",
"parent",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1118-L1127 | train |
gatkin/declxml | declxml.py | _PrimitiveValue.parse_at_element | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value at the XML element."""
if self._attribute:
parsed_value = self._parse_attribute(element, self._attribute, state)
else:
parsed_value = self._parser_func(element.text, state)
return _hooks_apply_after_parse(self._hooks, state, parsed_value) | python | def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value at the XML element."""
if self._attribute:
parsed_value = self._parse_attribute(element, self._attribute, state)
else:
parsed_value = self._parser_func(element.text, state)
return _hooks_apply_after_parse(self._hooks, state, parsed_value) | [
"def",
"parse_at_element",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"if",
"self",
".",
"_attribute",
":",
"parsed_value",
"=",
"self",
".",
"_parse_attribute",
"(",
"element",
",",
"self",
".",
"_attribute",
",",
"state",
")",
"else",
":",
"parsed_value",
"=",
"self",
".",
"_parser_func",
"(",
"element",
".",
"text",
",",
"state",
")",
"return",
"_hooks_apply_after_parse",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"parsed_value",
")"
] | Parse the primitive value at the XML element. | [
"Parse",
"the",
"primitive",
"value",
"at",
"the",
"XML",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1202-L1214 | train |
gatkin/declxml | declxml.py | _PrimitiveValue.parse_from_parent | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value under the parent XML element."""
element = parent.find(self.element_path)
if element is None and self.required:
state.raise_error(
MissingValue, 'Missing required element "{}"'.format(self.element_path)
)
elif element is not None:
return self.parse_at_element(element, state)
return _hooks_apply_after_parse(self._hooks, state, self._default) | python | def parse_from_parent(
self,
parent, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value under the parent XML element."""
element = parent.find(self.element_path)
if element is None and self.required:
state.raise_error(
MissingValue, 'Missing required element "{}"'.format(self.element_path)
)
elif element is not None:
return self.parse_at_element(element, state)
return _hooks_apply_after_parse(self._hooks, state, self._default) | [
"def",
"parse_from_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"element",
"=",
"parent",
".",
"find",
"(",
"self",
".",
"element_path",
")",
"if",
"element",
"is",
"None",
"and",
"self",
".",
"required",
":",
"state",
".",
"raise_error",
"(",
"MissingValue",
",",
"'Missing required element \"{}\"'",
".",
"format",
"(",
"self",
".",
"element_path",
")",
")",
"elif",
"element",
"is",
"not",
"None",
":",
"return",
"self",
".",
"parse_at_element",
"(",
"element",
",",
"state",
")",
"return",
"_hooks_apply_after_parse",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"self",
".",
"_default",
")"
] | Parse the primitive value under the parent XML element. | [
"Parse",
"the",
"primitive",
"value",
"under",
"the",
"parent",
"XML",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1216-L1232 | train |
gatkin/declxml | declxml.py | _PrimitiveValue.serialize | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""
Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None.
"""
# For primitive values, this is only called when the value is part of an array,
# in which case we do not need to check for missing or omitted values.
start_element, end_element = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element | python | def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""
Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None.
"""
# For primitive values, this is only called when the value is part of an array,
# in which case we do not need to check for missing or omitted values.
start_element, end_element = _element_path_create_new(self.element_path)
self._serialize(end_element, value, state)
return start_element | [
"def",
"serialize",
"(",
"self",
",",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> ET.Element",
"# For primitive values, this is only called when the value is part of an array,",
"# in which case we do not need to check for missing or omitted values.",
"start_element",
",",
"end_element",
"=",
"_element_path_create_new",
"(",
"self",
".",
"element_path",
")",
"self",
".",
"_serialize",
"(",
"end_element",
",",
"value",
",",
"state",
")",
"return",
"start_element"
] | Serialize the value into a new element object and return the element.
If the omit_empty option was specified and the value is falsey, then this will return None. | [
"Serialize",
"the",
"value",
"into",
"a",
"new",
"element",
"object",
"and",
"return",
"the",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1234-L1249 | train |
gatkin/declxml | declxml.py | _PrimitiveValue.serialize_on_parent | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value and add it to the parent element."""
# Note that falsey values are not treated as missing, but they may be omitted.
if value is None and self.required:
state.raise_error(MissingValue, self._missing_value_message(parent))
if not value and self.omit_empty:
return # Do Nothing
element = _element_get_or_add_from_parent(parent, self.element_path)
self._serialize(element, value, state) | python | def serialize_on_parent(
self,
parent, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value and add it to the parent element."""
# Note that falsey values are not treated as missing, but they may be omitted.
if value is None and self.required:
state.raise_error(MissingValue, self._missing_value_message(parent))
if not value and self.omit_empty:
return # Do Nothing
element = _element_get_or_add_from_parent(parent, self.element_path)
self._serialize(element, value, state) | [
"def",
"serialize_on_parent",
"(",
"self",
",",
"parent",
",",
"# type: ET.Element",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> None",
"# Note that falsey values are not treated as missing, but they may be omitted.",
"if",
"value",
"is",
"None",
"and",
"self",
".",
"required",
":",
"state",
".",
"raise_error",
"(",
"MissingValue",
",",
"self",
".",
"_missing_value_message",
"(",
"parent",
")",
")",
"if",
"not",
"value",
"and",
"self",
".",
"omit_empty",
":",
"return",
"# Do Nothing",
"element",
"=",
"_element_get_or_add_from_parent",
"(",
"parent",
",",
"self",
".",
"element_path",
")",
"self",
".",
"_serialize",
"(",
"element",
",",
"value",
",",
"state",
")"
] | Serialize the value and add it to the parent element. | [
"Serialize",
"the",
"value",
"and",
"add",
"it",
"to",
"the",
"parent",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1251-L1267 | train |
gatkin/declxml | declxml.py | _PrimitiveValue._missing_value_message | def _missing_value_message(self, parent):
# type: (ET.Element) -> Text
"""Return the message to report that the value needed for serialization is missing."""
if self._attribute is None:
message = 'Missing required value for element "{}"'.format(self.element_path)
else:
if self.element_path == '.':
parent_name = parent.tag
else:
parent_name = self.element_path
message = 'Missing required value for attribute "{}" on element "{}"'.format(
self._attribute, parent_name)
return message | python | def _missing_value_message(self, parent):
# type: (ET.Element) -> Text
"""Return the message to report that the value needed for serialization is missing."""
if self._attribute is None:
message = 'Missing required value for element "{}"'.format(self.element_path)
else:
if self.element_path == '.':
parent_name = parent.tag
else:
parent_name = self.element_path
message = 'Missing required value for attribute "{}" on element "{}"'.format(
self._attribute, parent_name)
return message | [
"def",
"_missing_value_message",
"(",
"self",
",",
"parent",
")",
":",
"# type: (ET.Element) -> Text",
"if",
"self",
".",
"_attribute",
"is",
"None",
":",
"message",
"=",
"'Missing required value for element \"{}\"'",
".",
"format",
"(",
"self",
".",
"element_path",
")",
"else",
":",
"if",
"self",
".",
"element_path",
"==",
"'.'",
":",
"parent_name",
"=",
"parent",
".",
"tag",
"else",
":",
"parent_name",
"=",
"self",
".",
"element_path",
"message",
"=",
"'Missing required value for attribute \"{}\" on element \"{}\"'",
".",
"format",
"(",
"self",
".",
"_attribute",
",",
"parent_name",
")",
"return",
"message"
] | Return the message to report that the value needed for serialization is missing. | [
"Return",
"the",
"message",
"to",
"report",
"that",
"the",
"value",
"needed",
"for",
"serialization",
"is",
"missing",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1269-L1283 | train |
gatkin/declxml | declxml.py | _PrimitiveValue._parse_attribute | def _parse_attribute(
self,
element, # type: ET.Element
attribute, # type: Text
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value within the XML element's attribute."""
parsed_value = self._default
attribute_value = element.get(attribute, None)
if attribute_value is not None:
parsed_value = self._parser_func(attribute_value, state)
elif self.required:
state.raise_error(
MissingValue, 'Missing required attribute "{}" on element "{}"'.format(
self._attribute, element.tag
)
)
return parsed_value | python | def _parse_attribute(
self,
element, # type: ET.Element
attribute, # type: Text
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value within the XML element's attribute."""
parsed_value = self._default
attribute_value = element.get(attribute, None)
if attribute_value is not None:
parsed_value = self._parser_func(attribute_value, state)
elif self.required:
state.raise_error(
MissingValue, 'Missing required attribute "{}" on element "{}"'.format(
self._attribute, element.tag
)
)
return parsed_value | [
"def",
"_parse_attribute",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"attribute",
",",
"# type: Text",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> Any",
"parsed_value",
"=",
"self",
".",
"_default",
"attribute_value",
"=",
"element",
".",
"get",
"(",
"attribute",
",",
"None",
")",
"if",
"attribute_value",
"is",
"not",
"None",
":",
"parsed_value",
"=",
"self",
".",
"_parser_func",
"(",
"attribute_value",
",",
"state",
")",
"elif",
"self",
".",
"required",
":",
"state",
".",
"raise_error",
"(",
"MissingValue",
",",
"'Missing required attribute \"{}\" on element \"{}\"'",
".",
"format",
"(",
"self",
".",
"_attribute",
",",
"element",
".",
"tag",
")",
")",
"return",
"parsed_value"
] | Parse the primitive value within the XML element's attribute. | [
"Parse",
"the",
"primitive",
"value",
"within",
"the",
"XML",
"element",
"s",
"attribute",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1285-L1305 | train |
gatkin/declxml | declxml.py | _PrimitiveValue._serialize | def _serialize(
self,
element, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value to the element."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
# A value is only considered missing, and hence eligible to be replaced by its
# default only if it is None. Falsey values are not considered missing and are
# not replaced by the default.
if xml_value is None:
if self._default is None:
serialized_value = Text('')
else:
serialized_value = Text(self._default)
else:
serialized_value = Text(xml_value)
if self._attribute:
element.set(self._attribute, serialized_value)
else:
element.text = serialized_value | python | def _serialize(
self,
element, # type: ET.Element
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> None
"""Serialize the value to the element."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
# A value is only considered missing, and hence eligible to be replaced by its
# default only if it is None. Falsey values are not considered missing and are
# not replaced by the default.
if xml_value is None:
if self._default is None:
serialized_value = Text('')
else:
serialized_value = Text(self._default)
else:
serialized_value = Text(xml_value)
if self._attribute:
element.set(self._attribute, serialized_value)
else:
element.text = serialized_value | [
"def",
"_serialize",
"(",
"self",
",",
"element",
",",
"# type: ET.Element",
"value",
",",
"# type: Any",
"state",
"# type: _ProcessorState",
")",
":",
"# type: (...) -> None",
"xml_value",
"=",
"_hooks_apply_before_serialize",
"(",
"self",
".",
"_hooks",
",",
"state",
",",
"value",
")",
"# A value is only considered missing, and hence eligible to be replaced by its",
"# default only if it is None. Falsey values are not considered missing and are",
"# not replaced by the default.",
"if",
"xml_value",
"is",
"None",
":",
"if",
"self",
".",
"_default",
"is",
"None",
":",
"serialized_value",
"=",
"Text",
"(",
"''",
")",
"else",
":",
"serialized_value",
"=",
"Text",
"(",
"self",
".",
"_default",
")",
"else",
":",
"serialized_value",
"=",
"Text",
"(",
"xml_value",
")",
"if",
"self",
".",
"_attribute",
":",
"element",
".",
"set",
"(",
"self",
".",
"_attribute",
",",
"serialized_value",
")",
"else",
":",
"element",
".",
"text",
"=",
"serialized_value"
] | Serialize the value to the element. | [
"Serialize",
"the",
"value",
"to",
"the",
"element",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1307-L1331 | train |
gatkin/declxml | declxml.py | _ProcessorState.push_location | def push_location(
self,
element_path, # type: Text
array_index=None # type: Optional[int]
):
# type: (...) -> None
"""Push an item onto the state's stack of locations."""
location = ProcessorLocation(element_path=element_path, array_index=array_index)
self._locations.append(location) | python | def push_location(
self,
element_path, # type: Text
array_index=None # type: Optional[int]
):
# type: (...) -> None
"""Push an item onto the state's stack of locations."""
location = ProcessorLocation(element_path=element_path, array_index=array_index)
self._locations.append(location) | [
"def",
"push_location",
"(",
"self",
",",
"element_path",
",",
"# type: Text",
"array_index",
"=",
"None",
"# type: Optional[int]",
")",
":",
"# type: (...) -> None",
"location",
"=",
"ProcessorLocation",
"(",
"element_path",
"=",
"element_path",
",",
"array_index",
"=",
"array_index",
")",
"self",
".",
"_locations",
".",
"append",
"(",
"location",
")"
] | Push an item onto the state's stack of locations. | [
"Push",
"an",
"item",
"onto",
"the",
"state",
"s",
"stack",
"of",
"locations",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1351-L1359 | train |
gatkin/declxml | declxml.py | _ProcessorState.raise_error | def raise_error(
self,
exception_type, # type: Type[Exception]
message # type: Text
):
# type: (...) -> NoReturn
"""Raise an exception with the current parser state information and error message."""
error_message = '{} at {}'.format(message, repr(self))
raise exception_type(error_message) | python | def raise_error(
self,
exception_type, # type: Type[Exception]
message # type: Text
):
# type: (...) -> NoReturn
"""Raise an exception with the current parser state information and error message."""
error_message = '{} at {}'.format(message, repr(self))
raise exception_type(error_message) | [
"def",
"raise_error",
"(",
"self",
",",
"exception_type",
",",
"# type: Type[Exception]",
"message",
"# type: Text",
")",
":",
"# type: (...) -> NoReturn",
"error_message",
"=",
"'{} at {}'",
".",
"format",
"(",
"message",
",",
"repr",
"(",
"self",
")",
")",
"raise",
"exception_type",
"(",
"error_message",
")"
] | Raise an exception with the current parser state information and error message. | [
"Raise",
"an",
"exception",
"with",
"the",
"current",
"parser",
"state",
"information",
"and",
"error",
"message",
"."
] | 3a2324b43aee943e82a04587fbb68932c6f392ba | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1361-L1369 | train |
ponty/eagexp | eagexp/partlist.py | export_partlist_to_file | def export_partlist_to_file(input, output, timeout=20, showgui=False):
'''
call eagle and export sch or brd to partlist text file
:param input: .sch or .brd file name
:param output: text file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
'''
input = norm_path(input)
output = norm_path(output)
commands = export_command(output=output, output_type='partlist')
command_eagle(
input=input, timeout=timeout, commands=commands, showgui=showgui) | python | def export_partlist_to_file(input, output, timeout=20, showgui=False):
'''
call eagle and export sch or brd to partlist text file
:param input: .sch or .brd file name
:param output: text file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
'''
input = norm_path(input)
output = norm_path(output)
commands = export_command(output=output, output_type='partlist')
command_eagle(
input=input, timeout=timeout, commands=commands, showgui=showgui) | [
"def",
"export_partlist_to_file",
"(",
"input",
",",
"output",
",",
"timeout",
"=",
"20",
",",
"showgui",
"=",
"False",
")",
":",
"input",
"=",
"norm_path",
"(",
"input",
")",
"output",
"=",
"norm_path",
"(",
"output",
")",
"commands",
"=",
"export_command",
"(",
"output",
"=",
"output",
",",
"output_type",
"=",
"'partlist'",
")",
"command_eagle",
"(",
"input",
"=",
"input",
",",
"timeout",
"=",
"timeout",
",",
"commands",
"=",
"commands",
",",
"showgui",
"=",
"showgui",
")"
] | call eagle and export sch or brd to partlist text file
:param input: .sch or .brd file name
:param output: text file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None | [
"call",
"eagle",
"and",
"export",
"sch",
"or",
"brd",
"to",
"partlist",
"text",
"file"
] | 1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2 | https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/partlist.py#L16-L31 | train |
ponty/eagexp | eagexp/partlist.py | parse_partlist | def parse_partlist(str):
'''parse partlist text delivered by eagle.
header is converted to lowercase
:param str: input string
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
lines = str.strip().splitlines()
lines = filter(len, lines)
hind = header_index(lines)
if hind is None:
log.debug('empty partlist found')
return ([], [])
header_line = lines[hind]
header = header_line.split(' ')
header = filter(len, header)
positions = [header_line.index(x) for x in header]
header = [x.strip().split()[0].lower() for x in header]
data_lines = lines[hind + 1:]
def parse_data_line(line):
y = [(h, line[pos1:pos2].strip()) for h, pos1, pos2 in zip(
header, positions, positions[1:] + [1000])]
return dict(y)
data = [parse_data_line(x) for x in data_lines]
return (header, data) | python | def parse_partlist(str):
'''parse partlist text delivered by eagle.
header is converted to lowercase
:param str: input string
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
lines = str.strip().splitlines()
lines = filter(len, lines)
hind = header_index(lines)
if hind is None:
log.debug('empty partlist found')
return ([], [])
header_line = lines[hind]
header = header_line.split(' ')
header = filter(len, header)
positions = [header_line.index(x) for x in header]
header = [x.strip().split()[0].lower() for x in header]
data_lines = lines[hind + 1:]
def parse_data_line(line):
y = [(h, line[pos1:pos2].strip()) for h, pos1, pos2 in zip(
header, positions, positions[1:] + [1000])]
return dict(y)
data = [parse_data_line(x) for x in data_lines]
return (header, data) | [
"def",
"parse_partlist",
"(",
"str",
")",
":",
"lines",
"=",
"str",
".",
"strip",
"(",
")",
".",
"splitlines",
"(",
")",
"lines",
"=",
"filter",
"(",
"len",
",",
"lines",
")",
"hind",
"=",
"header_index",
"(",
"lines",
")",
"if",
"hind",
"is",
"None",
":",
"log",
".",
"debug",
"(",
"'empty partlist found'",
")",
"return",
"(",
"[",
"]",
",",
"[",
"]",
")",
"header_line",
"=",
"lines",
"[",
"hind",
"]",
"header",
"=",
"header_line",
".",
"split",
"(",
"' '",
")",
"header",
"=",
"filter",
"(",
"len",
",",
"header",
")",
"positions",
"=",
"[",
"header_line",
".",
"index",
"(",
"x",
")",
"for",
"x",
"in",
"header",
"]",
"header",
"=",
"[",
"x",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"header",
"]",
"data_lines",
"=",
"lines",
"[",
"hind",
"+",
"1",
":",
"]",
"def",
"parse_data_line",
"(",
"line",
")",
":",
"y",
"=",
"[",
"(",
"h",
",",
"line",
"[",
"pos1",
":",
"pos2",
"]",
".",
"strip",
"(",
")",
")",
"for",
"h",
",",
"pos1",
",",
"pos2",
"in",
"zip",
"(",
"header",
",",
"positions",
",",
"positions",
"[",
"1",
":",
"]",
"+",
"[",
"1000",
"]",
")",
"]",
"return",
"dict",
"(",
"y",
")",
"data",
"=",
"[",
"parse_data_line",
"(",
"x",
")",
"for",
"x",
"in",
"data_lines",
"]",
"return",
"(",
"header",
",",
"data",
")"
] | parse partlist text delivered by eagle.
header is converted to lowercase
:param str: input string
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..]) | [
"parse",
"partlist",
"text",
"delivered",
"by",
"eagle",
"."
] | 1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2 | https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/partlist.py#L40-L69 | train |
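The parse_partlist function above treats Eagle's partlist export as a fixed-width table: it locates the header row, records the starting column of each header word, slices every data row at those offsets, and keys the result by the lowercased column names. A standalone Python 3 re-illustration of that slicing idea follows; the sample partlist text, file path, and part names are fabricated, and this is not the library function itself (the module above is written for Python 2, where filter() returns a list):

SAMPLE = """\
Partlist exported from /home/user/demo.sch

Part     Value  Device  Package  Library  Sheet

C1       100n   C-EU    C0805    rcl      1
R1       10k    R-EU    R0805    rcl      1
"""

def parse_fixed_width(text):
    lines = [line for line in text.splitlines() if line.strip()]
    # Find the header row by looking for a few known column names.
    header_line = next(line for line in lines
                       if {'part', 'value', 'device'} <= set(line.lower().split()))
    names = header_line.split()
    positions = [header_line.index(name) for name in names]
    names = [name.lower() for name in names]
    data = []
    for row in lines[lines.index(header_line) + 1:]:
        columns = zip(names, positions, positions[1:] + [None])
        data.append({name: row[start:end].strip() for name, start, end in columns})
    return names, data

header, data = parse_fixed_width(SAMPLE)
print(header)   # ['part', 'value', 'device', 'package', 'library', 'sheet']
print(data[0])  # {'part': 'C1', 'value': '100n', 'device': 'C-EU', ...}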
ponty/eagexp | eagexp/partlist.py | raw_partlist | def raw_partlist(input, timeout=20, showgui=False):
'''export partlist by eagle, then return it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: string
'''
output = tempfile.NamedTemporaryFile(
prefix='eagexp_', suffix='.partlist', delete=0).name
export_partlist_to_file(
input=input, output=output, timeout=timeout, showgui=showgui)
s = Path(output).text(encoding='latin1')
os.remove(output)
return s | python | def raw_partlist(input, timeout=20, showgui=False):
'''export partlist by eagle, then return it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: string
'''
output = tempfile.NamedTemporaryFile(
prefix='eagexp_', suffix='.partlist', delete=0).name
export_partlist_to_file(
input=input, output=output, timeout=timeout, showgui=showgui)
s = Path(output).text(encoding='latin1')
os.remove(output)
return s | [
"def",
"raw_partlist",
"(",
"input",
",",
"timeout",
"=",
"20",
",",
"showgui",
"=",
"False",
")",
":",
"output",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"'eagexp_'",
",",
"suffix",
"=",
"'.partlist'",
",",
"delete",
"=",
"0",
")",
".",
"name",
"export_partlist_to_file",
"(",
"input",
"=",
"input",
",",
"output",
"=",
"output",
",",
"timeout",
"=",
"timeout",
",",
"showgui",
"=",
"showgui",
")",
"s",
"=",
"Path",
"(",
"output",
")",
".",
"text",
"(",
"encoding",
"=",
"'latin1'",
")",
"os",
".",
"remove",
"(",
"output",
")",
"return",
"s"
] | export partlist by eagle, then return it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: string | [
"export",
"partlist",
"by",
"eagle",
"then",
"return",
"it"
] | 1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2 | https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/partlist.py#L72-L87 | train |
ponty/eagexp | eagexp/partlist.py | structured_partlist | def structured_partlist(input, timeout=20, showgui=False):
'''export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
s = raw_partlist(input=input, timeout=timeout, showgui=showgui)
return parse_partlist(s) | python | def structured_partlist(input, timeout=20, showgui=False):
'''export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..])
'''
s = raw_partlist(input=input, timeout=timeout, showgui=showgui)
return parse_partlist(s) | [
"def",
"structured_partlist",
"(",
"input",
",",
"timeout",
"=",
"20",
",",
"showgui",
"=",
"False",
")",
":",
"s",
"=",
"raw_partlist",
"(",
"input",
"=",
"input",
",",
"timeout",
"=",
"timeout",
",",
"showgui",
"=",
"showgui",
")",
"return",
"parse_partlist",
"(",
"s",
")"
] | export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..]) | [
"export",
"partlist",
"by",
"eagle",
"then",
"parse",
"it"
] | 1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2 | https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/partlist.py#L90-L100 | train |
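
An end-to-end sketch for structured_partlist(). It drives a real EAGLE installation through export_partlist_to_file(), so it only works where EAGLE and eagexp are installed; the schematic file name below is hypothetical and the import path is inferred from the record.

from eagexp.partlist import structured_partlist  # inferred import path

# 'counter.sch' is a hypothetical schematic; EAGLE must be installed.
header, parts = structured_partlist('counter.sch', timeout=30)

capacitors = [p for p in parts if p.get('part', '').startswith('C')]
for p in capacitors:
    print(p['part'], p['value'])
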
ponty/eagexp | eagexp/partlist.py | print_partlist | def print_partlist(input, timeout=20, showgui=False):
'''print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
'''
print raw_partlist(input=input, timeout=timeout, showgui=showgui) | python | def print_partlist(input, timeout=20, showgui=False):
'''print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
'''
print raw_partlist(input=input, timeout=timeout, showgui=showgui) | [
"def",
"print_partlist",
"(",
"input",
",",
"timeout",
"=",
"20",
",",
"showgui",
"=",
"False",
")",
":",
"print",
"raw_partlist",
"(",
"input",
"=",
"input",
",",
"timeout",
"=",
"timeout",
",",
"showgui",
"=",
"showgui",
")"
] | print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None | [
"print",
"partlist",
"text",
"delivered",
"by",
"eagle"
] | 1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2 | https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/partlist.py#L104-L112 | train |
xflr6/bitsets | bitsets/visualize.py | bitset | def bitset(bs, member_label=None, filename=None, directory=None, format=None,
render=False, view=False):
"""Graphviz source for the Hasse diagram of the domains' Boolean algebra."""
if member_label is None:
member_label = MEMBER_LABEL
if filename is None:
kind = 'members' if member_label else 'bits'
filename = FILENAME % (bs.__name__, kind)
dot = graphviz.Digraph(
name=bs.__name__,
comment=repr(bs),
filename=filename,
directory=directory,
format=format,
edge_attr={'dir': 'none'}
)
node_name = NAME_GETTERS[0]
if callable(member_label):
node_label = member_label
else:
node_label = LABEL_GETTERS[member_label]
for i in range(bs.supremum + 1):
b = bs.fromint(i)
name = node_name(b)
dot.node(name, node_label(b))
dot.edges((name, node_name(b & ~a)) for a in b.atoms(reverse=True))
if render or view:
dot.render(view=view) # pragma: no cover
return dot | python | def bitset(bs, member_label=None, filename=None, directory=None, format=None,
render=False, view=False):
"""Graphviz source for the Hasse diagram of the domains' Boolean algebra."""
if member_label is None:
member_label = MEMBER_LABEL
if filename is None:
kind = 'members' if member_label else 'bits'
filename = FILENAME % (bs.__name__, kind)
dot = graphviz.Digraph(
name=bs.__name__,
comment=repr(bs),
filename=filename,
directory=directory,
format=format,
edge_attr={'dir': 'none'}
)
node_name = NAME_GETTERS[0]
if callable(member_label):
node_label = member_label
else:
node_label = LABEL_GETTERS[member_label]
for i in range(bs.supremum + 1):
b = bs.fromint(i)
name = node_name(b)
dot.node(name, node_label(b))
dot.edges((name, node_name(b & ~a)) for a in b.atoms(reverse=True))
if render or view:
dot.render(view=view) # pragma: no cover
return dot | [
"def",
"bitset",
"(",
"bs",
",",
"member_label",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"directory",
"=",
"None",
",",
"format",
"=",
"None",
",",
"render",
"=",
"False",
",",
"view",
"=",
"False",
")",
":",
"if",
"member_label",
"is",
"None",
":",
"member_label",
"=",
"MEMBER_LABEL",
"if",
"filename",
"is",
"None",
":",
"kind",
"=",
"'members'",
"if",
"member_label",
"else",
"'bits'",
"filename",
"=",
"FILENAME",
"%",
"(",
"bs",
".",
"__name__",
",",
"kind",
")",
"dot",
"=",
"graphviz",
".",
"Digraph",
"(",
"name",
"=",
"bs",
".",
"__name__",
",",
"comment",
"=",
"repr",
"(",
"bs",
")",
",",
"filename",
"=",
"filename",
",",
"directory",
"=",
"directory",
",",
"format",
"=",
"format",
",",
"edge_attr",
"=",
"{",
"'dir'",
":",
"'none'",
"}",
")",
"node_name",
"=",
"NAME_GETTERS",
"[",
"0",
"]",
"if",
"callable",
"(",
"member_label",
")",
":",
"node_label",
"=",
"member_label",
"else",
":",
"node_label",
"=",
"LABEL_GETTERS",
"[",
"member_label",
"]",
"for",
"i",
"in",
"range",
"(",
"bs",
".",
"supremum",
"+",
"1",
")",
":",
"b",
"=",
"bs",
".",
"fromint",
"(",
"i",
")",
"name",
"=",
"node_name",
"(",
"b",
")",
"dot",
".",
"node",
"(",
"name",
",",
"node_label",
"(",
"b",
")",
")",
"dot",
".",
"edges",
"(",
"(",
"name",
",",
"node_name",
"(",
"b",
"&",
"~",
"a",
")",
")",
"for",
"a",
"in",
"b",
".",
"atoms",
"(",
"reverse",
"=",
"True",
")",
")",
"if",
"render",
"or",
"view",
":",
"dot",
".",
"render",
"(",
"view",
"=",
"view",
")",
"# pragma: no cover",
"return",
"dot"
] | Graphviz source for the Hasse diagram of the domains' Boolean algebra. | [
"Graphviz",
"source",
"for",
"the",
"Hasse",
"diagram",
"of",
"the",
"domains",
"Boolean",
"algebra",
"."
] | ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf | https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/visualize.py#L34-L68 | train |
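
A short sketch of driving visualize.bitset() from the record above. It assumes the bitsets and graphviz packages are installed and uses the package's top-level bitset() factory to build a small three-member domain first, so the diagram covers all 2**3 subsets.

from bitsets import bitset as make_domain
from bitsets import visualize  # module documented in the record above

Pets = make_domain('Pets', ('dog', 'cat', 'fish'))

# Build the Graphviz source for the Hasse diagram of the domain's Boolean algebra.
dot = visualize.bitset(Pets)
print(dot.source)  # DOT text; call dot.render(view=True) if Graphviz is installed
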
kyzima-spb/flask-pony | flask_pony/orm.py | FormBuilder._get_field_method | def _get_field_method(self, tp):
"""Returns a reference to the form element's constructor method."""
method = self.field_constructor.get(tp)
if method and hasattr(self, method.__name__):
return getattr(self, method.__name__)
return method | python | def _get_field_method(self, tp):
"""Returns a reference to the form element's constructor method."""
method = self.field_constructor.get(tp)
if method and hasattr(self, method.__name__):
return getattr(self, method.__name__)
return method | [
"def",
"_get_field_method",
"(",
"self",
",",
"tp",
")",
":",
"method",
"=",
"self",
".",
"field_constructor",
".",
"get",
"(",
"tp",
")",
"if",
"method",
"and",
"hasattr",
"(",
"self",
",",
"method",
".",
"__name__",
")",
":",
"return",
"getattr",
"(",
"self",
",",
"method",
".",
"__name__",
")",
"return",
"method"
] | Returns a reference to the form element's constructor method. | [
"Returns",
"a",
"reference",
"to",
"the",
"form",
"element",
"s",
"constructor",
"method",
"."
] | 6cf28d70b7ebf415d58fa138fcc70b8dd57432c7 | https://github.com/kyzima-spb/flask-pony/blob/6cf28d70b7ebf415d58fa138fcc70b8dd57432c7/flask_pony/orm.py#L66-L71 | train |
kyzima-spb/flask-pony | flask_pony/orm.py | FormBuilder._create_plain_field | def _create_plain_field(self, attr, options):
"""Creates the form element."""
method = self._get_field_method(attr.py_type) or self._create_other_field
klass, options = method(attr, options)
if attr.is_unique:
options['validators'].append(validators.UniqueEntityValidator(attr.entity))
return klass, options | python | def _create_plain_field(self, attr, options):
"""Creates the form element."""
method = self._get_field_method(attr.py_type) or self._create_other_field
klass, options = method(attr, options)
if attr.is_unique:
options['validators'].append(validators.UniqueEntityValidator(attr.entity))
return klass, options | [
"def",
"_create_plain_field",
"(",
"self",
",",
"attr",
",",
"options",
")",
":",
"method",
"=",
"self",
".",
"_get_field_method",
"(",
"attr",
".",
"py_type",
")",
"or",
"self",
".",
"_create_other_field",
"klass",
",",
"options",
"=",
"method",
"(",
"attr",
",",
"options",
")",
"if",
"attr",
".",
"is_unique",
":",
"options",
"[",
"'validators'",
"]",
".",
"append",
"(",
"validators",
".",
"UniqueEntityValidator",
"(",
"attr",
".",
"entity",
")",
")",
"return",
"klass",
",",
"options"
] | Creates the form element. | [
"Creates",
"the",
"form",
"element",
"."
] | 6cf28d70b7ebf415d58fa138fcc70b8dd57432c7 | https://github.com/kyzima-spb/flask-pony/blob/6cf28d70b7ebf415d58fa138fcc70b8dd57432c7/flask_pony/orm.py#L77-L85 | train |
kyzima-spb/flask-pony | flask_pony/orm.py | FormBuilder._create_relational_field | def _create_relational_field(self, attr, options):
"""Creates the form element for working with entity relationships."""
options['entity_class'] = attr.py_type
options['allow_empty'] = not attr.is_required
return EntityField, options | python | def _create_relational_field(self, attr, options):
"""Creates the form element for working with entity relationships."""
options['entity_class'] = attr.py_type
options['allow_empty'] = not attr.is_required
return EntityField, options | [
"def",
"_create_relational_field",
"(",
"self",
",",
"attr",
",",
"options",
")",
":",
"options",
"[",
"'entity_class'",
"]",
"=",
"attr",
".",
"py_type",
"options",
"[",
"'allow_empty'",
"]",
"=",
"not",
"attr",
".",
"is_required",
"return",
"EntityField",
",",
"options"
] | Creates the form element for working with entity relationships. | [
"Creates",
"the",
"form",
"element",
"for",
"working",
"with",
"entity",
"relationships",
"."
] | 6cf28d70b7ebf415d58fa138fcc70b8dd57432c7 | https://github.com/kyzima-spb/flask-pony/blob/6cf28d70b7ebf415d58fa138fcc70b8dd57432c7/flask_pony/orm.py#L92-L96 | train |
kyzima-spb/flask-pony | flask_pony/orm.py | FormBuilder.add | def add(self, attr, field_class=None, **options):
"""Adds an element to the form based on the entity attribute."""
# print(attr.name, attr.py_type, getattr(attr, 'set', None))
# print(dir(attr))
# print(attr, attr.is_relation, attr.is_collection)
# print(attr.is_pk, attr.auto, attr.is_unique, attr.is_part_of_unique_index, attr.composite_keys)
def add(klass, options):
if klass:
self._fields[attr.name] = field_class(**options) if field_class else klass(**options)
return self
kwargs = {
'label': attr.name,
'default': attr.default,
'validators': [],
}
kwargs.update(options)
if attr.is_pk:
return add(*self._create_pk_field(attr, kwargs))
if attr.is_collection:
return add(*self._create_collection_field(attr, kwargs))
validator = wtf_validators.InputRequired() if attr.is_required and not attr.is_pk else wtf_validators.Optional()
kwargs['validators'].insert(0, validator)
if attr.is_relation:
return add(*self._create_relational_field(attr, kwargs))
return add(*self._create_plain_field(attr, kwargs)) | python | def add(self, attr, field_class=None, **options):
"""Adds an element to the form based on the entity attribute."""
# print(attr.name, attr.py_type, getattr(attr, 'set', None))
# print(dir(attr))
# print(attr, attr.is_relation, attr.is_collection)
# print(attr.is_pk, attr.auto, attr.is_unique, attr.is_part_of_unique_index, attr.composite_keys)
def add(klass, options):
if klass:
self._fields[attr.name] = field_class(**options) if field_class else klass(**options)
return self
kwargs = {
'label': attr.name,
'default': attr.default,
'validators': [],
}
kwargs.update(options)
if attr.is_pk:
return add(*self._create_pk_field(attr, kwargs))
if attr.is_collection:
return add(*self._create_collection_field(attr, kwargs))
validator = wtf_validators.InputRequired() if attr.is_required and not attr.is_pk else wtf_validators.Optional()
kwargs['validators'].insert(0, validator)
if attr.is_relation:
return add(*self._create_relational_field(attr, kwargs))
return add(*self._create_plain_field(attr, kwargs)) | [
"def",
"add",
"(",
"self",
",",
"attr",
",",
"field_class",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"# print(attr.name, attr.py_type, getattr(attr, 'set', None))",
"# print(dir(attr))",
"# print(attr, attr.is_relation, attr.is_collection)",
"# print(attr.is_pk, attr.auto, attr.is_unique, attr.is_part_of_unique_index, attr.composite_keys)",
"def",
"add",
"(",
"klass",
",",
"options",
")",
":",
"if",
"klass",
":",
"self",
".",
"_fields",
"[",
"attr",
".",
"name",
"]",
"=",
"field_class",
"(",
"*",
"*",
"options",
")",
"if",
"field_class",
"else",
"klass",
"(",
"*",
"*",
"options",
")",
"return",
"self",
"kwargs",
"=",
"{",
"'label'",
":",
"attr",
".",
"name",
",",
"'default'",
":",
"attr",
".",
"default",
",",
"'validators'",
":",
"[",
"]",
",",
"}",
"kwargs",
".",
"update",
"(",
"options",
")",
"if",
"attr",
".",
"is_pk",
":",
"return",
"add",
"(",
"*",
"self",
".",
"_create_pk_field",
"(",
"attr",
",",
"kwargs",
")",
")",
"if",
"attr",
".",
"is_collection",
":",
"return",
"add",
"(",
"*",
"self",
".",
"_create_collection_field",
"(",
"attr",
",",
"kwargs",
")",
")",
"validator",
"=",
"wtf_validators",
".",
"InputRequired",
"(",
")",
"if",
"attr",
".",
"is_required",
"and",
"not",
"attr",
".",
"is_pk",
"else",
"wtf_validators",
".",
"Optional",
"(",
")",
"kwargs",
"[",
"'validators'",
"]",
".",
"insert",
"(",
"0",
",",
"validator",
")",
"if",
"attr",
".",
"is_relation",
":",
"return",
"add",
"(",
"*",
"self",
".",
"_create_relational_field",
"(",
"attr",
",",
"kwargs",
")",
")",
"return",
"add",
"(",
"*",
"self",
".",
"_create_plain_field",
"(",
"attr",
",",
"kwargs",
")",
")"
] | Adds an element to the form based on the entity attribute. | [
"Adds",
"an",
"element",
"to",
"the",
"form",
"based",
"on",
"the",
"entity",
"attribute",
"."
] | 6cf28d70b7ebf415d58fa138fcc70b8dd57432c7 | https://github.com/kyzima-spb/flask-pony/blob/6cf28d70b7ebf415d58fa138fcc70b8dd57432c7/flask_pony/orm.py#L102-L133 | train |
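
The dispatch in FormBuilder.add() boils down to mapping a Pony attribute onto a WTForms field class plus validators. A self-contained sketch of that idea using plain WTForms only — it does not use flask_pony's actual classes, and the attribute object is faked with a namedtuple:

from collections import namedtuple
import wtforms
from wtforms import validators

Attr = namedtuple('Attr', 'name py_type is_required default')

FIELD_BY_TYPE = {
    int: wtforms.IntegerField,
    str: wtforms.StringField,
    bool: wtforms.BooleanField,
}

def build_field(attr):
    # Required attributes get InputRequired, optional ones get Optional,
    # mirroring the validator choice made in FormBuilder.add().
    checks = [validators.InputRequired() if attr.is_required else validators.Optional()]
    klass = FIELD_BY_TYPE.get(attr.py_type, wtforms.StringField)
    return klass(label=attr.name, default=attr.default, validators=checks)

name_field = build_field(Attr('name', str, True, None))  # unbound StringField
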
kyzima-spb/flask-pony | flask_pony/orm.py | FormBuilder.add_button | def add_button(self, name, button_class=wtf_fields.SubmitField, **options):
"""Adds a button to the form."""
self._buttons[name] = button_class(**options) | python | def add_button(self, name, button_class=wtf_fields.SubmitField, **options):
"""Adds a button to the form."""
self._buttons[name] = button_class(**options) | [
"def",
"add_button",
"(",
"self",
",",
"name",
",",
"button_class",
"=",
"wtf_fields",
".",
"SubmitField",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"_buttons",
"[",
"name",
"]",
"=",
"button_class",
"(",
"*",
"*",
"options",
")"
] | Adds a button to the form. | [
"Adds",
"a",
"button",
"to",
"the",
"form",
"."
] | 6cf28d70b7ebf415d58fa138fcc70b8dd57432c7 | https://github.com/kyzima-spb/flask-pony/blob/6cf28d70b7ebf415d58fa138fcc70b8dd57432c7/flask_pony/orm.py#L135-L137 | train |
kyzima-spb/flask-pony | flask_pony/orm.py | FormBuilder.field_uuid | def field_uuid(self, attr, options):
"""Creates a form element for the UUID type."""
options['validators'].append(validators.UUIDValidator(attr.entity))
return wtf_fields.StringField, options | python | def field_uuid(self, attr, options):
"""Creates a form element for the UUID type."""
options['validators'].append(validators.UUIDValidator(attr.entity))
return wtf_fields.StringField, options | [
"def",
"field_uuid",
"(",
"self",
",",
"attr",
",",
"options",
")",
":",
"options",
"[",
"'validators'",
"]",
".",
"append",
"(",
"validators",
".",
"UUIDValidator",
"(",
"attr",
".",
"entity",
")",
")",
"return",
"wtf_fields",
".",
"StringField",
",",
"options"
] | Creates a form element for the UUID type. | [
"Creates",
"a",
"form",
"element",
"for",
"the",
"UUID",
"type",
"."
] | 6cf28d70b7ebf415d58fa138fcc70b8dd57432c7 | https://github.com/kyzima-spb/flask-pony/blob/6cf28d70b7ebf415d58fa138fcc70b8dd57432c7/flask_pony/orm.py#L198-L201 | train |
PaulMcMillan/tasa | tasa/cli.py | runm | def runm():
""" This is super minimal and pretty hacky, but it counts as a first pass.
"""
signal.signal(signal.SIGINT, signal_handler)
count = int(sys.argv.pop(1))
processes = [Process(target=run, args=()) for x in range(count)]
try:
for p in processes:
p.start()
except KeyError:
# Not sure why we see a keyerror here. Weird.
pass
finally:
for p in processes:
p.join() | python | def runm():
""" This is super minimal and pretty hacky, but it counts as a first pass.
"""
signal.signal(signal.SIGINT, signal_handler)
count = int(sys.argv.pop(1))
processes = [Process(target=run, args=()) for x in range(count)]
try:
for p in processes:
p.start()
except KeyError:
# Not sure why we see a keyerror here. Weird.
pass
finally:
for p in processes:
p.join() | [
"def",
"runm",
"(",
")",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal_handler",
")",
"count",
"=",
"int",
"(",
"sys",
".",
"argv",
".",
"pop",
"(",
"1",
")",
")",
"processes",
"=",
"[",
"Process",
"(",
"target",
"=",
"run",
",",
"args",
"=",
"(",
")",
")",
"for",
"x",
"in",
"range",
"(",
"count",
")",
"]",
"try",
":",
"for",
"p",
"in",
"processes",
":",
"p",
".",
"start",
"(",
")",
"except",
"KeyError",
":",
"# Not sure why we see a keyerror here. Weird.",
"pass",
"finally",
":",
"for",
"p",
"in",
"processes",
":",
"p",
".",
"join",
"(",
")"
] | This is super minimal and pretty hacky, but it counts as a first pass. | [
"This",
"is",
"super",
"minimal",
"and",
"pretty",
"hacky",
"but",
"it",
"counts",
"as",
"a",
"first",
"pass",
"."
] | fd548d97fd08e61c0e71296b08ffedb7d949e06a | https://github.com/PaulMcMillan/tasa/blob/fd548d97fd08e61c0e71296b08ffedb7d949e06a/tasa/cli.py#L79-L93 | train |
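
runm() is essentially "start N copies of run() and join them". A self-contained sketch of the same pattern, with tasa's run() replaced by a dummy worker and the signal handling left out:

from multiprocessing import Process

def worker():  # stand-in for tasa's run()
    print('worker started')

def run_many(count):
    procs = [Process(target=worker) for _ in range(count)]
    try:
        for p in procs:
            p.start()
    finally:
        for p in procs:
            p.join()

if __name__ == '__main__':
    run_many(4)
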
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.identify | def identify(self, request):
"""Establish what identity this user claims to have from request.
:param request: Request to extract identity information from.
:type request: :class:`morepath.Request`.
:returns: :class:`morepath.Identity` instance or
:attr:`morepath.NO_IDENTITY` if identity cannot
be established.
"""
token = self.get_jwt(request)
if token is None:
return NO_IDENTITY
try:
claims_set = self.decode_jwt(token)
except (DecodeError, ExpiredSignatureError):
return NO_IDENTITY
userid = self.get_userid(claims_set)
if userid is None:
return NO_IDENTITY
extra_claims = self.get_extra_claims(claims_set)
if extra_claims is not None:
return Identity(userid=userid, **extra_claims)
else:
return Identity(userid=userid) | python | def identify(self, request):
"""Establish what identity this user claims to have from request.
:param request: Request to extract identity information from.
:type request: :class:`morepath.Request`.
:returns: :class:`morepath.Identity` instance or
:attr:`morepath.NO_IDENTITY` if identity cannot
be established.
"""
token = self.get_jwt(request)
if token is None:
return NO_IDENTITY
try:
claims_set = self.decode_jwt(token)
except (DecodeError, ExpiredSignatureError):
return NO_IDENTITY
userid = self.get_userid(claims_set)
if userid is None:
return NO_IDENTITY
extra_claims = self.get_extra_claims(claims_set)
if extra_claims is not None:
return Identity(userid=userid, **extra_claims)
else:
return Identity(userid=userid) | [
"def",
"identify",
"(",
"self",
",",
"request",
")",
":",
"token",
"=",
"self",
".",
"get_jwt",
"(",
"request",
")",
"if",
"token",
"is",
"None",
":",
"return",
"NO_IDENTITY",
"try",
":",
"claims_set",
"=",
"self",
".",
"decode_jwt",
"(",
"token",
")",
"except",
"(",
"DecodeError",
",",
"ExpiredSignatureError",
")",
":",
"return",
"NO_IDENTITY",
"userid",
"=",
"self",
".",
"get_userid",
"(",
"claims_set",
")",
"if",
"userid",
"is",
"None",
":",
"return",
"NO_IDENTITY",
"extra_claims",
"=",
"self",
".",
"get_extra_claims",
"(",
"claims_set",
")",
"if",
"extra_claims",
"is",
"not",
"None",
":",
"return",
"Identity",
"(",
"userid",
"=",
"userid",
",",
"*",
"*",
"extra_claims",
")",
"else",
":",
"return",
"Identity",
"(",
"userid",
"=",
"userid",
")"
] | Establish what identity this user claims to have from request.
:param request: Request to extract identity information from.
:type request: :class:`morepath.Request`.
:returns: :class:`morepath.Identity` instance or
:attr:`morepath.NO_IDENTITY` if identity cannot
be established. | [
"Establish",
"what",
"identity",
"this",
"user",
"claims",
"to",
"have",
"from",
"request",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L150-L173 | train |
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.remember | def remember(self, response, request, identity):
"""Remember identity on response.
Implements ``morepath.App.remember_identity``, which is called
from user login code.
Create a JWT token and return it as the Authorization field of the
response header.
:param response: response object on which to store identity.
:type response: :class:`morepath.Response`
:param request: request object.
:type request: :class:`morepath.Request`
:param identity: identity to remember.
:type identity: :class:`morepath.Identity`
"""
claims = identity.as_dict()
userid = claims.pop('userid')
claims_set = self.create_claims_set(request, userid, claims)
token = self.encode_jwt(claims_set)
response.headers['Authorization'] = '%s %s' % (self.auth_header_prefix,
token) | python | def remember(self, response, request, identity):
"""Remember identity on response.
Implements ``morepath.App.remember_identity``, which is called
from user login code.
Create a JWT token and return it as the Authorization field of the
response header.
:param response: response object on which to store identity.
:type response: :class:`morepath.Response`
:param request: request object.
:type request: :class:`morepath.Request`
:param identity: identity to remember.
:type identity: :class:`morepath.Identity`
"""
claims = identity.as_dict()
userid = claims.pop('userid')
claims_set = self.create_claims_set(request, userid, claims)
token = self.encode_jwt(claims_set)
response.headers['Authorization'] = '%s %s' % (self.auth_header_prefix,
token) | [
"def",
"remember",
"(",
"self",
",",
"response",
",",
"request",
",",
"identity",
")",
":",
"claims",
"=",
"identity",
".",
"as_dict",
"(",
")",
"userid",
"=",
"claims",
".",
"pop",
"(",
"'userid'",
")",
"claims_set",
"=",
"self",
".",
"create_claims_set",
"(",
"request",
",",
"userid",
",",
"claims",
")",
"token",
"=",
"self",
".",
"encode_jwt",
"(",
"claims_set",
")",
"response",
".",
"headers",
"[",
"'Authorization'",
"]",
"=",
"'%s %s'",
"%",
"(",
"self",
".",
"auth_header_prefix",
",",
"token",
")"
] | Remember identity on response.
Implements ``morepath.App.remember_identity``, which is called
from user login code.
Create a JWT token and return it as the Authorization field of the
response header.
:param response: response object on which to store identity.
:type response: :class:`morepath.Response`
:param request: request object.
:type request: :class:`morepath.Request`
:param identity: identity to remember.
:type identity: :class:`morepath.Identity` | [
"Remember",
"identity",
"on",
"response",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L175-L196 | train |
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.decode_jwt | def decode_jwt(self, token, verify_expiration=True):
"""Decode a JWTAuth token into its claims set.
This method decodes the given JWT to provide the claims set. The JWT
can fail if the token has expired (with appropriate leeway) or if the
token won't validate due to the secret (key) being wrong.
If private_key/public key is set then the public_key will be used
to decode the key.
The leeway and issuer settings will be passed to jwt.decode.
:param token: the JWTAuth token.
:param verify_expiration: if False the expiration time will not
be checked.
"""
options = {
'verify_exp': verify_expiration,
}
return jwt.decode(
token,
self.public_key,
algorithms=[self.algorithm],
options=options,
leeway=self.leeway,
issuer=self.issuer
) | python | def decode_jwt(self, token, verify_expiration=True):
"""Decode a JWTAuth token into its claims set.
This method decodes the given JWT to provide the claims set. The JWT
can fail if the token has expired (with appropriate leeway) or if the
token won't validate due to the secret (key) being wrong.
If private_key/public key is set then the public_key will be used
to decode the key.
The leeway and issuer settings will be passed to jwt.decode.
:param token: the JWTAuth token.
:param verify_expiration: if False the expiration time will not
be checked.
"""
options = {
'verify_exp': verify_expiration,
}
return jwt.decode(
token,
self.public_key,
algorithms=[self.algorithm],
options=options,
leeway=self.leeway,
issuer=self.issuer
) | [
"def",
"decode_jwt",
"(",
"self",
",",
"token",
",",
"verify_expiration",
"=",
"True",
")",
":",
"options",
"=",
"{",
"'verify_exp'",
":",
"verify_expiration",
",",
"}",
"return",
"jwt",
".",
"decode",
"(",
"token",
",",
"self",
".",
"public_key",
",",
"algorithms",
"=",
"[",
"self",
".",
"algorithm",
"]",
",",
"options",
"=",
"options",
",",
"leeway",
"=",
"self",
".",
"leeway",
",",
"issuer",
"=",
"self",
".",
"issuer",
")"
] | Decode a JWTAuth token into its claims set.
This method decodes the given JWT to provide the claims set. The JWT
can fail if the token has expired (with appropriate leeway) or if the
token won't validate due to the secret (key) being wrong.
If private_key/public key is set then the public_key will be used
to decode the key.
The leeway and issuer settings will be passed to jwt.decode.
:param token: the JWTAuth token.
:param verify_expiration: if False the expiration time will not
be checked. | [
"Decode",
"a",
"JWTAuth",
"token",
"into",
"its",
"claims",
"set",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L214-L239 | train |
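
decode_jwt() is a thin wrapper over PyJWT. A standalone sketch of the underlying calls with an HS256 shared secret (the policy also supports asymmetric RS*/EC* keys, not shown; the secret and claim values are illustrative):

import jwt  # PyJWT

secret = 's3cr3t'
claims = {'sub': 'alice', 'iss': 'example-app'}
token = jwt.encode(claims, secret, algorithm='HS256')

decoded = jwt.decode(
    token,
    secret,
    algorithms=['HS256'],
    options={'verify_exp': True},
    leeway=10,
    issuer='example-app',
)
print(decoded['sub'])  # 'alice'
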
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.create_claims_set | def create_claims_set(self, request, userid, extra_claims=None):
"""Create the claims set based on the userid of the claimed identity,
the settings and the extra_claims dictionary.
The userid will be stored in settings.jwtauth.userid_claim
(default: "sub").
If settings.jwtauth.expiration_delta is set it will be added
to the current time and stored in the "exp" claim.
If settings.jwtauth.issuer is set, it get stored in the "iss" claim.
If settings.jwtauth.refresh_delta is set it will be added
to the current time and stored in the "refresh_until" claim and
the return value of settings.jwtauth.refresh_nonce_handler called with
"user_id" as argument will be stored in the "nonce" claim.
With the extra_claims dictionary you can provide additional claims.
This can be registered claims like "nbf"
(the time before which the token should not be processed) and/or
claims containing extra info
about the identity, which will be stored in the Identity object.
:param request: current request object.
:type request: :class:`morepath.Request`
:param userid: the userid of the claimed identity.
:param extra_claims: dictionary, containing additional claims or None.
"""
claims_set = {self.userid_claim: userid}
now = timegm(datetime.utcnow().utctimetuple())
if self.expiration_delta is not None:
claims_set['exp'] = now + self.expiration_delta
if self.issuer is not None:
claims_set['iss'] = self.issuer
if self.allow_refresh:
if self.refresh_delta is not None:
claims_set['refresh_until'] = now + self.refresh_delta
if self.refresh_nonce_handler is not None:
claims_set['nonce'] = self.refresh_nonce_handler(request,
userid)
if extra_claims is not None:
claims_set.update(extra_claims)
return claims_set | python | def create_claims_set(self, request, userid, extra_claims=None):
"""Create the claims set based on the userid of the claimed identity,
the settings and the extra_claims dictionary.
The userid will be stored in settings.jwtauth.userid_claim
(default: "sub").
If settings.jwtauth.expiration_delta is set it will be added
to the current time and stored in the "exp" claim.
If settings.jwtauth.issuer is set, it get stored in the "iss" claim.
If settings.jwtauth.refresh_delta is set it will be added
to the current time and stored in the "refresh_until" claim and
the return value of settings.jwtauth.refresh_nonce_handler called with
"user_id" as argument will be stored in the "nonce" claim.
With the extra_claims dictionary you can provide additional claims.
This can be registered claims like "nbf"
(the time before which the token should not be processed) and/or
claims containing extra info
about the identity, which will be stored in the Identity object.
:param request: current request object.
:type request: :class:`morepath.Request`
:param userid: the userid of the claimed identity.
:param extra_claims: dictionary, containing additional claims or None.
"""
claims_set = {self.userid_claim: userid}
now = timegm(datetime.utcnow().utctimetuple())
if self.expiration_delta is not None:
claims_set['exp'] = now + self.expiration_delta
if self.issuer is not None:
claims_set['iss'] = self.issuer
if self.allow_refresh:
if self.refresh_delta is not None:
claims_set['refresh_until'] = now + self.refresh_delta
if self.refresh_nonce_handler is not None:
claims_set['nonce'] = self.refresh_nonce_handler(request,
userid)
if extra_claims is not None:
claims_set.update(extra_claims)
return claims_set | [
"def",
"create_claims_set",
"(",
"self",
",",
"request",
",",
"userid",
",",
"extra_claims",
"=",
"None",
")",
":",
"claims_set",
"=",
"{",
"self",
".",
"userid_claim",
":",
"userid",
"}",
"now",
"=",
"timegm",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"utctimetuple",
"(",
")",
")",
"if",
"self",
".",
"expiration_delta",
"is",
"not",
"None",
":",
"claims_set",
"[",
"'exp'",
"]",
"=",
"now",
"+",
"self",
".",
"expiration_delta",
"if",
"self",
".",
"issuer",
"is",
"not",
"None",
":",
"claims_set",
"[",
"'iss'",
"]",
"=",
"self",
".",
"issuer",
"if",
"self",
".",
"allow_refresh",
":",
"if",
"self",
".",
"refresh_delta",
"is",
"not",
"None",
":",
"claims_set",
"[",
"'refresh_until'",
"]",
"=",
"now",
"+",
"self",
".",
"refresh_delta",
"if",
"self",
".",
"refresh_nonce_handler",
"is",
"not",
"None",
":",
"claims_set",
"[",
"'nonce'",
"]",
"=",
"self",
".",
"refresh_nonce_handler",
"(",
"request",
",",
"userid",
")",
"if",
"extra_claims",
"is",
"not",
"None",
":",
"claims_set",
".",
"update",
"(",
"extra_claims",
")",
"return",
"claims_set"
] | Create the claims set based on the userid of the claimed identity,
the settings and the extra_claims dictionary.
The userid will be stored in settings.jwtauth.userid_claim
(default: "sub").
If settings.jwtauth.expiration_delta is set it will be added
to the current time and stored in the "exp" claim.
If settings.jwtauth.issuer is set, it get stored in the "iss" claim.
If settings.jwtauth.refresh_delta is set it will be added
to the current time and stored in the "refresh_until" claim and
the return value of settings.jwtauth.refresh_nonce_handler called with
"user_id" as argument will be stored in the "nonce" claim.
With the extra_claims dictionary you can provide additional claims.
This can be registered claims like "nbf"
(the time before which the token should not be processed) and/or
claims containing extra info
about the identity, which will be stored in the Identity object.
:param request: current request object.
:type request: :class:`morepath.Request`
:param userid: the userid of the claimed identity.
:param extra_claims: dictionary, containing additional claims or None. | [
"Create",
"the",
"claims",
"set",
"based",
"on",
"the",
"userid",
"of",
"the",
"claimed",
"identity",
"the",
"settings",
"and",
"the",
"extra_claims",
"dictionary",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L241-L280 | train |
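
A sketch of the claims-set layout that create_claims_set() produces, built by hand with the same timegm() arithmetic. Field names follow the defaults described in the docstring; all values are illustrative:

from calendar import timegm
from datetime import datetime

now = timegm(datetime.utcnow().utctimetuple())

claims_set = {
    'sub': 'alice',                          # userid_claim default
    'exp': now + 30 * 60,                    # expiration_delta of 30 minutes
    'iss': 'example-app',                    # issuer, if configured
    'refresh_until': now + 7 * 24 * 3600,    # refresh_delta of 7 days
    'nonce': 'per-user-salt',                # from refresh_nonce_handler, if configured
    'role': 'editor',                        # example extra claim carried into the Identity
}
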
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.encode_jwt | def encode_jwt(self, claims_set):
"""Encode a JWT token based on the claims_set and the settings.
If available, registry.settings.jwtauth.private_key is used as key.
In this case the algorithm must be an RS* or EC* algorithm.
If registry.settings.jwtauth.private_key is not set,
registry.settings.jwtauth.master_secret is used.
registry.settings.jwtauth.algorithm is used as algorithm.
:param claims_set: set of claims, which will be included in
the created token.
"""
token = jwt.encode(claims_set, self.private_key, self.algorithm)
if PY3:
token = token.decode(encoding='UTF-8')
return token | python | def encode_jwt(self, claims_set):
"""Encode a JWT token based on the claims_set and the settings.
If available, registry.settings.jwtauth.private_key is used as key.
In this case the algorithm must be an RS* or EC* algorithm.
If registry.settings.jwtauth.private_key is not set,
registry.settings.jwtauth.master_secret is used.
registry.settings.jwtauth.algorithm is used as algorithm.
:param claims_set: set of claims, which will be included in
the created token.
"""
token = jwt.encode(claims_set, self.private_key, self.algorithm)
if PY3:
token = token.decode(encoding='UTF-8')
return token | [
"def",
"encode_jwt",
"(",
"self",
",",
"claims_set",
")",
":",
"token",
"=",
"jwt",
".",
"encode",
"(",
"claims_set",
",",
"self",
".",
"private_key",
",",
"self",
".",
"algorithm",
")",
"if",
"PY3",
":",
"token",
"=",
"token",
".",
"decode",
"(",
"encoding",
"=",
"'UTF-8'",
")",
"return",
"token"
] | Encode a JWT token based on the claims_set and the settings.
If available, registry.settings.jwtauth.private_key is used as key.
In this case the algorithm must be an RS* or EC* algorithm.
If registry.settings.jwtauth.private_key is not set,
registry.settings.jwtauth.master_secret is used.
registry.settings.jwtauth.algorithm is used as algorithm.
:param claims_set: set of claims, which will be included in
the created token. | [
"Encode",
"a",
"JWT",
"token",
"based",
"on",
"the",
"claims_set",
"and",
"the",
"settings",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L282-L299 | train |
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.get_extra_claims | def get_extra_claims(self, claims_set):
"""Get claims holding extra identity info from the claims set.
Returns a dictionary of extra claims or None if there are none.
:param claims_set: set of claims, which was included in the received
token.
"""
reserved_claims = (
self.userid_claim, "iss", "aud", "exp", "nbf", "iat", "jti",
"refresh_until", "nonce"
)
extra_claims = {}
for claim in claims_set:
if claim not in reserved_claims:
extra_claims[claim] = claims_set[claim]
if not extra_claims:
return None
return extra_claims | python | def get_extra_claims(self, claims_set):
"""Get claims holding extra identity info from the claims set.
Returns a dictionary of extra claims or None if there are none.
:param claims_set: set of claims, which was included in the received
token.
"""
reserved_claims = (
self.userid_claim, "iss", "aud", "exp", "nbf", "iat", "jti",
"refresh_until", "nonce"
)
extra_claims = {}
for claim in claims_set:
if claim not in reserved_claims:
extra_claims[claim] = claims_set[claim]
if not extra_claims:
return None
return extra_claims | [
"def",
"get_extra_claims",
"(",
"self",
",",
"claims_set",
")",
":",
"reserved_claims",
"=",
"(",
"self",
".",
"userid_claim",
",",
"\"iss\"",
",",
"\"aud\"",
",",
"\"exp\"",
",",
"\"nbf\"",
",",
"\"iat\"",
",",
"\"jti\"",
",",
"\"refresh_until\"",
",",
"\"nonce\"",
")",
"extra_claims",
"=",
"{",
"}",
"for",
"claim",
"in",
"claims_set",
":",
"if",
"claim",
"not",
"in",
"reserved_claims",
":",
"extra_claims",
"[",
"claim",
"]",
"=",
"claims_set",
"[",
"claim",
"]",
"if",
"not",
"extra_claims",
":",
"return",
"None",
"return",
"extra_claims"
] | Get claims holding extra identity info from the claims set.
Returns a dictionary of extra claims or None if there are none.
:param claims_set: set of claims, which was included in the received
token. | [
"Get",
"claims",
"holding",
"extra",
"identity",
"info",
"from",
"the",
"claims",
"set",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L314-L333 | train |
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.get_jwt | def get_jwt(self, request):
"""Extract the JWT token from the authorisation header of the request.
Returns the JWT token or None, if the token cannot be extracted.
:param request: request object.
:type request: :class:`morepath.Request`
"""
try:
authorization = request.authorization
except ValueError:
return None
if authorization is None:
return None
authtype, token = authorization
if authtype.lower() != self.auth_header_prefix.lower():
return None
return token | python | def get_jwt(self, request):
"""Extract the JWT token from the authorisation header of the request.
Returns the JWT token or None, if the token cannot be extracted.
:param request: request object.
:type request: :class:`morepath.Request`
"""
try:
authorization = request.authorization
except ValueError:
return None
if authorization is None:
return None
authtype, token = authorization
if authtype.lower() != self.auth_header_prefix.lower():
return None
return token | [
"def",
"get_jwt",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"authorization",
"=",
"request",
".",
"authorization",
"except",
"ValueError",
":",
"return",
"None",
"if",
"authorization",
"is",
"None",
":",
"return",
"None",
"authtype",
",",
"token",
"=",
"authorization",
"if",
"authtype",
".",
"lower",
"(",
")",
"!=",
"self",
".",
"auth_header_prefix",
".",
"lower",
"(",
")",
":",
"return",
"None",
"return",
"token"
] | Extract the JWT token from the authorisation header of the request.
Returns the JWT token or None, if the token cannot be extracted.
:param request: request object.
:type request: :class:`morepath.Request` | [
"Extract",
"the",
"JWT",
"token",
"from",
"the",
"authorisation",
"header",
"of",
"the",
"request",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L335-L352 | train |
morepath/more.jwtauth | more/jwtauth/main.py | JWTIdentityPolicy.verify_refresh | def verify_refresh(self, request):
"""
Verify if the request to refresh the token is valid.
If valid it returns the userid which can be used to create
an updated identity with ``remember_identity``.
Otherwise it raises an exception based on InvalidTokenError.
:param request: current request object
:type request: :class:`morepath.Request`
:returns: userid
:raises: InvalidTokenError, ExpiredSignatureError, DecodeError,
MissingRequiredClaimError
"""
if not self.allow_refresh:
raise InvalidTokenError('Token refresh is disabled')
token = self.get_jwt(request)
if token is None:
raise InvalidTokenError('Token not found')
try:
claims_set = self.decode_jwt(
token, self.verify_expiration_on_refresh
)
# reraise the exceptions to change the error messages
except DecodeError:
raise DecodeError('Token could not be decoded')
except ExpiredSignatureError:
raise ExpiredSignatureError('Token has expired')
userid = self.get_userid(claims_set)
if userid is None:
raise MissingRequiredClaimError(self.userid_claim)
if self.refresh_nonce_handler is not None:
if 'nonce' not in claims_set:
raise MissingRequiredClaimError('nonce')
if self.refresh_nonce_handler(request,
userid) != claims_set['nonce']:
raise InvalidTokenError('Refresh nonce is not valid')
if self.refresh_delta is not None:
if 'refresh_until' not in claims_set:
raise MissingRequiredClaimError('refresh_until')
now = timegm(datetime.utcnow().utctimetuple())
refresh_until = int(claims_set['refresh_until'])
if refresh_until < (now - self.leeway):
raise ExpiredSignatureError('Refresh nonce has expired')
return userid | python | def verify_refresh(self, request):
"""
Verify if the request to refresh the token is valid.
If valid it returns the userid which can be used to create
an updated identity with ``remember_identity``.
Otherwise it raises an exception based on InvalidTokenError.
:param request: current request object
:type request: :class:`morepath.Request`
:returns: userid
:raises: InvalidTokenError, ExpiredSignatureError, DecodeError,
MissingRequiredClaimError
"""
if not self.allow_refresh:
raise InvalidTokenError('Token refresh is disabled')
token = self.get_jwt(request)
if token is None:
raise InvalidTokenError('Token not found')
try:
claims_set = self.decode_jwt(
token, self.verify_expiration_on_refresh
)
# reraise the exceptions to change the error messages
except DecodeError:
raise DecodeError('Token could not be decoded')
except ExpiredSignatureError:
raise ExpiredSignatureError('Token has expired')
userid = self.get_userid(claims_set)
if userid is None:
raise MissingRequiredClaimError(self.userid_claim)
if self.refresh_nonce_handler is not None:
if 'nonce' not in claims_set:
raise MissingRequiredClaimError('nonce')
if self.refresh_nonce_handler(request,
userid) != claims_set['nonce']:
raise InvalidTokenError('Refresh nonce is not valid')
if self.refresh_delta is not None:
if 'refresh_until' not in claims_set:
raise MissingRequiredClaimError('refresh_until')
now = timegm(datetime.utcnow().utctimetuple())
refresh_until = int(claims_set['refresh_until'])
if refresh_until < (now - self.leeway):
raise ExpiredSignatureError('Refresh nonce has expired')
return userid | [
"def",
"verify_refresh",
"(",
"self",
",",
"request",
")",
":",
"if",
"not",
"self",
".",
"allow_refresh",
":",
"raise",
"InvalidTokenError",
"(",
"'Token refresh is disabled'",
")",
"token",
"=",
"self",
".",
"get_jwt",
"(",
"request",
")",
"if",
"token",
"is",
"None",
":",
"raise",
"InvalidTokenError",
"(",
"'Token not found'",
")",
"try",
":",
"claims_set",
"=",
"self",
".",
"decode_jwt",
"(",
"token",
",",
"self",
".",
"verify_expiration_on_refresh",
")",
"# reraise the exceptions to change the error messages",
"except",
"DecodeError",
":",
"raise",
"DecodeError",
"(",
"'Token could not be decoded'",
")",
"except",
"ExpiredSignatureError",
":",
"raise",
"ExpiredSignatureError",
"(",
"'Token has expired'",
")",
"userid",
"=",
"self",
".",
"get_userid",
"(",
"claims_set",
")",
"if",
"userid",
"is",
"None",
":",
"raise",
"MissingRequiredClaimError",
"(",
"self",
".",
"userid_claim",
")",
"if",
"self",
".",
"refresh_nonce_handler",
"is",
"not",
"None",
":",
"if",
"'nonce'",
"not",
"in",
"claims_set",
":",
"raise",
"MissingRequiredClaimError",
"(",
"'nonce'",
")",
"if",
"self",
".",
"refresh_nonce_handler",
"(",
"request",
",",
"userid",
")",
"!=",
"claims_set",
"[",
"'nonce'",
"]",
":",
"raise",
"InvalidTokenError",
"(",
"'Refresh nonce is not valid'",
")",
"if",
"self",
".",
"refresh_delta",
"is",
"not",
"None",
":",
"if",
"'refresh_until'",
"not",
"in",
"claims_set",
":",
"raise",
"MissingRequiredClaimError",
"(",
"'refresh_until'",
")",
"now",
"=",
"timegm",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"utctimetuple",
"(",
")",
")",
"refresh_until",
"=",
"int",
"(",
"claims_set",
"[",
"'refresh_until'",
"]",
")",
"if",
"refresh_until",
"<",
"(",
"now",
"-",
"self",
".",
"leeway",
")",
":",
"raise",
"ExpiredSignatureError",
"(",
"'Refresh nonce has expired'",
")",
"return",
"userid"
] | Verify if the request to refresh the token is valid.
If valid it returns the userid which can be used to create
an updated identity with ``remember_identity``.
Otherwise it raises an exception based on InvalidTokenError.
:param request: current request object
:type request: :class:`morepath.Request`
:returns: userid
:raises: InvalidTokenError, ExpiredSignatureError, DecodeError,
MissingRequiredClaimError | [
"Verify",
"if",
"the",
"request",
"to",
"refresh",
"the",
"token",
"is",
"valid",
".",
"If",
"valid",
"it",
"returns",
"the",
"userid",
"which",
"can",
"be",
"used",
"to",
"create",
"an",
"updated",
"identity",
"with",
"remember_identity",
".",
"Otherwise",
"it",
"raises",
"an",
"exception",
"based",
"on",
"InvalidTokenError",
"."
] | 1c3c5731612069a092e44cf612641c05edf1f083 | https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L354-L404 | train |
guaix-ucm/numina | numina/array/robustfit.py | fit_theil_sen | def fit_theil_sen(x, y):
"""Compute a robust linear fit using the Theil-Sen method.
See http://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator for details.
This function "pairs up sample points by the rank of their x-coordinates
(the point with the smallest coordinate being paired with the first point
above the median coordinate, etc.) and computes the median of the slopes of
the lines determined by these pairs of points".
Parameters
----------
x : array_like, shape (M,)
X coordinate array.
y : array_like, shape (M,) or (M,K)
Y coordinate array. If the array is two dimensional, each column of
the array is independently fitted sharing the same x-coordinates. In
this last case, the returned intercepts and slopes are also 1d numpy
arrays.
Returns
-------
coef : ndarray, shape (2,) or (2, K)
Intercept and slope of the linear fit. If y was 2-D, the
coefficients in column k of coef represent the linear fit
to the data in y's k-th column.
Raises
------
ValueError:
If the number of points to fit is < 5
"""
xx = numpy.asarray(x)
y1 = numpy.asarray(y)
n = len(xx)
if n < 5:
raise ValueError('Number of points < 5')
if xx.ndim != 1:
raise ValueError('Input arrays have unexpected dimensions')
if y1.ndim == 1:
if len(y1) != n:
raise ValueError('X and Y arrays have different sizes')
yy = y1[numpy.newaxis, :]
elif y1.ndim == 2:
if n != y1.shape[0]:
raise ValueError(
'Y-array size in the fitting direction is different to the X-array size')
yy = y1.T
else:
raise ValueError('Input arrays have unexpected dimensions')
nmed = n // 2
iextra = nmed if (n % 2) == 0 else nmed + 1
deltx = xx[iextra:] - xx[:nmed]
delty = yy[:, iextra:] - yy[:, :nmed]
allslopes = delty / deltx
slopes = numpy.median(allslopes, axis=1)
allinters = yy - slopes[:, numpy.newaxis] * x
inters = numpy.median(allinters, axis=1)
coeff = numpy.array([inters, slopes])
return numpy.squeeze(coeff) | python | def fit_theil_sen(x, y):
"""Compute a robust linear fit using the Theil-Sen method.
See http://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator for details.
This function "pairs up sample points by the rank of their x-coordinates
(the point with the smallest coordinate being paired with the first point
above the median coordinate, etc.) and computes the median of the slopes of
the lines determined by these pairs of points".
Parameters
----------
x : array_like, shape (M,)
X coordinate array.
y : array_like, shape (M,) or (M,K)
Y coordinate array. If the array is two dimensional, each column of
the array is independently fitted sharing the same x-coordinates. In
this last case, the returned intercepts and slopes are also 1d numpy
arrays.
Returns
-------
coef : ndarray, shape (2,) or (2, K)
Intercept and slope of the linear fit. If y was 2-D, the
coefficients in column k of coef represent the linear fit
to the data in y's k-th column.
Raises
------
ValueError:
If the number of points to fit is < 5
"""
xx = numpy.asarray(x)
y1 = numpy.asarray(y)
n = len(xx)
if n < 5:
raise ValueError('Number of points < 5')
if xx.ndim != 1:
raise ValueError('Input arrays have unexpected dimensions')
if y1.ndim == 1:
if len(y1) != n:
raise ValueError('X and Y arrays have different sizes')
yy = y1[numpy.newaxis, :]
elif y1.ndim == 2:
if n != y1.shape[0]:
raise ValueError(
'Y-array size in the fitting direction is different to the X-array size')
yy = y1.T
else:
raise ValueError('Input arrays have unexpected dimensions')
nmed = n // 2
iextra = nmed if (n % 2) == 0 else nmed + 1
deltx = xx[iextra:] - xx[:nmed]
delty = yy[:, iextra:] - yy[:, :nmed]
allslopes = delty / deltx
slopes = numpy.median(allslopes, axis=1)
allinters = yy - slopes[:, numpy.newaxis] * x
inters = numpy.median(allinters, axis=1)
coeff = numpy.array([inters, slopes])
return numpy.squeeze(coeff) | [
"def",
"fit_theil_sen",
"(",
"x",
",",
"y",
")",
":",
"xx",
"=",
"numpy",
".",
"asarray",
"(",
"x",
")",
"y1",
"=",
"numpy",
".",
"asarray",
"(",
"y",
")",
"n",
"=",
"len",
"(",
"xx",
")",
"if",
"n",
"<",
"5",
":",
"raise",
"ValueError",
"(",
"'Number of points < 5'",
")",
"if",
"xx",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Input arrays have unexpected dimensions'",
")",
"if",
"y1",
".",
"ndim",
"==",
"1",
":",
"if",
"len",
"(",
"y1",
")",
"!=",
"n",
":",
"raise",
"ValueError",
"(",
"'X and Y arrays have different sizes'",
")",
"yy",
"=",
"y1",
"[",
"numpy",
".",
"newaxis",
",",
":",
"]",
"elif",
"y1",
".",
"ndim",
"==",
"2",
":",
"if",
"n",
"!=",
"y1",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Y-array size in the fitting direction is different to the X-array size'",
")",
"yy",
"=",
"y1",
".",
"T",
"else",
":",
"raise",
"ValueError",
"(",
"'Input arrays have unexpected dimensions'",
")",
"nmed",
"=",
"n",
"//",
"2",
"iextra",
"=",
"nmed",
"if",
"(",
"n",
"%",
"2",
")",
"==",
"0",
"else",
"nmed",
"+",
"1",
"deltx",
"=",
"xx",
"[",
"iextra",
":",
"]",
"-",
"xx",
"[",
":",
"nmed",
"]",
"delty",
"=",
"yy",
"[",
":",
",",
"iextra",
":",
"]",
"-",
"yy",
"[",
":",
",",
":",
"nmed",
"]",
"allslopes",
"=",
"delty",
"/",
"deltx",
"slopes",
"=",
"numpy",
".",
"median",
"(",
"allslopes",
",",
"axis",
"=",
"1",
")",
"allinters",
"=",
"yy",
"-",
"slopes",
"[",
":",
",",
"numpy",
".",
"newaxis",
"]",
"*",
"x",
"inters",
"=",
"numpy",
".",
"median",
"(",
"allinters",
",",
"axis",
"=",
"1",
")",
"coeff",
"=",
"numpy",
".",
"array",
"(",
"[",
"inters",
",",
"slopes",
"]",
")",
"return",
"numpy",
".",
"squeeze",
"(",
"coeff",
")"
] | Compute a robust linear fit using the Theil-Sen method.
See http://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator for details.
This function "pairs up sample points by the rank of their x-coordinates
(the point with the smallest coordinate being paired with the first point
above the median coordinate, etc.) and computes the median of the slopes of
the lines determined by these pairs of points".
Parameters
----------
x : array_like, shape (M,)
X coordinate array.
y : array_like, shape (M,) or (M,K)
Y coordinate array. If the array is two dimensional, each column of
the array is independently fitted sharing the same x-coordinates. In
this last case, the returned intercepts and slopes are also 1d numpy
arrays.
Returns
-------
coef : ndarray, shape (2,) or (2, K)
Intercept and slope of the linear fit. If y was 2-D, the
coefficients in column k of coef represent the linear fit
to the data in y's k-th column.
Raises
------
ValueError:
If the number of points to fit is < 5 | [
"Compute",
"a",
"robust",
"linear",
"fit",
"using",
"the",
"Theil",
"-",
"Sen",
"method",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/robustfit.py#L15-L80 | train |
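
A quick usage sketch for fit_theil_sen() with synthetic data containing gross outliers; the import path is inferred from the file location numina/array/robustfit.py.

import numpy
from numina.array.robustfit import fit_theil_sen  # inferred import path

rng = numpy.random.RandomState(42)
x = numpy.linspace(0.0, 10.0, 51)
y = 3.0 + 0.5 * x + rng.normal(scale=0.05, size=x.size)
y[::10] += 4.0                     # a few gross outliers

intercept, slope = fit_theil_sen(x, y)
print(intercept, slope)            # close to 3.0 and 0.5 despite the outliers
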
guaix-ucm/numina | numina/user/cli.py | process_unknown_arguments | def process_unknown_arguments(unknowns):
"""Process arguments unknown to the parser"""
result = argparse.Namespace()
result.extra_control = {}
# It would be interesting to use argparse internal
# machinery for this
for unknown in unknowns:
# Check prefixes
prefix = '--parameter-'
if unknown.startswith(prefix):
# process '='
values = unknown.split('=')
if len(values) == 2:
key = values[0][len(prefix):]
val = values[1]
if key:
result.extra_control[key] = val
return result | python | def process_unknown_arguments(unknowns):
"""Process arguments unknown to the parser"""
result = argparse.Namespace()
result.extra_control = {}
# It would be interesting to use argparse internal
# machinery for this
for unknown in unknowns:
# Check prefixes
prefix = '--parameter-'
if unknown.startswith(prefix):
# process '='
values = unknown.split('=')
if len(values) == 2:
key = values[0][len(prefix):]
val = values[1]
if key:
result.extra_control[key] = val
return result | [
"def",
"process_unknown_arguments",
"(",
"unknowns",
")",
":",
"result",
"=",
"argparse",
".",
"Namespace",
"(",
")",
"result",
".",
"extra_control",
"=",
"{",
"}",
"# It would be interesting to use argparse internal",
"# machinery for this",
"for",
"unknown",
"in",
"unknowns",
":",
"# Check prefixes",
"prefix",
"=",
"'--parameter-'",
"if",
"unknown",
".",
"startswith",
"(",
"prefix",
")",
":",
"# process '='",
"values",
"=",
"unknown",
".",
"split",
"(",
"'='",
")",
"if",
"len",
"(",
"values",
")",
"==",
"2",
":",
"key",
"=",
"values",
"[",
"0",
"]",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"val",
"=",
"values",
"[",
"1",
"]",
"if",
"key",
":",
"result",
".",
"extra_control",
"[",
"key",
"]",
"=",
"val",
"return",
"result"
] | Process arguments unknown to the parser | [
"Process",
"arguments",
"unknown",
"to",
"the",
"parser"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/cli.py#L152-L170 | train |
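
process_unknown_arguments() pairs naturally with argparse.parse_known_args(); a sketch, with the numina.user.cli import path taken from the record above and made-up parameter names:

import argparse
from numina.user.cli import process_unknown_arguments  # inferred import path

parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')

argv = ['--debug', '--parameter-vph=LR-U', '--parameter-exptime=30']
args, unknowns = parser.parse_known_args(argv)

extra = process_unknown_arguments(unknowns)
print(extra.extra_control)   # {'vph': 'LR-U', 'exptime': '30'}
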
rfk/playitagainsam | playitagainsam/util.py | get_fd | def get_fd(file_or_fd, default=None):
"""Helper function for getting a file descriptor."""
fd = file_or_fd
if fd is None:
fd = default
if hasattr(fd, "fileno"):
fd = fd.fileno()
return fd | python | def get_fd(file_or_fd, default=None):
"""Helper function for getting a file descriptor."""
fd = file_or_fd
if fd is None:
fd = default
if hasattr(fd, "fileno"):
fd = fd.fileno()
return fd | [
"def",
"get_fd",
"(",
"file_or_fd",
",",
"default",
"=",
"None",
")",
":",
"fd",
"=",
"file_or_fd",
"if",
"fd",
"is",
"None",
":",
"fd",
"=",
"default",
"if",
"hasattr",
"(",
"fd",
",",
"\"fileno\"",
")",
":",
"fd",
"=",
"fd",
".",
"fileno",
"(",
")",
"return",
"fd"
] | Helper function for getting a file descriptor. | [
"Helper",
"function",
"for",
"getting",
"a",
"file",
"descriptor",
"."
] | 897cc8e8ca920a4afb8597b4a345361065a3f108 | https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/util.py#L65-L72 | train |
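
get_fd() simply normalises "file object or raw descriptor" arguments; a tiny sketch, assuming the playitagainsam.util import path from the record:

import sys
from playitagainsam.util import get_fd  # inferred import path

print(get_fd(sys.stdout))         # typically 1
print(get_fd(7))                  # already an int: returned unchanged
print(get_fd(None, sys.stderr))   # falls back to the default, typically 2
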