id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class) | code (string, 51-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars)
---|---|---|---|---|---|---|---|---|---|---|---
251,700 | astropy/regions | regions/io/ds9/read.py | DS9RegionParser.parse | def parse(self):
"""
Convert line to shape object
"""
log.debug(self)
self.parse_composite()
self.split_line()
self.convert_coordinates()
self.convert_meta()
self.make_shape()
log.debug(self) | python | def parse(self):
log.debug(self)
self.parse_composite()
self.split_line()
self.convert_coordinates()
self.convert_meta()
self.make_shape()
log.debug(self) | [
"def",
"parse",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"self",
")",
"self",
".",
"parse_composite",
"(",
")",
"self",
".",
"split_line",
"(",
")",
"self",
".",
"convert_coordinates",
"(",
")",
"self",
".",
"convert_meta",
"(",
")",
"self",
".",
"make_shape",
"(",
")",
"log",
".",
"debug",
"(",
"self",
")"
] | Convert line to shape object | [
"Convert",
"line",
"to",
"shape",
"object"
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/io/ds9/read.py#L431-L442 |
251,701 | astropy/regions | regions/io/ds9/read.py | DS9RegionParser.split_line | def split_line(self):
"""
Split line into coordinates and meta string
"""
# coordinate of the # symbol or end of the line (-1) if not found
hash_or_end = self.line.find("#")
temp = self.line[self.region_end:hash_or_end].strip(" |")
self.coord_str = regex_paren.sub("", temp)
# don't want any meta_str if there is no metadata found
if hash_or_end >= 0:
self.meta_str = self.line[hash_or_end:]
else:
self.meta_str = "" | python | def split_line(self):
# coordinate of the # symbol or end of the line (-1) if not found
hash_or_end = self.line.find("#")
temp = self.line[self.region_end:hash_or_end].strip(" |")
self.coord_str = regex_paren.sub("", temp)
# don't want any meta_str if there is no metadata found
if hash_or_end >= 0:
self.meta_str = self.line[hash_or_end:]
else:
self.meta_str = "" | [
"def",
"split_line",
"(",
"self",
")",
":",
"# coordinate of the # symbol or end of the line (-1) if not found",
"hash_or_end",
"=",
"self",
".",
"line",
".",
"find",
"(",
"\"#\"",
")",
"temp",
"=",
"self",
".",
"line",
"[",
"self",
".",
"region_end",
":",
"hash_or_end",
"]",
".",
"strip",
"(",
"\" |\"",
")",
"self",
".",
"coord_str",
"=",
"regex_paren",
".",
"sub",
"(",
"\"\"",
",",
"temp",
")",
"# don't want any meta_str if there is no metadata found",
"if",
"hash_or_end",
">=",
"0",
":",
"self",
".",
"meta_str",
"=",
"self",
".",
"line",
"[",
"hash_or_end",
":",
"]",
"else",
":",
"self",
".",
"meta_str",
"=",
"\"\""
] | Split line into coordinates and meta string | [
"Split",
"line",
"into",
"coordinates",
"and",
"meta",
"string"
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/io/ds9/read.py#L450-L463 |
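
The split keys everything on the position of the first `#`: coordinates before it, metadata from it onward. Below is a minimal standalone sketch of the same logic; the `regex_paren` pattern, the sample line, and `region_end` are assumptions for illustration (the real parser defines its own regex and state):

```python
import re

# Assumed stand-in for the module-level regex_paren pattern.
regex_paren = re.compile(r"[()]")

line = "circle(100,100,20) # color=red"   # hypothetical DS9 region line
region_end = len("circle")                # index just past the shape keyword

hash_or_end = line.find("#")              # -1 when no metadata is present
coord_str = regex_paren.sub("", line[region_end:hash_or_end].strip(" |"))
meta_str = line[hash_or_end:] if hash_or_end >= 0 else ""

print(coord_str)  # 100,100,20
print(meta_str)   # '# color=red'
```

Note the inherited quirk: when no `#` is present, `hash_or_end` is -1 and the slice `line[region_end:-1]` silently drops the line's final character.
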
251,702 | astropy/regions | regions/io/ds9/read.py | DS9RegionParser.convert_coordinates | def convert_coordinates(self):
"""
Convert coordinate string to objects
"""
coord_list = []
# strip out "null" elements, i.e. ''. It might be possible to eliminate
# these some other way, i.e. with regex directly, but I don't know how.
# We need to copy in order not to burn up the iterators
elements = [x for x in regex_splitter.split(self.coord_str) if x]
element_parsers = self.language_spec[self.region_type]
for ii, (element, element_parser) in enumerate(zip(elements,
element_parsers)):
if element_parser is coordinate:
unit = self.coordinate_units[self.coordsys][ii % 2]
coord_list.append(element_parser(element, unit))
elif self.coordinate_units[self.coordsys][0] is u.dimensionless_unscaled:
coord_list.append(element_parser(element, unit=u.dimensionless_unscaled))
else:
coord_list.append(element_parser(element))
if self.region_type in ['ellipse', 'box'] and len(coord_list) % 2 == 1:
coord_list[-1] = CoordinateParser.parse_angular_length_quantity(elements[len(coord_list)-1])
# Reset iterator for ellipse and annulus
# Note that this cannot be done with copy.deepcopy on python2
if self.region_type in ['ellipse', 'annulus']:
self.language_spec[self.region_type] = itertools.chain(
(coordinate, coordinate), itertools.cycle((radius,)))
self.coord = coord_list | python | def convert_coordinates(self):
coord_list = []
# strip out "null" elements, i.e. ''. It might be possible to eliminate
# these some other way, i.e. with regex directly, but I don't know how.
# We need to copy in order not to burn up the iterators
elements = [x for x in regex_splitter.split(self.coord_str) if x]
element_parsers = self.language_spec[self.region_type]
for ii, (element, element_parser) in enumerate(zip(elements,
element_parsers)):
if element_parser is coordinate:
unit = self.coordinate_units[self.coordsys][ii % 2]
coord_list.append(element_parser(element, unit))
elif self.coordinate_units[self.coordsys][0] is u.dimensionless_unscaled:
coord_list.append(element_parser(element, unit=u.dimensionless_unscaled))
else:
coord_list.append(element_parser(element))
if self.region_type in ['ellipse', 'box'] and len(coord_list) % 2 == 1:
coord_list[-1] = CoordinateParser.parse_angular_length_quantity(elements[len(coord_list)-1])
# Reset iterator for ellipse and annulus
# Note that this cannot be done with copy.deepcopy on python2
if self.region_type in ['ellipse', 'annulus']:
self.language_spec[self.region_type] = itertools.chain(
(coordinate, coordinate), itertools.cycle((radius,)))
self.coord = coord_list | [
"def",
"convert_coordinates",
"(",
"self",
")",
":",
"coord_list",
"=",
"[",
"]",
"# strip out \"null\" elements, i.e. ''. It might be possible to eliminate",
"# these some other way, i.e. with regex directly, but I don't know how.",
"# We need to copy in order not to burn up the iterators",
"elements",
"=",
"[",
"x",
"for",
"x",
"in",
"regex_splitter",
".",
"split",
"(",
"self",
".",
"coord_str",
")",
"if",
"x",
"]",
"element_parsers",
"=",
"self",
".",
"language_spec",
"[",
"self",
".",
"region_type",
"]",
"for",
"ii",
",",
"(",
"element",
",",
"element_parser",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"elements",
",",
"element_parsers",
")",
")",
":",
"if",
"element_parser",
"is",
"coordinate",
":",
"unit",
"=",
"self",
".",
"coordinate_units",
"[",
"self",
".",
"coordsys",
"]",
"[",
"ii",
"%",
"2",
"]",
"coord_list",
".",
"append",
"(",
"element_parser",
"(",
"element",
",",
"unit",
")",
")",
"elif",
"self",
".",
"coordinate_units",
"[",
"self",
".",
"coordsys",
"]",
"[",
"0",
"]",
"is",
"u",
".",
"dimensionless_unscaled",
":",
"coord_list",
".",
"append",
"(",
"element_parser",
"(",
"element",
",",
"unit",
"=",
"u",
".",
"dimensionless_unscaled",
")",
")",
"else",
":",
"coord_list",
".",
"append",
"(",
"element_parser",
"(",
"element",
")",
")",
"if",
"self",
".",
"region_type",
"in",
"[",
"'ellipse'",
",",
"'box'",
"]",
"and",
"len",
"(",
"coord_list",
")",
"%",
"2",
"==",
"1",
":",
"coord_list",
"[",
"-",
"1",
"]",
"=",
"CoordinateParser",
".",
"parse_angular_length_quantity",
"(",
"elements",
"[",
"len",
"(",
"coord_list",
")",
"-",
"1",
"]",
")",
"# Reset iterator for ellipse and annulus",
"# Note that this cannot be done with copy.deepcopy on python2",
"if",
"self",
".",
"region_type",
"in",
"[",
"'ellipse'",
",",
"'annulus'",
"]",
":",
"self",
".",
"language_spec",
"[",
"self",
".",
"region_type",
"]",
"=",
"itertools",
".",
"chain",
"(",
"(",
"coordinate",
",",
"coordinate",
")",
",",
"itertools",
".",
"cycle",
"(",
"(",
"radius",
",",
")",
")",
")",
"self",
".",
"coord",
"=",
"coord_list"
] | Convert coordinate string to objects | [
"Convert",
"coordinate",
"string",
"to",
"objects"
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/io/ds9/read.py#L465-L494 |
251,703 | astropy/regions | regions/io/ds9/read.py | DS9RegionParser.convert_meta | def convert_meta(self):
"""
Convert meta string to dict
"""
meta_ = DS9Parser.parse_meta(self.meta_str)
self.meta = copy.deepcopy(self.global_meta)
self.meta.update(meta_)
# the 'include' is not part of the metadata string;
# it is pre-parsed as part of the shape type and should always
# override the global one
self.include = self.meta.get('include', True) if self.include == '' else self.include != '-'
self.meta['include'] = self.include | python | def convert_meta(self):
meta_ = DS9Parser.parse_meta(self.meta_str)
self.meta = copy.deepcopy(self.global_meta)
self.meta.update(meta_)
# the 'include' is not part of the metadata string;
# it is pre-parsed as part of the shape type and should always
# override the global one
self.include = self.meta.get('include', True) if self.include == '' else self.include != '-'
self.meta['include'] = self.include | [
"def",
"convert_meta",
"(",
"self",
")",
":",
"meta_",
"=",
"DS9Parser",
".",
"parse_meta",
"(",
"self",
".",
"meta_str",
")",
"self",
".",
"meta",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"global_meta",
")",
"self",
".",
"meta",
".",
"update",
"(",
"meta_",
")",
"# the 'include' is not part of the metadata string;",
"# it is pre-parsed as part of the shape type and should always",
"# override the global one",
"self",
".",
"include",
"=",
"self",
".",
"meta",
".",
"get",
"(",
"'include'",
",",
"True",
")",
"if",
"self",
".",
"include",
"==",
"''",
"else",
"self",
".",
"include",
"!=",
"'-'",
"self",
".",
"meta",
"[",
"'include'",
"]",
"=",
"self",
".",
"include"
] | Convert meta string to dict | [
"Convert",
"meta",
"string",
"to",
"dict"
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/io/ds9/read.py#L496-L507 |
251,704 | astropy/regions | regions/core/pixcoord.py | PixCoord._validate | def _validate(val, name, expected='any'):
"""Validate that a given object is an appropriate `PixCoord`.
This is used for input validation throughout the regions package,
especially in the `__init__` method of pixel region classes.
Parameters
----------
val : `PixCoord`
The object to check
name : str
Parameter name (used for error messages)
expected : {'any', 'scalar', 'not scalar'}
What kind of PixCoord to check for
Returns
-------
val : `PixCoord`
The input object (at the moment unmodified, might do fix-ups here later)
"""
if not isinstance(val, PixCoord):
raise TypeError('{} must be a PixCoord'.format(name))
if expected == 'any':
pass
elif expected == 'scalar':
if not val.isscalar:
raise ValueError('{} must be a scalar PixCoord'.format(name))
elif expected == 'not scalar':
if val.isscalar:
raise ValueError('{} must be a non-scalar PixCoord'.format(name))
else:
raise ValueError('Invalid argument for `expected`: {}'.format(expected))
return val | python | def _validate(val, name, expected='any'):
if not isinstance(val, PixCoord):
raise TypeError('{} must be a PixCoord'.format(name))
if expected == 'any':
pass
elif expected == 'scalar':
if not val.isscalar:
raise ValueError('{} must be a scalar PixCoord'.format(name))
elif expected == 'not scalar':
if val.isscalar:
raise ValueError('{} must be a non-scalar PixCoord'.format(name))
else:
raise ValueError('Invalid argument for `expected`: {}'.format(expected))
return val | [
"def",
"_validate",
"(",
"val",
",",
"name",
",",
"expected",
"=",
"'any'",
")",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"PixCoord",
")",
":",
"raise",
"TypeError",
"(",
"'{} must be a PixCoord'",
".",
"format",
"(",
"name",
")",
")",
"if",
"expected",
"==",
"'any'",
":",
"pass",
"elif",
"expected",
"==",
"'scalar'",
":",
"if",
"not",
"val",
".",
"isscalar",
":",
"raise",
"ValueError",
"(",
"'{} must be a scalar PixCoord'",
".",
"format",
"(",
"name",
")",
")",
"elif",
"expected",
"==",
"'not scalar'",
":",
"if",
"val",
".",
"isscalar",
":",
"raise",
"ValueError",
"(",
"'{} must be a non-scalar PixCoord'",
".",
"format",
"(",
"name",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid argument for `expected`: {}'",
".",
"format",
"(",
"expected",
")",
")",
"return",
"val"
] | Validate that a given object is an appropriate `PixCoord`.
This is used for input validation throughout the regions package,
especially in the `__init__` method of pixel region classes.
Parameters
----------
val : `PixCoord`
The object to check
name : str
Parameter name (used for error messages)
expected : {'any', 'scalar', 'not scalar'}
What kind of PixCoord to check for
Returns
-------
val : `PixCoord`
The input object (at the moment unmodified, might do fix-ups here later) | [
"Validate",
"that",
"a",
"given",
"object",
"is",
"an",
"appropriate",
"PixCoord",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/core/pixcoord.py#L45-L79 |
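
A hedged usage sketch of the guard above, calling the private helper directly purely for illustration (it reads as a staticmethod, since it takes no `self`); assumes `regions` and `numpy` are installed:

```python
import numpy as np
from regions import PixCoord

center = PixCoord(x=5, y=7)  # scalar coordinate
PixCoord._validate(center, name='center', expected='scalar')  # returns center

vertices = PixCoord(x=np.array([0, 1, 2]), y=np.array([0, 1, 0]))
PixCoord._validate(vertices, name='vertices', expected='not scalar')

try:
    PixCoord._validate(center, name='vertices', expected='not scalar')
except ValueError as e:
    print(e)  # vertices must be a non-scalar PixCoord
```
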
251,705 | astropy/regions | regions/core/pixcoord.py | PixCoord.to_sky | def to_sky(self, wcs, origin=_DEFAULT_WCS_ORIGIN, mode=_DEFAULT_WCS_MODE):
"""Convert this `PixCoord` to `~astropy.coordinates.SkyCoord`.
Calls :meth:`astropy.coordinates.SkyCoord.from_pixel`.
See parameter description there.
"""
return SkyCoord.from_pixel(
xp=self.x, yp=self.y, wcs=wcs,
origin=origin, mode=mode,
) | python | def to_sky(self, wcs, origin=_DEFAULT_WCS_ORIGIN, mode=_DEFAULT_WCS_MODE):
return SkyCoord.from_pixel(
xp=self.x, yp=self.y, wcs=wcs,
origin=origin, mode=mode,
) | [
"def",
"to_sky",
"(",
"self",
",",
"wcs",
",",
"origin",
"=",
"_DEFAULT_WCS_ORIGIN",
",",
"mode",
"=",
"_DEFAULT_WCS_MODE",
")",
":",
"return",
"SkyCoord",
".",
"from_pixel",
"(",
"xp",
"=",
"self",
".",
"x",
",",
"yp",
"=",
"self",
".",
"y",
",",
"wcs",
"=",
"wcs",
",",
"origin",
"=",
"origin",
",",
"mode",
"=",
"mode",
",",
")"
] | Convert this `PixCoord` to `~astropy.coordinates.SkyCoord`.
Calls :meth:`astropy.coordinates.SkyCoord.from_pixel`.
See parameter description there. | [
"Convert",
"this",
"PixCoord",
"to",
"~astropy",
".",
"coordinates",
".",
"SkyCoord",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/core/pixcoord.py#L123-L132 |
251,706 | astropy/regions | regions/core/pixcoord.py | PixCoord.from_sky | def from_sky(cls, skycoord, wcs, origin=_DEFAULT_WCS_ORIGIN, mode=_DEFAULT_WCS_MODE):
"""Create `PixCoord` from `~astropy.coordinates.SkyCoord`.
Calls :meth:`astropy.coordinates.SkyCoord.to_pixel`.
See parameter description there.
"""
x, y = skycoord.to_pixel(wcs=wcs, origin=origin, mode=mode)
return cls(x=x, y=y) | python | def from_sky(cls, skycoord, wcs, origin=_DEFAULT_WCS_ORIGIN, mode=_DEFAULT_WCS_MODE):
x, y = skycoord.to_pixel(wcs=wcs, origin=origin, mode=mode)
return cls(x=x, y=y) | [
"def",
"from_sky",
"(",
"cls",
",",
"skycoord",
",",
"wcs",
",",
"origin",
"=",
"_DEFAULT_WCS_ORIGIN",
",",
"mode",
"=",
"_DEFAULT_WCS_MODE",
")",
":",
"x",
",",
"y",
"=",
"skycoord",
".",
"to_pixel",
"(",
"wcs",
"=",
"wcs",
",",
"origin",
"=",
"origin",
",",
"mode",
"=",
"mode",
")",
"return",
"cls",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
")"
] | Create `PixCoord` from `~astropy.coordinates.SkyCoord`.
Calls :meth:`astropy.coordinates.SkyCoord.to_pixel`.
See parameter description there. | [
"Create",
"PixCoord",
"from",
"~astropy",
".",
"coordinates",
".",
"SkyCoord",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/core/pixcoord.py#L135-L142 |
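
`to_sky` and `from_sky` are inverses up to WCS round-off, so a pixel-sky-pixel round trip recovers the input. A minimal sketch, assuming `astropy` and `regions` are installed; the WCS values here are arbitrary:

```python
from astropy.wcs import WCS
from regions import PixCoord

# Small synthetic TAN projection to convert against.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [10.0, 20.0]      # reference sky position (deg)
wcs.wcs.crpix = [50.0, 50.0]      # reference pixel
wcs.wcs.cdelt = [-0.001, 0.001]   # deg / pixel

pix = PixCoord(x=10, y=30)
sky = pix.to_sky(wcs)             # -> astropy SkyCoord
pix2 = PixCoord.from_sky(sky, wcs)
print(sky)
print(pix, '->', pix2)            # the round trip recovers x, y
```
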
251,707 | astropy/regions | regions/core/pixcoord.py | PixCoord.separation | def separation(self, other):
r"""Separation to another pixel coordinate.
This is the two-dimensional cartesian separation :math:`d` with
.. math::
d = \sqrt{(x_1 - x_2) ^ 2 + (y_1 - y_2) ^ 2}
Parameters
----------
other : `PixCoord`
Other pixel coordinate
Returns
-------
separation : `numpy.array`
Separation in pixels
"""
dx = other.x - self.x
dy = other.y - self.y
return np.hypot(dx, dy) | python | def separation(self, other):
r"""Separation to another pixel coordinate.
This is the two-dimensional cartesian separation :math:`d` with
.. math::
d = \sqrt{(x_1 - x_2) ^ 2 + (y_1 - y_2) ^ 2}
Parameters
----------
other : `PixCoord`
Other pixel coordinate
Returns
-------
separation : `numpy.array`
Separation in pixels
"""
dx = other.x - self.x
dy = other.y - self.y
return np.hypot(dx, dy) | [
"def",
"separation",
"(",
"self",
",",
"other",
")",
":",
"dx",
"=",
"other",
".",
"x",
"-",
"self",
".",
"x",
"dy",
"=",
"other",
".",
"y",
"-",
"self",
".",
"y",
"return",
"np",
".",
"hypot",
"(",
"dx",
",",
"dy",
")"
] | r"""Separation to another pixel coordinate.
This is the two-dimensional cartesian separation :math:`d` with
.. math::
d = \sqrt{(x_1 - x_2) ^ 2 + (y_1 - y_2) ^ 2}
Parameters
----------
other : `PixCoord`
Other pixel coordinate
Returns
-------
separation : `numpy.array`
Separation in pixels | [
"r",
"Separation",
"to",
"another",
"pixel",
"coordinate",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/core/pixcoord.py#L144-L164 |
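
A worked check of the formula with a 3-4-5 triangle, assuming `regions` is installed:

```python
from regions import PixCoord

a = PixCoord(x=0, y=0)
b = PixCoord(x=3, y=4)
print(a.separation(b))  # 5.0, i.e. sqrt(3**2 + 4**2)
```
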
251,708 | astropy/regions | regions/_utils/wcs_helpers.py | skycoord_to_pixel_scale_angle | def skycoord_to_pixel_scale_angle(skycoord, wcs, small_offset=1 * u.arcsec):
"""
Convert a set of SkyCoord coordinates into pixel coordinates, pixel
scales, and position angles.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
Sky coordinates
wcs : `~astropy.wcs.WCS`
The WCS transformation to use
small_offset : `~astropy.units.Quantity`
A small offset to use to compute the angle
Returns
-------
pixcoord : `~regions.PixCoord`
Pixel coordinates
scale : float
        The pixel scale at each location, in pixels/degree
angle : `~astropy.units.Quantity`
The position angle of the celestial coordinate system in pixel space.
"""
# Convert to pixel coordinates
x, y = skycoord_to_pixel(skycoord, wcs, mode=skycoord_to_pixel_mode)
pixcoord = PixCoord(x=x, y=y)
# We take a point directly 'above' (in latitude) the position requested
# and convert it to pixel coordinates, then we use that to figure out the
# scale and position angle of the coordinate system at the location of
# the points.
# Find the coordinates as a representation object
r_old = skycoord.represent_as('unitspherical')
    # Add a small perturbation in the latitude direction (since longitude
# is more difficult because it is not directly an angle).
dlat = small_offset
r_new = UnitSphericalRepresentation(r_old.lon, r_old.lat + dlat)
coords_offset = skycoord.realize_frame(r_new)
# Find pixel coordinates of offset coordinates
x_offset, y_offset = skycoord_to_pixel(coords_offset, wcs,
mode=skycoord_to_pixel_mode)
# Find vector
dx = x_offset - x
dy = y_offset - y
# Find the length of the vector
scale = np.hypot(dx, dy) / dlat.to('degree').value
# Find the position angle
angle = np.arctan2(dy, dx) * u.radian
return pixcoord, scale, angle | python | def skycoord_to_pixel_scale_angle(skycoord, wcs, small_offset=1 * u.arcsec):
# Convert to pixel coordinates
x, y = skycoord_to_pixel(skycoord, wcs, mode=skycoord_to_pixel_mode)
pixcoord = PixCoord(x=x, y=y)
# We take a point directly 'above' (in latitude) the position requested
# and convert it to pixel coordinates, then we use that to figure out the
# scale and position angle of the coordinate system at the location of
# the points.
# Find the coordinates as a representation object
r_old = skycoord.represent_as('unitspherical')
    # Add a small perturbation in the latitude direction (since longitude
# is more difficult because it is not directly an angle).
dlat = small_offset
r_new = UnitSphericalRepresentation(r_old.lon, r_old.lat + dlat)
coords_offset = skycoord.realize_frame(r_new)
# Find pixel coordinates of offset coordinates
x_offset, y_offset = skycoord_to_pixel(coords_offset, wcs,
mode=skycoord_to_pixel_mode)
# Find vector
dx = x_offset - x
dy = y_offset - y
# Find the length of the vector
scale = np.hypot(dx, dy) / dlat.to('degree').value
# Find the position angle
angle = np.arctan2(dy, dx) * u.radian
return pixcoord, scale, angle | [
"def",
"skycoord_to_pixel_scale_angle",
"(",
"skycoord",
",",
"wcs",
",",
"small_offset",
"=",
"1",
"*",
"u",
".",
"arcsec",
")",
":",
"# Convert to pixel coordinates",
"x",
",",
"y",
"=",
"skycoord_to_pixel",
"(",
"skycoord",
",",
"wcs",
",",
"mode",
"=",
"skycoord_to_pixel_mode",
")",
"pixcoord",
"=",
"PixCoord",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
")",
"# We take a point directly 'above' (in latitude) the position requested",
"# and convert it to pixel coordinates, then we use that to figure out the",
"# scale and position angle of the coordinate system at the location of",
"# the points.",
"# Find the coordinates as a representation object",
"r_old",
"=",
"skycoord",
".",
"represent_as",
"(",
"'unitspherical'",
")",
"# Add a a small perturbation in the latitude direction (since longitude",
"# is more difficult because it is not directly an angle).",
"dlat",
"=",
"small_offset",
"r_new",
"=",
"UnitSphericalRepresentation",
"(",
"r_old",
".",
"lon",
",",
"r_old",
".",
"lat",
"+",
"dlat",
")",
"coords_offset",
"=",
"skycoord",
".",
"realize_frame",
"(",
"r_new",
")",
"# Find pixel coordinates of offset coordinates",
"x_offset",
",",
"y_offset",
"=",
"skycoord_to_pixel",
"(",
"coords_offset",
",",
"wcs",
",",
"mode",
"=",
"skycoord_to_pixel_mode",
")",
"# Find vector",
"dx",
"=",
"x_offset",
"-",
"x",
"dy",
"=",
"y_offset",
"-",
"y",
"# Find the length of the vector",
"scale",
"=",
"np",
".",
"hypot",
"(",
"dx",
",",
"dy",
")",
"/",
"dlat",
".",
"to",
"(",
"'degree'",
")",
".",
"value",
"# Find the position angle",
"angle",
"=",
"np",
".",
"arctan2",
"(",
"dy",
",",
"dx",
")",
"*",
"u",
".",
"radian",
"return",
"pixcoord",
",",
"scale",
",",
"angle"
] | Convert a set of SkyCoord coordinates into pixel coordinates, pixel
scales, and position angles.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
Sky coordinates
wcs : `~astropy.wcs.WCS`
The WCS transformation to use
small_offset : `~astropy.units.Quantity`
A small offset to use to compute the angle
Returns
-------
pixcoord : `~regions.PixCoord`
Pixel coordinates
scale : float
    The pixel scale at each location, in pixels/degree
angle : `~astropy.units.Quantity`
The position angle of the celestial coordinate system in pixel space. | [
"Convert",
"a",
"set",
"of",
"SkyCoord",
"coordinates",
"into",
"pixel",
"coordinates",
"pixel",
"scales",
"and",
"position",
"angles",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/_utils/wcs_helpers.py#L13-L69 |
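
Because `scale` divides a pixel distance by the angular offset in degrees, it comes out in pixels per degree. A hedged probe of the helper with a synthetic WCS; note the import is from a private module, so the path may change between versions:

```python
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from regions._utils.wcs_helpers import skycoord_to_pixel_scale_angle

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [10.0, 20.0]
wcs.wcs.crpix = [50.0, 50.0]
wcs.wcs.cdelt = [-0.001, 0.001]   # 0.001 deg/pixel -> 1000 pixels/deg

coord = SkyCoord(10.0, 20.0, unit='deg')
pixcoord, scale, angle = skycoord_to_pixel_scale_angle(coord, wcs)
print(scale)   # ~1000 (pixels per degree)
print(angle)   # ~pi/2 rad: '+latitude' maps onto the +y pixel axis here
```
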
251,709 | astropy/regions | regions/_utils/wcs_helpers.py | assert_angle | def assert_angle(name, q):
"""
Check that ``q`` is an angular `~astropy.units.Quantity`.
"""
if isinstance(q, u.Quantity):
if q.unit.physical_type == 'angle':
pass
else:
raise ValueError("{0} should have angular units".format(name))
else:
raise TypeError("{0} should be a Quantity instance".format(name)) | python | def assert_angle(name, q):
if isinstance(q, u.Quantity):
if q.unit.physical_type == 'angle':
pass
else:
raise ValueError("{0} should have angular units".format(name))
else:
raise TypeError("{0} should be a Quantity instance".format(name)) | [
"def",
"assert_angle",
"(",
"name",
",",
"q",
")",
":",
"if",
"isinstance",
"(",
"q",
",",
"u",
".",
"Quantity",
")",
":",
"if",
"q",
".",
"unit",
".",
"physical_type",
"==",
"'angle'",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"\"{0} should have angular units\"",
".",
"format",
"(",
"name",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"{0} should be a Quantity instance\"",
".",
"format",
"(",
"name",
")",
")"
] | Check that ``q`` is an angular `~astropy.units.Quantity`. | [
"Check",
"that",
"q",
"is",
"an",
"angular",
"~astropy",
".",
"units",
".",
"Quantity",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/_utils/wcs_helpers.py#L86-L96 |
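
A quick check of both failure modes, assuming `astropy` is installed (the helper lives in the same private module as above):

```python
import astropy.units as u
from regions._utils.wcs_helpers import assert_angle

assert_angle('radius', 3 * u.deg)       # passes silently
try:
    assert_angle('radius', 3 * u.m)     # a Quantity, but the wrong physical type
except ValueError as e:
    print(e)                            # radius should have angular units
try:
    assert_angle('radius', 3.0)         # not a Quantity at all
except TypeError as e:
    print(e)                            # radius should be a Quantity instance
```
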
251,710 | astropy/regions | ah_bootstrap.py | _silence | def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr | python | def _silence():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr | [
"def",
"_silence",
"(",
")",
":",
"old_stdout",
"=",
"sys",
".",
"stdout",
"old_stderr",
"=",
"sys",
".",
"stderr",
"sys",
".",
"stdout",
"=",
"_DummyFile",
"(",
")",
"sys",
".",
"stderr",
"=",
"_DummyFile",
"(",
")",
"exception_occurred",
"=",
"False",
"try",
":",
"yield",
"except",
":",
"exception_occurred",
"=",
"True",
"# Go ahead and clean up so that exception handling can work normally",
"sys",
".",
"stdout",
"=",
"old_stdout",
"sys",
".",
"stderr",
"=",
"old_stderr",
"raise",
"if",
"not",
"exception_occurred",
":",
"sys",
".",
"stdout",
"=",
"old_stdout",
"sys",
".",
"stderr",
"=",
"old_stderr"
] | A context manager that silences sys.stdout and sys.stderr. | [
"A",
"context",
"manager",
"that",
"silences",
"sys",
".",
"stdout",
"and",
"sys",
".",
"stderr",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L914-L933 |
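
The bare `yield` means this is a generator-based context manager, so in `ah_bootstrap` it is presumably wrapped with `contextlib.contextmanager` (the decorator is not visible in the extracted snippet). A self-contained sketch of the same pattern, using `try/finally` in place of the explicit `exception_occurred` flag:

```python
import io
import sys
from contextlib import contextmanager

@contextmanager
def silence():
    """Temporarily swallow everything written to stdout/stderr."""
    old_stdout, old_stderr = sys.stdout, sys.stderr
    sys.stdout = sys.stderr = io.StringIO()
    try:
        yield
    finally:
        # Restore the real streams whether or not an exception occurred.
        sys.stdout, sys.stderr = old_stdout, old_stderr

with silence():
    print("this is never shown")
print("output restored")
```

`try/finally` collapses the two restore paths of the original into one; the behavior is the same.
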
251,711 | astropy/regions | ah_bootstrap.py | use_astropy_helpers | def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `False` disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `True`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run() | python | def use_astropy_helpers(**kwargs):
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run() | [
"def",
"use_astropy_helpers",
"(",
"*",
"*",
"kwargs",
")",
":",
"global",
"BOOTSTRAPPER",
"config",
"=",
"BOOTSTRAPPER",
".",
"config",
"config",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"# Create a new bootstrapper with the updated configuration and run it",
"BOOTSTRAPPER",
"=",
"_Bootstrapper",
"(",
"*",
"*",
"config",
")",
"BOOTSTRAPPER",
".",
"run",
"(",
")"
] | Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
    ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
    distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `False` disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `True`. | [
"Ensure",
"that",
"the",
"astropy_helpers",
"module",
"is",
"available",
"and",
"is",
"importable",
".",
"This",
"supports",
"automatic",
"submodule",
"initialization",
"if",
"astropy_helpers",
"is",
"included",
"in",
"a",
"project",
"as",
"a",
"git",
"submodule",
"or",
"will",
"download",
"it",
"from",
"PyPI",
"if",
"necessary",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L959-L1022 |
251,712 | astropy/regions | ah_bootstrap.py | _Bootstrapper.config | def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname)) | python | def config(self):
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname)) | [
"def",
"config",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"(",
"optname",
",",
"getattr",
"(",
"self",
",",
"optname",
")",
")",
"for",
"optname",
",",
"_",
"in",
"CFG_OPTIONS",
"if",
"hasattr",
"(",
"self",
",",
"optname",
")",
")"
] | A `dict` containing the options this `_Bootstrapper` was configured
with. | [
"A",
"dict",
"containing",
"the",
"options",
"this",
"_Bootstrapper",
"was",
"configured",
"with",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L393-L400 |
251,713 | astropy/regions | ah_bootstrap.py | _Bootstrapper.get_local_directory_dist | def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist | python | def get_local_directory_dist(self):
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist | [
"def",
"get_local_directory_dist",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"path",
")",
":",
"return",
"log",
".",
"info",
"(",
"'Attempting to import astropy_helpers from {0} {1!r}'",
".",
"format",
"(",
"'submodule'",
"if",
"self",
".",
"is_submodule",
"else",
"'directory'",
",",
"self",
".",
"path",
")",
")",
"dist",
"=",
"self",
".",
"_directory_import",
"(",
")",
"if",
"dist",
"is",
"None",
":",
"log",
".",
"warn",
"(",
"'The requested path {0!r} for importing {1} does not '",
"'exist, or does not contain a copy of the {1} '",
"'package.'",
".",
"format",
"(",
"self",
".",
"path",
",",
"PACKAGE_NAME",
")",
")",
"elif",
"self",
".",
"auto_upgrade",
"and",
"not",
"self",
".",
"is_submodule",
":",
"# A version of astropy-helpers was found on the available path, but",
"# check to see if a bugfix release is available on PyPI",
"upgrade",
"=",
"self",
".",
"_do_upgrade",
"(",
"dist",
")",
"if",
"upgrade",
"is",
"not",
"None",
":",
"dist",
"=",
"upgrade",
"return",
"dist"
] | Handle importing a vendored package from a subdirectory of the source
distribution. | [
"Handle",
"importing",
"a",
"vendored",
"package",
"from",
"a",
"subdirectory",
"of",
"the",
"source",
"distribution",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L402-L429 |
251,714 | astropy/regions | ah_bootstrap.py | _Bootstrapper.get_local_file_dist | def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist | python | def get_local_file_dist(self):
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist | [
"def",
"get_local_file_dist",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"path",
")",
":",
"return",
"log",
".",
"info",
"(",
"'Attempting to unpack and import astropy_helpers from '",
"'{0!r}'",
".",
"format",
"(",
"self",
".",
"path",
")",
")",
"try",
":",
"dist",
"=",
"self",
".",
"_do_download",
"(",
"find_links",
"=",
"[",
"self",
".",
"path",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"DEBUG",
":",
"raise",
"log",
".",
"warn",
"(",
"'Failed to import {0} from the specified archive {1!r}: '",
"'{2}'",
".",
"format",
"(",
"PACKAGE_NAME",
",",
"self",
".",
"path",
",",
"str",
"(",
"e",
")",
")",
")",
"dist",
"=",
"None",
"if",
"dist",
"is",
"not",
"None",
"and",
"self",
".",
"auto_upgrade",
":",
"# A version of astropy-helpers was found on the available path, but",
"# check to see if a bugfix release is available on PyPI",
"upgrade",
"=",
"self",
".",
"_do_upgrade",
"(",
"dist",
")",
"if",
"upgrade",
"is",
"not",
"None",
":",
"dist",
"=",
"upgrade",
"return",
"dist"
] | Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive. | [
"Handle",
"importing",
"from",
"a",
"source",
"archive",
";",
"this",
"also",
"uses",
"setup_requires",
"but",
"points",
"easy_install",
"directly",
"to",
"the",
"source",
"archive",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L431-L461 |
251,715 | astropy/regions | ah_bootstrap.py | _Bootstrapper._directory_import | def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the distribution if the import succeeded, and None otherwise.
"""
# Return True on success, False on failure but download is allowed, and
# otherwise raise SystemExit
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
# We use subprocess instead of run_setup from setuptools to
# avoid segmentation faults - see the following for more details:
# https://github.com/cython/cython/issues/2104
sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist | python | def _directory_import(self):
# Return True on success, False on failure but download is allowed, and
# otherwise raise SystemExit
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
# We use subprocess instead of run_setup from setuptools to
# avoid segmentation faults - see the following for more details:
# https://github.com/cython/cython/issues/2104
sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist | [
"def",
"_directory_import",
"(",
"self",
")",
":",
"# Return True on success, False on failure but download is allowed, and",
"# otherwise raise SystemExit",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"path",
")",
"# Use an empty WorkingSet rather than the man",
"# pkg_resources.working_set, since on older versions of setuptools this",
"# will invoke a VersionConflict when trying to install an upgrade",
"ws",
"=",
"pkg_resources",
".",
"WorkingSet",
"(",
"[",
"]",
")",
"ws",
".",
"add_entry",
"(",
"path",
")",
"dist",
"=",
"ws",
".",
"by_key",
".",
"get",
"(",
"DIST_NAME",
")",
"if",
"dist",
"is",
"None",
":",
"# We didn't find an egg-info/dist-info in the given path, but if a",
"# setup.py exists we can generate it",
"setup_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'setup.py'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"setup_py",
")",
":",
"# We use subprocess instead of run_setup from setuptools to",
"# avoid segmentation faults - see the following for more details:",
"# https://github.com/cython/cython/issues/2104",
"sp",
".",
"check_output",
"(",
"[",
"sys",
".",
"executable",
",",
"'setup.py'",
",",
"'egg_info'",
"]",
",",
"cwd",
"=",
"path",
")",
"for",
"dist",
"in",
"pkg_resources",
".",
"find_distributions",
"(",
"path",
",",
"True",
")",
":",
"# There should be only one...",
"return",
"dist",
"return",
"dist"
] | Import astropy_helpers from the given path, which will be added to
sys.path.
Returns the distribution if the import succeeded, and None otherwise. | [
"Import",
"astropy_helpers",
"from",
"the",
"given",
"path",
"which",
"will",
"be",
"added",
"to",
"sys",
".",
"path",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L486-L519 |
251,716 | astropy/regions | ah_bootstrap.py | _Bootstrapper._check_submodule | def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git() | python | def _check_submodule(self):
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git() | [
"def",
"_check_submodule",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"path",
"is",
"None",
"or",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"path",
")",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"path",
")",
")",
")",
":",
"return",
"False",
"if",
"self",
".",
"use_git",
":",
"return",
"self",
".",
"_check_submodule_using_git",
"(",
")",
"else",
":",
"return",
"self",
".",
"_check_submodule_no_git",
"(",
")"
] | Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details. | [
"Check",
"if",
"the",
"given",
"path",
"is",
"a",
"git",
"submodule",
"."
] | 452d962c417e4ff20d1268f99535c6ff89c83437 | https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L607-L622 |
251,717 | EconForge/dolo | dolo/numeric/tensor.py | sdot | def sdot( U, V ):
'''
    Computes the tensor product reducing last dimension of U with first dimension of V.
For matrices, it is equal to regular matrix product.
'''
nu = U.ndim
#nv = V.ndim
return np.tensordot( U, V, axes=(nu-1,0) ) | python | def sdot( U, V ):
'''
    Computes the tensor product reducing last dimension of U with first dimension of V.
For matrices, it is equal to regular matrix product.
'''
nu = U.ndim
#nv = V.ndim
return np.tensordot( U, V, axes=(nu-1,0) ) | [
"def",
"sdot",
"(",
"U",
",",
"V",
")",
":",
"nu",
"=",
"U",
".",
"ndim",
"#nv = V.ndim",
"return",
"np",
".",
"tensordot",
"(",
"U",
",",
"V",
",",
"axes",
"=",
"(",
"nu",
"-",
"1",
",",
"0",
")",
")"
] | Computes the tensor product reducing last dimension of U with first dimension of V.
For matrices, it is equal to regular matrix product. | [
"Computes",
"the",
"tensorproduct",
"reducing",
"last",
"dimensoin",
"of",
"U",
"with",
"first",
"dimension",
"of",
"V",
".",
"For",
"matrices",
"it",
"is",
"equal",
"to",
"regular",
"matrix",
"product",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/tensor.py#L44-L51 |
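
Two quick checks of the contraction, assuming only `numpy`: for 2-D arrays `sdot` reproduces the ordinary matrix product, and for higher-rank inputs it contracts the last axis of `U` with the first axis of `V`:

```python
import numpy as np

def sdot(U, V):
    # Same one-liner as above: contract U's last axis with V's first.
    return np.tensordot(U, V, axes=(U.ndim - 1, 0))

U = np.arange(6.0).reshape(2, 3)
V = np.arange(12.0).reshape(3, 4)
assert np.allclose(sdot(U, V), U @ V)   # matrix-product case

T = np.ones((2, 3, 4))
W = np.ones((4, 5))
print(sdot(T, W).shape)                 # (2, 3, 5)
```
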
251,718 | EconForge/dolo | dolo/numeric/interpolation/smolyak.py | SmolyakBasic.set_values | def set_values(self,x):
""" Updates self.theta parameter. No returns values"""
x = numpy.atleast_2d(x)
x = x.real # ahem
C_inv = self.__C_inv__
theta = numpy.dot( x, C_inv )
self.theta = theta
return theta | python | def set_values(self,x):
x = numpy.atleast_2d(x)
x = x.real # ahem
C_inv = self.__C_inv__
theta = numpy.dot( x, C_inv )
self.theta = theta
return theta | [
"def",
"set_values",
"(",
"self",
",",
"x",
")",
":",
"x",
"=",
"numpy",
".",
"atleast_2d",
"(",
"x",
")",
"x",
"=",
"x",
".",
"real",
"# ahem",
"C_inv",
"=",
"self",
".",
"__C_inv__",
"theta",
"=",
"numpy",
".",
"dot",
"(",
"x",
",",
"C_inv",
")",
"self",
".",
"theta",
"=",
"theta",
"return",
"theta"
] | Updates self.theta parameter. Returns the new theta. | [
"Updates",
"self",
".",
"theta",
"parameter",
".",
"No",
"returns",
"values"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/interpolation/smolyak.py#L256-L267 |
251,719 | EconForge/dolo | dolo/numeric/discretization/discretization.py | tauchen | def tauchen(N, mu, rho, sigma, m=2):
"""
Approximate an AR1 process by a finite markov chain using Tauchen's method.
:param N: scalar, number of nodes for Z
:param mu: scalar, unconditional mean of process
:param rho: scalar
:param sigma: scalar, std. dev. of epsilons
:param m: max +- std. devs.
:returns: Z, N*1 vector, nodes for Z. Zprob, N*N matrix, transition probabilities
    SJB: This is a port of Martin Floden's 1996 Matlab code to implement Tauchen's 1986 Economics Letters method. The following comments are Floden's. Finds a Markov chain whose sample paths approximate those of the AR(1) process z(t+1) = (1-rho)*mu + rho * z(t) + eps(t+1) where eps are normal with stddev sigma.
"""
Z = np.zeros((N,1))
Zprob = np.zeros((N,N))
a = (1-rho)*mu
Z[-1] = m * math.sqrt(sigma**2 / (1 - (rho**2)))
Z[0] = -1 * Z[-1]
zstep = (Z[-1] - Z[0]) / (N - 1)
for i in range(1,N):
Z[i] = Z[0] + zstep * (i)
Z = Z + a / (1-rho)
for j in range(0,N):
for k in range(0,N):
if k == 0:
Zprob[j,k] = sp.stats.norm.cdf((Z[0] - a - rho * Z[j] + zstep / 2) / sigma)
elif k == (N-1):
Zprob[j,k] = 1 - sp.stats.norm.cdf((Z[-1] - a - rho * Z[j] - zstep / 2) / sigma)
else:
up = sp.stats.norm.cdf((Z[k] - a - rho * Z[j] + zstep / 2) / sigma)
down = sp.stats.norm.cdf( (Z[k] - a - rho * Z[j] - zstep / 2) / sigma)
Zprob[j,k] = up - down
return( (Z, Zprob) ) | python | def tauchen(N, mu, rho, sigma, m=2):
Z = np.zeros((N,1))
Zprob = np.zeros((N,N))
a = (1-rho)*mu
Z[-1] = m * math.sqrt(sigma**2 / (1 - (rho**2)))
Z[0] = -1 * Z[-1]
zstep = (Z[-1] - Z[0]) / (N - 1)
for i in range(1,N):
Z[i] = Z[0] + zstep * (i)
Z = Z + a / (1-rho)
for j in range(0,N):
for k in range(0,N):
if k == 0:
Zprob[j,k] = sp.stats.norm.cdf((Z[0] - a - rho * Z[j] + zstep / 2) / sigma)
elif k == (N-1):
Zprob[j,k] = 1 - sp.stats.norm.cdf((Z[-1] - a - rho * Z[j] - zstep / 2) / sigma)
else:
up = sp.stats.norm.cdf((Z[k] - a - rho * Z[j] + zstep / 2) / sigma)
down = sp.stats.norm.cdf( (Z[k] - a - rho * Z[j] - zstep / 2) / sigma)
Zprob[j,k] = up - down
return( (Z, Zprob) ) | [
"def",
"tauchen",
"(",
"N",
",",
"mu",
",",
"rho",
",",
"sigma",
",",
"m",
"=",
"2",
")",
":",
"Z",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"1",
")",
")",
"Zprob",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"N",
")",
")",
"a",
"=",
"(",
"1",
"-",
"rho",
")",
"*",
"mu",
"Z",
"[",
"-",
"1",
"]",
"=",
"m",
"*",
"math",
".",
"sqrt",
"(",
"sigma",
"**",
"2",
"/",
"(",
"1",
"-",
"(",
"rho",
"**",
"2",
")",
")",
")",
"Z",
"[",
"0",
"]",
"=",
"-",
"1",
"*",
"Z",
"[",
"-",
"1",
"]",
"zstep",
"=",
"(",
"Z",
"[",
"-",
"1",
"]",
"-",
"Z",
"[",
"0",
"]",
")",
"/",
"(",
"N",
"-",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"N",
")",
":",
"Z",
"[",
"i",
"]",
"=",
"Z",
"[",
"0",
"]",
"+",
"zstep",
"*",
"(",
"i",
")",
"Z",
"=",
"Z",
"+",
"a",
"/",
"(",
"1",
"-",
"rho",
")",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"N",
")",
":",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"N",
")",
":",
"if",
"k",
"==",
"0",
":",
"Zprob",
"[",
"j",
",",
"k",
"]",
"=",
"sp",
".",
"stats",
".",
"norm",
".",
"cdf",
"(",
"(",
"Z",
"[",
"0",
"]",
"-",
"a",
"-",
"rho",
"*",
"Z",
"[",
"j",
"]",
"+",
"zstep",
"/",
"2",
")",
"/",
"sigma",
")",
"elif",
"k",
"==",
"(",
"N",
"-",
"1",
")",
":",
"Zprob",
"[",
"j",
",",
"k",
"]",
"=",
"1",
"-",
"sp",
".",
"stats",
".",
"norm",
".",
"cdf",
"(",
"(",
"Z",
"[",
"-",
"1",
"]",
"-",
"a",
"-",
"rho",
"*",
"Z",
"[",
"j",
"]",
"-",
"zstep",
"/",
"2",
")",
"/",
"sigma",
")",
"else",
":",
"up",
"=",
"sp",
".",
"stats",
".",
"norm",
".",
"cdf",
"(",
"(",
"Z",
"[",
"k",
"]",
"-",
"a",
"-",
"rho",
"*",
"Z",
"[",
"j",
"]",
"+",
"zstep",
"/",
"2",
")",
"/",
"sigma",
")",
"down",
"=",
"sp",
".",
"stats",
".",
"norm",
".",
"cdf",
"(",
"(",
"Z",
"[",
"k",
"]",
"-",
"a",
"-",
"rho",
"*",
"Z",
"[",
"j",
"]",
"-",
"zstep",
"/",
"2",
")",
"/",
"sigma",
")",
"Zprob",
"[",
"j",
",",
"k",
"]",
"=",
"up",
"-",
"down",
"return",
"(",
"(",
"Z",
",",
"Zprob",
")",
")"
] | Approximate an AR1 process by a finite markov chain using Tauchen's method.
:param N: scalar, number of nodes for Z
:param mu: scalar, unconditional mean of process
:param rho: scalar
:param sigma: scalar, std. dev. of epsilons
:param m: max +- std. devs.
:returns: Z, N*1 vector, nodes for Z. Zprob, N*N matrix, transition probabilities
SJB: This is a port of Martin Floden's 1996 Matlab code to implement Tauchen's 1986 Economics Letters method. The following comments are Floden's. Finds a Markov chain whose sample paths approximate those of the AR(1) process z(t+1) = (1-rho)*mu + rho * z(t) + eps(t+1) where eps are normal with stddev sigma. | [
"Approximate",
"an",
"AR1",
"process",
"by",
"a",
"finite",
"markov",
"chain",
"using",
"Tauchen",
"s",
"method",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/discretization.py#L13-L50 |
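
A usage sketch, assuming the `tauchen` function above together with its module imports (`numpy as np`, `scipy as sp` with `scipy.stats` available, and `math`):

```python
# Discretize z' = 0.9 z + eps, eps ~ N(0, 0.01**2), on 5 nodes.
Z, Zprob = tauchen(N=5, mu=0.0, rho=0.9, sigma=0.01, m=2)

print(Z.ravel())          # 5 equally spaced nodes centered on mu
print(Zprob.sum(axis=1))  # each row sums to 1: a stochastic matrix
```
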
251,720 | EconForge/dolo | dolo/numeric/discretization/discretization.py | rouwenhorst | def rouwenhorst(rho, sigma, N):
"""
Approximate an AR1 process by a finite markov chain using Rouwenhorst's method.
:param rho: autocorrelation of the AR1 process
:param sigma: conditional standard deviation of the AR1 process
:param N: number of states
:return [nodes, P]: equally spaced nodes and transition matrix
"""
from numpy import sqrt, linspace, array,zeros
sigma = float(sigma)
if N == 1:
nodes = array([0.0])
transitions = array([[1.0]])
return [nodes, transitions]
p = (rho+1)/2
q = p
nu = sqrt( (N-1)/(1-rho**2) )*sigma
nodes = linspace( -nu, nu, N)
sig_a = sigma
n = 1
# mat0 = array( [[1]] )
mat0 = array([[p,1-p],[1-q,q]])
if N == 2:
return [nodes,mat0]
for n in range(3,N+1):
mat = zeros( (n,n) )
mat_A = mat.copy()
mat_B = mat.copy()
mat_C = mat.copy()
mat_D = mat.copy()
mat_A[:-1,:-1] = mat0
mat_B[:-1,1:] = mat0
mat_C[1:,:-1] = mat0
mat_D[1:,1:] = mat0
mat0 = p*mat_A + (1-p)*mat_B + (1-q)*mat_C + q*mat_D
mat0[1:-1,:] = mat0[1:-1,:]/2
P = mat0
return [nodes, P] | python | def rouwenhorst(rho, sigma, N):
from numpy import sqrt, linspace, array,zeros
sigma = float(sigma)
if N == 1:
nodes = array([0.0])
transitions = array([[1.0]])
return [nodes, transitions]
p = (rho+1)/2
q = p
nu = sqrt( (N-1)/(1-rho**2) )*sigma
nodes = linspace( -nu, nu, N)
sig_a = sigma
n = 1
# mat0 = array( [[1]] )
mat0 = array([[p,1-p],[1-q,q]])
if N == 2:
return [nodes,mat0]
for n in range(3,N+1):
mat = zeros( (n,n) )
mat_A = mat.copy()
mat_B = mat.copy()
mat_C = mat.copy()
mat_D = mat.copy()
mat_A[:-1,:-1] = mat0
mat_B[:-1,1:] = mat0
mat_C[1:,:-1] = mat0
mat_D[1:,1:] = mat0
mat0 = p*mat_A + (1-p)*mat_B + (1-q)*mat_C + q*mat_D
mat0[1:-1,:] = mat0[1:-1,:]/2
P = mat0
return [nodes, P] | [
"def",
"rouwenhorst",
"(",
"rho",
",",
"sigma",
",",
"N",
")",
":",
"from",
"numpy",
"import",
"sqrt",
",",
"linspace",
",",
"array",
",",
"zeros",
"sigma",
"=",
"float",
"(",
"sigma",
")",
"if",
"N",
"==",
"1",
":",
"nodes",
"=",
"array",
"(",
"[",
"0.0",
"]",
")",
"transitions",
"=",
"array",
"(",
"[",
"[",
"1.0",
"]",
"]",
")",
"return",
"[",
"nodes",
",",
"transitions",
"]",
"p",
"=",
"(",
"rho",
"+",
"1",
")",
"/",
"2",
"q",
"=",
"p",
"nu",
"=",
"sqrt",
"(",
"(",
"N",
"-",
"1",
")",
"/",
"(",
"1",
"-",
"rho",
"**",
"2",
")",
")",
"*",
"sigma",
"nodes",
"=",
"linspace",
"(",
"-",
"nu",
",",
"nu",
",",
"N",
")",
"sig_a",
"=",
"sigma",
"n",
"=",
"1",
"# mat0 = array( [[1]] )",
"mat0",
"=",
"array",
"(",
"[",
"[",
"p",
",",
"1",
"-",
"p",
"]",
",",
"[",
"1",
"-",
"q",
",",
"q",
"]",
"]",
")",
"if",
"N",
"==",
"2",
":",
"return",
"[",
"nodes",
",",
"mat0",
"]",
"for",
"n",
"in",
"range",
"(",
"3",
",",
"N",
"+",
"1",
")",
":",
"mat",
"=",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"mat_A",
"=",
"mat",
".",
"copy",
"(",
")",
"mat_B",
"=",
"mat",
".",
"copy",
"(",
")",
"mat_C",
"=",
"mat",
".",
"copy",
"(",
")",
"mat_D",
"=",
"mat",
".",
"copy",
"(",
")",
"mat_A",
"[",
":",
"-",
"1",
",",
":",
"-",
"1",
"]",
"=",
"mat0",
"mat_B",
"[",
":",
"-",
"1",
",",
"1",
":",
"]",
"=",
"mat0",
"mat_C",
"[",
"1",
":",
",",
":",
"-",
"1",
"]",
"=",
"mat0",
"mat_D",
"[",
"1",
":",
",",
"1",
":",
"]",
"=",
"mat0",
"mat0",
"=",
"p",
"*",
"mat_A",
"+",
"(",
"1",
"-",
"p",
")",
"*",
"mat_B",
"+",
"(",
"1",
"-",
"q",
")",
"*",
"mat_C",
"+",
"q",
"*",
"mat_D",
"mat0",
"[",
"1",
":",
"-",
"1",
",",
":",
"]",
"=",
"mat0",
"[",
"1",
":",
"-",
"1",
",",
":",
"]",
"/",
"2",
"P",
"=",
"mat0",
"return",
"[",
"nodes",
",",
"P",
"]"
] | Approximate an AR1 process by a finite markov chain using Rouwenhorst's method.
:param rho: autocorrelation of the AR1 process
:param sigma: conditional standard deviation of the AR1 process
:param N: number of states
:return [nodes, P]: equally spaced nodes and transition matrix | [
"Approximate",
"an",
"AR1",
"process",
"by",
"a",
"finite",
"markov",
"chain",
"using",
"Rouwenhorst",
"s",
"method",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/discretization.py#L53-L97 |
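
Unlike Tauchen's method, the Rouwenhorst construction matches the unconditional variance sigma^2 / (1 - rho^2) of the AR(1) exactly, which is why it is usually preferred for highly persistent processes. A usage sketch, assuming the `rouwenhorst` function above:

```python
nodes, P = rouwenhorst(rho=0.99, sigma=0.01, N=5)

print(nodes)            # 5 equally spaced nodes on [-nu, nu]
print(P.sum(axis=1))    # each row sums to 1
```
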
251,721 | EconForge/dolo | dolo/numeric/discretization/discretization.py | tensor_markov | def tensor_markov( *args ):
"""Computes the product of two independent markov chains.
:param m1: a tuple containing the nodes and the transition matrix of the first chain
:param m2: a tuple containing the nodes and the transition matrix of the second chain
:return: a tuple containing the nodes and the transition matrix of the product chain
"""
if len(args) > 2:
m1 = args[0]
m2 = args[1]
tail = args[2:]
prod = tensor_markov(m1,m2)
        return tensor_markov( prod, *tail )
elif len(args) == 2:
m1,m2 = args
n1, t1 = m1
n2, t2 = m2
n1 = np.array(n1, dtype=float)
n2 = np.array(n2, dtype=float)
t1 = np.array(t1, dtype=float)
t2 = np.array(t2, dtype=float)
assert(n1.shape[0] == t1.shape[0] == t1.shape[1])
assert(n2.shape[0] == t2.shape[0] == t2.shape[1])
t = np.kron(t1, t2)
p = t1.shape[0]
q = t2.shape[0]
np.tile( n2, (1,p))
# n = np.row_stack([
# np.repeat(n1, q, axis=1),
# np.tile( n2, (1,p))
# ])
n = np.column_stack([
np.repeat(n1, q, axis=0),
np.tile( n2, (p,1))
])
return [n,t]
else:
raise Exception("Incorrect number of arguments. Expected at least 2. Found {}.".format(len(args))) | python | def tensor_markov( *args ):
if len(args) > 2:
m1 = args[0]
m2 = args[1]
tail = args[2:]
prod = tensor_markov(m1,m2)
return tensor_markov( prod, *tail )  # unpack the remaining chains; passing the tuple itself broke the 2-argument branch
elif len(args) == 2:
m1,m2 = args
n1, t1 = m1
n2, t2 = m2
n1 = np.array(n1, dtype=float)
n2 = np.array(n2, dtype=float)
t1 = np.array(t1, dtype=float)
t2 = np.array(t2, dtype=float)
assert(n1.shape[0] == t1.shape[0] == t1.shape[1])
assert(n2.shape[0] == t2.shape[0] == t2.shape[1])
t = np.kron(t1, t2)
p = t1.shape[0]
q = t2.shape[0]
np.tile( n2, (1,p))
# n = np.row_stack([
# np.repeat(n1, q, axis=1),
# np.tile( n2, (1,p))
# ])
n = np.column_stack([
np.repeat(n1, q, axis=0),
np.tile( n2, (p,1))
])
return [n,t]
else:
raise Exception("Incorrect number of arguments. Expected at least 2. Found {}.".format(len(args))) | [
"def",
"tensor_markov",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"2",
":",
"m1",
"=",
"args",
"[",
"0",
"]",
"m2",
"=",
"args",
"[",
"1",
"]",
"tail",
"=",
"args",
"[",
"2",
":",
"]",
"prod",
"=",
"tensor_markov",
"(",
"m1",
",",
"m2",
")",
"return",
"tensor_markov",
"(",
"prod",
",",
"tail",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"2",
":",
"m1",
",",
"m2",
"=",
"args",
"n1",
",",
"t1",
"=",
"m1",
"n2",
",",
"t2",
"=",
"m2",
"n1",
"=",
"np",
".",
"array",
"(",
"n1",
",",
"dtype",
"=",
"float",
")",
"n2",
"=",
"np",
".",
"array",
"(",
"n2",
",",
"dtype",
"=",
"float",
")",
"t1",
"=",
"np",
".",
"array",
"(",
"t1",
",",
"dtype",
"=",
"float",
")",
"t2",
"=",
"np",
".",
"array",
"(",
"t2",
",",
"dtype",
"=",
"float",
")",
"assert",
"(",
"n1",
".",
"shape",
"[",
"0",
"]",
"==",
"t1",
".",
"shape",
"[",
"0",
"]",
"==",
"t1",
".",
"shape",
"[",
"1",
"]",
")",
"assert",
"(",
"n2",
".",
"shape",
"[",
"0",
"]",
"==",
"t2",
".",
"shape",
"[",
"0",
"]",
"==",
"t2",
".",
"shape",
"[",
"1",
"]",
")",
"t",
"=",
"np",
".",
"kron",
"(",
"t1",
",",
"t2",
")",
"p",
"=",
"t1",
".",
"shape",
"[",
"0",
"]",
"q",
"=",
"t2",
".",
"shape",
"[",
"0",
"]",
"np",
".",
"tile",
"(",
"n2",
",",
"(",
"1",
",",
"p",
")",
")",
"# n = np.row_stack([",
"# np.repeat(n1, q, axis=1),",
"# np.tile( n2, (1,p))",
"# ])",
"n",
"=",
"np",
".",
"column_stack",
"(",
"[",
"np",
".",
"repeat",
"(",
"n1",
",",
"q",
",",
"axis",
"=",
"0",
")",
",",
"np",
".",
"tile",
"(",
"n2",
",",
"(",
"p",
",",
"1",
")",
")",
"]",
")",
"return",
"[",
"n",
",",
"t",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"Incorrect number of arguments. Expected at least 2. Found {}.\"",
".",
"format",
"(",
"len",
"(",
"args",
")",
")",
")"
] | Computes the product of two independent markov chains.
:param m1: a tuple containing the nodes and the transition matrix of the first chain
:param m2: a tuple containing the nodes and the transition matrix of the second chain
:return: a tuple containing the nodes and the transition matrix of the product chain | [
"Computes",
"the",
"product",
"of",
"two",
"independent",
"markov",
"chains",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/discretization.py#L155-L201 |
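A sketch combining two discretized chains with `tensor_markov` (illustrative; the node arrays are reshaped to `(n_states, n_dims)` because the `repeat`/`column_stack` logic above expects 2-D nodes):
import numpy as np
from dolo.numeric.discretization.discretization import rouwenhorst, tensor_markov  # assumed import paths

n1, P1 = rouwenhorst(0.9, 0.1, 3)
n2, P2 = rouwenhorst(0.5, 0.2, 2)
nodes, P = tensor_markov((n1[:, None], P1), (n2[:, None], P2))
print(nodes.shape, P.shape)             # (6, 2) and (6, 6): 3*2 joint states
assert np.allclose(P.sum(axis=1), 1.0)  # a Kronecker product of stochastic matrices is stochastic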
251,722 | EconForge/dolo | trash/dolo/misc/modfile.py | dynare_import | def dynare_import(filename,full_output=False, debug=False):
'''Imports model defined in specified file'''
import os
basename = os.path.basename(filename)
fname = re.compile('(.*)\.(.*)').match(basename).group(1)
f = open(filename)
txt = f.read()
model = parse_dynare_text(txt,full_output=full_output, debug=debug)
model.name = fname
return model | python | def dynare_import(filename,full_output=False, debug=False):
'''Imports model defined in specified file'''
import os
basename = os.path.basename(filename)
fname = re.compile('(.*)\.(.*)').match(basename).group(1)
f = open(filename)
txt = f.read()
model = parse_dynare_text(txt,full_output=full_output, debug=debug)
model.name = fname
return model | [
"def",
"dynare_import",
"(",
"filename",
",",
"full_output",
"=",
"False",
",",
"debug",
"=",
"False",
")",
":",
"import",
"os",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"fname",
"=",
"re",
".",
"compile",
"(",
"'(.*)\\.(.*)'",
")",
".",
"match",
"(",
"basename",
")",
".",
"group",
"(",
"1",
")",
"f",
"=",
"open",
"(",
"filename",
")",
"txt",
"=",
"f",
".",
"read",
"(",
")",
"model",
"=",
"parse_dynare_text",
"(",
"txt",
",",
"full_output",
"=",
"full_output",
",",
"debug",
"=",
"debug",
")",
"model",
".",
"name",
"=",
"fname",
"return",
"model"
] | Imports model defined in specified file | [
"Imports",
"model",
"defined",
"in",
"specified",
"file"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/misc/modfile.py#L311-L320 |
251,723 | EconForge/dolo | dolo/algos/perfect_foresight.py | _shocks_to_epsilons | def _shocks_to_epsilons(model, shocks, T):
"""
Helper function to support input argument `shocks` being one of many
different data types. Will always return a `T+1, n_e` matrix.
"""
n_e = len(model.calibration['exogenous'])
# if we have a DataFrame, convert it to a dict and rely on the method below
if isinstance(shocks, pd.DataFrame):
shocks = {k: shocks[k].tolist() for k in shocks.columns}
# handle case where shocks might be a dict. Be careful to handle case where
# value arrays are not the same length
if isinstance(shocks, dict):
epsilons = np.zeros((T + 1, n_e))
for (i, k) in enumerate(model.symbols["exogenous"]):
if k in shocks:
this_shock = shocks[k]
epsilons[:len(this_shock), i] = this_shock
epsilons[len(this_shock):, i] = this_shock[-1]
else:
# otherwise set to value in calibration
epsilons[:, i] = model.calibration["exogenous"][i]
return epsilons
# read from calibration if not given
if shocks is None:
shocks = model.calibration["exogenous"]
# now we just assume that shocks is array-like and try using the output of
# np.asarray(shocks)
shocks = np.asarray(shocks)
shocks = shocks.reshape((-1, n_e))
# until last period, exogenous shock takes its last value
epsilons = np.zeros((T + 1, n_e))
epsilons[:(shocks.shape[0] - 1), :] = shocks[1:, :]
epsilons[(shocks.shape[0] - 1):, :] = shocks[-1:, :]
return epsilons | python | def _shocks_to_epsilons(model, shocks, T):
n_e = len(model.calibration['exogenous'])
# if we have a DataFrame, convert it to a dict and rely on the method below
if isinstance(shocks, pd.DataFrame):
shocks = {k: shocks[k].tolist() for k in shocks.columns}
# handle case where shocks might be a dict. Be careful to handle case where
# value arrays are not the same length
if isinstance(shocks, dict):
epsilons = np.zeros((T + 1, n_e))
for (i, k) in enumerate(model.symbols["exogenous"]):
if k in shocks:
this_shock = shocks[k]
epsilons[:len(this_shock), i] = this_shock
epsilons[len(this_shock):, i] = this_shock[-1]
else:
# otherwise set to value in calibration
epsilons[:, i] = model.calibration["exogenous"][i]
return epsilons
# read from calibration if not given
if shocks is None:
shocks = model.calibration["exogenous"]
# now we just assume that shocks is array-like and try using the output of
# np.asarray(shocks)
shocks = np.asarray(shocks)
shocks = shocks.reshape((-1, n_e))
# until last period, exogenous shock takes its last value
epsilons = np.zeros((T + 1, n_e))
epsilons[:(shocks.shape[0] - 1), :] = shocks[1:, :]
epsilons[(shocks.shape[0] - 1):, :] = shocks[-1:, :]
return epsilons | [
"def",
"_shocks_to_epsilons",
"(",
"model",
",",
"shocks",
",",
"T",
")",
":",
"n_e",
"=",
"len",
"(",
"model",
".",
"calibration",
"[",
"'exogenous'",
"]",
")",
"# if we have a DataFrame, convert it to a dict and rely on the method below",
"if",
"isinstance",
"(",
"shocks",
",",
"pd",
".",
"DataFrame",
")",
":",
"shocks",
"=",
"{",
"k",
":",
"shocks",
"[",
"k",
"]",
".",
"tolist",
"(",
")",
"for",
"k",
"in",
"shocks",
".",
"columns",
"}",
"# handle case where shocks might be a dict. Be careful to handle case where",
"# value arrays are not the same length",
"if",
"isinstance",
"(",
"shocks",
",",
"dict",
")",
":",
"epsilons",
"=",
"np",
".",
"zeros",
"(",
"(",
"T",
"+",
"1",
",",
"n_e",
")",
")",
"for",
"(",
"i",
",",
"k",
")",
"in",
"enumerate",
"(",
"model",
".",
"symbols",
"[",
"\"exogenous\"",
"]",
")",
":",
"if",
"k",
"in",
"shocks",
":",
"this_shock",
"=",
"shocks",
"[",
"k",
"]",
"epsilons",
"[",
":",
"len",
"(",
"this_shock",
")",
",",
"i",
"]",
"=",
"this_shock",
"epsilons",
"[",
"len",
"(",
"this_shock",
")",
":",
",",
"i",
"]",
"=",
"this_shock",
"[",
"-",
"1",
"]",
"else",
":",
"# otherwise set to value in calibration",
"epsilons",
"[",
":",
",",
"i",
"]",
"=",
"model",
".",
"calibration",
"[",
"\"exogenous\"",
"]",
"[",
"i",
"]",
"return",
"epsilons",
"# read from calibration if not given",
"if",
"shocks",
"is",
"None",
":",
"shocks",
"=",
"model",
".",
"calibration",
"[",
"\"exogenous\"",
"]",
"# now we just assume that shocks is array-like and try using the output of",
"# np.asarray(shocks)",
"shocks",
"=",
"np",
".",
"asarray",
"(",
"shocks",
")",
"shocks",
"=",
"shocks",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"n_e",
")",
")",
"# until last period, exogenous shock takes its last value",
"epsilons",
"=",
"np",
".",
"zeros",
"(",
"(",
"T",
"+",
"1",
",",
"n_e",
")",
")",
"epsilons",
"[",
":",
"(",
"shocks",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
",",
":",
"]",
"=",
"shocks",
"[",
"1",
":",
",",
":",
"]",
"epsilons",
"[",
"(",
"shocks",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
":",
",",
":",
"]",
"=",
"shocks",
"[",
"-",
"1",
":",
",",
":",
"]",
"return",
"epsilons"
] | Helper function to support input argument `shocks` being one of many
different data types. Will always return a `T+1, n_e` matrix. | [
"Helper",
"function",
"to",
"support",
"input",
"argument",
"shocks",
"being",
"one",
"of",
"many",
"different",
"data",
"types",
".",
"Will",
"always",
"return",
"a",
"T",
"n_e",
"matrix",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/algos/perfect_foresight.py#L9-L49 |
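An illustrative sketch of the padding behaviour documented above; `_shocks_to_epsilons` is a private helper, so the stand-in model below is hypothetical and exposes only the two attributes the function touches:
import numpy as np
from dolo.algos.perfect_foresight import _shocks_to_epsilons  # assumed import path

class FakeModel:                        # hypothetical stand-in for a dolo model
    symbols = {"exogenous": ["e_z"]}
    calibration = {"exogenous": np.array([0.0])}

eps = _shocks_to_epsilons(FakeModel(), {"e_z": [0.1, 0.05]}, T=4)
print(eps.ravel())                      # [0.1 0.05 0.05 0.05 0.05]: the last value is held through T+1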
251,724 | EconForge/dolo | trash/dolo/misc/symbolic_interactive.py | clear_all | def clear_all():
"""
Clears all parameters, variables, and shocks defined previously
"""
frame = inspect.currentframe().f_back
try:
if frame.f_globals.get('variables_order'):
# we should avoid declaring symbols twice!
del frame.f_globals['variables_order']
if frame.f_globals.get('parameters_order'):
# we should avoid declaring symbols twice!
del frame.f_globals['parameters_order']
finally:
del frame | python | def clear_all():
frame = inspect.currentframe().f_back
try:
if frame.f_globals.get('variables_order'):
# we should avoid declaring symbols twice!
del frame.f_globals['variables_order']
if frame.f_globals.get('parameters_order'):
# we should avoid declaring symbols twice!
del frame.f_globals['parameters_order']
finally:
del frame | [
"def",
"clear_all",
"(",
")",
":",
"frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
".",
"f_back",
"try",
":",
"if",
"frame",
".",
"f_globals",
".",
"get",
"(",
"'variables_order'",
")",
":",
"# we should avoid to declare symbols twice !",
"del",
"frame",
".",
"f_globals",
"[",
"'variables_order'",
"]",
"if",
"frame",
".",
"f_globals",
".",
"get",
"(",
"'parameters_order'",
")",
":",
"# we should avoid to declare symbols twice !",
"del",
"frame",
".",
"f_globals",
"[",
"'parameters_order'",
"]",
"finally",
":",
"del",
"frame"
] | Clears all parameters, variables, and shocks defined previously | [
"Clears",
"all",
"parameters",
"variables",
"and",
"shocks",
"defined",
"previously"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/misc/symbolic_interactive.py#L319-L333 |
251,725 | EconForge/dolo | trash/dolo/algos/dtcscc/nonlinearsystem.py | nonlinear_system | def nonlinear_system(model, initial_dr=None, maxit=10, tol=1e-8, grid={}, distribution={}, verbose=True):
'''
Finds a global solution for ``model`` by solving one large system of equations
using a simple newton algorithm.
Parameters
----------
model: NumericModel
"dtcscc" model to be solved
verbose: boolean
if True, display iterations
initial_dr: decision rule
initial guess for the decision rule
maxit: int
maximum number of iterations
tol: tolerance criterion for successive approximations
grid: grid options
distribution: distribution options
Returns
-------
decision rule :
approximated solution
'''
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} |'
headline = headline.format('N', ' Error', 'Time')
stars = '-'*len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} |'
f = model.functions['arbitrage']
g = model.functions['transition']
p = model.calibration['parameters']
distrib = model.get_distribution(**distribution)
nodes, weights = distrib.discretize()
approx = model.get_grid(**grid)
ms = create_interpolator(approx, approx.interpolation)
grid = ms.grid
if initial_dr is None:
dr = approximate_controls(model)
else:
dr = initial_dr
ms.set_values(dr(grid))
x = dr(grid)
x0 = x.copy()
it = 0
err = 10
a0 = x0.copy().reshape((x0.shape[0]*x0.shape[1],))
a = a0.copy()
while err > tol and it < maxit:
it += 1
t1 = time.time()
r, da = residuals(f, g, grid, a.reshape(x0.shape), ms, nodes, weights, p, diff=True)[:2]
r = r.flatten()
err = abs(r).max()
t2 = time.time()
if verbose:
print(fmt_str.format(it, err, t2-t1))
if err > tol:
a -= scipy.sparse.linalg.spsolve(da, r)
if verbose:
print(stars)
return ms | python | def nonlinear_system(model, initial_dr=None, maxit=10, tol=1e-8, grid={}, distribution={}, verbose=True):
'''
Finds a global solution for ``model`` by solving one large system of equations
using a simple newton algorithm.
Parameters
----------
model: NumericModel
"dtcscc" model to be solved
verbose: boolean
if True, display iterations
initial_dr: decision rule
initial guess for the decision rule
maxit: int
maximum number of iterations
tol: tolerance criterion for successive approximations
grid: grid options
distribution: distribution options
Returns
-------
decision rule :
approximated solution
'''
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} |'
headline = headline.format('N', ' Error', 'Time')
stars = '-'*len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} |'
f = model.functions['arbitrage']
g = model.functions['transition']
p = model.calibration['parameters']
distrib = model.get_distribution(**distribution)
nodes, weights = distrib.discretize()
approx = model.get_grid(**grid)
ms = create_interpolator(approx, approx.interpolation)
grid = ms.grid
if initial_dr is None:
dr = approximate_controls(model)
else:
dr = initial_dr
ms.set_values(dr(grid))
x = dr(grid)
x0 = x.copy()
it = 0
err = 10
a0 = x0.copy().reshape((x0.shape[0]*x0.shape[1],))
a = a0.copy()
while err > tol and it < maxit:
it += 1
t1 = time.time()
r, da = residuals(f, g, grid, a.reshape(x0.shape), ms, nodes, weights, p, diff=True)[:2]
r = r.flatten()
err = abs(r).max()
t2 = time.time()
if verbose:
print(fmt_str.format(it, err, t2-t1))
if err > tol:
a -= scipy.sparse.linalg.spsolve(da, r)
if verbose:
print(stars)
return ms | [
"def",
"nonlinear_system",
"(",
"model",
",",
"initial_dr",
"=",
"None",
",",
"maxit",
"=",
"10",
",",
"tol",
"=",
"1e-8",
",",
"grid",
"=",
"{",
"}",
",",
"distribution",
"=",
"{",
"}",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"verbose",
":",
"headline",
"=",
"'|{0:^4} | {1:10} | {2:8} |'",
"headline",
"=",
"headline",
".",
"format",
"(",
"'N'",
",",
"' Error'",
",",
"'Time'",
")",
"stars",
"=",
"'-'",
"*",
"len",
"(",
"headline",
")",
"print",
"(",
"stars",
")",
"print",
"(",
"headline",
")",
"print",
"(",
"stars",
")",
"# format string for within loop",
"fmt_str",
"=",
"'|{0:4} | {1:10.3e} | {2:8.3f} |'",
"f",
"=",
"model",
".",
"functions",
"[",
"'arbitrage'",
"]",
"g",
"=",
"model",
".",
"functions",
"[",
"'transition'",
"]",
"p",
"=",
"model",
".",
"calibration",
"[",
"'parameters'",
"]",
"distrib",
"=",
"model",
".",
"get_distribution",
"(",
"*",
"*",
"distribution",
")",
"nodes",
",",
"weights",
"=",
"distrib",
".",
"discretize",
"(",
")",
"approx",
"=",
"model",
".",
"get_grid",
"(",
"*",
"*",
"grid",
")",
"ms",
"=",
"create_interpolator",
"(",
"approx",
",",
"approx",
".",
"interpolation",
")",
"grid",
"=",
"ms",
".",
"grid",
"if",
"initial_dr",
"is",
"None",
":",
"dr",
"=",
"approximate_controls",
"(",
"model",
")",
"else",
":",
"dr",
"=",
"initial_dr",
"ms",
".",
"set_values",
"(",
"dr",
"(",
"grid",
")",
")",
"x",
"=",
"dr",
"(",
"grid",
")",
"x0",
"=",
"x",
".",
"copy",
"(",
")",
"it",
"=",
"0",
"err",
"=",
"10",
"a0",
"=",
"x0",
".",
"copy",
"(",
")",
".",
"reshape",
"(",
"(",
"x0",
".",
"shape",
"[",
"0",
"]",
"*",
"x0",
".",
"shape",
"[",
"1",
"]",
",",
")",
")",
"a",
"=",
"a0",
".",
"copy",
"(",
")",
"while",
"err",
">",
"tol",
"and",
"it",
"<",
"maxit",
":",
"it",
"+=",
"1",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"r",
",",
"da",
"=",
"residuals",
"(",
"f",
",",
"g",
",",
"grid",
",",
"a",
".",
"reshape",
"(",
"x0",
".",
"shape",
")",
",",
"ms",
",",
"nodes",
",",
"weights",
",",
"p",
",",
"diff",
"=",
"True",
")",
"[",
":",
"2",
"]",
"r",
"=",
"r",
".",
"flatten",
"(",
")",
"err",
"=",
"abs",
"(",
"r",
")",
".",
"max",
"(",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"if",
"verbose",
":",
"print",
"(",
"fmt_str",
".",
"format",
"(",
"it",
",",
"err",
",",
"t2",
"-",
"t1",
")",
")",
"if",
"err",
">",
"tol",
":",
"a",
"-=",
"scipy",
".",
"sparse",
".",
"linalg",
".",
"spsolve",
"(",
"da",
",",
"r",
")",
"if",
"verbose",
":",
"print",
"(",
"stars",
")",
"return",
"ms"
] | Finds a global solution for ``model`` by solving one large system of equations
using a simple newton algorithm.
Parameters
----------
model: NumericModel
"dtcscc" model to be solved
verbose: boolean
if True, display iterations
initial_dr: decision rule
initial guess for the decision rule
maxit: int
maximum number of iterations
tol: tolerance criterion for successive approximations
grid: grid options
distribution: distribution options
Returns
-------
decision rule :
approximated solution | [
"Finds",
"a",
"global",
"solution",
"for",
"model",
"by",
"solving",
"one",
"large",
"system",
"of",
"equations",
"using",
"a",
"simple",
"newton",
"algorithm",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/algos/dtcscc/nonlinearsystem.py#L10-L97 |
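A hedged usage sketch: the module sits under `trash/` in the repository, so the import below assumes the repo root is on sys.path and the trash tree still holds package files; the YAML model file is hypothetical.
from dolo import yaml_import                                          # dolo's documented model loader
from trash.dolo.algos.dtcscc.nonlinearsystem import nonlinear_system  # path taken from the record; uncertain

model = yaml_import("examples/models/rbc.yaml")  # hypothetical "dtcscc" model file
dr = nonlinear_system(model, maxit=10, tol=1e-8, verbose=True)
x = dr(dr.grid)                                  # solved controls evaluated on the state grid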
251,726 | EconForge/dolo | dolo/numeric/discretization/quadrature.py | gauss_hermite_nodes | def gauss_hermite_nodes(orders, sigma, mu=None):
'''
Computes the weights and nodes for Gauss Hermite quadrature.
Parameters
----------
orders : int, list, array
The order of integration used in the quadrature routine
sigma : array-like
If one dimensional, the variance of the normal distribution being
approximated. If multidimensional, the variance-covariance matrix of
the multivariate normal process being approximated.
Returns
-------
x : array
Quadrature nodes
w : array
Quadrature weights
'''
if isinstance(orders, int):
orders = [orders]
import numpy
if mu is None:
mu = numpy.array( [0]*sigma.shape[0] )
herms = [hermgauss(i) for i in orders]
points = [ h[0]*numpy.sqrt(2) for h in herms]
weights = [ h[1]/numpy.sqrt( numpy.pi) for h in herms]
if len(orders) == 1:
# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.
# print(points.shape)
x = numpy.array(points[0])*numpy.sqrt(float(sigma))
if sigma.ndim==2:
x = x[:,None]
w = weights[0]
return [x,w]
else:
x = cartesian( points).T
from functools import reduce
w = reduce( numpy.kron, weights)
zero_columns = numpy.where(sigma.sum(axis=0)==0)[0]
for i in zero_columns:
sigma[i,i] = 1.0
C = numpy.linalg.cholesky(sigma)
x = numpy.dot(C, x) + mu[:,numpy.newaxis]
x = numpy.ascontiguousarray(x.T)
for i in zero_columns:
x[:,i] =0
return [x,w] | python | def gauss_hermite_nodes(orders, sigma, mu=None):
'''
Computes the weights and nodes for Gauss Hermite quadrature.
Parameters
----------
orders : int, list, array
The order of integration used in the quadrature routine
sigma : array-like
If one dimensional, the variance of the normal distribution being
approximated. If multidimensional, the variance-covariance matrix of
the multivariate normal process being approximated.
Returns
-------
x : array
Quadrature nodes
w : array
Quadrature weights
'''
if isinstance(orders, int):
orders = [orders]
import numpy
if mu is None:
mu = numpy.array( [0]*sigma.shape[0] )
herms = [hermgauss(i) for i in orders]
points = [ h[0]*numpy.sqrt(2) for h in herms]
weights = [ h[1]/numpy.sqrt( numpy.pi) for h in herms]
if len(orders) == 1:
# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.
# print(points.shape)
x = numpy.array(points[0])*numpy.sqrt(float(sigma))
if sigma.ndim==2:
x = x[:,None]
w = weights[0]
return [x,w]
else:
x = cartesian( points).T
from functools import reduce
w = reduce( numpy.kron, weights)
zero_columns = numpy.where(sigma.sum(axis=0)==0)[0]
for i in zero_columns:
sigma[i,i] = 1.0
C = numpy.linalg.cholesky(sigma)
x = numpy.dot(C, x) + mu[:,numpy.newaxis]
x = numpy.ascontiguousarray(x.T)
for i in zero_columns:
x[:,i] =0
return [x,w] | [
"def",
"gauss_hermite_nodes",
"(",
"orders",
",",
"sigma",
",",
"mu",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"orders",
",",
"int",
")",
":",
"orders",
"=",
"[",
"orders",
"]",
"import",
"numpy",
"if",
"mu",
"is",
"None",
":",
"mu",
"=",
"numpy",
".",
"array",
"(",
"[",
"0",
"]",
"*",
"sigma",
".",
"shape",
"[",
"0",
"]",
")",
"herms",
"=",
"[",
"hermgauss",
"(",
"i",
")",
"for",
"i",
"in",
"orders",
"]",
"points",
"=",
"[",
"h",
"[",
"0",
"]",
"*",
"numpy",
".",
"sqrt",
"(",
"2",
")",
"for",
"h",
"in",
"herms",
"]",
"weights",
"=",
"[",
"h",
"[",
"1",
"]",
"/",
"numpy",
".",
"sqrt",
"(",
"numpy",
".",
"pi",
")",
"for",
"h",
"in",
"herms",
"]",
"if",
"len",
"(",
"orders",
")",
"==",
"1",
":",
"# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.",
"# print(points.shape)",
"x",
"=",
"numpy",
".",
"array",
"(",
"points",
"[",
"0",
"]",
")",
"*",
"numpy",
".",
"sqrt",
"(",
"float",
"(",
"sigma",
")",
")",
"if",
"sigma",
".",
"ndim",
"==",
"2",
":",
"x",
"=",
"x",
"[",
":",
",",
"None",
"]",
"w",
"=",
"weights",
"[",
"0",
"]",
"return",
"[",
"x",
",",
"w",
"]",
"else",
":",
"x",
"=",
"cartesian",
"(",
"points",
")",
".",
"T",
"from",
"functools",
"import",
"reduce",
"w",
"=",
"reduce",
"(",
"numpy",
".",
"kron",
",",
"weights",
")",
"zero_columns",
"=",
"numpy",
".",
"where",
"(",
"sigma",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"==",
"0",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"zero_columns",
":",
"sigma",
"[",
"i",
",",
"i",
"]",
"=",
"1.0",
"C",
"=",
"numpy",
".",
"linalg",
".",
"cholesky",
"(",
"sigma",
")",
"x",
"=",
"numpy",
".",
"dot",
"(",
"C",
",",
"x",
")",
"+",
"mu",
"[",
":",
",",
"numpy",
".",
"newaxis",
"]",
"x",
"=",
"numpy",
".",
"ascontiguousarray",
"(",
"x",
".",
"T",
")",
"for",
"i",
"in",
"zero_columns",
":",
"x",
"[",
":",
",",
"i",
"]",
"=",
"0",
"return",
"[",
"x",
",",
"w",
"]"
] | Computes the weights and nodes for Gauss Hermite quadrature.
Parameters
----------
orders : int, list, array
The order of integration used in the quadrature routine
sigma : array-like
If one dimensional, the variance of the normal distribution being
approximated. If multidimensional, the variance-covariance matrix of
the multivariate normal process being approximated.
Returns
-------
x : array
Quadrature nodes
w : array
Quadrature weights | [
"Computes",
"the",
"weights",
"and",
"nodes",
"for",
"Gauss",
"Hermite",
"quadrature",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/quadrature.py#L59-L122 |
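A quick check of the quadrature, assuming the import path matches the record's file path: the nodes and weights should reproduce the second moment of a normal distribution almost exactly.
import numpy as np
from dolo.numeric.discretization.quadrature import gauss_hermite_nodes  # assumed import path

sigma = np.array([[0.04]])              # 1x1 variance-covariance matrix
x, w = gauss_hermite_nodes([7], sigma)
print(np.dot(w, x[:, 0] ** 2))          # ~0.04, the variance of N(0, 0.04)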
251,727 | EconForge/dolo | dolo/numeric/optimize/newton.py | newton | def newton(f, x, verbose=False, tol=1e-6, maxit=5, jactype='serial'):
"""Solve nonlinear system using safeguarded Newton iterations
Parameters
----------
Return
------
"""
if verbose:
print = lambda txt: old_print(txt)
else:
print = lambda txt: None
it = 0
error = 10
converged = False
maxbacksteps = 30
x0 = x
if jactype == 'sparse':
from scipy.sparse.linalg import spsolve as solve
elif jactype == 'full':
from numpy.linalg import solve
else:
solve = serial_solve
while it<maxit and not converged:
[v,dv] = f(x)
# TODO: rewrite starting here
# print("Time to evaluate {}".format(ss-tt)0)
error_0 = abs(v).max()
if error_0 < tol:
if verbose:
print("> System was solved after iteration {}. Residual={}".format(it,error_0))
converged = True
else:
it += 1
dx = solve(dv, v)
# norm_dx = abs(dx).max()
for bck in range(maxbacksteps):
xx = x - dx*(2**(-bck))
vm = f(xx)[0]
err = abs(vm).max()
if err < error_0:
break
x = xx
if verbose:
print("\t> {} | {} | {}".format(it, err, bck))
if not converged:
import warnings
warnings.warn("Did not converge")
return [x, it] | python | def newton(f, x, verbose=False, tol=1e-6, maxit=5, jactype='serial'):
if verbose:
print = lambda txt: old_print(txt)
else:
print = lambda txt: None
it = 0
error = 10
converged = False
maxbacksteps = 30
x0 = x
if jactype == 'sparse':
from scipy.sparse.linalg import spsolve as solve
elif jactype == 'full':
from numpy.linalg import solve
else:
solve = serial_solve
while it<maxit and not converged:
[v,dv] = f(x)
# TODO: rewrite starting here
# print("Time to evaluate {}".format(ss-tt)0)
error_0 = abs(v).max()
if error_0 < tol:
if verbose:
print("> System was solved after iteration {}. Residual={}".format(it,error_0))
converged = True
else:
it += 1
dx = solve(dv, v)
# norm_dx = abs(dx).max()
for bck in range(maxbacksteps):
xx = x - dx*(2**(-bck))
vm = f(xx)[0]
err = abs(vm).max()
if err < error_0:
break
x = xx
if verbose:
print("\t> {} | {} | {}".format(it, err, bck))
if not converged:
import warnings
warnings.warn("Did not converge")
return [x, it] | [
"def",
"newton",
"(",
"f",
",",
"x",
",",
"verbose",
"=",
"False",
",",
"tol",
"=",
"1e-6",
",",
"maxit",
"=",
"5",
",",
"jactype",
"=",
"'serial'",
")",
":",
"if",
"verbose",
":",
"print",
"=",
"lambda",
"txt",
":",
"old_print",
"(",
"txt",
")",
"else",
":",
"print",
"=",
"lambda",
"txt",
":",
"None",
"it",
"=",
"0",
"error",
"=",
"10",
"converged",
"=",
"False",
"maxbacksteps",
"=",
"30",
"x0",
"=",
"x",
"if",
"jactype",
"==",
"'sparse'",
":",
"from",
"scipy",
".",
"sparse",
".",
"linalg",
"import",
"spsolve",
"as",
"solve",
"elif",
"jactype",
"==",
"'full'",
":",
"from",
"numpy",
".",
"linalg",
"import",
"solve",
"else",
":",
"solve",
"=",
"serial_solve",
"while",
"it",
"<",
"maxit",
"and",
"not",
"converged",
":",
"[",
"v",
",",
"dv",
"]",
"=",
"f",
"(",
"x",
")",
"# TODO: rewrite starting here",
"# print(\"Time to evaluate {}\".format(ss-tt)0)",
"error_0",
"=",
"abs",
"(",
"v",
")",
".",
"max",
"(",
")",
"if",
"error_0",
"<",
"tol",
":",
"if",
"verbose",
":",
"print",
"(",
"\"> System was solved after iteration {}. Residual={}\"",
".",
"format",
"(",
"it",
",",
"error_0",
")",
")",
"converged",
"=",
"True",
"else",
":",
"it",
"+=",
"1",
"dx",
"=",
"solve",
"(",
"dv",
",",
"v",
")",
"# norm_dx = abs(dx).max()",
"for",
"bck",
"in",
"range",
"(",
"maxbacksteps",
")",
":",
"xx",
"=",
"x",
"-",
"dx",
"*",
"(",
"2",
"**",
"(",
"-",
"bck",
")",
")",
"vm",
"=",
"f",
"(",
"xx",
")",
"[",
"0",
"]",
"err",
"=",
"abs",
"(",
"vm",
")",
".",
"max",
"(",
")",
"if",
"err",
"<",
"error_0",
":",
"break",
"x",
"=",
"xx",
"if",
"verbose",
":",
"print",
"(",
"\"\\t> {} | {} | {}\"",
".",
"format",
"(",
"it",
",",
"err",
",",
"bck",
")",
")",
"if",
"not",
"converged",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"\"Did not converge\"",
")",
"return",
"[",
"x",
",",
"it",
"]"
] | Solve nonlinear system using safeguarded Newton iterations
Parameters
----------
f : callable returning the residual vector and its Jacobian as ``[v, dv]``
x : array, initial guess
verbose : boolean, if True display progress at each iteration
tol : float, convergence tolerance on ``abs(v).max()``
maxit : int, maximum number of Newton iterations
jactype : {'serial', 'sparse', 'full'}, how the step ``solve(dv, v)`` is computed
Returns
-------
[x, it] : solution and number of iterations performed | [
"Solve",
"nonlinear",
"system",
"using",
"safeguarded",
"Newton",
"iterations"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/optimize/newton.py#L81-L151 |
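A self-contained sketch (assumed import path) solving x**2 = 2 with an explicit Jacobian and the dense linear solver:
import numpy as np
from dolo.numeric.optimize.newton import newton  # assumed import path

def f(x):
    v = x ** 2 - 2.0                    # residual vector
    dv = np.diag(2.0 * x)               # its Jacobian
    return [v, dv]

x, nit = newton(f, np.array([1.0]), tol=1e-10, maxit=10, jactype='full')
print(x, nit)                           # ~[1.41421356] after a handful of iterations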
251,728 | EconForge/dolo | dolo/numeric/extern/qz.py | qzordered | def qzordered(A,B,crit=1.0):
"Eigenvalues bigger than crit are sorted in the top-left."
TOL = 1e-10
def select(alpha, beta):
return alpha**2>crit*beta**2
[S,T,alpha,beta,U,V] = ordqz(A,B,output='real',sort=select)
eigval = abs(numpy.diag(S)/numpy.diag(T))
return [S,T,U,V,eigval] | python | def qzordered(A,B,crit=1.0):
"Eigenvalues bigger than crit are sorted in the top-left."
TOL = 1e-10
def select(alpha, beta):
return alpha**2>crit*beta**2
[S,T,alpha,beta,U,V] = ordqz(A,B,output='real',sort=select)
eigval = abs(numpy.diag(S)/numpy.diag(T))
return [S,T,U,V,eigval] | [
"def",
"qzordered",
"(",
"A",
",",
"B",
",",
"crit",
"=",
"1.0",
")",
":",
"TOL",
"=",
"1e-10",
"def",
"select",
"(",
"alpha",
",",
"beta",
")",
":",
"return",
"alpha",
"**",
"2",
">",
"crit",
"*",
"beta",
"**",
"2",
"[",
"S",
",",
"T",
",",
"alpha",
",",
"beta",
",",
"U",
",",
"V",
"]",
"=",
"ordqz",
"(",
"A",
",",
"B",
",",
"output",
"=",
"'real'",
",",
"sort",
"=",
"select",
")",
"eigval",
"=",
"abs",
"(",
"numpy",
".",
"diag",
"(",
"S",
")",
"/",
"numpy",
".",
"diag",
"(",
"T",
")",
")",
"return",
"[",
"S",
",",
"T",
",",
"U",
",",
"V",
",",
"eigval",
"]"
] | Eigenvalues bigger than crit are sorted in the top-left. | [
"Eigenvalues",
"bigger",
"than",
"crit",
"are",
"sorted",
"in",
"the",
"top",
"-",
"left",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/extern/qz.py#L6-L18 |
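A small sketch of the reordering (assumed import path; the module leans on scipy/numpy internals of its era, so it may need versions contemporary to this code). On a diagonal pair the generalized eigenvalues are the ratios of the diagonals, with the explosive one sorted first:
import numpy as np
from dolo.numeric.extern.qz import qzordered  # assumed import path

A = np.array([[0.5, 0.0], [0.0, 2.0]])
B = np.eye(2)
S, T, U, V, eigval = qzordered(A, B, crit=1.0)
print(eigval)                           # [2. 0.5]: eigenvalues with |lambda| > crit come first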
251,729 | EconForge/dolo | dolo/numeric/extern/qz.py | ordqz | def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
overwrite_b=False, check_finite=True):
"""
QZ decomposition for a pair of matrices with reordering.
.. versionadded:: 0.17.0
Parameters
----------
A : (N, N) array_like
2d array to decompose
B : (N, N) array_like
2d array to decompose
sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
Specifies whether the upper eigenvalues should be sorted. A callable
may be passed that, given an eigenvalue, returns a boolean denoting
whether the eigenvalue should be sorted to the top-left (True). For
real matrix pairs, the sort function takes three real arguments
(alphar, alphai, beta). The eigenvalue
``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
output='complex', the sort function takes two complex arguments
(alpha, beta). The eigenvalue ``x = (alpha/beta)``.
Alternatively, string parameters may be used:
- 'lhp' Left-hand plane (x.real < 0.0)
- 'rhp' Right-hand plane (x.real > 0.0)
- 'iuc' Inside the unit circle (x*x.conjugate() < 1.0)
- 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
output : str {'real','complex'}, optional
Construct the real or complex QZ decomposition for real matrices.
Default is 'real'.
overwrite_a : bool, optional
If True, the contents of A are overwritten.
overwrite_b : bool, optional
If True, the contents of B are overwritten.
check_finite : bool, optional
If true, checks that the elements of `A` and `B` are finite numbers. If
false, does no checking and passes the matrices through to the
underlying algorithm.
Returns
-------
AA : (N, N) ndarray
Generalized Schur form of A.
BB : (N, N) ndarray
Generalized Schur form of B.
alpha : (N,) ndarray
alpha = alphar + alphai * 1j. See notes.
beta : (N,) ndarray
See notes.
Q : (N, N) ndarray
The left Schur vectors.
Z : (N, N) ndarray
The right Schur vectors.
Notes
-----
On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
that would result if the 2-by-2 diagonal blocks of the real generalized
Schur form of (A,B) were further reduced to triangular form using complex
unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue is
real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are a complex
conjugate pair, with ``ALPHAI(j+1)`` negative.
See also
--------
qz
"""
import warnings
import numpy as np
from numpy import asarray_chkfinite
from scipy.linalg.misc import LinAlgError, _datacopied
from scipy.linalg.lapack import get_lapack_funcs
from scipy._lib.six import callable
from scipy.linalg._decomp_qz import _qz, _select_function
#NOTE: should users be able to set these?
lwork = None
result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
overwrite_a=overwrite_a, overwrite_b=overwrite_b,
check_finite=check_finite)
AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
if typ not in 'cz':
alpha, beta = result[3] + result[4]*1.j, result[5]
else:
alpha, beta = result[3], result[4]
sfunction = _select_function(sort)
select = sfunction(alpha, beta)
tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))
if lwork is None or lwork == -1:
result = tgsen(select, AA, BB, Q, Z, lwork=-1)
lwork = result[-3][0].real.astype(np.int)
# looks like wrong value passed to ZTGSYL if not
lwork += 1
liwork = None
if liwork is None or liwork == -1:
result = tgsen(select, AA, BB, Q, Z, liwork=-1)
liwork = result[-2][0]
result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)
info = result[-1]
if info < 0:
raise ValueError("Illegal value in argument %d of tgsen" % -info)
elif info == 1:
raise ValueError("Reordering of (A, B) failed because the transformed"
" matrix pair (A, B) would be too far from "
"generalized Schur form; the problem is very "
"ill-conditioned. (A, B) may have been partially "
"reorded. If requested, 0 is returned in DIF(*), "
"PL, and PR.")
# for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
# work, iwork, info
if typ in ['f', 'd']:
alpha = result[2] + result[3] * 1.j
return (result[0], result[1], alpha, result[4], result[5], result[6])
# for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
# iwork, info
else:
return result[0], result[1], result[2], result[3], result[4], result[5] | python | def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
overwrite_b=False, check_finite=True):
import warnings
import numpy as np
from numpy import asarray_chkfinite
from scipy.linalg.misc import LinAlgError, _datacopied
from scipy.linalg.lapack import get_lapack_funcs
from scipy._lib.six import callable
from scipy.linalg._decomp_qz import _qz, _select_function
#NOTE: should users be able to set these?
lwork = None
result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
overwrite_a=overwrite_a, overwrite_b=overwrite_b,
check_finite=check_finite)
AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
if typ not in 'cz':
alpha, beta = result[3] + result[4]*1.j, result[5]
else:
alpha, beta = result[3], result[4]
sfunction = _select_function(sort)
select = sfunction(alpha, beta)
tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))
if lwork is None or lwork == -1:
result = tgsen(select, AA, BB, Q, Z, lwork=-1)
lwork = result[-3][0].real.astype(np.int)
# looks like wrong value passed to ZTGSYL if not
lwork += 1
liwork = None
if liwork is None or liwork == -1:
result = tgsen(select, AA, BB, Q, Z, liwork=-1)
liwork = result[-2][0]
result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)
info = result[-1]
if info < 0:
raise ValueError("Illegal value in argument %d of tgsen" % -info)
elif info == 1:
raise ValueError("Reordering of (A, B) failed because the transformed"
" matrix pair (A, B) would be too far from "
"generalized Schur form; the problem is very "
"ill-conditioned. (A, B) may have been partially "
"reorded. If requested, 0 is returned in DIF(*), "
"PL, and PR.")
# for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
# work, iwork, info
if typ in ['f', 'd']:
alpha = result[2] + result[3] * 1.j
return (result[0], result[1], alpha, result[4], result[5], result[6])
# for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
# iwork, info
else:
return result[0], result[1], result[2], result[3], result[4], result[5] | [
"def",
"ordqz",
"(",
"A",
",",
"B",
",",
"sort",
"=",
"'lhp'",
",",
"output",
"=",
"'real'",
",",
"overwrite_a",
"=",
"False",
",",
"overwrite_b",
"=",
"False",
",",
"check_finite",
"=",
"True",
")",
":",
"import",
"warnings",
"import",
"numpy",
"as",
"np",
"from",
"numpy",
"import",
"asarray_chkfinite",
"from",
"scipy",
".",
"linalg",
".",
"misc",
"import",
"LinAlgError",
",",
"_datacopied",
"from",
"scipy",
".",
"linalg",
".",
"lapack",
"import",
"get_lapack_funcs",
"from",
"scipy",
".",
"_lib",
".",
"six",
"import",
"callable",
"from",
"scipy",
".",
"linalg",
".",
"_decomp_qz",
"import",
"_qz",
",",
"_select_function",
"#NOTE: should users be able to set these?",
"lwork",
"=",
"None",
"result",
",",
"typ",
"=",
"_qz",
"(",
"A",
",",
"B",
",",
"output",
"=",
"output",
",",
"lwork",
"=",
"lwork",
",",
"sort",
"=",
"None",
",",
"overwrite_a",
"=",
"overwrite_a",
",",
"overwrite_b",
"=",
"overwrite_b",
",",
"check_finite",
"=",
"check_finite",
")",
"AA",
",",
"BB",
",",
"Q",
",",
"Z",
"=",
"result",
"[",
"0",
"]",
",",
"result",
"[",
"1",
"]",
",",
"result",
"[",
"-",
"4",
"]",
",",
"result",
"[",
"-",
"3",
"]",
"if",
"typ",
"not",
"in",
"'cz'",
":",
"alpha",
",",
"beta",
"=",
"result",
"[",
"3",
"]",
"+",
"result",
"[",
"4",
"]",
"*",
"1.j",
",",
"result",
"[",
"5",
"]",
"else",
":",
"alpha",
",",
"beta",
"=",
"result",
"[",
"3",
"]",
",",
"result",
"[",
"4",
"]",
"sfunction",
"=",
"_select_function",
"(",
"sort",
")",
"select",
"=",
"sfunction",
"(",
"alpha",
",",
"beta",
")",
"tgsen",
",",
"=",
"get_lapack_funcs",
"(",
"(",
"'tgsen'",
",",
")",
",",
"(",
"AA",
",",
"BB",
")",
")",
"if",
"lwork",
"is",
"None",
"or",
"lwork",
"==",
"-",
"1",
":",
"result",
"=",
"tgsen",
"(",
"select",
",",
"AA",
",",
"BB",
",",
"Q",
",",
"Z",
",",
"lwork",
"=",
"-",
"1",
")",
"lwork",
"=",
"result",
"[",
"-",
"3",
"]",
"[",
"0",
"]",
".",
"real",
".",
"astype",
"(",
"np",
".",
"int",
")",
"# looks like wrong value passed to ZTGSYL if not",
"lwork",
"+=",
"1",
"liwork",
"=",
"None",
"if",
"liwork",
"is",
"None",
"or",
"liwork",
"==",
"-",
"1",
":",
"result",
"=",
"tgsen",
"(",
"select",
",",
"AA",
",",
"BB",
",",
"Q",
",",
"Z",
",",
"liwork",
"=",
"-",
"1",
")",
"liwork",
"=",
"result",
"[",
"-",
"2",
"]",
"[",
"0",
"]",
"result",
"=",
"tgsen",
"(",
"select",
",",
"AA",
",",
"BB",
",",
"Q",
",",
"Z",
",",
"lwork",
"=",
"lwork",
",",
"liwork",
"=",
"liwork",
")",
"info",
"=",
"result",
"[",
"-",
"1",
"]",
"if",
"info",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Illegal value in argument %d of tgsen\"",
"%",
"-",
"info",
")",
"elif",
"info",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Reordering of (A, B) failed because the transformed\"",
"\" matrix pair (A, B) would be too far from \"",
"\"generalized Schur form; the problem is very \"",
"\"ill-conditioned. (A, B) may have been partially \"",
"\"reorded. If requested, 0 is returned in DIF(*), \"",
"\"PL, and PR.\"",
")",
"# for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,",
"# work, iwork, info",
"if",
"typ",
"in",
"[",
"'f'",
",",
"'d'",
"]",
":",
"alpha",
"=",
"result",
"[",
"2",
"]",
"+",
"result",
"[",
"3",
"]",
"*",
"1.j",
"return",
"(",
"result",
"[",
"0",
"]",
",",
"result",
"[",
"1",
"]",
",",
"alpha",
",",
"result",
"[",
"4",
"]",
",",
"result",
"[",
"5",
"]",
",",
"result",
"[",
"6",
"]",
")",
"# for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,",
"# iwork, info",
"else",
":",
"return",
"result",
"[",
"0",
"]",
",",
"result",
"[",
"1",
"]",
",",
"result",
"[",
"2",
"]",
",",
"result",
"[",
"3",
"]",
",",
"result",
"[",
"4",
"]",
",",
"result",
"[",
"5",
"]"
] | QZ decomposition for a pair of matrices with reordering.
.. versionadded:: 0.17.0
Parameters
----------
A : (N, N) array_like
2d array to decompose
B : (N, N) array_like
2d array to decompose
sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
Specifies whether the upper eigenvalues should be sorted. A callable
may be passed that, given an eigenvalue, returns a boolean denoting
whether the eigenvalue should be sorted to the top-left (True). For
real matrix pairs, the sort function takes three real arguments
(alphar, alphai, beta). The eigenvalue
``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
output='complex', the sort function takes two complex arguments
(alpha, beta). The eigenvalue ``x = (alpha/beta)``.
Alternatively, string parameters may be used:
- 'lhp' Left-hand plane (x.real < 0.0)
- 'rhp' Right-hand plane (x.real > 0.0)
- 'iuc' Inside the unit circle (x*x.conjugate() < 1.0)
- 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
output : str {'real','complex'}, optional
Construct the real or complex QZ decomposition for real matrices.
Default is 'real'.
overwrite_a : bool, optional
If True, the contents of A are overwritten.
overwrite_b : bool, optional
If True, the contents of B are overwritten.
check_finite : bool, optional
If true, checks that the elements of `A` and `B` are finite numbers. If
false, does no checking and passes the matrices through to the
underlying algorithm.
Returns
-------
AA : (N, N) ndarray
Generalized Schur form of A.
BB : (N, N) ndarray
Generalized Schur form of B.
alpha : (N,) ndarray
alpha = alphar + alphai * 1j. See notes.
beta : (N,) ndarray
See notes.
Q : (N, N) ndarray
The left Schur vectors.
Z : (N, N) ndarray
The right Schur vectors.
Notes
-----
On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
that would result if the 2-by-2 diagonal blocks of the real generalized
Schur form of (A,B) were further reduced to triangular form using complex
unitary transformations. If ALPHAI(j) is zero, then the j-th eigenvalue is
real; if positive, then the ``j``-th and ``(j+1)``-st eigenvalues are a complex
conjugate pair, with ``ALPHAI(j+1)`` negative.
See also
--------
qz | [
"QZ",
"decomposition",
"for",
"a",
"pair",
"of",
"matrices",
"with",
"reordering",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/extern/qz.py#L21-L154 |
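This function appears to be a vendored copy of `scipy.linalg.ordqz` (added upstream in scipy 0.17); on a recent scipy the upstream version can be called directly with the same semantics:
import numpy as np
from scipy.linalg import ordqz          # available in scipy >= 0.17

A = np.array([[0.5, 1.0], [0.0, 3.0]])
B = np.eye(2)
AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='iuc')  # unit-circle eigenvalues first
print(np.abs(alpha / beta))             # ~[0.5 3.0]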
251,730 | EconForge/dolo | trash/dolo/algos/dtcscc/time_iteration_2.py | parameterized_expectations_direct | def parameterized_expectations_direct(model, verbose=False, initial_dr=None,
pert_order=1, grid={}, distribution={},
maxit=100, tol=1e-8):
'''
Finds a global solution for ``model`` using parameterized expectations
function. Requires the model to be written with controls as a direct
function of the model objects.
The algorithm iterates on the expectations function in the arbitrage
equation. It follows the discussion in section 9.9 of Miranda and
Fackler (2002).
Parameters
----------
model : NumericModel
"dtcscc" model to be solved
verbose : boolean
if True, display iterations
initial_dr : decision rule
initial guess for the decision rule
pert_order : {1}
if no initial guess is supplied, the perturbation solution at order
``pert_order`` is used as initial guess
grid: grid options
distribution: distribution options
maxit: maximum number of iterations
tol: tolerance criterium for successive approximations
Returns
-------
decision rule :
approximated solution
'''
t1 = time.time()
g = model.functions['transition']
d = model.functions['direct_response']
h = model.functions['expectation']
parms = model.calibration['parameters']
if initial_dr is None:
if pert_order == 1:
initial_dr = approximate_controls(model)
if pert_order > 1:
raise Exception("Perturbation order > 1 not supported (yet).")
approx = model.get_grid(**grid)
grid = approx.grid
interp_type = approx.interpolation
dr = create_interpolator(approx, interp_type)
expect = create_interpolator(approx, interp_type)
distrib = model.get_distribution(**distribution)
nodes, weights = distrib.discretize()
N = grid.shape[0]
z = np.zeros((N, len(model.symbols['expectations'])))
x_0 = initial_dr(grid)
x_0 = x_0.real # just in case ...
h_0 = h(grid, x_0, parms)
it = 0
err = 10
err_0 = 10
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
headline = headline.format('N', ' Error', 'Gain', 'Time')
stars = '-'*len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'
while err > tol and it <= maxit:
it += 1
t_start = time.time()
# dr.set_values(x_0)
expect.set_values(h_0)
z[...] = 0
for i in range(weights.shape[0]):
e = nodes[i, :]
S = g(grid, x_0, e, parms)
# evaluate expectation over the future state
z += weights[i]*expect(S)
# TODO: check that control is admissible
new_x = d(grid, z, parms)
new_h = h(grid, new_x, parms)
# update error
err = (abs(new_h - h_0).max())
# Update guess for decision rule and expectations function
x_0 = new_x
h_0 = new_h
# print error information if `verbose`
err_SA = err/err_0
err_0 = err
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print(fmt_str.format(it, err, err_SA, elapsed))
if it == maxit:
import warnings
warnings.warn(UserWarning("Maximum number of iterations reached"))
# compute final time and do final printout if `verbose`
t2 = time.time()
if verbose:
print(stars)
print('Elapsed: {} seconds.'.format(t2 - t1))
print(stars)
# Interpolation for the decision rule
dr.set_values(x_0)
return dr | python | def parameterized_expectations_direct(model, verbose=False, initial_dr=None,
pert_order=1, grid={}, distribution={},
maxit=100, tol=1e-8):
'''
Finds a global solution for ``model`` using parameterized expectations
function. Requires the model to be written with controls as a direct
function of the model objects.
The algorithm iterates on the expectations function in the arbitrage
equation. It follows the discussion in section 9.9 of Miranda and
Fackler (2002).
Parameters
----------
model : NumericModel
"dtcscc" model to be solved
verbose : boolean
if True, display iterations
initial_dr : decision rule
initial guess for the decision rule
pert_order : {1}
if no initial guess is supplied, the perturbation solution at order
``pert_order`` is used as initial guess
grid: grid options
distribution: distribution options
maxit: maximum number of iterations
tol: tolerance criterion for successive approximations
Returns
-------
decision rule :
approximated solution
'''
t1 = time.time()
g = model.functions['transition']
d = model.functions['direct_response']
h = model.functions['expectation']
parms = model.calibration['parameters']
if initial_dr is None:
if pert_order == 1:
initial_dr = approximate_controls(model)
if pert_order > 1:
raise Exception("Perturbation order > 1 not supported (yet).")
approx = model.get_grid(**grid)
grid = approx.grid
interp_type = approx.interpolation
dr = create_interpolator(approx, interp_type)
expect = create_interpolator(approx, interp_type)
distrib = model.get_distribution(**distribution)
nodes, weights = distrib.discretize()
N = grid.shape[0]
z = np.zeros((N, len(model.symbols['expectations'])))
x_0 = initial_dr(grid)
x_0 = x_0.real # just in case ...
h_0 = h(grid, x_0, parms)
it = 0
err = 10
err_0 = 10
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
headline = headline.format('N', ' Error', 'Gain', 'Time')
stars = '-'*len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'
while err > tol and it <= maxit:
it += 1
t_start = time.time()
# dr.set_values(x_0)
expect.set_values(h_0)
z[...] = 0
for i in range(weights.shape[0]):
e = nodes[i, :]
S = g(grid, x_0, e, parms)
# evaluate expectation over the future state
z += weights[i]*expect(S)
# TODO: check that control is admissible
new_x = d(grid, z, parms)
new_h = h(grid, new_x, parms)
# update error
err = (abs(new_h - h_0).max())
# Update guess for decision rule and expectations function
x_0 = new_x
h_0 = new_h
# print error information if `verbose`
err_SA = err/err_0
err_0 = err
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print(fmt_str.format(it, err, err_SA, elapsed))
if it == maxit:
import warnings
warnings.warn(UserWarning("Maximum number of iterations reached"))
# compute final time and do final printout if `verbose`
t2 = time.time()
if verbose:
print(stars)
print('Elapsed: {} seconds.'.format(t2 - t1))
print(stars)
# Interpolation for the decision rule
dr.set_values(x_0)
return dr | [
"def",
"parameterized_expectations_direct",
"(",
"model",
",",
"verbose",
"=",
"False",
",",
"initial_dr",
"=",
"None",
",",
"pert_order",
"=",
"1",
",",
"grid",
"=",
"{",
"}",
",",
"distribution",
"=",
"{",
"}",
",",
"maxit",
"=",
"100",
",",
"tol",
"=",
"1e-8",
")",
":",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"g",
"=",
"model",
".",
"functions",
"[",
"'transition'",
"]",
"d",
"=",
"model",
".",
"functions",
"[",
"'direct_response'",
"]",
"h",
"=",
"model",
".",
"functions",
"[",
"'expectation'",
"]",
"parms",
"=",
"model",
".",
"calibration",
"[",
"'parameters'",
"]",
"if",
"initial_dr",
"is",
"None",
":",
"if",
"pert_order",
"==",
"1",
":",
"initial_dr",
"=",
"approximate_controls",
"(",
"model",
")",
"if",
"pert_order",
">",
"1",
":",
"raise",
"Exception",
"(",
"\"Perturbation order > 1 not supported (yet).\"",
")",
"approx",
"=",
"model",
".",
"get_grid",
"(",
"*",
"*",
"grid",
")",
"grid",
"=",
"approx",
".",
"grid",
"interp_type",
"=",
"approx",
".",
"interpolation",
"dr",
"=",
"create_interpolator",
"(",
"approx",
",",
"interp_type",
")",
"expect",
"=",
"create_interpolator",
"(",
"approx",
",",
"interp_type",
")",
"distrib",
"=",
"model",
".",
"get_distribution",
"(",
"*",
"*",
"distribution",
")",
"nodes",
",",
"weights",
"=",
"distrib",
".",
"discretize",
"(",
")",
"N",
"=",
"grid",
".",
"shape",
"[",
"0",
"]",
"z",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"len",
"(",
"model",
".",
"symbols",
"[",
"'expectations'",
"]",
")",
")",
")",
"x_0",
"=",
"initial_dr",
"(",
"grid",
")",
"x_0",
"=",
"x_0",
".",
"real",
"# just in case ...",
"h_0",
"=",
"h",
"(",
"grid",
",",
"x_0",
",",
"parms",
")",
"it",
"=",
"0",
"err",
"=",
"10",
"err_0",
"=",
"10",
"if",
"verbose",
":",
"headline",
"=",
"'|{0:^4} | {1:10} | {2:8} | {3:8} |'",
"headline",
"=",
"headline",
".",
"format",
"(",
"'N'",
",",
"' Error'",
",",
"'Gain'",
",",
"'Time'",
")",
"stars",
"=",
"'-'",
"*",
"len",
"(",
"headline",
")",
"print",
"(",
"stars",
")",
"print",
"(",
"headline",
")",
"print",
"(",
"stars",
")",
"# format string for within loop",
"fmt_str",
"=",
"'|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'",
"while",
"err",
">",
"tol",
"and",
"it",
"<=",
"maxit",
":",
"it",
"+=",
"1",
"t_start",
"=",
"time",
".",
"time",
"(",
")",
"# dr.set_values(x_0)",
"expect",
".",
"set_values",
"(",
"h_0",
")",
"z",
"[",
"...",
"]",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"weights",
".",
"shape",
"[",
"0",
"]",
")",
":",
"e",
"=",
"nodes",
"[",
"i",
",",
":",
"]",
"S",
"=",
"g",
"(",
"grid",
",",
"x_0",
",",
"e",
",",
"parms",
")",
"# evaluate expectation over the future state",
"z",
"+=",
"weights",
"[",
"i",
"]",
"*",
"expect",
"(",
"S",
")",
"# TODO: check that control is admissible",
"new_x",
"=",
"d",
"(",
"grid",
",",
"z",
",",
"parms",
")",
"new_h",
"=",
"h",
"(",
"grid",
",",
"new_x",
",",
"parms",
")",
"# update error",
"err",
"=",
"(",
"abs",
"(",
"new_h",
"-",
"h_0",
")",
".",
"max",
"(",
")",
")",
"# Update guess for decision rule and expectations function",
"x_0",
"=",
"new_x",
"h_0",
"=",
"new_h",
"# print error information if `verbose`",
"err_SA",
"=",
"err",
"/",
"err_0",
"err_0",
"=",
"err",
"t_finish",
"=",
"time",
".",
"time",
"(",
")",
"elapsed",
"=",
"t_finish",
"-",
"t_start",
"if",
"verbose",
":",
"print",
"(",
"fmt_str",
".",
"format",
"(",
"it",
",",
"err",
",",
"err_SA",
",",
"elapsed",
")",
")",
"if",
"it",
"==",
"maxit",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"UserWarning",
"(",
"\"Maximum number of iterations reached\"",
")",
")",
"# compute final fime and do final printout if `verbose`",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"if",
"verbose",
":",
"print",
"(",
"stars",
")",
"print",
"(",
"'Elapsed: {} seconds.'",
".",
"format",
"(",
"t2",
"-",
"t1",
")",
")",
"print",
"(",
"stars",
")",
"# Interpolation for the decision rule",
"dr",
".",
"set_values",
"(",
"x_0",
")",
"return",
"dr"
] | Finds a global solution for ``model`` using parameterized expectations
function. Requires the model to be written with controls as a direct
function of the model objects.
The algorithm iterates on the expectations function in the arbitrage
equation. It follows the discussion in section 9.9 of Miranda and
Fackler (2002).
Parameters
----------
model : NumericModel
"dtcscc" model to be solved
verbose : boolean
if True, display iterations
initial_dr : decision rule
initial guess for the decision rule
pert_order : {1}
if no initial guess is supplied, the perturbation solution at order
``pert_order`` is used as initial guess
grid: grid options
distribution: distribution options
maxit: maximum number of iterations
tol: tolerance criterion for successive approximations
Returns
-------
decision rule :
approximated solution | [
"Finds",
"a",
"global",
"solution",
"for",
"model",
"using",
"parameterized",
"expectations",
"function",
".",
"Requires",
"the",
"model",
"to",
"be",
"written",
"with",
"controls",
"as",
"a",
"direct",
"function",
"of",
"the",
"model",
"objects",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/trash/dolo/algos/dtcscc/time_iteration_2.py#L186-L312 |
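A hedged usage sketch: the module sits under `trash/`, so the import below assumes the repo root is on sys.path; the YAML file is hypothetical and must supply the `direct_response` and `expectation` functions required above.
from dolo import yaml_import  # dolo's documented model loader
from trash.dolo.algos.dtcscc.time_iteration_2 import parameterized_expectations_direct  # path from the record; uncertain

model = yaml_import("examples/models/rbc_direct.yaml")  # hypothetical model file
dr = parameterized_expectations_direct(model, verbose=True, maxit=100, tol=1e-8)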
251,731 | EconForge/dolo | dolo/compiler/misc.py | numdiff | def numdiff(fun, args):
"""Vectorized numerical differentiation"""
# vectorized version
epsilon = 1e-8
args = list(args)
v0 = fun(*args)
N = v0.shape[0]
l_v = len(v0)
dvs = []
for i, a in enumerate(args):
l_a = (a).shape[1]
dv = numpy.zeros((N, l_v, l_a))
nargs = list(args) #.copy()
for j in range(l_a):
xx = args[i].copy()
xx[:, j] += epsilon
nargs[i] = xx
dv[:, :, j] = (fun(*nargs) - v0) / epsilon
dvs.append(dv)
return [v0] + dvs | python | def numdiff(fun, args):
# vectorized version
epsilon = 1e-8
args = list(args)
v0 = fun(*args)
N = v0.shape[0]
l_v = len(v0)
dvs = []
for i, a in enumerate(args):
l_a = (a).shape[1]
dv = numpy.zeros((N, l_v, l_a))
nargs = list(args) #.copy()
for j in range(l_a):
xx = args[i].copy()
xx[:, j] += epsilon
nargs[i] = xx
dv[:, :, j] = (fun(*nargs) - v0) / epsilon
dvs.append(dv)
return [v0] + dvs | [
"def",
"numdiff",
"(",
"fun",
",",
"args",
")",
":",
"# vectorized version",
"epsilon",
"=",
"1e-8",
"args",
"=",
"list",
"(",
"args",
")",
"v0",
"=",
"fun",
"(",
"*",
"args",
")",
"N",
"=",
"v0",
".",
"shape",
"[",
"0",
"]",
"l_v",
"=",
"len",
"(",
"v0",
")",
"dvs",
"=",
"[",
"]",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"args",
")",
":",
"l_a",
"=",
"(",
"a",
")",
".",
"shape",
"[",
"1",
"]",
"dv",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"N",
",",
"l_v",
",",
"l_a",
")",
")",
"nargs",
"=",
"list",
"(",
"args",
")",
"#.copy()",
"for",
"j",
"in",
"range",
"(",
"l_a",
")",
":",
"xx",
"=",
"args",
"[",
"i",
"]",
".",
"copy",
"(",
")",
"xx",
"[",
":",
",",
"j",
"]",
"+=",
"epsilon",
"nargs",
"[",
"i",
"]",
"=",
"xx",
"dv",
"[",
":",
",",
":",
",",
"j",
"]",
"=",
"(",
"fun",
"(",
"*",
"nargs",
")",
"-",
"v0",
")",
"/",
"epsilon",
"dvs",
".",
"append",
"(",
"dv",
")",
"return",
"[",
"v0",
"]",
"+",
"dvs"
] | Vectorized numerical differentiation | [
"Vectorized",
"numerical",
"differentiation"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/misc.py#L97-L118 |
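A quick sanity check (assumed import path). Since `l_v = len(v0)` counts rows, the sketch keeps a single output column per observation:
import numpy as np
from dolo.compiler.misc import numdiff  # assumed import path

def fun(x, y):
    return x * y                        # element-wise, output shape (N, 1)

x = np.array([[1.0], [2.0]])
y = np.array([[3.0], [4.0]])
v0, dx, dy = numdiff(fun, (x, y))
print(dx[:, 0, 0], dy[:, 0, 0])         # ~[3. 4.] and ~[1. 2.]: d(xy)/dx = y, d(xy)/dy = x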
251,732 | EconForge/dolo | dolo/numeric/filters.py | bandpass_filter | def bandpass_filter(data, k, w1, w2):
"""
This function will apply a bandpass filter to data. It will be kth
order and will select the band between w1 and w2.
Parameters
----------
data: array, dtype=float
The data you wish to filter
k: number, int
The order of approximation for the filter. A max value for
this is data.size/2
w1: number, float
The lower bound of the pass band, expressed as a period in
sample units: components with period shorter than w1 are removed.
w2: number, float
The upper bound of the pass band, expressed as a period in
sample units: components with period longer than w2 are removed.
Returns
-------
y: array, dtype=float
The filtered data.
"""
data = np.asarray(data)
low_w = np.pi * 2 / w2
high_w = np.pi * 2 / w1
bweights = np.zeros(2 * k + 1)
bweights[k] = (high_w - low_w) / np.pi
j = np.arange(1, int(k) + 1)
weights = 1 / (np.pi * j) * (sin(high_w * j) - sin(low_w * j))
bweights[k + j] = weights
bweights[:k] = weights[::-1]
bweights -= bweights.mean()
return fftconvolve(bweights, data, mode='valid') | python | def bandpass_filter(data, k, w1, w2):
data = np.asarray(data)
low_w = np.pi * 2 / w2
high_w = np.pi * 2 / w1
bweights = np.zeros(2 * k + 1)
bweights[k] = (high_w - low_w) / np.pi
j = np.arange(1, int(k) + 1)
weights = 1 / (np.pi * j) * (sin(high_w * j) - sin(low_w * j))
bweights[k + j] = weights
bweights[:k] = weights[::-1]
bweights -= bweights.mean()
return fftconvolve(bweights, data, mode='valid') | [
"def",
"bandpass_filter",
"(",
"data",
",",
"k",
",",
"w1",
",",
"w2",
")",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"low_w",
"=",
"np",
".",
"pi",
"*",
"2",
"/",
"w2",
"high_w",
"=",
"np",
".",
"pi",
"*",
"2",
"/",
"w1",
"bweights",
"=",
"np",
".",
"zeros",
"(",
"2",
"*",
"k",
"+",
"1",
")",
"bweights",
"[",
"k",
"]",
"=",
"(",
"high_w",
"-",
"low_w",
")",
"/",
"np",
".",
"pi",
"j",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"int",
"(",
"k",
")",
"+",
"1",
")",
"weights",
"=",
"1",
"/",
"(",
"np",
".",
"pi",
"*",
"j",
")",
"*",
"(",
"sin",
"(",
"high_w",
"*",
"j",
")",
"-",
"sin",
"(",
"low_w",
"*",
"j",
")",
")",
"bweights",
"[",
"k",
"+",
"j",
"]",
"=",
"weights",
"bweights",
"[",
":",
"k",
"]",
"=",
"weights",
"[",
":",
":",
"-",
"1",
"]",
"bweights",
"-=",
"bweights",
".",
"mean",
"(",
")",
"return",
"fftconvolve",
"(",
"bweights",
",",
"data",
",",
"mode",
"=",
"'valid'",
")"
] | This function will apply a bandpass filter to data. It will be kth
order and will select the band between w1 and w2.
Parameters
----------
data: array, dtype=float
The data you wish to filter
k: number, int
The order of approximation for the filter. A max value for
        this is data.size/2
w1: number, float
This is the lower bound for which frequencies will pass
through.
w2: number, float
This is the upper bound for which frequencies will pass
through.
Returns
-------
y: array, dtype=float
The filtered data. | [
"This",
"function",
"will",
"apply",
"a",
"bandpass",
"filter",
"to",
"data",
".",
"It",
"will",
"be",
"kth",
"order",
"and",
"will",
"select",
"the",
"band",
"between",
"w1",
"and",
"w2",
"."
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/filters.py#L83-L119 |
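A hedged usage sketch for the band-pass filter above; the import path is taken from the record, and the k/w1/w2 choice (a 6-32 period band, Baxter-King style) is an illustrative assumption:

import numpy as np
from dolo.numeric.filters import bandpass_filter  # module path from the record above

t = np.arange(400)
# a 20-period cycle on top of a slow trend plus noise
series = np.sin(2 * np.pi * t / 20) + 0.01 * t + 0.3 * np.random.randn(400)
cycle = bandpass_filter(series, k=12, w1=6, w2=32)
# mode='valid' trims k samples from each end: 400 - 2*12 = 376
print(series.shape, cycle.shape)  # (400,) (376,)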
251,733 | EconForge/dolo | dolo/misc/dprint.py | dprint | def dprint(s):
    '''Prints `s` with additional debugging information'''
import inspect
frameinfo = inspect.stack()[1]
callerframe = frameinfo.frame
d = callerframe.f_locals
if (isinstance(s,str)):
val = eval(s, d)
else:
val = s
cc = frameinfo.code_context[0]
import re
regex = re.compile("dprint\((.*)\)")
res = regex.search(cc)
s = res.group(1)
text = ''
text += bcolors.OKBLUE + "At <{}>\n".format(str(frameinfo)) + bcolors.ENDC
text += bcolors.WARNING + "{}: ".format(s) + bcolors.ENDC
text += str(val)
text += str()
print(text) | python | def dprint(s):
    '''Prints `s` with additional debugging information'''
import inspect
frameinfo = inspect.stack()[1]
callerframe = frameinfo.frame
d = callerframe.f_locals
if (isinstance(s,str)):
val = eval(s, d)
else:
val = s
cc = frameinfo.code_context[0]
import re
regex = re.compile("dprint\((.*)\)")
res = regex.search(cc)
s = res.group(1)
text = ''
text += bcolors.OKBLUE + "At <{}>\n".format(str(frameinfo)) + bcolors.ENDC
text += bcolors.WARNING + "{}: ".format(s) + bcolors.ENDC
text += str(val)
text += str()
print(text) | [
"def",
"dprint",
"(",
"s",
")",
":",
"import",
"inspect",
"frameinfo",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"1",
"]",
"callerframe",
"=",
"frameinfo",
".",
"frame",
"d",
"=",
"callerframe",
".",
"f_locals",
"if",
"(",
"isinstance",
"(",
"s",
",",
"str",
")",
")",
":",
"val",
"=",
"eval",
"(",
"s",
",",
"d",
")",
"else",
":",
"val",
"=",
"s",
"cc",
"=",
"frameinfo",
".",
"code_context",
"[",
"0",
"]",
"import",
"re",
"regex",
"=",
"re",
".",
"compile",
"(",
"\"dprint\\((.*)\\)\"",
")",
"res",
"=",
"regex",
".",
"search",
"(",
"cc",
")",
"s",
"=",
"res",
".",
"group",
"(",
"1",
")",
"text",
"=",
"''",
"text",
"+=",
"bcolors",
".",
"OKBLUE",
"+",
"\"At <{}>\\n\"",
".",
"format",
"(",
"str",
"(",
"frameinfo",
")",
")",
"+",
"bcolors",
".",
"ENDC",
"text",
"+=",
"bcolors",
".",
"WARNING",
"+",
"\"{}: \"",
".",
"format",
"(",
"s",
")",
"+",
"bcolors",
".",
"ENDC",
"text",
"+=",
"str",
"(",
"val",
")",
"text",
"+=",
"str",
"(",
")",
"print",
"(",
"text",
")"
] | Prints `s` with additional debugging information | [
"Prints",
"s",
"with",
"additional",
"debugging",
"informations"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/misc/dprint.py#L21-L46 |
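A call-site sketch for dprint (hypothetical; the printed text embeds the caller's frame info, so the output shown is illustrative only):

from dolo.misc.dprint import dprint  # module path from the record above

def solve_step():
    residual = 1.25e-06
    # the string is evaluated in this frame's locals and echoed with its source
    dprint("residual * 2")

solve_step()  # prints the frame location, then: "residual * 2": 2.5e-06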
251,734 | EconForge/dolo | dolo/compiler/function_compiler_sympy.py | non_decreasing_series | def non_decreasing_series(n, size):
'''Lists all combinations of 0,...,n-1 in increasing order'''
if size == 1:
return [[a] for a in range(n)]
else:
lc = non_decreasing_series(n, size-1)
ll = []
for l in lc:
last = l[-1]
for i in range(last, n):
e = l + [i]
ll.append(e)
return ll | python | def non_decreasing_series(n, size):
'''Lists all combinations of 0,...,n-1 in increasing order'''
if size == 1:
return [[a] for a in range(n)]
else:
lc = non_decreasing_series(n, size-1)
ll = []
for l in lc:
last = l[-1]
for i in range(last, n):
e = l + [i]
ll.append(e)
return ll | [
"def",
"non_decreasing_series",
"(",
"n",
",",
"size",
")",
":",
"if",
"size",
"==",
"1",
":",
"return",
"[",
"[",
"a",
"]",
"for",
"a",
"in",
"range",
"(",
"n",
")",
"]",
"else",
":",
"lc",
"=",
"non_decreasing_series",
"(",
"n",
",",
"size",
"-",
"1",
")",
"ll",
"=",
"[",
"]",
"for",
"l",
"in",
"lc",
":",
"last",
"=",
"l",
"[",
"-",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"last",
",",
"n",
")",
":",
"e",
"=",
"l",
"+",
"[",
"i",
"]",
"ll",
".",
"append",
"(",
"e",
")",
"return",
"ll"
] | Lists all combinations of 0,...,n-1 in increasing order | [
"Lists",
"all",
"combinations",
"of",
"0",
"...",
"n",
"-",
"1",
"in",
"increasing",
"order"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/function_compiler_sympy.py#L13-L26 |
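A concrete call showing what the combinatorial helper above yields -- the multi-indices needed for symmetric higher-order derivatives:

from dolo.compiler.function_compiler_sympy import non_decreasing_series

print(non_decreasing_series(3, 2))
# [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 2]]
# every pair (i, j) with i <= j over three variables, and nothing repeated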
251,735 | EconForge/dolo | dolo/compiler/function_compiler_sympy.py | higher_order_diff | def higher_order_diff(eqs, syms, order=2):
    '''Takes higher order derivatives of a list of equations w.r.t a list of parameters'''
import numpy
eqs = list([sympy.sympify(eq) for eq in eqs])
syms = list([sympy.sympify(s) for s in syms])
neq = len(eqs)
p = len(syms)
D = [numpy.array(eqs)]
orders = []
for i in range(1,order+1):
par = D[i-1]
mat = numpy.empty([neq] + [p]*i, dtype=object) #.append( numpy.zeros(orders))
for ind in non_decreasing_series(p,i):
ind_parent = ind[:-1]
k = ind[-1]
for line in range(neq):
ii = [line] + ind
iid = [line] + ind_parent
eeq = par[ tuple(iid) ]
mat[tuple(ii)] = eeq.diff(syms[k])
D.append(mat)
return D | python | def higher_order_diff(eqs, syms, order=2):
    '''Takes higher order derivatives of a list of equations w.r.t a list of parameters'''
import numpy
eqs = list([sympy.sympify(eq) for eq in eqs])
syms = list([sympy.sympify(s) for s in syms])
neq = len(eqs)
p = len(syms)
D = [numpy.array(eqs)]
orders = []
for i in range(1,order+1):
par = D[i-1]
mat = numpy.empty([neq] + [p]*i, dtype=object) #.append( numpy.zeros(orders))
for ind in non_decreasing_series(p,i):
ind_parent = ind[:-1]
k = ind[-1]
for line in range(neq):
ii = [line] + ind
iid = [line] + ind_parent
eeq = par[ tuple(iid) ]
mat[tuple(ii)] = eeq.diff(syms[k])
D.append(mat)
return D | [
"def",
"higher_order_diff",
"(",
"eqs",
",",
"syms",
",",
"order",
"=",
"2",
")",
":",
"import",
"numpy",
"eqs",
"=",
"list",
"(",
"[",
"sympy",
".",
"sympify",
"(",
"eq",
")",
"for",
"eq",
"in",
"eqs",
"]",
")",
"syms",
"=",
"list",
"(",
"[",
"sympy",
".",
"sympify",
"(",
"s",
")",
"for",
"s",
"in",
"syms",
"]",
")",
"neq",
"=",
"len",
"(",
"eqs",
")",
"p",
"=",
"len",
"(",
"syms",
")",
"D",
"=",
"[",
"numpy",
".",
"array",
"(",
"eqs",
")",
"]",
"orders",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"order",
"+",
"1",
")",
":",
"par",
"=",
"D",
"[",
"i",
"-",
"1",
"]",
"mat",
"=",
"numpy",
".",
"empty",
"(",
"[",
"neq",
"]",
"+",
"[",
"p",
"]",
"*",
"i",
",",
"dtype",
"=",
"object",
")",
"#.append( numpy.zeros(orders))",
"for",
"ind",
"in",
"non_decreasing_series",
"(",
"p",
",",
"i",
")",
":",
"ind_parent",
"=",
"ind",
"[",
":",
"-",
"1",
"]",
"k",
"=",
"ind",
"[",
"-",
"1",
"]",
"for",
"line",
"in",
"range",
"(",
"neq",
")",
":",
"ii",
"=",
"[",
"line",
"]",
"+",
"ind",
"iid",
"=",
"[",
"line",
"]",
"+",
"ind_parent",
"eeq",
"=",
"par",
"[",
"tuple",
"(",
"iid",
")",
"]",
"mat",
"[",
"tuple",
"(",
"ii",
")",
"]",
"=",
"eeq",
".",
"diff",
"(",
"syms",
"[",
"k",
"]",
")",
"D",
".",
"append",
"(",
"mat",
")",
"return",
"D"
] | Takes higher order derivatives of a list of equations w.r.t a list of parameters | [
"Takes",
"higher",
"order",
"derivatives",
"of",
"a",
"list",
"of",
"equations",
"w",
".",
"r",
".",
"t",
"a",
"list",
"of",
"paramters"
] | d91ddf148b009bf79852d9aec70f3a1877e0f79a | https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/function_compiler_sympy.py#L28-L60 |
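A small symbolic check of the record above (string inputs are fine because the function sympifies them; index tuples outside the non-decreasing pattern are left as None by numpy.empty with dtype=object):

from dolo.compiler.function_compiler_sympy import higher_order_diff

D = higher_order_diff(['x**2*y + y'], ['x', 'y'], order=2)
print(D[0][0])        # x**2*y + y
print(D[1][0, 0])     # 2*x*y   (d/dx)
print(D[2][0, 0, 1])  # 2*x     (d2/dx dy)
print(D[2][0, 1, 0])  # None -- only non-decreasing index tuples are filled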
251,736 | pokerregion/poker | poker/website/pocketfives.py | get_ranked_players | def get_ranked_players():
"""Get the list of the first 100 ranked players."""
rankings_page = requests.get(RANKINGS_URL)
root = etree.HTML(rankings_page.text)
player_rows = root.xpath('//div[@id="ranked"]//tr')
for row in player_rows[1:]:
player_row = row.xpath('td[@class!="country"]//text()')
yield _Player(
name=player_row[1],
country=row[1][0].get('title'),
triple_crowns=player_row[3],
monthly_win=player_row[4],
biggest_cash=player_row[5],
plb_score=player_row[6],
biggest_score=player_row[7],
average_score=player_row[8],
previous_rank=player_row[9],
) | python | def get_ranked_players():
rankings_page = requests.get(RANKINGS_URL)
root = etree.HTML(rankings_page.text)
player_rows = root.xpath('//div[@id="ranked"]//tr')
for row in player_rows[1:]:
player_row = row.xpath('td[@class!="country"]//text()')
yield _Player(
name=player_row[1],
country=row[1][0].get('title'),
triple_crowns=player_row[3],
monthly_win=player_row[4],
biggest_cash=player_row[5],
plb_score=player_row[6],
biggest_score=player_row[7],
average_score=player_row[8],
previous_rank=player_row[9],
) | [
"def",
"get_ranked_players",
"(",
")",
":",
"rankings_page",
"=",
"requests",
".",
"get",
"(",
"RANKINGS_URL",
")",
"root",
"=",
"etree",
".",
"HTML",
"(",
"rankings_page",
".",
"text",
")",
"player_rows",
"=",
"root",
".",
"xpath",
"(",
"'//div[@id=\"ranked\"]//tr'",
")",
"for",
"row",
"in",
"player_rows",
"[",
"1",
":",
"]",
":",
"player_row",
"=",
"row",
".",
"xpath",
"(",
"'td[@class!=\"country\"]//text()'",
")",
"yield",
"_Player",
"(",
"name",
"=",
"player_row",
"[",
"1",
"]",
",",
"country",
"=",
"row",
"[",
"1",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"'title'",
")",
",",
"triple_crowns",
"=",
"player_row",
"[",
"3",
"]",
",",
"monthly_win",
"=",
"player_row",
"[",
"4",
"]",
",",
"biggest_cash",
"=",
"player_row",
"[",
"5",
"]",
",",
"plb_score",
"=",
"player_row",
"[",
"6",
"]",
",",
"biggest_score",
"=",
"player_row",
"[",
"7",
"]",
",",
"average_score",
"=",
"player_row",
"[",
"8",
"]",
",",
"previous_rank",
"=",
"player_row",
"[",
"9",
"]",
",",
")"
] | Get the list of the first 100 ranked players. | [
"Get",
"the",
"list",
"of",
"the",
"first",
"100",
"ranked",
"players",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/pocketfives.py#L31-L50 |
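Consuming the generator above (a live-HTTP sketch; the attribute names come from the record's _Player keyword arguments):

from poker.website.pocketfives import get_ranked_players

for rank, player in enumerate(get_ranked_players(), start=1):
    print(rank, player.name, player.country, player.plb_score)
    if rank == 5:  # just peek at the top five of the ~100 returned
        break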
251,737 | pokerregion/poker | poker/card.py | Rank.difference | def difference(cls, first, second):
"""Tells the numerical difference between two ranks."""
# so we always get a Rank instance even if string were passed in
first, second = cls(first), cls(second)
rank_list = list(cls)
return abs(rank_list.index(first) - rank_list.index(second)) | python | def difference(cls, first, second):
# so we always get a Rank instance even if string were passed in
first, second = cls(first), cls(second)
rank_list = list(cls)
return abs(rank_list.index(first) - rank_list.index(second)) | [
"def",
"difference",
"(",
"cls",
",",
"first",
",",
"second",
")",
":",
"# so we always get a Rank instance even if string were passed in",
"first",
",",
"second",
"=",
"cls",
"(",
"first",
")",
",",
"cls",
"(",
"second",
")",
"rank_list",
"=",
"list",
"(",
"cls",
")",
"return",
"abs",
"(",
"rank_list",
".",
"index",
"(",
"first",
")",
"-",
"rank_list",
".",
"index",
"(",
"second",
")",
")"
] | Tells the numerical difference between two ranks. | [
"Tells",
"the",
"numerical",
"difference",
"between",
"two",
"ranks",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/card.py#L42-L48 |
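A quick illustration of the classmethod above, assuming poker.card's conventional one-character rank symbols such as 'A' and 'K':

from poker.card import Rank

print(Rank.difference('A', 'K'))  # 1
print(Rank.difference('2', 'A'))  # 12 -- span of the whole rank order
# abs() makes the distance symmetric
print(Rank.difference('K', 'A') == Rank.difference('A', 'K'))  # True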
251,738 | pokerregion/poker | poker/card.py | _CardMeta.make_random | def make_random(cls):
"""Returns a random Card instance."""
self = object.__new__(cls)
self.rank = Rank.make_random()
self.suit = Suit.make_random()
return self | python | def make_random(cls):
self = object.__new__(cls)
self.rank = Rank.make_random()
self.suit = Suit.make_random()
return self | [
"def",
"make_random",
"(",
"cls",
")",
":",
"self",
"=",
"object",
".",
"__new__",
"(",
"cls",
")",
"self",
".",
"rank",
"=",
"Rank",
".",
"make_random",
"(",
")",
"self",
".",
"suit",
"=",
"Suit",
".",
"make_random",
"(",
")",
"return",
"self"
] | Returns a random Card instance. | [
"Returns",
"a",
"random",
"Card",
"instance",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/card.py#L64-L69 |
251,739 | pokerregion/poker | poker/commands.py | twoplustwo_player | def twoplustwo_player(username):
"""Get profile information about a Two plus Two Forum member given the username."""
from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError
try:
member = ForumMember(username)
except UserNotFoundError:
raise click.ClickException('User "%s" not found!' % username)
except AmbiguousUserNameError as e:
click.echo('Got multiple users with similar names!', err=True)
for ind, user in enumerate(e.users):
click.echo('{}. {}'.format(ind + 1, user.name), err=True)
number = click.prompt('Which would you like to see [{}-{}]'.format(1, len(e.users)),
prompt_suffix='? ', type=click.IntRange(1, len(e.users)), err=True)
userid = e.users[int(number) - 1].id
member = ForumMember.from_userid(userid)
click.echo(err=True) # empty line after input
_print_header('Two plus two forum member')
_print_values(
('Username', member.username),
('Forum id', member.id),
('Location', member.location),
('Total posts', member.total_posts),
('Posts per day', member.posts_per_day),
('Rank', member.rank),
('Last activity', member.last_activity),
('Join date', member.join_date),
('Usergroups', member.public_usergroups),
('Profile picture', member.profile_picture),
('Avatar', member.avatar),
) | python | def twoplustwo_player(username):
from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError
try:
member = ForumMember(username)
except UserNotFoundError:
raise click.ClickException('User "%s" not found!' % username)
except AmbiguousUserNameError as e:
click.echo('Got multiple users with similar names!', err=True)
for ind, user in enumerate(e.users):
click.echo('{}. {}'.format(ind + 1, user.name), err=True)
number = click.prompt('Which would you like to see [{}-{}]'.format(1, len(e.users)),
prompt_suffix='? ', type=click.IntRange(1, len(e.users)), err=True)
userid = e.users[int(number) - 1].id
member = ForumMember.from_userid(userid)
click.echo(err=True) # empty line after input
_print_header('Two plus two forum member')
_print_values(
('Username', member.username),
('Forum id', member.id),
('Location', member.location),
('Total posts', member.total_posts),
('Posts per day', member.posts_per_day),
('Rank', member.rank),
('Last activity', member.last_activity),
('Join date', member.join_date),
('Usergroups', member.public_usergroups),
('Profile picture', member.profile_picture),
('Avatar', member.avatar),
) | [
"def",
"twoplustwo_player",
"(",
"username",
")",
":",
"from",
".",
"website",
".",
"twoplustwo",
"import",
"ForumMember",
",",
"AmbiguousUserNameError",
",",
"UserNotFoundError",
"try",
":",
"member",
"=",
"ForumMember",
"(",
"username",
")",
"except",
"UserNotFoundError",
":",
"raise",
"click",
".",
"ClickException",
"(",
"'User \"%s\" not found!'",
"%",
"username",
")",
"except",
"AmbiguousUserNameError",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"'Got multiple users with similar names!'",
",",
"err",
"=",
"True",
")",
"for",
"ind",
",",
"user",
"in",
"enumerate",
"(",
"e",
".",
"users",
")",
":",
"click",
".",
"echo",
"(",
"'{}. {}'",
".",
"format",
"(",
"ind",
"+",
"1",
",",
"user",
".",
"name",
")",
",",
"err",
"=",
"True",
")",
"number",
"=",
"click",
".",
"prompt",
"(",
"'Which would you like to see [{}-{}]'",
".",
"format",
"(",
"1",
",",
"len",
"(",
"e",
".",
"users",
")",
")",
",",
"prompt_suffix",
"=",
"'? '",
",",
"type",
"=",
"click",
".",
"IntRange",
"(",
"1",
",",
"len",
"(",
"e",
".",
"users",
")",
")",
",",
"err",
"=",
"True",
")",
"userid",
"=",
"e",
".",
"users",
"[",
"int",
"(",
"number",
")",
"-",
"1",
"]",
".",
"id",
"member",
"=",
"ForumMember",
".",
"from_userid",
"(",
"userid",
")",
"click",
".",
"echo",
"(",
"err",
"=",
"True",
")",
"# empty line after input",
"_print_header",
"(",
"'Two plus two forum member'",
")",
"_print_values",
"(",
"(",
"'Username'",
",",
"member",
".",
"username",
")",
",",
"(",
"'Forum id'",
",",
"member",
".",
"id",
")",
",",
"(",
"'Location'",
",",
"member",
".",
"location",
")",
",",
"(",
"'Total posts'",
",",
"member",
".",
"total_posts",
")",
",",
"(",
"'Posts per day'",
",",
"member",
".",
"posts_per_day",
")",
",",
"(",
"'Rank'",
",",
"member",
".",
"rank",
")",
",",
"(",
"'Last activity'",
",",
"member",
".",
"last_activity",
")",
",",
"(",
"'Join date'",
",",
"member",
".",
"join_date",
")",
",",
"(",
"'Usergroups'",
",",
"member",
".",
"public_usergroups",
")",
",",
"(",
"'Profile picture'",
",",
"member",
".",
"profile_picture",
")",
",",
"(",
"'Avatar'",
",",
"member",
".",
"avatar",
")",
",",
")"
] | Get profile information about a Two plus Two Forum member given the username. | [
"Get",
"profile",
"information",
"about",
"a",
"Two",
"plus",
"Two",
"Forum",
"member",
"given",
"the",
"username",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L59-L95 |
251,740 | pokerregion/poker | poker/commands.py | p5list | def p5list(num):
"""List pocketfives ranked players, max 100 if no NUM, or NUM if specified."""
from .website.pocketfives import get_ranked_players
format_str = '{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'\
'{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}'
click.echo(format_str.format(
'Rank' , 'Player name', 'Country', 'Triple', 'Monthly', 'Biggest cash',
'PLB score', 'Biggest s', 'Average s', 'Prev'
))
# just generate the appropriate number of underlines and cut them with format_str
underlines = ['-' * 20] * 10
click.echo(format_str.format(*underlines))
for ind, player in enumerate(get_ranked_players()):
click.echo(format_str.format(str(ind + 1) + '.', *player))
if ind == num - 1:
break | python | def p5list(num):
from .website.pocketfives import get_ranked_players
format_str = '{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'\
'{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}'
click.echo(format_str.format(
'Rank' , 'Player name', 'Country', 'Triple', 'Monthly', 'Biggest cash',
'PLB score', 'Biggest s', 'Average s', 'Prev'
))
# just generate the appropriate number of underlines and cut them with format_str
underlines = ['-' * 20] * 10
click.echo(format_str.format(*underlines))
for ind, player in enumerate(get_ranked_players()):
click.echo(format_str.format(str(ind + 1) + '.', *player))
if ind == num - 1:
break | [
"def",
"p5list",
"(",
"num",
")",
":",
"from",
".",
"website",
".",
"pocketfives",
"import",
"get_ranked_players",
"format_str",
"=",
"'{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'",
"'{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}'",
"click",
".",
"echo",
"(",
"format_str",
".",
"format",
"(",
"'Rank'",
",",
"'Player name'",
",",
"'Country'",
",",
"'Triple'",
",",
"'Monthly'",
",",
"'Biggest cash'",
",",
"'PLB score'",
",",
"'Biggest s'",
",",
"'Average s'",
",",
"'Prev'",
")",
")",
"# just generate the appropriate number of underlines and cut them with format_str",
"underlines",
"=",
"[",
"'-'",
"*",
"20",
"]",
"*",
"10",
"click",
".",
"echo",
"(",
"format_str",
".",
"format",
"(",
"*",
"underlines",
")",
")",
"for",
"ind",
",",
"player",
"in",
"enumerate",
"(",
"get_ranked_players",
"(",
")",
")",
":",
"click",
".",
"echo",
"(",
"format_str",
".",
"format",
"(",
"str",
"(",
"ind",
"+",
"1",
")",
"+",
"'.'",
",",
"*",
"player",
")",
")",
"if",
"ind",
"==",
"num",
"-",
"1",
":",
"break"
] | List pocketfives ranked players, max 100 if no NUM, or NUM if specified. | [
"List",
"pocketfives",
"ranked",
"players",
"max",
"100",
"if",
"no",
"NUM",
"or",
"NUM",
"if",
"specified",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L100-L119 |
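The column layout in p5list leans on string precision inside format specs: '{!s:<15.13}' stringifies, truncates to 13 characters, then left-pads to 15. A standalone demonstration of the same trick:

fmt = '{:>4.4} {!s:<15.13}{!s:<9.6}'
print(fmt.format('Rank', 'Player name', 'Triple'))
print(fmt.format('1.', 'averylongplayername', 123456789))
# '1.' is right-aligned in 4 columns; the long name and the number are
# truncated to 13 and 6 characters respectively, keeping the columns aligned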
251,741 | pokerregion/poker | poker/commands.py | psstatus | def psstatus():
"""Shows PokerStars status such as number of players, tournaments."""
from .website.pokerstars import get_status
_print_header('PokerStars status')
status = get_status()
_print_values(
('Info updated', status.updated),
('Tables', status.tables),
('Players', status.players),
('Active tournaments', status.active_tournaments),
('Total tournaments', status.total_tournaments),
('Clubs', status.clubs),
('Club members', status.club_members),
)
site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'
click.echo('\nSite Tables Players Tournaments')
click.echo('----------- ------ ------- -----------')
for site in status.sites:
click.echo(site_format_str.format(site)) | python | def psstatus():
from .website.pokerstars import get_status
_print_header('PokerStars status')
status = get_status()
_print_values(
('Info updated', status.updated),
('Tables', status.tables),
('Players', status.players),
('Active tournaments', status.active_tournaments),
('Total tournaments', status.total_tournaments),
('Clubs', status.clubs),
('Club members', status.club_members),
)
site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'
click.echo('\nSite Tables Players Tournaments')
click.echo('----------- ------ ------- -----------')
for site in status.sites:
click.echo(site_format_str.format(site)) | [
"def",
"psstatus",
"(",
")",
":",
"from",
".",
"website",
".",
"pokerstars",
"import",
"get_status",
"_print_header",
"(",
"'PokerStars status'",
")",
"status",
"=",
"get_status",
"(",
")",
"_print_values",
"(",
"(",
"'Info updated'",
",",
"status",
".",
"updated",
")",
",",
"(",
"'Tables'",
",",
"status",
".",
"tables",
")",
",",
"(",
"'Players'",
",",
"status",
".",
"players",
")",
",",
"(",
"'Active tournaments'",
",",
"status",
".",
"active_tournaments",
")",
",",
"(",
"'Total tournaments'",
",",
"status",
".",
"total_tournaments",
")",
",",
"(",
"'Clubs'",
",",
"status",
".",
"clubs",
")",
",",
"(",
"'Club members'",
",",
"status",
".",
"club_members",
")",
",",
")",
"site_format_str",
"=",
"'{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'",
"click",
".",
"echo",
"(",
"'\\nSite Tables Players Tournaments'",
")",
"click",
".",
"echo",
"(",
"'----------- ------ ------- -----------'",
")",
"for",
"site",
"in",
"status",
".",
"sites",
":",
"click",
".",
"echo",
"(",
"site_format_str",
".",
"format",
"(",
"site",
")",
")"
] | Shows PokerStars status such as number of players, tournaments. | [
"Shows",
"PokerStars",
"status",
"such",
"as",
"number",
"of",
"players",
"tournaments",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L123-L145 |
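The site row in psstatus combines attribute access inside the format spec with the thousands separator. A minimal stand-in demonstration (the real status.sites entries presumably carry the same attribute names):

from collections import namedtuple

Site = namedtuple('Site', 'id tables players active_tournaments')
row = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'
print(row.format(Site('PokerStars', 1234, 56789, 321)))
# PokerStars   1,234   56,789   321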
251,742 | pokerregion/poker | poker/room/pokerstars.py | Notes.notes | def notes(self):
"""Tuple of notes.."""
return tuple(self._get_note_data(note) for note in self.root.iter('note')) | python | def notes(self):
return tuple(self._get_note_data(note) for note in self.root.iter('note')) | [
"def",
"notes",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"self",
".",
"_get_note_data",
"(",
"note",
")",
"for",
"note",
"in",
"self",
".",
"root",
".",
"iter",
"(",
"'note'",
")",
")"
] | Tuple of notes. | [
"Tuple",
"of",
"notes",
".."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L335-L337 |
251,743 | pokerregion/poker | poker/room/pokerstars.py | Notes.labels | def labels(self):
"""Tuple of labels."""
return tuple(_Label(label.get('id'), label.get('color'), label.text) for label
in self.root.iter('label')) | python | def labels(self):
return tuple(_Label(label.get('id'), label.get('color'), label.text) for label
in self.root.iter('label')) | [
"def",
"labels",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"_Label",
"(",
"label",
".",
"get",
"(",
"'id'",
")",
",",
"label",
".",
"get",
"(",
"'color'",
")",
",",
"label",
".",
"text",
")",
"for",
"label",
"in",
"self",
".",
"root",
".",
"iter",
"(",
"'label'",
")",
")"
] | Tuple of labels. | [
"Tuple",
"of",
"labels",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L340-L343 |
251,744 | pokerregion/poker | poker/room/pokerstars.py | Notes.add_note | def add_note(self, player, text, label=None, update=None):
"""Add a note to the xml. If update param is None, it will be the current time."""
if label is not None and (label not in self.label_names):
raise LabelNotFoundError('Invalid label: {}'.format(label))
if update is None:
update = datetime.utcnow()
# converted to timestamp, rounded to ones
update = update.strftime('%s')
label_id = self._get_label_id(label)
new_note = etree.Element('note', player=player, label=label_id, update=update)
new_note.text = text
self.root.append(new_note) | python | def add_note(self, player, text, label=None, update=None):
if label is not None and (label not in self.label_names):
raise LabelNotFoundError('Invalid label: {}'.format(label))
if update is None:
update = datetime.utcnow()
# converted to timestamp, rounded to ones
update = update.strftime('%s')
label_id = self._get_label_id(label)
new_note = etree.Element('note', player=player, label=label_id, update=update)
new_note.text = text
self.root.append(new_note) | [
"def",
"add_note",
"(",
"self",
",",
"player",
",",
"text",
",",
"label",
"=",
"None",
",",
"update",
"=",
"None",
")",
":",
"if",
"label",
"is",
"not",
"None",
"and",
"(",
"label",
"not",
"in",
"self",
".",
"label_names",
")",
":",
"raise",
"LabelNotFoundError",
"(",
"'Invalid label: {}'",
".",
"format",
"(",
"label",
")",
")",
"if",
"update",
"is",
"None",
":",
"update",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"# converted to timestamp, rounded to ones",
"update",
"=",
"update",
".",
"strftime",
"(",
"'%s'",
")",
"label_id",
"=",
"self",
".",
"_get_label_id",
"(",
"label",
")",
"new_note",
"=",
"etree",
".",
"Element",
"(",
"'note'",
",",
"player",
"=",
"player",
",",
"label",
"=",
"label_id",
",",
"update",
"=",
"update",
")",
"new_note",
".",
"text",
"=",
"text",
"self",
".",
"root",
".",
"append",
"(",
"new_note",
")"
] | Add a note to the xml. If update param is None, it will be the current time. | [
"Add",
"a",
"note",
"to",
"the",
"xml",
".",
"If",
"update",
"param",
"is",
"None",
"it",
"will",
"be",
"the",
"current",
"time",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L354-L365 |
251,745 | pokerregion/poker | poker/room/pokerstars.py | Notes.append_note | def append_note(self, player, text):
"""Append text to an already existing note."""
note = self._find_note(player)
note.text += text | python | def append_note(self, player, text):
note = self._find_note(player)
note.text += text | [
"def",
"append_note",
"(",
"self",
",",
"player",
",",
"text",
")",
":",
"note",
"=",
"self",
".",
"_find_note",
"(",
"player",
")",
"note",
".",
"text",
"+=",
"text"
] | Append text to an already existing note. | [
"Append",
"text",
"to",
"an",
"already",
"existing",
"note",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L367-L370 |
251,746 | pokerregion/poker | poker/room/pokerstars.py | Notes.prepend_note | def prepend_note(self, player, text):
"""Prepend text to an already existing note."""
note = self._find_note(player)
note.text = text + note.text | python | def prepend_note(self, player, text):
note = self._find_note(player)
note.text = text + note.text | [
"def",
"prepend_note",
"(",
"self",
",",
"player",
",",
"text",
")",
":",
"note",
"=",
"self",
".",
"_find_note",
"(",
"player",
")",
"note",
".",
"text",
"=",
"text",
"+",
"note",
".",
"text"
] | Prepend text to an already existing note. | [
"Prepend",
"text",
"to",
"an",
"already",
"existing",
"note",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L372-L375 |
251,747 | pokerregion/poker | poker/room/pokerstars.py | Notes.get_label | def get_label(self, name):
"""Find the label by name."""
label_tag = self._find_label(name)
return _Label(label_tag.get('id'), label_tag.get('color'), label_tag.text) | python | def get_label(self, name):
label_tag = self._find_label(name)
return _Label(label_tag.get('id'), label_tag.get('color'), label_tag.text) | [
"def",
"get_label",
"(",
"self",
",",
"name",
")",
":",
"label_tag",
"=",
"self",
".",
"_find_label",
"(",
"name",
")",
"return",
"_Label",
"(",
"label_tag",
".",
"get",
"(",
"'id'",
")",
",",
"label_tag",
".",
"get",
"(",
"'color'",
")",
",",
"label_tag",
".",
"text",
")"
] | Find the label by name. | [
"Find",
"the",
"label",
"by",
"name",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L412-L415 |
251,748 | pokerregion/poker | poker/room/pokerstars.py | Notes.add_label | def add_label(self, name, color):
"""Add a new label. It's id will automatically be calculated."""
color_upper = color.upper()
if not self._color_re.match(color_upper):
raise ValueError('Invalid color: {}'.format(color))
labels_tag = self.root[0]
last_id = int(labels_tag[-1].get('id'))
new_id = str(last_id + 1)
new_label = etree.Element('label', id=new_id, color=color_upper)
new_label.text = name
labels_tag.append(new_label) | python | def add_label(self, name, color):
color_upper = color.upper()
if not self._color_re.match(color_upper):
raise ValueError('Invalid color: {}'.format(color))
labels_tag = self.root[0]
last_id = int(labels_tag[-1].get('id'))
new_id = str(last_id + 1)
new_label = etree.Element('label', id=new_id, color=color_upper)
new_label.text = name
labels_tag.append(new_label) | [
"def",
"add_label",
"(",
"self",
",",
"name",
",",
"color",
")",
":",
"color_upper",
"=",
"color",
".",
"upper",
"(",
")",
"if",
"not",
"self",
".",
"_color_re",
".",
"match",
"(",
"color_upper",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid color: {}'",
".",
"format",
"(",
"color",
")",
")",
"labels_tag",
"=",
"self",
".",
"root",
"[",
"0",
"]",
"last_id",
"=",
"int",
"(",
"labels_tag",
"[",
"-",
"1",
"]",
".",
"get",
"(",
"'id'",
")",
")",
"new_id",
"=",
"str",
"(",
"last_id",
"+",
"1",
")",
"new_label",
"=",
"etree",
".",
"Element",
"(",
"'label'",
",",
"id",
"=",
"new_id",
",",
"color",
"=",
"color_upper",
")",
"new_label",
".",
"text",
"=",
"name",
"labels_tag",
".",
"append",
"(",
"new_label",
")"
] | Add a new label. Its id will automatically be calculated. | [
"Add",
"a",
"new",
"label",
".",
"It",
"s",
"id",
"will",
"automatically",
"be",
"calculated",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L417-L430 |
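A hedged end-to-end sketch tying together the Notes methods recorded above. It assumes `notes` is an already-constructed poker.room.pokerstars.Notes instance (the constructor is outside this excerpt) and that the label color is any string accepted by the class's hex-color regex:

# assumed: notes = <an existing poker.room.pokerstars.Notes instance>
notes.add_label('FISH', '30DBFF')      # new id is last label id + 1
notes.add_note('villain42', 'limps too much', label='FISH')
notes.append_note('villain42', ' -- folds to 3bets')
print(notes.get_label('FISH'))         # a _Label tuple: (id, color '30DBFF', name 'FISH')
notes.save('notes.updated.xml')        # save() is shown a few records below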
251,749 | pokerregion/poker | poker/room/pokerstars.py | Notes.del_label | def del_label(self, name):
"""Delete a label by name."""
labels_tag = self.root[0]
labels_tag.remove(self._find_label(name)) | python | def del_label(self, name):
labels_tag = self.root[0]
labels_tag.remove(self._find_label(name)) | [
"def",
"del_label",
"(",
"self",
",",
"name",
")",
":",
"labels_tag",
"=",
"self",
".",
"root",
"[",
"0",
"]",
"labels_tag",
".",
"remove",
"(",
"self",
".",
"_find_label",
"(",
"name",
")",
")"
] | Delete a label by name. | [
"Delete",
"a",
"label",
"by",
"name",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L432-L435 |
251,750 | pokerregion/poker | poker/room/pokerstars.py | Notes.save | def save(self, filename):
"""Save the note XML to a file."""
with open(filename, 'w') as fp:
fp.write(str(self)) | python | def save(self, filename):
with open(filename, 'w') as fp:
fp.write(str(self)) | [
"def",
"save",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"str",
"(",
"self",
")",
")"
] | Save the note XML to a file. | [
"Save",
"the",
"note",
"XML",
"to",
"a",
"file",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/room/pokerstars.py#L447-L450 |
251,751 | pokerregion/poker | poker/handhistory.py | _BaseHandHistory.board | def board(self):
"""Calculates board from flop, turn and river."""
board = []
if self.flop:
board.extend(self.flop.cards)
if self.turn:
board.append(self.turn)
if self.river:
board.append(self.river)
return tuple(board) if board else None | python | def board(self):
board = []
if self.flop:
board.extend(self.flop.cards)
if self.turn:
board.append(self.turn)
if self.river:
board.append(self.river)
return tuple(board) if board else None | [
"def",
"board",
"(",
"self",
")",
":",
"board",
"=",
"[",
"]",
"if",
"self",
".",
"flop",
":",
"board",
".",
"extend",
"(",
"self",
".",
"flop",
".",
"cards",
")",
"if",
"self",
".",
"turn",
":",
"board",
".",
"append",
"(",
"self",
".",
"turn",
")",
"if",
"self",
".",
"river",
":",
"board",
".",
"append",
"(",
"self",
".",
"river",
")",
"return",
"tuple",
"(",
"board",
")",
"if",
"board",
"else",
"None"
] | Calculates board from flop, turn and river. | [
"Calculates",
"board",
"from",
"flop",
"turn",
"and",
"river",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/handhistory.py#L167-L176 |
251,752 | pokerregion/poker | poker/handhistory.py | _BaseHandHistory._parse_date | def _parse_date(self, date_string):
"""Parse the date_string and return a datetime object as UTC."""
date = datetime.strptime(date_string, self._DATE_FORMAT)
self.date = self._TZ.localize(date).astimezone(pytz.UTC) | python | def _parse_date(self, date_string):
date = datetime.strptime(date_string, self._DATE_FORMAT)
self.date = self._TZ.localize(date).astimezone(pytz.UTC) | [
"def",
"_parse_date",
"(",
"self",
",",
"date_string",
")",
":",
"date",
"=",
"datetime",
".",
"strptime",
"(",
"date_string",
",",
"self",
".",
"_DATE_FORMAT",
")",
"self",
".",
"date",
"=",
"self",
".",
"_TZ",
".",
"localize",
"(",
"date",
")",
".",
"astimezone",
"(",
"pytz",
".",
"UTC",
")"
] | Parse the date_string and return a datetime object as UTC. | [
"Parse",
"the",
"date_string",
"and",
"return",
"a",
"datetime",
"object",
"as",
"UTC",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/handhistory.py#L178-L181 |
251,753 | pokerregion/poker | poker/handhistory.py | _SplittableHandHistoryMixin._split_raw | def _split_raw(self):
"""Split hand history by sections."""
self._splitted = self._split_re.split(self.raw)
# search split locations (basically empty strings)
self._sections = [ind for ind, elem in enumerate(self._splitted) if not elem] | python | def _split_raw(self):
self._splitted = self._split_re.split(self.raw)
# search split locations (basically empty strings)
self._sections = [ind for ind, elem in enumerate(self._splitted) if not elem] | [
"def",
"_split_raw",
"(",
"self",
")",
":",
"self",
".",
"_splitted",
"=",
"self",
".",
"_split_re",
".",
"split",
"(",
"self",
".",
"raw",
")",
"# search split locations (basically empty strings)",
"self",
".",
"_sections",
"=",
"[",
"ind",
"for",
"ind",
",",
"elem",
"in",
"enumerate",
"(",
"self",
".",
"_splitted",
")",
"if",
"not",
"elem",
"]"
] | Split hand history by sections. | [
"Split",
"hand",
"history",
"by",
"sections",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/handhistory.py#L201-L206 |
251,754 | pokerregion/poker | poker/website/twoplustwo.py | ForumMember._get_timezone | def _get_timezone(self, root):
"""Find timezone informatation on bottom of the page."""
tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text
hours = int(self._tz_re.search(tz_str).group(1))
return tzoffset(tz_str, hours * 60) | python | def _get_timezone(self, root):
tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text
hours = int(self._tz_re.search(tz_str).group(1))
return tzoffset(tz_str, hours * 60) | [
"def",
"_get_timezone",
"(",
"self",
",",
"root",
")",
":",
"tz_str",
"=",
"root",
".",
"xpath",
"(",
"'//div[@class=\"smallfont\" and @align=\"center\"]'",
")",
"[",
"0",
"]",
".",
"text",
"hours",
"=",
"int",
"(",
"self",
".",
"_tz_re",
".",
"search",
"(",
"tz_str",
")",
".",
"group",
"(",
"1",
")",
")",
"return",
"tzoffset",
"(",
"tz_str",
",",
"hours",
"*",
"60",
")"
] | Find timezone information at the bottom of the page. | [
"Find",
"timezone",
"informatation",
"on",
"bottom",
"of",
"the",
"page",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/twoplustwo.py#L125-L129 |
251,755 | pokerregion/poker | poker/website/pokerstars.py | get_current_tournaments | def get_current_tournaments():
"""Get the next 200 tournaments from pokerstars."""
schedule_page = requests.get(TOURNAMENTS_XML_URL)
root = etree.XML(schedule_page.content)
for tour in root.iter('{*}tournament'):
yield _Tournament(
start_date=tour.findtext('{*}start_date'),
name=tour.findtext('{*}name'),
game=tour.findtext('{*}game'),
buyin=tour.findtext('{*}buy_in_fee'),
players=tour.get('players')
) | python | def get_current_tournaments():
schedule_page = requests.get(TOURNAMENTS_XML_URL)
root = etree.XML(schedule_page.content)
for tour in root.iter('{*}tournament'):
yield _Tournament(
start_date=tour.findtext('{*}start_date'),
name=tour.findtext('{*}name'),
game=tour.findtext('{*}game'),
buyin=tour.findtext('{*}buy_in_fee'),
players=tour.get('players')
) | [
"def",
"get_current_tournaments",
"(",
")",
":",
"schedule_page",
"=",
"requests",
".",
"get",
"(",
"TOURNAMENTS_XML_URL",
")",
"root",
"=",
"etree",
".",
"XML",
"(",
"schedule_page",
".",
"content",
")",
"for",
"tour",
"in",
"root",
".",
"iter",
"(",
"'{*}tournament'",
")",
":",
"yield",
"_Tournament",
"(",
"start_date",
"=",
"tour",
".",
"findtext",
"(",
"'{*}start_date'",
")",
",",
"name",
"=",
"tour",
".",
"findtext",
"(",
"'{*}name'",
")",
",",
"game",
"=",
"tour",
".",
"findtext",
"(",
"'{*}game'",
")",
",",
"buyin",
"=",
"tour",
".",
"findtext",
"(",
"'{*}buy_in_fee'",
")",
",",
"players",
"=",
"tour",
".",
"get",
"(",
"'players'",
")",
")"
] | Get the next 200 tournaments from pokerstars. | [
"Get",
"the",
"next",
"200",
"tournaments",
"from",
"pokerstars",
"."
] | 2d8cf208fdf2b26bdc935972dcbe7a983a9e9768 | https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/pokerstars.py#L29-L42 |
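Consuming the tournament feed above (live HTTP; all fields arrive as strings straight from the XML):

from poker.website.pokerstars import get_current_tournaments

for tour in get_current_tournaments():
    print(tour.start_date, tour.name, tour.game, tour.buyin, tour.players)
    break  # the feed lists roughly the next 200 tournaments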
251,756 | RKrahl/pytest-dependency | setup.py | _filter_file | def _filter_file(src, dest, subst):
"""Copy src to dest doing substitutions on the fly.
"""
substre = re.compile(r'\$(%s)' % '|'.join(subst.keys()))
def repl(m):
return subst[m.group(1)]
with open(src, "rt") as sf, open(dest, "wt") as df:
while True:
l = sf.readline()
if not l:
break
df.write(re.sub(substre, repl, l)) | python | def _filter_file(src, dest, subst):
substre = re.compile(r'\$(%s)' % '|'.join(subst.keys()))
def repl(m):
return subst[m.group(1)]
with open(src, "rt") as sf, open(dest, "wt") as df:
while True:
l = sf.readline()
if not l:
break
df.write(re.sub(substre, repl, l)) | [
"def",
"_filter_file",
"(",
"src",
",",
"dest",
",",
"subst",
")",
":",
"substre",
"=",
"re",
".",
"compile",
"(",
"r'\\$(%s)'",
"%",
"'|'",
".",
"join",
"(",
"subst",
".",
"keys",
"(",
")",
")",
")",
"def",
"repl",
"(",
"m",
")",
":",
"return",
"subst",
"[",
"m",
".",
"group",
"(",
"1",
")",
"]",
"with",
"open",
"(",
"src",
",",
"\"rt\"",
")",
"as",
"sf",
",",
"open",
"(",
"dest",
",",
"\"wt\"",
")",
"as",
"df",
":",
"while",
"True",
":",
"l",
"=",
"sf",
".",
"readline",
"(",
")",
"if",
"not",
"l",
":",
"break",
"df",
".",
"write",
"(",
"re",
".",
"sub",
"(",
"substre",
",",
"repl",
",",
"l",
")",
")"
] | Copy src to dest doing substitutions on the fly. | [
"Copy",
"src",
"to",
"dest",
"doing",
"substitutions",
"on",
"the",
"fly",
"."
] | 7b7c10818266ec4b05c36c341cf84f05d7ab53ce | https://github.com/RKrahl/pytest-dependency/blob/7b7c10818266ec4b05c36c341cf84f05d7ab53ce/setup.py#L18-L29 |
251,757 | profusion/sgqlc | sgqlc/endpoint/base.py | BaseEndpoint._fixup_graphql_error | def _fixup_graphql_error(self, data):
'''Given a possible GraphQL error payload, make sure it's in shape.
This will ensure the given ``data`` is in the shape:
.. code-block:: json
{"errors": [{"message": "some string"}]}
If ``errors`` is not an array, it will be made into a single element
array, with the object in that format, with its string representation
being the message.
If an element of the ``errors`` array is not in the format, then
it's converted to the format, with its string representation being
the message.
The input object is not changed, a copy is made if needed.
:return: the given ``data`` formatted to the correct shape, a copy
is made and returned if any fix up was needed.
:rtype: dict
'''
original_data = data
errors = data.get('errors')
original_errors = errors
if not isinstance(errors, list):
self.logger.warning('data["errors"] is not a list! Fix up data=%r',
data)
data = data.copy()
data['errors'] = [{'message': str(errors)}]
return data
for i, error in enumerate(errors):
if not isinstance(error, dict):
self.logger.warning('Error #%d: is not a dict: %r. Fix up!',
i, error)
if data is original_data:
data = data.copy()
if errors is original_errors:
errors = errors.copy()
data['errors'] = errors
errors[i] = {'message': str(error)}
continue
message = error.get('message')
if not isinstance(message, str):
if data is original_data:
data = data.copy()
if errors is original_errors:
errors = errors.copy()
data['errors'] = errors
message = str(error) if message is None else str(message)
error = error.copy()
error['message'] = message
errors[i] = error
return data | python | def _fixup_graphql_error(self, data):
'''Given a possible GraphQL error payload, make sure it's in shape.
This will ensure the given ``data`` is in the shape:
.. code-block:: json
{"errors": [{"message": "some string"}]}
If ``errors`` is not an array, it will be made into a single element
array, with the object in that format, with its string representation
being the message.
If an element of the ``errors`` array is not in the format, then
it's converted to the format, with its string representation being
the message.
The input object is not changed, a copy is made if needed.
:return: the given ``data`` formatted to the correct shape, a copy
is made and returned if any fix up was needed.
:rtype: dict
'''
original_data = data
errors = data.get('errors')
original_errors = errors
if not isinstance(errors, list):
self.logger.warning('data["errors"] is not a list! Fix up data=%r',
data)
data = data.copy()
data['errors'] = [{'message': str(errors)}]
return data
for i, error in enumerate(errors):
if not isinstance(error, dict):
self.logger.warning('Error #%d: is not a dict: %r. Fix up!',
i, error)
if data is original_data:
data = data.copy()
if errors is original_errors:
errors = errors.copy()
data['errors'] = errors
errors[i] = {'message': str(error)}
continue
message = error.get('message')
if not isinstance(message, str):
if data is original_data:
data = data.copy()
if errors is original_errors:
errors = errors.copy()
data['errors'] = errors
message = str(error) if message is None else str(message)
error = error.copy()
error['message'] = message
errors[i] = error
return data | [
"def",
"_fixup_graphql_error",
"(",
"self",
",",
"data",
")",
":",
"original_data",
"=",
"data",
"errors",
"=",
"data",
".",
"get",
"(",
"'errors'",
")",
"original_errors",
"=",
"errors",
"if",
"not",
"isinstance",
"(",
"errors",
",",
"list",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'data[\"errors\"] is not a list! Fix up data=%r'",
",",
"data",
")",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"data",
"[",
"'errors'",
"]",
"=",
"[",
"{",
"'message'",
":",
"str",
"(",
"errors",
")",
"}",
"]",
"return",
"data",
"for",
"i",
",",
"error",
"in",
"enumerate",
"(",
"errors",
")",
":",
"if",
"not",
"isinstance",
"(",
"error",
",",
"dict",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'Error #%d: is not a dict: %r. Fix up!'",
",",
"i",
",",
"error",
")",
"if",
"data",
"is",
"original_data",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"errors",
"is",
"original_errors",
":",
"errors",
"=",
"errors",
".",
"copy",
"(",
")",
"data",
"[",
"'errors'",
"]",
"=",
"errors",
"errors",
"[",
"i",
"]",
"=",
"{",
"'message'",
":",
"str",
"(",
"error",
")",
"}",
"continue",
"message",
"=",
"error",
".",
"get",
"(",
"'message'",
")",
"if",
"not",
"isinstance",
"(",
"message",
",",
"str",
")",
":",
"if",
"data",
"is",
"original_data",
":",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"errors",
"is",
"original_errors",
":",
"errors",
"=",
"errors",
".",
"copy",
"(",
")",
"data",
"[",
"'errors'",
"]",
"=",
"errors",
"message",
"=",
"str",
"(",
"error",
")",
"if",
"message",
"is",
"None",
"else",
"str",
"(",
"message",
")",
"error",
"=",
"error",
".",
"copy",
"(",
")",
"error",
"[",
"'message'",
"]",
"=",
"message",
"errors",
"[",
"i",
"]",
"=",
"error",
"return",
"data"
] | Given a possible GraphQL error payload, make sure it's in shape.
This will ensure the given ``data`` is in the shape:
.. code-block:: json
{"errors": [{"message": "some string"}]}
If ``errors`` is not an array, it will be made into a single element
array, with the object in that format, with its string representation
being the message.
If an element of the ``errors`` array is not in the format, then
it's converted to the format, with its string representation being
the message.
The input object is not changed, a copy is made if needed.
:return: the given ``data`` formatted to the correct shape, a copy
is made and returned if any fix up was needed.
:rtype: dict | [
"Given",
"a",
"possible",
"GraphQL",
"error",
"payload",
"make",
"sure",
"it",
"s",
"in",
"shape",
"."
] | 684afb059c93f142150043cafac09b7fd52bfa27 | https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/endpoint/base.py#L104-L163 |
251,758 | profusion/sgqlc | sgqlc/endpoint/base.py | BaseEndpoint.snippet | def snippet(code, locations, sep=' | ', colmark=('-', '^'), context=5):
'''Given a code and list of locations, convert to snippet lines.
return will include line number, a separator (``sep``), then
line contents.
At most ``context`` lines are shown before each location line.
After each location line, the column is marked using
``colmark``. The first character is repeated up to column, the
second character is used only once.
:return: list of lines of sources or column markups.
:rtype: list
'''
if not locations:
return []
lines = code.split('\n')
offset = int(len(lines) / 10) + 1
linenofmt = '%{}d'.format(offset)
s = []
for loc in locations:
line = max(0, loc.get('line', 1) - 1)
column = max(0, loc.get('column', 1) - 1)
start_line = max(0, line - context)
for i, ln in enumerate(lines[start_line:line + 1], start_line):
s.append('{}{}{}'.format(linenofmt % i, sep, ln))
s.append('{}{}{}'.format(' ' * (offset + len(sep)),
colmark[0] * column,
colmark[1]))
return s | python | def snippet(code, locations, sep=' | ', colmark=('-', '^'), context=5):
'''Given a code and list of locations, convert to snippet lines.
return will include line number, a separator (``sep``), then
line contents.
At most ``context`` lines are shown before each location line.
After each location line, the column is marked using
``colmark``. The first character is repeated up to column, the
second character is used only once.
:return: list of lines of sources or column markups.
:rtype: list
'''
if not locations:
return []
lines = code.split('\n')
offset = int(len(lines) / 10) + 1
linenofmt = '%{}d'.format(offset)
s = []
for loc in locations:
line = max(0, loc.get('line', 1) - 1)
column = max(0, loc.get('column', 1) - 1)
start_line = max(0, line - context)
for i, ln in enumerate(lines[start_line:line + 1], start_line):
s.append('{}{}{}'.format(linenofmt % i, sep, ln))
s.append('{}{}{}'.format(' ' * (offset + len(sep)),
colmark[0] * column,
colmark[1]))
return s | [
"def",
"snippet",
"(",
"code",
",",
"locations",
",",
"sep",
"=",
"' | '",
",",
"colmark",
"=",
"(",
"'-'",
",",
"'^'",
")",
",",
"context",
"=",
"5",
")",
":",
"if",
"not",
"locations",
":",
"return",
"[",
"]",
"lines",
"=",
"code",
".",
"split",
"(",
"'\\n'",
")",
"offset",
"=",
"int",
"(",
"len",
"(",
"lines",
")",
"/",
"10",
")",
"+",
"1",
"linenofmt",
"=",
"'%{}d'",
".",
"format",
"(",
"offset",
")",
"s",
"=",
"[",
"]",
"for",
"loc",
"in",
"locations",
":",
"line",
"=",
"max",
"(",
"0",
",",
"loc",
".",
"get",
"(",
"'line'",
",",
"1",
")",
"-",
"1",
")",
"column",
"=",
"max",
"(",
"0",
",",
"loc",
".",
"get",
"(",
"'column'",
",",
"1",
")",
"-",
"1",
")",
"start_line",
"=",
"max",
"(",
"0",
",",
"line",
"-",
"context",
")",
"for",
"i",
",",
"ln",
"in",
"enumerate",
"(",
"lines",
"[",
"start_line",
":",
"line",
"+",
"1",
"]",
",",
"start_line",
")",
":",
"s",
".",
"append",
"(",
"'{}{}{}'",
".",
"format",
"(",
"linenofmt",
"%",
"i",
",",
"sep",
",",
"ln",
")",
")",
"s",
".",
"append",
"(",
"'{}{}{}'",
".",
"format",
"(",
"' '",
"*",
"(",
"offset",
"+",
"len",
"(",
"sep",
")",
")",
",",
"colmark",
"[",
"0",
"]",
"*",
"column",
",",
"colmark",
"[",
"1",
"]",
")",
")",
"return",
"s"
] | Given a code and list of locations, convert to snippet lines.
return will include line number, a separator (``sep``), then
line contents.
At most ``context`` lines are shown before each location line.
After each location line, the column is marked using
``colmark``. The first character is repeated up to column, the
second character is used only once.
:return: list of lines of sources or column markups.
:rtype: list | [
"Given",
"a",
"code",
"and",
"list",
"of",
"locations",
"convert",
"to",
"snippet",
"lines",
"."
] | 684afb059c93f142150043cafac09b7fd52bfa27 | https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/endpoint/base.py#L206-L236 |
251,759 | profusion/sgqlc | sgqlc/types/__init__.py | _create_non_null_wrapper | def _create_non_null_wrapper(name, t):
'creates type wrapper for non-null of given type'
def __new__(cls, json_data, selection_list=None):
if json_data is None:
raise ValueError(name + ' received null value')
return t(json_data, selection_list)
def __to_graphql_input__(value, indent=0, indent_string=' '):
return t.__to_graphql_input__(value, indent, indent_string)
return type(name, (t,), {
'__new__': __new__,
'_%s__auto_register' % name: False,
'__to_graphql_input__': __to_graphql_input__,
}) | python | def _create_non_null_wrapper(name, t):
'creates type wrapper for non-null of given type'
def __new__(cls, json_data, selection_list=None):
if json_data is None:
raise ValueError(name + ' received null value')
return t(json_data, selection_list)
def __to_graphql_input__(value, indent=0, indent_string=' '):
return t.__to_graphql_input__(value, indent, indent_string)
return type(name, (t,), {
'__new__': __new__,
'_%s__auto_register' % name: False,
'__to_graphql_input__': __to_graphql_input__,
}) | [
"def",
"_create_non_null_wrapper",
"(",
"name",
",",
"t",
")",
":",
"def",
"__new__",
"(",
"cls",
",",
"json_data",
",",
"selection_list",
"=",
"None",
")",
":",
"if",
"json_data",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"name",
"+",
"' received null value'",
")",
"return",
"t",
"(",
"json_data",
",",
"selection_list",
")",
"def",
"__to_graphql_input__",
"(",
"value",
",",
"indent",
"=",
"0",
",",
"indent_string",
"=",
"' '",
")",
":",
"return",
"t",
".",
"__to_graphql_input__",
"(",
"value",
",",
"indent",
",",
"indent_string",
")",
"return",
"type",
"(",
"name",
",",
"(",
"t",
",",
")",
",",
"{",
"'__new__'",
":",
"__new__",
",",
"'_%s__auto_register'",
"%",
"name",
":",
"False",
",",
"'__to_graphql_input__'",
":",
"__to_graphql_input__",
",",
"}",
")"
] | creates type wrapper for non-null of given type | [
"creates",
"type",
"wrapper",
"for",
"non",
"-",
"null",
"of",
"given",
"type"
] | 684afb059c93f142150043cafac09b7fd52bfa27 | https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/types/__init__.py#L869-L883 |
251,760 | profusion/sgqlc | sgqlc/types/__init__.py | _create_list_of_wrapper | def _create_list_of_wrapper(name, t):
'creates type wrapper for list of given type'
def __new__(cls, json_data, selection_list=None):
if json_data is None:
return None
return [t(v, selection_list) for v in json_data]
def __to_graphql_input__(value, indent=0, indent_string=' '):
r = []
for v in value:
r.append(t.__to_graphql_input__(v, indent, indent_string))
return '[' + ', '.join(r) + ']'
def __to_json_value__(value):
if value is None:
return None
return [t.__to_json_value__(v) for v in value]
return type(name, (t,), {
'__new__': __new__,
'_%s__auto_register' % name: False,
'__to_graphql_input__': __to_graphql_input__,
'__to_json_value__': __to_json_value__,
}) | python | def _create_list_of_wrapper(name, t):
'creates type wrapper for list of given type'
def __new__(cls, json_data, selection_list=None):
if json_data is None:
return None
return [t(v, selection_list) for v in json_data]
def __to_graphql_input__(value, indent=0, indent_string=' '):
r = []
for v in value:
r.append(t.__to_graphql_input__(v, indent, indent_string))
return '[' + ', '.join(r) + ']'
def __to_json_value__(value):
if value is None:
return None
return [t.__to_json_value__(v) for v in value]
return type(name, (t,), {
'__new__': __new__,
'_%s__auto_register' % name: False,
'__to_graphql_input__': __to_graphql_input__,
'__to_json_value__': __to_json_value__,
}) | [
"def",
"_create_list_of_wrapper",
"(",
"name",
",",
"t",
")",
":",
"def",
"__new__",
"(",
"cls",
",",
"json_data",
",",
"selection_list",
"=",
"None",
")",
":",
"if",
"json_data",
"is",
"None",
":",
"return",
"None",
"return",
"[",
"t",
"(",
"v",
",",
"selection_list",
")",
"for",
"v",
"in",
"json_data",
"]",
"def",
"__to_graphql_input__",
"(",
"value",
",",
"indent",
"=",
"0",
",",
"indent_string",
"=",
"' '",
")",
":",
"r",
"=",
"[",
"]",
"for",
"v",
"in",
"value",
":",
"r",
".",
"append",
"(",
"t",
".",
"__to_graphql_input__",
"(",
"v",
",",
"indent",
",",
"indent_string",
")",
")",
"return",
"'['",
"+",
"', '",
".",
"join",
"(",
"r",
")",
"+",
"']'",
"def",
"__to_json_value__",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"return",
"[",
"t",
".",
"__to_json_value__",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]",
"return",
"type",
"(",
"name",
",",
"(",
"t",
",",
")",
",",
"{",
"'__new__'",
":",
"__new__",
",",
"'_%s__auto_register'",
"%",
"name",
":",
"False",
",",
"'__to_graphql_input__'",
":",
"__to_graphql_input__",
",",
"'__to_json_value__'",
":",
"__to_json_value__",
",",
"}",
")"
] | creates type wrapper for list of given type | [
"creates",
"type",
"wrapper",
"for",
"list",
"of",
"given",
"type"
] | 684afb059c93f142150043cafac09b7fd52bfa27 | https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/types/__init__.py#L886-L909 |
251,761 | profusion/sgqlc | sgqlc/endpoint/http.py | add_query_to_url | def add_query_to_url(url, extra_query):
'''Adds an extra query to URL, returning the new URL.
Extra query may be a dict or a list as returned by
:func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`.
'''
split = urllib.parse.urlsplit(url)
merged_query = urllib.parse.parse_qsl(split.query)
if isinstance(extra_query, dict):
for k, v in extra_query.items():
if not isinstance(v, (tuple, list)):
merged_query.append((k, v))
else:
for cv in v:
merged_query.append((k, cv))
else:
merged_query.extend(extra_query)
merged_split = urllib.parse.SplitResult(
split.scheme,
split.netloc,
split.path,
urllib.parse.urlencode(merged_query),
split.fragment,
)
return merged_split.geturl() | python | def add_query_to_url(url, extra_query):
'''Adds an extra query to URL, returning the new URL.
Extra query may be a dict or a list as returned by
:func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`.
'''
split = urllib.parse.urlsplit(url)
merged_query = urllib.parse.parse_qsl(split.query)
if isinstance(extra_query, dict):
for k, v in extra_query.items():
if not isinstance(v, (tuple, list)):
merged_query.append((k, v))
else:
for cv in v:
merged_query.append((k, cv))
else:
merged_query.extend(extra_query)
merged_split = urllib.parse.SplitResult(
split.scheme,
split.netloc,
split.path,
urllib.parse.urlencode(merged_query),
split.fragment,
)
return merged_split.geturl() | [
"def",
"add_query_to_url",
"(",
"url",
",",
"extra_query",
")",
":",
"split",
"=",
"urllib",
".",
"parse",
".",
"urlsplit",
"(",
"url",
")",
"merged_query",
"=",
"urllib",
".",
"parse",
".",
"parse_qsl",
"(",
"split",
".",
"query",
")",
"if",
"isinstance",
"(",
"extra_query",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"extra_query",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"v",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"merged_query",
".",
"append",
"(",
"(",
"k",
",",
"v",
")",
")",
"else",
":",
"for",
"cv",
"in",
"v",
":",
"merged_query",
".",
"append",
"(",
"(",
"k",
",",
"cv",
")",
")",
"else",
":",
"merged_query",
".",
"extend",
"(",
"extra_query",
")",
"merged_split",
"=",
"urllib",
".",
"parse",
".",
"SplitResult",
"(",
"split",
".",
"scheme",
",",
"split",
".",
"netloc",
",",
"split",
".",
"path",
",",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"merged_query",
")",
",",
"split",
".",
"fragment",
",",
")",
"return",
"merged_split",
".",
"geturl",
"(",
")"
] | Adds an extra query to URL, returning the new URL.
Extra query may be a dict or a list as returned by
:func:`urllib.parse.parse_qsl()` and :func:`urllib.parse.parse_qs()`. | [
"Adds",
"an",
"extra",
"query",
"to",
"URL",
"returning",
"the",
"new",
"URL",
"."
] | 684afb059c93f142150043cafac09b7fd52bfa27 | https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/endpoint/http.py#L33-L59 |
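A quick usage sketch, assuming the function above is in scope; it only needs the standard library, and list values expand into repeated query keys:

url = 'https://example.com/graphql?token=abc'
print(add_query_to_url(url, {'tag': ['a', 'b'], 'page': 2}))
# -> https://example.com/graphql?token=abc&tag=a&tag=b&page=2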
251,762 | profusion/sgqlc | sgqlc/types/relay.py | connection_args | def connection_args(*lst, **mapping):
'''Returns the default parameters for connection.
Extra parameters may be given as argument, both as iterable,
positional tuples or mapping.
By default, provides:
- ``after: String``
- ``before: String``
- ``first: Int``
- ``last: Int``
'''
pd = ArgDict(*lst, **mapping)
pd.setdefault('after', String)
pd.setdefault('before', String)
pd.setdefault('first', Int)
pd.setdefault('last', Int)
return pd | python | def connection_args(*lst, **mapping):
'''Returns the default parameters for connection.
Extra parameters may be given as argument, both as iterable,
positional tuples or mapping.
By default, provides:
- ``after: String``
- ``before: String``
- ``first: Int``
- ``last: Int``
'''
pd = ArgDict(*lst, **mapping)
pd.setdefault('after', String)
pd.setdefault('before', String)
pd.setdefault('first', Int)
pd.setdefault('last', Int)
return pd | [
"def",
"connection_args",
"(",
"*",
"lst",
",",
"*",
"*",
"mapping",
")",
":",
"pd",
"=",
"ArgDict",
"(",
"*",
"lst",
",",
"*",
"*",
"mapping",
")",
"pd",
".",
"setdefault",
"(",
"'after'",
",",
"String",
")",
"pd",
".",
"setdefault",
"(",
"'before'",
",",
"String",
")",
"pd",
".",
"setdefault",
"(",
"'first'",
",",
"Int",
")",
"pd",
".",
"setdefault",
"(",
"'last'",
",",
"Int",
")",
"return",
"pd"
] | Returns the default parameters for connection.
Extra parameters may be given as argument, both as iterable,
positional tuples or mapping.
By default, provides:
- ``after: String``
- ``before: String``
- ``first: Int``
- ``last: Int`` | [
"Returns",
"the",
"default",
"parameters",
"for",
"connection",
"."
] | 684afb059c93f142150043cafac09b7fd52bfa27 | https://github.com/profusion/sgqlc/blob/684afb059c93f142150043cafac09b7fd52bfa27/sgqlc/types/relay.py#L406-L424 |
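A usage sketch, assuming sgqlc is installed; 'search' is a hypothetical extra argument added on top of the Relay defaults:

from sgqlc.types import String
from sgqlc.types.relay import connection_args

args = connection_args(search=String)
print(list(args))
# expected keys: 'search' plus the defaults 'after', 'before', 'first', 'last'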
251,763 | nchopin/particles | book/pmcmc/pmmh_lingauss_varying_scale.py | msjd | def msjd(theta):
"""Mean squared jumping distance.
"""
s = 0.
for p in theta.dtype.names:
s += np.sum(np.diff(theta[p], axis=0) ** 2)
return s | python | def msjd(theta):
s = 0.
for p in theta.dtype.names:
s += np.sum(np.diff(theta[p], axis=0) ** 2)
return s | [
"def",
"msjd",
"(",
"theta",
")",
":",
"s",
"=",
"0.",
"for",
"p",
"in",
"theta",
".",
"dtype",
".",
"names",
":",
"s",
"+=",
"np",
".",
"sum",
"(",
"np",
".",
"diff",
"(",
"theta",
"[",
"p",
"]",
",",
"axis",
"=",
"0",
")",
"**",
"2",
")",
"return",
"s"
] | Mean squared jumping distance. | [
"Mean",
"squared",
"jumping",
"distance",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/book/pmcmc/pmmh_lingauss_varying_scale.py#L31-L37 |
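msjd sums, over all parameters of the structured array, the squared differences between successive chain states; a quick hand-checkable example with the function above in scope:

import numpy as np

theta = np.zeros(4, dtype=[('mu', float)])   # chain of 4 states, one parameter
theta['mu'] = [0., 1., 1., 3.]
print(msjd(theta))   # diffs are 1, 0, 2 -> 1 + 0 + 4 = 5.0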
251,764 | nchopin/particles | particles/smc_samplers.py | StaticModel.loglik | def loglik(self, theta, t=None):
""" log-likelihood at given parameter values.
Parameters
----------
theta: dict-like
theta['par'] is a ndarray containing the N values for parameter par
t: int
time (if set to None, the full log-likelihood is returned)
Returns
-------
l: float numpy.ndarray
the N log-likelihood values
"""
if t is None:
t = self.T - 1
l = np.zeros(shape=theta.shape[0])
for s in range(t + 1):
l += self.logpyt(theta, s)
return l | python | def loglik(self, theta, t=None):
if t is None:
t = self.T - 1
l = np.zeros(shape=theta.shape[0])
for s in range(t + 1):
l += self.logpyt(theta, s)
return l | [
"def",
"loglik",
"(",
"self",
",",
"theta",
",",
"t",
"=",
"None",
")",
":",
"if",
"t",
"is",
"None",
":",
"t",
"=",
"self",
".",
"T",
"-",
"1",
"l",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"theta",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"s",
"in",
"range",
"(",
"t",
"+",
"1",
")",
":",
"l",
"+=",
"self",
".",
"logpyt",
"(",
"theta",
",",
"s",
")",
"return",
"l"
] | log-likelihood at given parameter values.
Parameters
----------
theta: dict-like
theta['par'] is a ndarray containing the N values for parameter par
t: int
time (if set to None, the full log-likelihood is returned)
Returns
-------
l: float numpy.ndarray
the N log-likelihood values | [
"log",
"-",
"likelihood",
"at",
"given",
"parameter",
"values",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L91-L111 |
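The recursion is a plain sum of per-observation terms. A self-contained toy (not using the particles package) reproduces the same accumulation for i.i.d. N(mu, 1) data and two candidate values of mu:

import numpy as np
from scipy import stats

y = np.array([0.1, -0.4, 0.3])     # toy data, T = 3
mus = np.array([0.0, 0.5])         # N = 2 parameter values

def logpyt(mu, t):
    return stats.norm.logpdf(y[t], loc=mu)

l = np.zeros(mus.shape[0])         # same loop as loglik(theta, t=None)
for s in range(len(y)):
    l += logpyt(mus, s)
print(l)                           # one log-likelihood value per mu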
251,765 | nchopin/particles | particles/smc_samplers.py | StaticModel.logpost | def logpost(self, theta, t=None):
"""Posterior log-density at given parameter values.
Parameters
----------
theta: dict-like
theta['par'] is a ndarray containing the N values for parameter par
t: int
time (if set to None, the full posterior is returned)
Returns
-------
l: float numpy.ndarray
the N posterior log-density values
"""
return self.prior.logpdf(theta) + self.loglik(theta, t) | python | def logpost(self, theta, t=None):
return self.prior.logpdf(theta) + self.loglik(theta, t) | [
"def",
"logpost",
"(",
"self",
",",
"theta",
",",
"t",
"=",
"None",
")",
":",
"return",
"self",
".",
"prior",
".",
"logpdf",
"(",
"theta",
")",
"+",
"self",
".",
"loglik",
"(",
"theta",
",",
"t",
")"
] | Posterior log-density at given parameter values.
Parameters
----------
theta: dict-like
theta['par'] is a ndarray containing the N values for parameter par
t: int
time (if set to None, the full posterior is returned)
Returns
-------
l: float numpy.ndarray
the N posterior log-density values | [
"Posterior",
"log",
"-",
"density",
"at",
"given",
"parameter",
"values",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L113-L128 |
251,766 | nchopin/particles | particles/smc_samplers.py | FancyList.copyto | def copyto(self, src, where=None):
"""
Same syntax and functionality as numpy.copyto
"""
for n, _ in enumerate(self.l):
if where[n]:
self.l[n] = src.l[n] | python | def copyto(self, src, where=None):
for n, _ in enumerate(self.l):
if where[n]:
self.l[n] = src.l[n] | [
"def",
"copyto",
"(",
"self",
",",
"src",
",",
"where",
"=",
"None",
")",
":",
"for",
"n",
",",
"_",
"in",
"enumerate",
"(",
"self",
".",
"l",
")",
":",
"if",
"where",
"[",
"n",
"]",
":",
"self",
".",
"l",
"[",
"n",
"]",
"=",
"src",
".",
"l",
"[",
"n",
"]"
] | Same syntax and functionality as numpy.copyto | [
"Same",
"syntax",
"and",
"functionality",
"as",
"numpy",
".",
"copyto"
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L178-L185 |
251,767 | nchopin/particles | particles/smc_samplers.py | ThetaParticles.copy | def copy(self):
"""Returns a copy of the object."""
attrs = {k: self.__dict__[k].copy() for k in self.containers}
attrs.update({k: cp.deepcopy(self.__dict__[k]) for k in self.shared})
return self.__class__(**attrs) | python | def copy(self):
attrs = {k: self.__dict__[k].copy() for k in self.containers}
attrs.update({k: cp.deepcopy(self.__dict__[k]) for k in self.shared})
return self.__class__(**attrs) | [
"def",
"copy",
"(",
"self",
")",
":",
"attrs",
"=",
"{",
"k",
":",
"self",
".",
"__dict__",
"[",
"k",
"]",
".",
"copy",
"(",
")",
"for",
"k",
"in",
"self",
".",
"containers",
"}",
"attrs",
".",
"update",
"(",
"{",
"k",
":",
"cp",
".",
"deepcopy",
"(",
"self",
".",
"__dict__",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"self",
".",
"shared",
"}",
")",
"return",
"self",
".",
"__class__",
"(",
"*",
"*",
"attrs",
")"
] | Returns a copy of the object. | [
"Returns",
"a",
"copy",
"of",
"the",
"object",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L245-L249 |
251,768 | nchopin/particles | particles/smc_samplers.py | ThetaParticles.copyto | def copyto(self, src, where=None):
"""Emulates function `copyto` in NumPy.
Parameters
----------
where: (N,) bool ndarray
True if particle n in src must be copied.
src: (N,) `ThetaParticles` object
source
for each n such that where[n] is True, copy particle n in src
into self (at location n)
"""
for k in self.containers:
v = self.__dict__[k]
if isinstance(v, np.ndarray):
np.copyto(v, src.__dict__[k], where=where)
else:
v.copyto(src.__dict__[k], where=where) | python | def copyto(self, src, where=None):
for k in self.containers:
v = self.__dict__[k]
if isinstance(v, np.ndarray):
np.copyto(v, src.__dict__[k], where=where)
else:
v.copyto(src.__dict__[k], where=where) | [
"def",
"copyto",
"(",
"self",
",",
"src",
",",
"where",
"=",
"None",
")",
":",
"for",
"k",
"in",
"self",
".",
"containers",
":",
"v",
"=",
"self",
".",
"__dict__",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"v",
",",
"np",
".",
"ndarray",
")",
":",
"np",
".",
"copyto",
"(",
"v",
",",
"src",
".",
"__dict__",
"[",
"k",
"]",
",",
"where",
"=",
"where",
")",
"else",
":",
"v",
".",
"copyto",
"(",
"src",
".",
"__dict__",
"[",
"k",
"]",
",",
"where",
"=",
"where",
")"
] | Emulates function `copyto` in NumPy.
Parameters
----------
where: (N,) bool ndarray
True if particle n in src must be copied.
src: (N,) `ThetaParticles` object
source
for each n such that where[n] is True, copy particle n in src
into self (at location n) | [
"Emulates",
"function",
"copyto",
"in",
"NumPy",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L251-L270 |
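For plain ndarray containers the method defers to numpy.copyto with a boolean mask; the core mechanism in isolation:

import numpy as np

dest = np.zeros(5)
src = np.arange(5.)
np.copyto(dest, src, where=np.array([True, False, True, False, True]))
print(dest)   # -> [0. 0. 2. 0. 4.]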
251,769 | nchopin/particles | particles/smc_samplers.py | ThetaParticles.copyto_at | def copyto_at(self, n, src, m):
"""Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m]
"""
for k in self.containers:
self.__dict__[k][n] = src.__dict__[k][m] | python | def copyto_at(self, n, src, m):
for k in self.containers:
self.__dict__[k][n] = src.__dict__[k][m] | [
"def",
"copyto_at",
"(",
"self",
",",
"n",
",",
"src",
",",
"m",
")",
":",
"for",
"k",
"in",
"self",
".",
"containers",
":",
"self",
".",
"__dict__",
"[",
"k",
"]",
"[",
"n",
"]",
"=",
"src",
".",
"__dict__",
"[",
"k",
"]",
"[",
"m",
"]"
] | Copy to at a given location.
Parameters
----------
n: int
index where to copy
src: `ThetaParticles` object
source
m: int
index of the element to be copied
Note
----
Basically, does self[n] <- src[m] | [
"Copy",
"to",
"at",
"a",
"given",
"location",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L272-L289 |
251,770 | nchopin/particles | particles/smc_samplers.py | MetroParticles.Metropolis | def Metropolis(self, compute_target, mh_options):
"""Performs a certain number of Metropolis steps.
Parameters
----------
compute_target: function
computes the target density for the proposed values
mh_options: dict
+ 'type_prop': {'random_walk', 'independent'}
type of proposal: either Gaussian random walk, or independent Gaussian
+ 'adaptive': bool
If True, the covariance matrix of the random walk proposal is
set to `rw_scale` times the weighted cov matrix of the particle
sample (ignored if proposal is independent)
+ 'rw_scale': float (default=None)
see above (ignored if proposal is independent)
+ 'indep_scale': float (default=1.1)
for an independent proposal, the proposal distribution is
Gaussian with mean set to the particle mean, cov set to
`indep_scale` times particle covariance
+ 'nsteps': int (default: 0)
number of steps; if 0, the number of steps is chosen adaptively
as follows: we stop when the average distance between the
starting points and the stopping points increases by less than a
certain fraction
+ 'delta_dist': float (default: 0.1)
threshold for when nsteps = 0
"""
opts = mh_options.copy()
nsteps = opts.pop('nsteps', 0)
delta_dist = opts.pop('delta_dist', 0.1)
proposal = self.choose_proposal(**opts)
xout = self.copy()
xp = self.__class__(theta=np.empty_like(self.theta))
step_ars = []
for _ in self.mcmc_iterate(nsteps, self.arr, xout.arr, delta_dist):
xp.arr[:, :], delta_lp = proposal.step(xout.arr)
compute_target(xp)
lp_acc = xp.lpost - xout.lpost + delta_lp
accept = (np.log(stats.uniform.rvs(size=self.N)) < lp_acc)
xout.copyto(xp, where=accept)
step_ars.append(np.mean(accept))
xout.acc_rates = self.acc_rates + [step_ars]
return xout | python | def Metropolis(self, compute_target, mh_options):
opts = mh_options.copy()
nsteps = opts.pop('nsteps', 0)
delta_dist = opts.pop('delta_dist', 0.1)
proposal = self.choose_proposal(**opts)
xout = self.copy()
xp = self.__class__(theta=np.empty_like(self.theta))
step_ars = []
for _ in self.mcmc_iterate(nsteps, self.arr, xout.arr, delta_dist):
xp.arr[:, :], delta_lp = proposal.step(xout.arr)
compute_target(xp)
lp_acc = xp.lpost - xout.lpost + delta_lp
accept = (np.log(stats.uniform.rvs(size=self.N)) < lp_acc)
xout.copyto(xp, where=accept)
step_ars.append(np.mean(accept))
xout.acc_rates = self.acc_rates + [step_ars]
return xout | [
"def",
"Metropolis",
"(",
"self",
",",
"compute_target",
",",
"mh_options",
")",
":",
"opts",
"=",
"mh_options",
".",
"copy",
"(",
")",
"nsteps",
"=",
"opts",
".",
"pop",
"(",
"'nsteps'",
",",
"0",
")",
"delta_dist",
"=",
"opts",
".",
"pop",
"(",
"'delta_dist'",
",",
"0.1",
")",
"proposal",
"=",
"self",
".",
"choose_proposal",
"(",
"*",
"*",
"opts",
")",
"xout",
"=",
"self",
".",
"copy",
"(",
")",
"xp",
"=",
"self",
".",
"__class__",
"(",
"theta",
"=",
"np",
".",
"empty_like",
"(",
"self",
".",
"theta",
")",
")",
"step_ars",
"=",
"[",
"]",
"for",
"_",
"in",
"self",
".",
"mcmc_iterate",
"(",
"nsteps",
",",
"self",
".",
"arr",
",",
"xout",
".",
"arr",
",",
"delta_dist",
")",
":",
"xp",
".",
"arr",
"[",
":",
",",
":",
"]",
",",
"delta_lp",
"=",
"proposal",
".",
"step",
"(",
"xout",
".",
"arr",
")",
"compute_target",
"(",
"xp",
")",
"lp_acc",
"=",
"xp",
".",
"lpost",
"-",
"xout",
".",
"lpost",
"+",
"delta_lp",
"accept",
"=",
"(",
"np",
".",
"log",
"(",
"stats",
".",
"uniform",
".",
"rvs",
"(",
"size",
"=",
"self",
".",
"N",
")",
")",
"<",
"lp_acc",
")",
"xout",
".",
"copyto",
"(",
"xp",
",",
"where",
"=",
"accept",
")",
"step_ars",
".",
"append",
"(",
"np",
".",
"mean",
"(",
"accept",
")",
")",
"xout",
".",
"acc_rates",
"=",
"self",
".",
"acc_rates",
"+",
"[",
"step_ars",
"]",
"return",
"xout"
] | Performs a certain number of Metropolis steps.
Parameters
----------
compute_target: function
computes the target density for the proposed values
mh_options: dict
+ 'type_prop': {'random_walk', 'independent'}
type of proposal: either Gaussian random walk, or independent Gaussian
+ 'adaptive': bool
If True, the covariance matrix of the random walk proposal is
set to `rw_scale` times the weighted cov matrix of the particle
sample (ignored if proposal is independent)
+ 'rw_scale': float (default=None)
see above (ignored if proposal is independent)
+ 'indep_scale': float (default=1.1)
for an independent proposal, the proposal distribution is
Gaussian with mean set to the particle mean, cov set to
`indep_scale` times particle covariance
+ 'nsteps': int (default: 0)
number of steps; if 0, the number of steps is chosen adaptively
as follows: we stop when the average distance between the
starting points and the stopping points increases by less than a
certain fraction
+ 'delta_dist': float (default: 0.1)
threshold for when nsteps = 0 | [
"Performs",
"a",
"certain",
"number",
"of",
"Metropolis",
"steps",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smc_samplers.py#L375-L418 |
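The accept step compares N log-uniforms to lp_acc at once, so N chains move in parallel. A minimal plain-numpy random-walk sketch of the same accept/copy logic, targeting a standard normal (illustrative only, not the MetroParticles API):

import numpy as np

rng = np.random.default_rng(0)
N, nsteps, scale = 1000, 50, 1.0
x = rng.normal(size=N)                  # N chains run in parallel
logpi = lambda z: -0.5 * z ** 2         # log target, up to a constant

for _ in range(nsteps):
    xp = x + scale * rng.normal(size=N)            # random-walk proposal
    lp_acc = logpi(xp) - logpi(x)                  # symmetric: delta_lp = 0
    accept = np.log(rng.uniform(size=N)) < lp_acc
    x = np.where(accept, xp, x)                    # copyto(..., where=accept)

print(x.mean(), x.std())   # should be close to 0 and 1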
251,771 | nchopin/particles | particles/hmm.py | BaumWelch.backward | def backward(self):
"""Backward recursion.
Upon completion, the following list of length T is available:
* smth: marginal smoothing probabilities
Note
----
Performs the forward step in case it has not been performed before.
"""
if not self.filt:
self.forward()
self.smth = [self.filt[-1]]
log_trans = np.log(self.hmm.trans_mat)
ctg = np.zeros(self.hmm.dim) # cost to go (log-lik of y_{t+1:T} given x_t=k)
for filt, next_ft in reversed(list(zip(self.filt[:-1],
self.logft[1:]))):
new_ctg = np.empty(self.hmm.dim)
for k in range(self.hmm.dim):
new_ctg[k] = rs.log_sum_exp(log_trans[k, :] + next_ft + ctg)
ctg = new_ctg
smth = rs.exp_and_normalise(np.log(filt) + ctg)
self.smth.append(smth)
self.smth.reverse() | python | def backward(self):
if not self.filt:
self.forward()
self.smth = [self.filt[-1]]
log_trans = np.log(self.hmm.trans_mat)
ctg = np.zeros(self.hmm.dim) # cost to go (log-lik of y_{t+1:T} given x_t=k)
for filt, next_ft in reversed(list(zip(self.filt[:-1],
self.logft[1:]))):
new_ctg = np.empty(self.hmm.dim)
for k in range(self.hmm.dim):
new_ctg[k] = rs.log_sum_exp(log_trans[k, :] + next_ft + ctg)
ctg = new_ctg
smth = rs.exp_and_normalise(np.log(filt) + ctg)
self.smth.append(smth)
self.smth.reverse() | [
"def",
"backward",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"filt",
":",
"self",
".",
"forward",
"(",
")",
"self",
".",
"smth",
"=",
"[",
"self",
".",
"filt",
"[",
"-",
"1",
"]",
"]",
"log_trans",
"=",
"np",
".",
"log",
"(",
"self",
".",
"hmm",
".",
"trans_mat",
")",
"ctg",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"hmm",
".",
"dim",
")",
"# cost to go (log-lik of y_{t+1:T} given x_t=k)",
"for",
"filt",
",",
"next_ft",
"in",
"reversed",
"(",
"list",
"(",
"zip",
"(",
"self",
".",
"filt",
"[",
":",
"-",
"1",
"]",
",",
"self",
".",
"logft",
"[",
"1",
":",
"]",
")",
")",
")",
":",
"new_ctg",
"=",
"np",
".",
"empty",
"(",
"self",
".",
"hmm",
".",
"dim",
")",
"for",
"k",
"in",
"range",
"(",
"self",
".",
"hmm",
".",
"dim",
")",
":",
"new_ctg",
"[",
"k",
"]",
"=",
"rs",
".",
"log_sum_exp",
"(",
"log_trans",
"[",
"k",
",",
":",
"]",
"+",
"next_ft",
"+",
"ctg",
")",
"ctg",
"=",
"new_ctg",
"smth",
"=",
"rs",
".",
"exp_and_normalise",
"(",
"np",
".",
"log",
"(",
"filt",
")",
"+",
"ctg",
")",
"self",
".",
"smth",
".",
"append",
"(",
"smth",
")",
"self",
".",
"smth",
".",
"reverse",
"(",
")"
] | Backward recursion.
Upon completion, the following list of length T is available:
* smth: marginal smoothing probabilities
Note
----
Performs the forward step in case it has not been performed before. | [
"Backward",
"recursion",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/hmm.py#L215-L238 |
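A self-contained sketch of the same backward recursion for a two-state HMM over T = 2 observations, with scipy's logsumexp standing in for rs.log_sum_exp and the filtering quantities made up for illustration:

import numpy as np
from scipy.special import logsumexp

dim = 2
log_trans = np.log(np.array([[0.9, 0.1], [0.2, 0.8]]))
logft = [np.log([0.7, 0.3]), np.log([0.4, 0.6])]      # log p(y_t | x_t = k)
filt = [np.array([0.6, 0.4]), np.array([0.5, 0.5])]   # filtering probabilities

smth = [filt[-1]]
ctg = np.zeros(dim)   # log-lik of y_{t+1:T} given x_t = k
for f, next_ft in reversed(list(zip(filt[:-1], logft[1:]))):
    ctg = np.array([logsumexp(log_trans[k, :] + next_ft + ctg)
                    for k in range(dim)])
    w = np.log(f) + ctg
    smth.append(np.exp(w - logsumexp(w)))             # exp_and_normalise
smth.reverse()
print(smth[0])   # marginal smoothing probabilities at t = 0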
251,772 | nchopin/particles | particles/kalman.py | predict_step | def predict_step(F, covX, filt):
"""Predictive step of Kalman filter.
Parameters
----------
F: (dx, dx) numpy array
Mean of X_t | X_{t-1} is F * X_{t-1}
covX: (dx, dx) numpy array
covariance of X_t | X_{t-1}
filt: MeanAndCov object
filtering distribution at time t-1
Returns
-------
pred: MeanAndCov object
predictive distribution at time t
Note
----
filt.mean may either be of shape (dx,) or (N, dx); in the latter case
N predictive steps are performed in parallel.
"""
pred_mean = np.matmul(filt.mean, F.T)
pred_cov = dotdot(F, filt.cov, F.T) + covX
return MeanAndCov(mean=pred_mean, cov=pred_cov) | python | def predict_step(F, covX, filt):
pred_mean = np.matmul(filt.mean, F.T)
pred_cov = dotdot(F, filt.cov, F.T) + covX
return MeanAndCov(mean=pred_mean, cov=pred_cov) | [
"def",
"predict_step",
"(",
"F",
",",
"covX",
",",
"filt",
")",
":",
"pred_mean",
"=",
"np",
".",
"matmul",
"(",
"filt",
".",
"mean",
",",
"F",
".",
"T",
")",
"pred_cov",
"=",
"dotdot",
"(",
"F",
",",
"filt",
".",
"cov",
",",
"F",
".",
"T",
")",
"+",
"covX",
"return",
"MeanAndCov",
"(",
"mean",
"=",
"pred_mean",
",",
"cov",
"=",
"pred_cov",
")"
] | Predictive step of Kalman filter.
Parameters
----------
F: (dx, dx) numpy array
Mean of X_t | X_{t-1} is F * X_{t-1}
covX: (dx, dx) numpy array
covariance of X_t | X_{t-1}
filt: MeanAndCov object
filtering distribution at time t-1
Returns
-------
pred: MeanAndCov object
predictive distribution at time t
Note
----
filt.mean may either be of shape (dx,) or (N, dx); in the latter case
N predictive steps are performed in parallel. | [
"Predictive",
"step",
"of",
"Kalman",
"filter",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L163-L187 |
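With MeanAndCov mimicked by a namedtuple, the predictive step is two lines of linear algebra; a standalone sketch with constant-velocity dynamics (all numbers illustrative):

import numpy as np
from collections import namedtuple

MeanAndCov = namedtuple('MeanAndCov', 'mean cov')   # stand-in for the real class

F = np.array([[1., 1.], [0., 1.]])                  # constant-velocity dynamics
covX = 0.1 * np.eye(2)
filt = MeanAndCov(mean=np.array([0., 1.]), cov=np.eye(2))

pred_mean = filt.mean @ F.T               # np.matmul(filt.mean, F.T)
pred_cov = F @ filt.cov @ F.T + covX      # dotdot(F, filt.cov, F.T) + covX
print(pred_mean)                          # -> [1. 1.]
print(pred_cov)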
251,773 | nchopin/particles | particles/kalman.py | filter_step | def filter_step(G, covY, pred, yt):
"""Filtering step of Kalman filter.
Parameters
----------
G: (dy, dx) numpy array
mean of Y_t | X_t is G * X_t
covY: (dy, dy) numpy array
covariance of Y_t | X_t
pred: MeanAndCov object
predictive distribution at time t
Returns
-------
filt: MeanAndCov object
filtering distribution at time t
logpyt: float
log density of Y_t | Y_{0:t-1}
"""
# data prediction
data_pred_mean = np.matmul(pred.mean, G.T)
data_pred_cov = dotdot(G, pred.cov, G.T) + covY
if covY.shape[0] == 1:
logpyt = dists.Normal(loc=data_pred_mean,
scale=np.sqrt(data_pred_cov)).logpdf(yt)
else:
logpyt = dists.MvNormal(loc=data_pred_mean,
cov=data_pred_cov).logpdf(yt)
# filter
residual = yt - data_pred_mean
gain = dotdot(pred.cov, G.T, inv(data_pred_cov))
filt_mean = pred.mean + np.matmul(residual, gain.T)
filt_cov = pred.cov - dotdot(gain, G, pred.cov)
return MeanAndCov(mean=filt_mean, cov=filt_cov), logpyt | python | def filter_step(G, covY, pred, yt):
# data prediction
data_pred_mean = np.matmul(pred.mean, G.T)
data_pred_cov = dotdot(G, pred.cov, G.T) + covY
if covY.shape[0] == 1:
logpyt = dists.Normal(loc=data_pred_mean,
scale=np.sqrt(data_pred_cov)).logpdf(yt)
else:
logpyt = dists.MvNormal(loc=data_pred_mean,
cov=data_pred_cov).logpdf(yt)
# filter
residual = yt - data_pred_mean
gain = dotdot(pred.cov, G.T, inv(data_pred_cov))
filt_mean = pred.mean + np.matmul(residual, gain.T)
filt_cov = pred.cov - dotdot(gain, G, pred.cov)
return MeanAndCov(mean=filt_mean, cov=filt_cov), logpyt | [
"def",
"filter_step",
"(",
"G",
",",
"covY",
",",
"pred",
",",
"yt",
")",
":",
"# data prediction",
"data_pred_mean",
"=",
"np",
".",
"matmul",
"(",
"pred",
".",
"mean",
",",
"G",
".",
"T",
")",
"data_pred_cov",
"=",
"dotdot",
"(",
"G",
",",
"pred",
".",
"cov",
",",
"G",
".",
"T",
")",
"+",
"covY",
"if",
"covY",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"logpyt",
"=",
"dists",
".",
"Normal",
"(",
"loc",
"=",
"data_pred_mean",
",",
"scale",
"=",
"np",
".",
"sqrt",
"(",
"data_pred_cov",
")",
")",
".",
"logpdf",
"(",
"yt",
")",
"else",
":",
"logpyt",
"=",
"dists",
".",
"MvNormal",
"(",
"loc",
"=",
"data_pred_mean",
",",
"cov",
"=",
"data_pred_cov",
")",
".",
"logpdf",
"(",
"yt",
")",
"# filter",
"residual",
"=",
"yt",
"-",
"data_pred_mean",
"gain",
"=",
"dotdot",
"(",
"pred",
".",
"cov",
",",
"G",
".",
"T",
",",
"inv",
"(",
"data_pred_cov",
")",
")",
"filt_mean",
"=",
"pred",
".",
"mean",
"+",
"np",
".",
"matmul",
"(",
"residual",
",",
"gain",
".",
"T",
")",
"filt_cov",
"=",
"pred",
".",
"cov",
"-",
"dotdot",
"(",
"gain",
",",
"G",
",",
"pred",
".",
"cov",
")",
"return",
"MeanAndCov",
"(",
"mean",
"=",
"filt_mean",
",",
"cov",
"=",
"filt_cov",
")",
",",
"logpyt"
] | Filtering step of Kalman filter.
Parameters
----------
G: (dy, dx) numpy array
mean of Y_t | X_t is G * X_t
covY: (dy, dy) numpy array
covariance of Y_t | X_t
pred: MeanAndCov object
predictive distribution at time t
Returns
-------
filt: MeanAndCov object
filtering distribution at time t
logpyt: float
log density of Y_t | Y_{0:t-1} | [
"Filtering",
"step",
"of",
"Kalman",
"filter",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L190-L223 |
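Continuing with the numbers from the predict_step sketch above, one full update for a scalar observation of the first state component; the Kalman gain and the residual correction mirror the code:

import numpy as np

pred_mean = np.array([1., 1.])                   # from the predict sketch
pred_cov = np.array([[2.1, 1.0], [1.0, 1.1]])
G = np.array([[1., 0.]])                         # observe the first component
covY = np.array([[0.5]])
yt = np.array([1.2])

data_pred_mean = pred_mean @ G.T
data_pred_cov = G @ pred_cov @ G.T + covY
gain = pred_cov @ G.T @ np.linalg.inv(data_pred_cov)   # Kalman gain
filt_mean = pred_mean + (yt - data_pred_mean) @ gain.T
filt_cov = pred_cov - gain @ G @ pred_cov
print(filt_mean)
print(filt_cov)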
251,774 | nchopin/particles | particles/kalman.py | MVLinearGauss.check_shapes | def check_shapes(self):
"""
Check all dimensions are correct.
"""
assert self.covX.shape == (self.dx, self.dx), error_msg
assert self.covY.shape == (self.dy, self.dy), error_msg
assert self.F.shape == (self.dx, self.dx), error_msg
assert self.G.shape == (self.dy, self.dx), error_msg
assert self.mu0.shape == (self.dx,), error_msg
assert self.cov0.shape == (self.dx, self.dx), error_msg | python | def check_shapes(self):
assert self.covX.shape == (self.dx, self.dx), error_msg
assert self.covY.shape == (self.dy, self.dy), error_msg
assert self.F.shape == (self.dx, self.dx), error_msg
assert self.G.shape == (self.dy, self.dx), error_msg
assert self.mu0.shape == (self.dx,), error_msg
assert self.cov0.shape == (self.dx, self.dx), error_msg | [
"def",
"check_shapes",
"(",
"self",
")",
":",
"assert",
"self",
".",
"covX",
".",
"shape",
"==",
"(",
"self",
".",
"dx",
",",
"self",
".",
"dx",
")",
",",
"error_msg",
"assert",
"self",
".",
"covY",
".",
"shape",
"==",
"(",
"self",
".",
"dy",
",",
"self",
".",
"dy",
")",
",",
"error_msg",
"assert",
"self",
".",
"F",
".",
"shape",
"==",
"(",
"self",
".",
"dx",
",",
"self",
".",
"dx",
")",
",",
"error_msg",
"assert",
"self",
".",
"G",
".",
"shape",
"==",
"(",
"self",
".",
"dy",
",",
"self",
".",
"dx",
")",
",",
"error_msg",
"assert",
"self",
".",
"mu0",
".",
"shape",
"==",
"(",
"self",
".",
"dx",
",",
")",
",",
"error_msg",
"assert",
"self",
".",
"cov0",
".",
"shape",
"==",
"(",
"self",
".",
"dx",
",",
"self",
".",
"dx",
")",
",",
"error_msg"
] | Check all dimensions are correct. | [
"Check",
"all",
"dimensions",
"are",
"correct",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/kalman.py#L326-L335 |
251,775 | nchopin/particles | particles/qmc.py | sobol | def sobol(N, dim, scrambled=1):
""" Sobol sequence.
Parameters
----------
N : int
length of sequence
dim: int
dimension
scrambled: int
which scrambling method to use:
+ 0: no scrambling
+ 1: Owen's scrambling
+ 2: Faure-Tezuka
+ 3: Owen + Faure-Tezuka
Returns
-------
(N, dim) numpy array.
Notes
-----
For scrambling, seed is set randomly.
Fun fact: this venerable but playful piece of Fortran code occasionally
returns numbers above 1. (i.e. for a very small number of seeds); when this
happens, we just start over (since the seed is randomly generated).
"""
while(True):
seed = np.random.randint(2**32)
out = lowdiscrepancy.sobol(N, dim, scrambled, seed, 1, 0)
if (scrambled == 0) or ((out < 1.).all() and (out > 0.).all()):
# no need to test if scrambled==0
return out | python | def sobol(N, dim, scrambled=1):
while(True):
seed = np.random.randint(2**32)
out = lowdiscrepancy.sobol(N, dim, scrambled, seed, 1, 0)
if (scrambled == 0) or ((out < 1.).all() and (out > 0.).all()):
# no need to test if scrambled==0
return out | [
"def",
"sobol",
"(",
"N",
",",
"dim",
",",
"scrambled",
"=",
"1",
")",
":",
"while",
"(",
"True",
")",
":",
"seed",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"2",
"**",
"32",
")",
"out",
"=",
"lowdiscrepancy",
".",
"sobol",
"(",
"N",
",",
"dim",
",",
"scrambled",
",",
"seed",
",",
"1",
",",
"0",
")",
"if",
"(",
"scrambled",
"==",
"0",
")",
"or",
"(",
"(",
"out",
"<",
"1.",
")",
".",
"all",
"(",
")",
"and",
"(",
"out",
">",
"0.",
")",
".",
"all",
"(",
")",
")",
":",
"# no need to test if scrambled==0",
"return",
"out"
] | Sobol sequence.
Parameters
----------
N : int
length of sequence
dim: int
dimension
scrambled: int
which scrambling method to use:
+ 0: no scrambling
+ 1: Owen's scrambling
+ 2: Faure-Tezuka
+ 3: Owen + Faure-Tezuka
Returns
-------
(N, dim) numpy array.
Notes
-----
For scrambling, seed is set randomly.
Fun fact: this venerable but playful piece of Fortran code occasionally
returns numbers above 1. (i.e. for a very small number of seeds); when this
happens, we just start over (since the seed is randomly generated). | [
"Sobol",
"sequence",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/qmc.py#L29-L64 |
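If the Fortran backend is unavailable, recent SciPy ships a scrambled Sobol generator with similar semantics; a hedged alternative sketch (requires scipy >= 1.7; not the interface above):

from scipy.stats import qmc

sampler = qmc.Sobol(d=3, scramble=True, seed=42)
u = sampler.random(n=128)     # n a power of 2 avoids a balance warning
print(u.shape)                # -> (128, 3), points in (0, 1)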
251,776 | nchopin/particles | particles/smoothing.py | smoothing_worker | def smoothing_worker(method=None, N=100, seed=None, fk=None, fk_info=None,
add_func=None, log_gamma=None):
"""Generic worker for off-line smoothing algorithms.
This worker may be used in conjunction with utils.multiplexer in order to
run in parallel (and eventually compare) off-line smoothing algorithms.
Parameters
----------
method: string
['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC',
'two-filter_ON', 'two-filter_ON_prop', 'two-filter_ON2']
N: int
number of particles
seed: int
random generator seed; if None, generator is not seeded
fk: Feynman-Kac object
The Feynman-Kac model for the forward filter
fk_info: Feynman-Kac object (default=None)
the Feynman-Kac model for the information filter; if None,
set to the same Feynman-Kac model as fk, with data in reverse
add_func: function, with signature (t, x, xf)
additive function, at time t, for particles x=x_t and xf=x_{t+1}
log_gamma: function
log of function gamma (see book)
Returns
-------
a dict with fields:
est: a ndarray of length T-1
cpu_time
"""
T = fk.T
if fk_info is None:
fk_info = fk.__class__(ssm=fk.ssm, data=fk.data[::-1])
if seed:
random.seed(seed)
est = np.zeros(T - 1)
if method=='FFBS_QMC':
pf = particles.SQMC(fk=fk, N=N, store_history=True)
else:
pf = particles.SMC(fk=fk, N=N, store_history=True)
tic = time.clock()
pf.run()
if method in ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC']:
if method.startswith('FFBS_ON'):
z = pf.hist.backward_sampling(N, linear_cost=(method == 'FFBS_ON'))
else:
z = pf.hist.backward_sampling_qmc(N)
for t in range(T - 1):
est[t] = np.mean(add_func(t, z[t], z[t + 1]))
elif method in ['two-filter_ON2', 'two-filter_ON', 'two-filter_ON_prop']:
infopf = particles.SMC(fk=fk_info, N=N, store_history=True)
infopf.run()
for t in range(T - 1):
psi = lambda x, xf: add_func(t, x, xf)
if method == 'two-filter_ON2':
est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma)
else:
ti = T - 2 - t # t+1 for info filter
if method == 'two-filter_ON_prop':
modif_fwd = stats.norm.logpdf(pf.hist.X[t],
loc=np.mean(infopf.hist.X[ti + 1]),
scale=np.std(infopf.hist.X[ti + 1]))
modif_info = stats.norm.logpdf(infopf.hist.X[ti],
loc=np.mean(pf.hist.X[t + 1]),
scale=np.std(pf.hist.X[t + 1]))
else:
modif_fwd, modif_info = None, None
est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma,
linear_cost=True,
modif_forward=modif_fwd,
modif_info=modif_info)
else:
print('no such method?')
cpu_time = time.clock() - tic
print(method + ' took %.2f s for N=%i' % (cpu_time, N))
return {'est': est, 'cpu': cpu_time} | python | def smoothing_worker(method=None, N=100, seed=None, fk=None, fk_info=None,
add_func=None, log_gamma=None):
T = fk.T
if fk_info is None:
fk_info = fk.__class__(ssm=fk.ssm, data=fk.data[::-1])
if seed:
random.seed(seed)
est = np.zeros(T - 1)
if method=='FFBS_QMC':
pf = particles.SQMC(fk=fk, N=N, store_history=True)
else:
pf = particles.SMC(fk=fk, N=N, store_history=True)
tic = time.clock()
pf.run()
if method in ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC']:
if method.startswith('FFBS_ON'):
z = pf.hist.backward_sampling(N, linear_cost=(method == 'FFBS_ON'))
else:
z = pf.hist.backward_sampling_qmc(N)
for t in range(T - 1):
est[t] = np.mean(add_func(t, z[t], z[t + 1]))
elif method in ['two-filter_ON2', 'two-filter_ON', 'two-filter_ON_prop']:
infopf = particles.SMC(fk=fk_info, N=N, store_history=True)
infopf.run()
for t in range(T - 1):
psi = lambda x, xf: add_func(t, x, xf)
if method == 'two-filter_ON2':
est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma)
else:
ti = T - 2 - t # t+1 for info filter
if method == 'two-filter_ON_prop':
modif_fwd = stats.norm.logpdf(pf.hist.X[t],
loc=np.mean(infopf.hist.X[ti + 1]),
scale=np.std(infopf.hist.X[ti + 1]))
modif_info = stats.norm.logpdf(infopf.hist.X[ti],
loc=np.mean(pf.hist.X[t + 1]),
scale=np.std(pf.hist.X[t + 1]))
else:
modif_fwd, modif_info = None, None
est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma,
linear_cost=True,
modif_forward=modif_fwd,
modif_info=modif_info)
else:
print('no such method?')
cpu_time = time.clock() - tic
print(method + ' took %.2f s for N=%i' % (cpu_time, N))
return {'est': est, 'cpu': cpu_time} | [
"def",
"smoothing_worker",
"(",
"method",
"=",
"None",
",",
"N",
"=",
"100",
",",
"seed",
"=",
"None",
",",
"fk",
"=",
"None",
",",
"fk_info",
"=",
"None",
",",
"add_func",
"=",
"None",
",",
"log_gamma",
"=",
"None",
")",
":",
"T",
"=",
"fk",
".",
"T",
"if",
"fk_info",
"is",
"None",
":",
"fk_info",
"=",
"fk",
".",
"__class__",
"(",
"ssm",
"=",
"fk",
".",
"ssm",
",",
"data",
"=",
"fk",
".",
"data",
"[",
":",
":",
"-",
"1",
"]",
")",
"if",
"seed",
":",
"random",
".",
"seed",
"(",
"seed",
")",
"est",
"=",
"np",
".",
"zeros",
"(",
"T",
"-",
"1",
")",
"if",
"method",
"==",
"'FFBS_QMC'",
":",
"pf",
"=",
"particles",
".",
"SQMC",
"(",
"fk",
"=",
"fk",
",",
"N",
"=",
"N",
",",
"store_history",
"=",
"True",
")",
"else",
":",
"pf",
"=",
"particles",
".",
"SMC",
"(",
"fk",
"=",
"fk",
",",
"N",
"=",
"N",
",",
"store_history",
"=",
"True",
")",
"tic",
"=",
"time",
".",
"clock",
"(",
")",
"pf",
".",
"run",
"(",
")",
"if",
"method",
"in",
"[",
"'FFBS_ON'",
",",
"'FFBS_ON2'",
",",
"'FFBS_QMC'",
"]",
":",
"if",
"method",
".",
"startswith",
"(",
"'FFBS_ON'",
")",
":",
"z",
"=",
"pf",
".",
"hist",
".",
"backward_sampling",
"(",
"N",
",",
"linear_cost",
"=",
"(",
"method",
"==",
"'FFBS_ON'",
")",
")",
"else",
":",
"z",
"=",
"pf",
".",
"hist",
".",
"backward_sampling_qmc",
"(",
"N",
")",
"for",
"t",
"in",
"range",
"(",
"T",
"-",
"1",
")",
":",
"est",
"[",
"t",
"]",
"=",
"np",
".",
"mean",
"(",
"add_func",
"(",
"t",
",",
"z",
"[",
"t",
"]",
",",
"z",
"[",
"t",
"+",
"1",
"]",
")",
")",
"elif",
"method",
"in",
"[",
"'two-filter_ON2'",
",",
"'two-filter_ON'",
",",
"'two-filter_ON_prop'",
"]",
":",
"infopf",
"=",
"particles",
".",
"SMC",
"(",
"fk",
"=",
"fk_info",
",",
"N",
"=",
"N",
",",
"store_history",
"=",
"True",
")",
"infopf",
".",
"run",
"(",
")",
"for",
"t",
"in",
"range",
"(",
"T",
"-",
"1",
")",
":",
"psi",
"=",
"lambda",
"x",
",",
"xf",
":",
"add_func",
"(",
"t",
",",
"x",
",",
"xf",
")",
"if",
"method",
"==",
"'two-filter_ON2'",
":",
"est",
"[",
"t",
"]",
"=",
"pf",
".",
"hist",
".",
"twofilter_smoothing",
"(",
"t",
",",
"infopf",
",",
"psi",
",",
"log_gamma",
")",
"else",
":",
"ti",
"=",
"T",
"-",
"2",
"-",
"t",
"# t+1 for info filter",
"if",
"method",
"==",
"'two-filter_ON_prop'",
":",
"modif_fwd",
"=",
"stats",
".",
"norm",
".",
"logpdf",
"(",
"pf",
".",
"hist",
".",
"X",
"[",
"t",
"]",
",",
"loc",
"=",
"np",
".",
"mean",
"(",
"infopf",
".",
"hist",
".",
"X",
"[",
"ti",
"+",
"1",
"]",
")",
",",
"scale",
"=",
"np",
".",
"std",
"(",
"infopf",
".",
"hist",
".",
"X",
"[",
"ti",
"+",
"1",
"]",
")",
")",
"modif_info",
"=",
"stats",
".",
"norm",
".",
"logpdf",
"(",
"infopf",
".",
"hist",
".",
"X",
"[",
"ti",
"]",
",",
"loc",
"=",
"np",
".",
"mean",
"(",
"pf",
".",
"hist",
".",
"X",
"[",
"t",
"+",
"1",
"]",
")",
",",
"scale",
"=",
"np",
".",
"std",
"(",
"pf",
".",
"hist",
".",
"X",
"[",
"t",
"+",
"1",
"]",
")",
")",
"else",
":",
"modif_fwd",
",",
"modif_info",
"=",
"None",
",",
"None",
"est",
"[",
"t",
"]",
"=",
"pf",
".",
"hist",
".",
"twofilter_smoothing",
"(",
"t",
",",
"infopf",
",",
"psi",
",",
"log_gamma",
",",
"linear_cost",
"=",
"True",
",",
"modif_forward",
"=",
"modif_fwd",
",",
"modif_info",
"=",
"modif_info",
")",
"else",
":",
"print",
"(",
"'no such method?'",
")",
"cpu_time",
"=",
"time",
".",
"clock",
"(",
")",
"-",
"tic",
"print",
"(",
"method",
"+",
"' took %.2f s for N=%i'",
"%",
"(",
"cpu_time",
",",
"N",
")",
")",
"return",
"{",
"'est'",
":",
"est",
",",
"'cpu'",
":",
"cpu_time",
"}"
] | Generic worker for off-line smoothing algorithms.
This worker may be used in conjunction with utils.multiplexer in order to
run in parallel (and eventually compare) off-line smoothing algorithms.
Parameters
----------
method: string
['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC',
'two-filter_ON', 'two-filter_ON_prop', 'two-filter_ON2']
N: int
number of particles
seed: int
random generator seed; if None, generator is not seeded
fk: Feynman-Kac object
The Feynman-Kac model for the forward filter
fk_info: Feynman-Kac object (default=None)
the Feynman-Kac model for the information filter; if None,
set to the same Feynman-Kac model as fk, with data in reverse
add_func: function, with signature (t, x, xf)
additive function, at time t, for particles x=x_t and xf=x_{t+1}
log_gamma: function
log of function gamma (see book)
Returns
-------
a dict with fields:
est: a ndarray of length T
cpu_time | [
"Generic",
"worker",
"for",
"off",
"-",
"line",
"smoothing",
"algorithms",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L367-L444 |
251,777 | nchopin/particles | particles/smoothing.py | ParticleHistory.save | def save(self, X=None, w=None, A=None):
"""Save one "page" of history at a given time.
.. note::
This method is used internally by `SMC` to store the state of the
particle system at each time t. In most cases, users should not
have to call this method directly.
"""
self.X.append(X)
self.wgt.append(w)
self.A.append(A) | python | def save(self, X=None, w=None, A=None):
self.X.append(X)
self.wgt.append(w)
self.A.append(A) | [
"def",
"save",
"(",
"self",
",",
"X",
"=",
"None",
",",
"w",
"=",
"None",
",",
"A",
"=",
"None",
")",
":",
"self",
".",
"X",
".",
"append",
"(",
"X",
")",
"self",
".",
"wgt",
".",
"append",
"(",
"w",
")",
"self",
".",
"A",
".",
"append",
"(",
"A",
")"
] | Save one "page" of history at a given time.
.. note::
This method is used internally by `SMC` to store the state of the
particle system at each time t. In most cases, users should not
have to call this method directly. | [
"Save",
"one",
"page",
"of",
"history",
"at",
"a",
"given",
"time",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L92-L102 |
251,778 | nchopin/particles | particles/smoothing.py | ParticleHistory.extract_one_trajectory | def extract_one_trajectory(self):
"""Extract a single trajectory from the particle history.
The final state is chosen randomly, then the corresponding trajectory
is constructed backwards, until time t=0.
"""
traj = []
for t in reversed(range(self.T)):
if t == self.T - 1:
n = rs.multinomial_once(self.wgt[-1].W)
else:
n = self.A[t + 1][n]
traj.append(self.X[t][n])
return traj[::-1] | python | def extract_one_trajectory(self):
traj = []
for t in reversed(range(self.T)):
if t == self.T - 1:
n = rs.multinomial_once(self.wgt[-1].W)
else:
n = self.A[t + 1][n]
traj.append(self.X[t][n])
return traj[::-1] | [
"def",
"extract_one_trajectory",
"(",
"self",
")",
":",
"traj",
"=",
"[",
"]",
"for",
"t",
"in",
"reversed",
"(",
"range",
"(",
"self",
".",
"T",
")",
")",
":",
"if",
"t",
"==",
"self",
".",
"T",
"-",
"1",
":",
"n",
"=",
"rs",
".",
"multinomial_once",
"(",
"self",
".",
"wgt",
"[",
"-",
"1",
"]",
".",
"W",
")",
"else",
":",
"n",
"=",
"self",
".",
"A",
"[",
"t",
"+",
"1",
"]",
"[",
"n",
"]",
"traj",
".",
"append",
"(",
"self",
".",
"X",
"[",
"t",
"]",
"[",
"n",
"]",
")",
"return",
"traj",
"[",
":",
":",
"-",
"1",
"]"
] | Extract a single trajectory from the particle history.
The final state is chosen randomly, then the corresponding trajectory
is constructed backwards, until time t=0. | [
"Extract",
"a",
"single",
"trajectory",
"from",
"the",
"particle",
"history",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L104-L117 |
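The genealogy trace in isolation: starting from a final index (normally drawn according to the final weights), follow the ancestor arrays A backwards. Toy numbers, with the convention that A[t][n] is the ancestor of particle n at time t:

import numpy as np

X = [np.array([10, 11, 12, 13]),     # T = 3 times, N = 4 particles
     np.array([20, 21, 22, 23]),
     np.array([30, 31, 32, 33])]
A = [None,                           # A[0] unused
     np.array([0, 0, 1, 3]),
     np.array([2, 2, 3, 1])]

n = 0                                # final particle index (here fixed)
traj = []
for t in reversed(range(3)):
    traj.append(X[t][n])
    if t > 0:
        n = A[t][n]
print(traj[::-1])                    # -> [11, 22, 30]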
251,779 | nchopin/particles | particles/smoothing.py | ParticleHistory.compute_trajectories | def compute_trajectories(self):
"""Compute the N trajectories that constitute the current genealogy.
Compute and add attribute ``B`` to ``self`` where ``B`` is an array
such that ``B[t,n]`` is the index of the ancestor at time t of particle X_T^n,
where T is the current length of history.
"""
self.B = np.empty((self.T, self.N), 'int')
self.B[-1, :] = self.A[-1]
for t in reversed(range(self.T - 1)):
self.B[t, :] = self.A[t + 1][self.B[t + 1]] | python | def compute_trajectories(self):
self.B = np.empty((self.T, self.N), 'int')
self.B[-1, :] = self.A[-1]
for t in reversed(range(self.T - 1)):
self.B[t, :] = self.A[t + 1][self.B[t + 1]] | [
"def",
"compute_trajectories",
"(",
"self",
")",
":",
"self",
".",
"B",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"T",
",",
"self",
".",
"N",
")",
",",
"'int'",
")",
"self",
".",
"B",
"[",
"-",
"1",
",",
":",
"]",
"=",
"self",
".",
"A",
"[",
"-",
"1",
"]",
"for",
"t",
"in",
"reversed",
"(",
"range",
"(",
"self",
".",
"T",
"-",
"1",
")",
")",
":",
"self",
".",
"B",
"[",
"t",
",",
":",
"]",
"=",
"self",
".",
"A",
"[",
"t",
"+",
"1",
"]",
"[",
"self",
".",
"B",
"[",
"t",
"+",
"1",
"]",
"]"
] | Compute the N trajectories that constitute the current genealogy.
Compute and add attribute ``B`` to ``self`` where ``B`` is an array
such that ``B[t,n]`` is the index of the ancestor at time t of particle X_T^n,
where T is the current length of history. | [
"Compute",
"the",
"N",
"trajectories",
"that",
"constitute",
"the",
"current",
"genealogy",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L119-L129 |
251,780 | nchopin/particles | particles/smoothing.py | ParticleHistory.twofilter_smoothing | def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False,
return_ess=False, modif_forward=None,
modif_info=None):
"""Two-filter smoothing.
Parameters
----------
t: time, in range 0 <= t < T-1
info: SMC object
the information filter
phi: function
test function, a function of (X_t,X_{t+1})
loggamma: function
a function of (X_{t+1})
linear_cost: bool
if True, use the O(N) variant (basic version is O(N^2))
Returns
-------
Two-filter estimate of the smoothing expectation of phi(X_t, X_{t+1})
"""
ti = self.T - 2 - t # t+1 in reverse
if t < 0 or t >= self.T - 1:
raise ValueError(
'two-filter smoothing: t must be in range 0,...,T-2')
lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti])
if linear_cost:
return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo,
return_ess,
modif_forward, modif_info)
else:
return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo) | python | def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False,
return_ess=False, modif_forward=None,
modif_info=None):
ti = self.T - 2 - t # t+1 in reverse
if t < 0 or t >= self.T - 1:
raise ValueError(
'two-filter smoothing: t must be in range 0,...,T-2')
lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti])
if linear_cost:
return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo,
return_ess,
modif_forward, modif_info)
else:
return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo) | [
"def",
"twofilter_smoothing",
"(",
"self",
",",
"t",
",",
"info",
",",
"phi",
",",
"loggamma",
",",
"linear_cost",
"=",
"False",
",",
"return_ess",
"=",
"False",
",",
"modif_forward",
"=",
"None",
",",
"modif_info",
"=",
"None",
")",
":",
"ti",
"=",
"self",
".",
"T",
"-",
"2",
"-",
"t",
"# t+1 in reverse",
"if",
"t",
"<",
"0",
"or",
"t",
">=",
"self",
".",
"T",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"'two-filter smoothing: t must be in range 0,...,T-2'",
")",
"lwinfo",
"=",
"info",
".",
"hist",
".",
"wgt",
"[",
"ti",
"]",
".",
"lw",
"-",
"loggamma",
"(",
"info",
".",
"hist",
".",
"X",
"[",
"ti",
"]",
")",
"if",
"linear_cost",
":",
"return",
"self",
".",
"_twofilter_smoothing_ON",
"(",
"t",
",",
"ti",
",",
"info",
",",
"phi",
",",
"lwinfo",
",",
"return_ess",
",",
"modif_forward",
",",
"modif_info",
")",
"else",
":",
"return",
"self",
".",
"_twofilter_smoothing_ON2",
"(",
"t",
",",
"ti",
",",
"info",
",",
"phi",
",",
"lwinfo",
")"
] | Two-filter smoothing.
Parameters
----------
t: time, in range 0 <= t < T-1
info: SMC object
the information filter
phi: function
test function, a function of (X_t,X_{t+1})
loggamma: function
a function of (X_{t+1})
linear_cost: bool
if True, use the O(N) variant (basic version is O(N^2))
Returns
-------
Two-filter estimate of the smoothing expectation of phi(X_t, X_{t+1}) | [
"Two",
"-",
"filter",
"smoothing",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/smoothing.py#L286-L317 |
251,781 | nchopin/particles | particles/core.py | multiSMC | def multiSMC(nruns=10, nprocs=0, out_func=None, **args):
"""Run SMC algorithms in parallel, for different combinations of parameters.
`multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
A basic usage is::
results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)
This runs the same SMC algorithm 20 times, using all available CPU cores.
The output, ``results``, is a list of 20 dictionaries; a given dict corresponds
to a single run, and contains the following (key, value) pairs:
+ ``'run'``: a run identifier (a number between 0 and nruns-1)
+ ``'output'``: the corresponding SMC object (once method run was completed)
Since a `SMC` object may take a lot of space in memory (especially when
the option ``store_history`` is set to True), it is possible to require
`multiSMC` to store only some chosen summary of the SMC runs, using option
`out_func`. For instance, if we only want to store the estimate
of the log-likelihood of the model obtained from each particle filter::
of = lambda pf: pf.logLt
results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)
It is also possible to vary the parameters. Say::
results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])
will run the same SMC algorithm 30 times: 10 times for N=100, 10 times for
N=500, and 10 times for N=1000. The number 10 comes from the fact that we
did not specify nruns, and its default value is 10. The 30 dictionaries
obtained in results will then contain an extra (key, value) pair that will
give the value of N for which the run was performed.
It is possible to vary several arguments. Each time a list must be
provided. The end result will amount to take a *cartesian product* of the
arguments::
results = multiSMC(fk=my_fk_model, N=[100, 1000], resampling=['multinomial',
'residual'], nruns=20)
In that case we run our algorithm 80 times: 20 times with N=100 and
resampling set to multinomial, 20 times with N=100 and resampling set to
residual and so on.
Parameters
----------
* nruns: int, optional
number of runs (default is 10)
* nprocs: int, optional
number of processors to use; if negative, number of cores not to use.
Default value is 0 (use all available cores)
* out_func: callable, optional
function to transform the output of each SMC run. (If not given, output
will be the complete SMC object).
* args: dict
arguments passed to SMC class
Returns
-------
A list of dicts
See also
--------
`utils.multiplexer`: for more details on the syntax.
"""
def f(**args):
pf = SMC(**args)
pf.run()
return out_func(pf)
if out_func is None:
out_func = lambda x: x
return utils.multiplexer(f=f, nruns=nruns, nprocs=nprocs, seeding=True,
**args) | python | def multiSMC(nruns=10, nprocs=0, out_func=None, **args):
def f(**args):
pf = SMC(**args)
pf.run()
return out_func(pf)
if out_func is None:
out_func = lambda x: x
return utils.multiplexer(f=f, nruns=nruns, nprocs=nprocs, seeding=True,
**args) | [
"def",
"multiSMC",
"(",
"nruns",
"=",
"10",
",",
"nprocs",
"=",
"0",
",",
"out_func",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"def",
"f",
"(",
"*",
"*",
"args",
")",
":",
"pf",
"=",
"SMC",
"(",
"*",
"*",
"args",
")",
"pf",
".",
"run",
"(",
")",
"return",
"out_func",
"(",
"pf",
")",
"if",
"out_func",
"is",
"None",
":",
"out_func",
"=",
"lambda",
"x",
":",
"x",
"return",
"utils",
".",
"multiplexer",
"(",
"f",
"=",
"f",
",",
"nruns",
"=",
"nruns",
",",
"nprocs",
"=",
"nprocs",
",",
"seeding",
"=",
"True",
",",
"*",
"*",
"args",
")"
] | Run SMC algorithms in parallel, for different combinations of parameters.
`multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
A basic usage is::
results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)
This runs the same SMC algorithm 20 times, using all available CPU cores.
The output, ``results``, is a list of 20 dictionaries; a given dict corresponds
to a single run, and contains the following (key, value) pairs:
+ ``'run'``: a run identifier (a number between 0 and nruns-1)
+ ``'output'``: the corresponding SMC object (once method run was completed)
Since a `SMC` object may take a lot of space in memory (especially when
the option ``store_history`` is set to True), it is possible to require
`multiSMC` to store only some chosen summary of the SMC runs, using option
`out_func`. For instance, if we only want to store the estimate
of the log-likelihood of the model obtained from each particle filter::
of = lambda pf: pf.logLt
results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)
It is also possible to vary the parameters. Say::
results = multiSMC(fk=my_fk_model, N=[100, 500, 1000])
will run the same SMC algorithm 30 times: 10 times for N=100, 10 times for
N=500, and 10 times for N=1000. The number 10 comes from the fact that we
did not specify nruns, and its default value is 10. The 30 dictionaries
obtained in results will then contain an extra (key, value) pair that will
give the value of N for which the run was performed.
It is possible to vary several arguments. Each time a list must be
provided. The end result will amount to take a *cartesian product* of the
arguments::
results = multiSMC(fk=my_fk_model, N=[100, 1000], resampling=['multinomial',
'residual'], nruns=20)
In that case we run our algorithm 80 times: 20 times with N=100 and
resampling set to multinomial, 20 times with N=100 and resampling set to
residual and so on.
Parameters
----------
* nruns: int, optional
number of runs (default is 10)
* nprocs: int, optional
number of processors to use; if negative, number of cores not to use.
Default value is 0 (use all available cores)
* out_func: callable, optional
function to transform the output of each SMC run. (If not given, output
will be the complete SMC object).
* args: dict
arguments passed to SMC class
Returns
-------
A list of dicts
See also
--------
`utils.multiplexer`: for more details on the syntax. | [
"Run",
"SMC",
"algorithms",
"in",
"parallel",
"for",
"different",
"combinations",
"of",
"parameters",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/core.py#L438-L512 |
251,782 | nchopin/particles | particles/core.py | SMC.reset_weights | def reset_weights(self):
"""Reset weights after a resampling step.
"""
if self.fk.isAPF:
lw = (rs.log_mean_exp(self.logetat, W=self.W)
- self.logetat[self.A])
self.wgts = rs.Weights(lw=lw)
else:
self.wgts = rs.Weights() | python | def reset_weights(self):
if self.fk.isAPF:
lw = (rs.log_mean_exp(self.logetat, W=self.W)
- self.logetat[self.A])
self.wgts = rs.Weights(lw=lw)
else:
self.wgts = rs.Weights() | [
"def",
"reset_weights",
"(",
"self",
")",
":",
"if",
"self",
".",
"fk",
".",
"isAPF",
":",
"lw",
"=",
"(",
"rs",
".",
"log_mean_exp",
"(",
"self",
".",
"logetat",
",",
"W",
"=",
"self",
".",
"W",
")",
"-",
"self",
".",
"logetat",
"[",
"self",
".",
"A",
"]",
")",
"self",
".",
"wgts",
"=",
"rs",
".",
"Weights",
"(",
"lw",
"=",
"lw",
")",
"else",
":",
"self",
".",
"wgts",
"=",
"rs",
".",
"Weights",
"(",
")"
] | Reset weights after a resampling step. | [
"Reset",
"weights",
"after",
"a",
"resampling",
"step",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/core.py#L317-L325 |
251,783 | nchopin/particles | particles/resampling.py | log_sum_exp | def log_sum_exp(v):
"""Log of the sum of the exp of the arguments.
Parameters
----------
v: ndarray
Returns
-------
l: float
l = log(sum(exp(v)))
Note
----
use the log_sum_exp trick to avoid overflow: i.e. we remove the max of v
before exponentiating, then we add it back
See also
--------
log_mean_exp
"""
m = v.max()
return m + np.log(np.sum(np.exp(v - m))) | python | def log_sum_exp(v):
m = v.max()
return m + np.log(np.sum(np.exp(v - m))) | [
"def",
"log_sum_exp",
"(",
"v",
")",
":",
"m",
"=",
"v",
".",
"max",
"(",
")",
"return",
"m",
"+",
"np",
".",
"log",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"exp",
"(",
"v",
"-",
"m",
")",
")",
")"
] | Log of the sum of the exp of the arguments.
Parameters
----------
v: ndarray
Returns
-------
l: float
l = log(sum(exp(v)))
Note
----
use the log_sum_exp trick to avoid overflow: i.e. we remove the max of v
before exponentiating, then we add it back
See also
--------
log_mean_exp | [
"Log",
"of",
"the",
"sum",
"of",
"the",
"exp",
"of",
"the",
"arguments",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L233-L256 |
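The max shift keeps every exponential bounded by 1; without it the sum overflows to inf for large inputs. A quick check against scipy.special.logsumexp:

import numpy as np
from scipy.special import logsumexp

v = np.array([1000., 1001., 999.])   # np.log(np.sum(np.exp(v))) would give inf
m = v.max()
print(m + np.log(np.sum(np.exp(v - m))), logsumexp(v))   # both ~ 1001.4076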
251,784 | nchopin/particles | particles/resampling.py | log_sum_exp_ab | def log_sum_exp_ab(a, b):
"""log_sum_exp for two scalars.
Parameters
----------
a, b: float
Returns
-------
c: float
c = log(e^a + e^b)
"""
if a > b:
return a + np.log(1. + np.exp(b - a))
else:
return b + np.log(1. + np.exp(a - b)) | python | def log_sum_exp_ab(a, b):
if a > b:
return a + np.log(1. + np.exp(b - a))
else:
return b + np.log(1. + np.exp(a - b)) | [
"def",
"log_sum_exp_ab",
"(",
"a",
",",
"b",
")",
":",
"if",
"a",
">",
"b",
":",
"return",
"a",
"+",
"np",
".",
"log",
"(",
"1.",
"+",
"np",
".",
"exp",
"(",
"b",
"-",
"a",
")",
")",
"else",
":",
"return",
"b",
"+",
"np",
".",
"log",
"(",
"1.",
"+",
"np",
".",
"exp",
"(",
"a",
"-",
"b",
")",
")"
] | log_sum_exp for two scalars.
Parameters
----------
a, b: float
Returns
-------
c: float
c = log(e^a + e^b) | [
"log_sum_exp",
"for",
"two",
"scalars",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L259-L274 |
251,785 | nchopin/particles | particles/resampling.py | wmean_and_var | def wmean_and_var(W, x):
"""Component-wise weighted mean and variance.
Parameters
----------
W: (N,) ndarray
normalised weights (must be >=0 and sum to one).
x: ndarray (such that shape[0]==N)
data
Returns
-------
dictionary
{'mean':weighted_means, 'var':weighted_variances}
"""
m = np.average(x, weights=W, axis=0)
m2 = np.average(x**2, weights=W, axis=0)
v = m2 - m**2
return {'mean': m, 'var': v} | python | def wmean_and_var(W, x):
m = np.average(x, weights=W, axis=0)
m2 = np.average(x**2, weights=W, axis=0)
v = m2 - m**2
return {'mean': m, 'var': v} | [
"def",
"wmean_and_var",
"(",
"W",
",",
"x",
")",
":",
"m",
"=",
"np",
".",
"average",
"(",
"x",
",",
"weights",
"=",
"W",
",",
"axis",
"=",
"0",
")",
"m2",
"=",
"np",
".",
"average",
"(",
"x",
"**",
"2",
",",
"weights",
"=",
"W",
",",
"axis",
"=",
"0",
")",
"v",
"=",
"m2",
"-",
"m",
"**",
"2",
"return",
"{",
"'mean'",
":",
"m",
",",
"'var'",
":",
"v",
"}"
] | Component-wise weighted mean and variance.
Parameters
----------
W: (N,) ndarray
normalised weights (must be >=0 and sum to one).
x: ndarray (such that shape[0]==N)
data
Returns
-------
dictionary
{'mean':weighted_means, 'var':weighted_variances} | [
"Component",
"-",
"wise",
"weighted",
"mean",
"and",
"variance",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L306-L324 |
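A small usage sketch with hand-picked numbers (weights already normalised, as the docstring requires):

import numpy as np

W = np.array([0.5, 0.3, 0.2])                   # >= 0 and sum to one
x = np.array([[0., 1.], [2., 3.], [4., 5.]])    # N=3 points in dimension 2

m = np.average(x, weights=W, axis=0)            # component-wise weighted mean
v = np.average(x**2, weights=W, axis=0) - m**2  # E[X^2] - E[X]^2, as above
print(m)  # [1.4 2.4]
print(v)  # [2.44 2.44]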
251,786 | nchopin/particles | particles/resampling.py | wmean_and_var_str_array | def wmean_and_var_str_array(W, x):
"""Weighted mean and variance of each component of a structured array.
Parameters
----------
W: (N,) ndarray
normalised weights (must be >=0 and sum to one).
x: (N,) structured array
data
Returns
-------
dictionary
{'mean':weighted_means, 'var':weighted_variances}
"""
m = np.empty(shape=x.shape[1:], dtype=x.dtype)
v = np.empty_like(m)
for p in x.dtype.names:
m[p], v[p] = wmean_and_var(W, x[p]).values()
return {'mean': m, 'var': v} | python | def wmean_and_var_str_array(W, x):
m = np.empty(shape=x.shape[1:], dtype=x.dtype)
v = np.empty_like(m)
for p in x.dtype.names:
m[p], v[p] = wmean_and_var(W, x[p]).values()
return {'mean': m, 'var': v} | [
"def",
"wmean_and_var_str_array",
"(",
"W",
",",
"x",
")",
":",
"m",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"x",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"v",
"=",
"np",
".",
"empty_like",
"(",
"m",
")",
"for",
"p",
"in",
"x",
".",
"dtype",
".",
"names",
":",
"m",
"[",
"p",
"]",
",",
"v",
"[",
"p",
"]",
"=",
"wmean_and_var",
"(",
"W",
",",
"x",
"[",
"p",
"]",
")",
".",
"values",
"(",
")",
"return",
"{",
"'mean'",
":",
"m",
",",
"'var'",
":",
"v",
"}"
] | Weighted mean and variance of each component of a structured array.
Parameters
----------
W: (N,) ndarray
normalised weights (must be >=0 and sum to one).
x: (N,) structured array
data
Returns
-------
dictionary
{'mean':weighted_means, 'var':weighted_variances} | [
"Weighted",
"mean",
"and",
"variance",
"of",
"each",
"component",
"of",
"a",
"structured",
"array",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L326-L345 |
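The structured-array variant just loops over named fields; here is what such an input looks like, with hypothetical field names mu and sigma:

import numpy as np

N = 4
x = np.empty(N, dtype=[('mu', float), ('sigma', float)])
x['mu'] = [0., 1., 2., 3.]
x['sigma'] = [1., 1., 2., 2.]
W = np.full(N, 0.25)                 # uniform normalised weights

for p in x.dtype.names:              # field-by-field reduction, as above
    print(p, np.average(x[p], weights=W))
# mu 1.5
# sigma 1.5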
251,787 | nchopin/particles | particles/resampling.py | wquantiles | def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)):
"""Quantiles for weighted data.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) or (N,d) ndarray
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
a (k,) or (d, k) ndarray containing the alpha-quantiles
"""
if len(x.shape) == 1:
return _wquantiles(W, x, alphas=alphas)
elif len(x.shape) == 2:
return np.array([_wquantiles(W, x[:, i], alphas=alphas)
for i in range(x.shape[1])]) | python | def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)):
if len(x.shape) == 1:
return _wquantiles(W, x, alphas=alphas)
elif len(x.shape) == 2:
return np.array([_wquantiles(W, x[:, i], alphas=alphas)
for i in range(x.shape[1])]) | [
"def",
"wquantiles",
"(",
"W",
",",
"x",
",",
"alphas",
"=",
"(",
"0.25",
",",
"0.50",
",",
"0.75",
")",
")",
":",
"if",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"1",
":",
"return",
"_wquantiles",
"(",
"W",
",",
"x",
",",
"alphas",
"=",
"alphas",
")",
"elif",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"2",
":",
"return",
"np",
".",
"array",
"(",
"[",
"_wquantiles",
"(",
"W",
",",
"x",
"[",
":",
",",
"i",
"]",
",",
"alphas",
"=",
"alphas",
")",
"for",
"i",
"in",
"range",
"(",
"x",
".",
"shape",
"[",
"1",
"]",
")",
"]",
")"
] | Quantiles for weighted data.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) or (N,d) ndarray
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
a (k,) or (d, k) ndarray containing the alpha-quantiles | [
"Quantiles",
"for",
"weighted",
"data",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L359-L379 |
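The 1-d work is delegated to a helper _wquantiles that this excerpt does not include. A plausible stand-in (an assumption about the helper, not its actual implementation) inverts the weighted empirical CDF by linear interpolation:

import numpy as np

def _wquantiles_sketch(W, x, alphas):
    idx = np.argsort(x)                    # sort the data
    xs, cw = x[idx], np.cumsum(W[idx])     # cumulative weights along the sort
    return np.interp(alphas, cw, xs)       # invert the empirical CDF

W = np.array([0.1, 0.2, 0.3, 0.4])
x = np.array([10., 20., 30., 40.])
print(_wquantiles_sketch(W, x, (0.25, 0.50, 0.75)))  # about [17.5  26.67 33.75]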
251,788 | nchopin/particles | particles/resampling.py | wquantiles_str_array | def wquantiles_str_array(W, x, alphas=(0.25, 0.50, 0.75)):
"""quantiles for weighted data stored in a structured array.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) structured array
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
dictionary {p: quantiles} that stores for each field name p
the corresponding quantiles
"""
return {p: wquantiles(W, x[p], alphas) for p in x.dtype.names} | python | def wquantiles_str_array(W, x, alphas=(0.25, 0.50, 0.75)):
return {p: wquantiles(W, x[p], alphas) for p in x.dtype.names} | [
"def",
"wquantiles_str_array",
"(",
"W",
",",
"x",
",",
"alphas",
"=",
"(",
"0.25",
",",
"0.50",
",",
"0",
",",
"75",
")",
")",
":",
"return",
"{",
"p",
":",
"wquantiles",
"(",
"W",
",",
"x",
"[",
"p",
"]",
",",
"alphas",
")",
"for",
"p",
"in",
"x",
".",
"dtype",
".",
"names",
"}"
] | quantiles for weighted data stored in a structured array.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) structured array
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
dictionary {p: quantiles} that stores for each field name p
the corresponding quantiles | [
"quantiles",
"for",
"weighted",
"data",
"stored",
"in",
"a",
"structured",
"array",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L381-L399 |
251,789 | nchopin/particles | particles/resampling.py | resampling_scheme | def resampling_scheme(func):
"""Decorator for resampling schemes."""
@functools.wraps(func)
def modif_func(W, M=None):
M = W.shape[0] if M is None else M
return func(W, M)
rs_funcs[func.__name__] = modif_func
modif_func.__doc__ = rs_doc % func.__name__.capitalize()
return modif_func | python | def resampling_scheme(func):
@functools.wraps(func)
def modif_func(W, M=None):
M = W.shape[0] if M is None else M
return func(W, M)
rs_funcs[func.__name__] = modif_func
modif_func.__doc__ = rs_doc % func.__name__.capitalize()
return modif_func | [
"def",
"resampling_scheme",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"modif_func",
"(",
"W",
",",
"M",
"=",
"None",
")",
":",
"M",
"=",
"W",
".",
"shape",
"[",
"0",
"]",
"if",
"M",
"is",
"None",
"else",
"M",
"return",
"func",
"(",
"W",
",",
"M",
")",
"rs_funcs",
"[",
"func",
".",
"__name__",
"]",
"=",
"modif_func",
"modif_func",
".",
"__doc__",
"=",
"rs_doc",
"%",
"func",
".",
"__name__",
".",
"capitalize",
"(",
")",
"return",
"modif_func"
] | Decorator for resampling schemes. | [
"Decorator",
"for",
"resampling",
"schemes",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L423-L433 |
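The decorator references two module-level objects that lie outside this excerpt: rs_funcs, a name-to-function registry, and rs_doc, a docstring template with one %s slot. A self-contained sketch of the registration pattern, with both re-declared as assumptions:

import functools
import numpy as np

rs_funcs = {}   # assumed registry: scheme name -> wrapped function
rs_doc = "%s resampling: takes weights W, returns M ancestor indices."

def resampling_scheme(func):
    @functools.wraps(func)
    def modif_func(W, M=None):
        M = W.shape[0] if M is None else M   # default: as many offspring as particles
        return func(W, M)
    rs_funcs[func.__name__] = modif_func
    modif_func.__doc__ = rs_doc % func.__name__.capitalize()
    return modif_func

@resampling_scheme
def multinomial(W, M):
    return np.random.choice(W.shape[0], size=M, p=W)

print(rs_funcs['multinomial'](np.array([0.2, 0.5, 0.3])))  # e.g. [1 2 1]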
251,790 | nchopin/particles | particles/resampling.py | inverse_cdf | def inverse_cdf(su, W):
"""Inverse CDF algorithm for a finite distribution.
Parameters
----------
su: (M,) ndarray
M sorted uniform variates (i.e. M ordered points in [0,1]).
W: (N,) ndarray
a vector of N normalized weights (>=0 and sum to one)
Returns
-------
A: (M,) ndarray
a vector of M indices in range 0, ..., N-1
"""
j = 0
s = W[0]
M = su.shape[0]
A = np.empty(M, 'int')
for n in range(M):
while su[n] > s:
j += 1
s += W[j]
A[n] = j
return A | python | def inverse_cdf(su, W):
j = 0
s = W[0]
M = su.shape[0]
A = np.empty(M, 'int')
for n in range(M):
while su[n] > s:
j += 1
s += W[j]
A[n] = j
return A | [
"def",
"inverse_cdf",
"(",
"su",
",",
"W",
")",
":",
"j",
"=",
"0",
"s",
"=",
"W",
"[",
"0",
"]",
"M",
"=",
"su",
".",
"shape",
"[",
"0",
"]",
"A",
"=",
"np",
".",
"empty",
"(",
"M",
",",
"'int'",
")",
"for",
"n",
"in",
"range",
"(",
"M",
")",
":",
"while",
"su",
"[",
"n",
"]",
">",
"s",
":",
"j",
"+=",
"1",
"s",
"+=",
"W",
"[",
"j",
"]",
"A",
"[",
"n",
"]",
"=",
"j",
"return",
"A"
] | Inverse CDF algorithm for a finite distribution.
Parameters
----------
su: (M,) ndarray
M sorted uniform variates (i.e. M ordered points in [0,1]).
W: (N,) ndarray
a vector of N normalized weights (>=0 and sum to one)
Returns
-------
A: (M,) ndarray
a vector of M indices in range 0, ..., N-1 | [
"Inverse",
"CDF",
"algorithm",
"for",
"a",
"finite",
"distribution",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L443-L467 |
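inverse_cdf is the kernel shared by several resampling schemes: give it sorted uniforms and it returns ancestor indices. For instance, systematic resampling places one uniform on a regular grid (standalone copy of the function so the snippet runs by itself):

import numpy as np

def inverse_cdf(su, W):
    j, s = 0, W[0]                 # walk cumulative weights and uniforms in lockstep
    A = np.empty(su.shape[0], 'int')
    for n in range(su.shape[0]):
        while su[n] > s:
            j += 1
            s += W[j]
        A[n] = j
    return A

W = np.array([0.1, 0.6, 0.3])
M = 5
su = (np.random.rand() + np.arange(M)) / M   # sorted by construction
print(inverse_cdf(su, W))                    # e.g. [0 1 1 1 2]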
251,791 | nchopin/particles | particles/hilbert.py | hilbert_array | def hilbert_array(xint):
"""Compute Hilbert indices.
Parameters
----------
xint: (N, d) int numpy.ndarray
Returns
-------
h: (N,) int numpy.ndarray
Hilbert indices
"""
N, d = xint.shape
h = np.zeros(N, int64)
for n in range(N):
h[n] = Hilbert_to_int(xint[n, :])
return h | python | def hilbert_array(xint):
N, d = xint.shape
h = np.zeros(N, int64)
for n in range(N):
h[n] = Hilbert_to_int(xint[n, :])
return h | [
"def",
"hilbert_array",
"(",
"xint",
")",
":",
"N",
",",
"d",
"=",
"xint",
".",
"shape",
"h",
"=",
"np",
".",
"zeros",
"(",
"N",
",",
"int64",
")",
"for",
"n",
"in",
"range",
"(",
"N",
")",
":",
"h",
"[",
"n",
"]",
"=",
"Hilbert_to_int",
"(",
"xint",
"[",
"n",
",",
":",
"]",
")",
"return",
"h"
] | Compute Hilbert indices.
Parameters
----------
xint: (N, d) int numpy.ndarray
Returns
-------
h: (N,) int numpy.ndarray
Hilbert indices | [
"Compute",
"Hilbert",
"indices",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/hilbert.py#L17-L33 |
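Hilbert_to_int is imported from elsewhere and not reproduced in this excerpt. To see what such an index buys, a Morton (Z-order) code, a simpler space-filling curve used here purely as a stand-in for the Hilbert curve, already gives a 1-d ordering of multidimensional integer points:

import numpy as np

def morton_2d(xint):
    # interleave the bits of the two coordinates (Z-order, not Hilbert)
    h = np.zeros(xint.shape[0], np.int64)
    for b in range(16):
        h |= ((xint[:, 0] >> b) & 1).astype(np.int64) << (2 * b)
        h |= ((xint[:, 1] >> b) & 1).astype(np.int64) << (2 * b + 1)
    return h

pts = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
print(morton_2d(pts))               # [0 1 2 3]
print(np.argsort(morton_2d(pts)))   # the 1-d ordering of the 2-d points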
251,792 | nchopin/particles | particles/mcmc.py | MCMC.mean_sq_jump_dist | def mean_sq_jump_dist(self, discard_frac=0.1):
"""Mean squared jumping distance estimated from chain.
Parameters
----------
discard_frac: float
fraction of iterations to discard at the beginning (as a burn-in)
Returns
-------
float
"""
discard = int(self.niter * discard_frac)
return msjd(self.chain.theta[discard:]) | python | def mean_sq_jump_dist(self, discard_frac=0.1):
discard = int(self.niter * discard_frac)
return msjd(self.chain.theta[discard:]) | [
"def",
"mean_sq_jump_dist",
"(",
"self",
",",
"discard_frac",
"=",
"0.1",
")",
":",
"discard",
"=",
"int",
"(",
"self",
".",
"niter",
"*",
"discard_frac",
")",
"return",
"msjd",
"(",
"self",
".",
"chain",
".",
"theta",
"[",
"discard",
":",
"]",
")"
] | Mean squared jumping distance estimated from chain.
Parameters
----------
discard_frac: float
fraction of iterations to discard at the beginning (as a burn-in)
Returns
-------
float | [
"Mean",
"squared",
"jumping",
"distance",
"estimated",
"from",
"chain",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/mcmc.py#L99-L112 |
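msjd itself is defined elsewhere in the module; under its usual definition (squared increments along the chain, averaged) it can be sketched as follows. This is an assumption consistent with the call above, not the package's code:

import numpy as np

def msjd_sketch(theta):
    diffs = np.diff(theta, axis=0)            # theta_{t+1} - theta_t
    return np.mean(np.sum(diffs**2, axis=-1))

chain = np.random.randn(1000, 2).cumsum(axis=0)  # toy 2-d "chain"
discard = int(chain.shape[0] * 0.1)              # same 10% burn-in rule as above
print(msjd_sketch(chain[discard:]))              # about 2.0 for this random walk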
251,793 | nchopin/particles | particles/mcmc.py | VanishCovTracker.update | def update(self, v):
"""Adds point v"""
self.t += 1
g = self.gamma()
self.mu = (1. - g) * self.mu + g * v
mv = v - self.mu
self.Sigma = ((1. - g) * self.Sigma
+ g * np.dot(mv[:, np.newaxis], mv[np.newaxis, :]))
try:
self.L = cholesky(self.Sigma, lower=True)
except LinAlgError:
self.L = self.L0 | python | def update(self, v):
self.t += 1
g = self.gamma()
self.mu = (1. - g) * self.mu + g * v
mv = v - self.mu
self.Sigma = ((1. - g) * self.Sigma
+ g * np.dot(mv[:, np.newaxis], mv[np.newaxis, :]))
try:
self.L = cholesky(self.Sigma, lower=True)
except LinAlgError:
self.L = self.L0 | [
"def",
"update",
"(",
"self",
",",
"v",
")",
":",
"self",
".",
"t",
"+=",
"1",
"g",
"=",
"self",
".",
"gamma",
"(",
")",
"self",
".",
"mu",
"=",
"(",
"1.",
"-",
"g",
")",
"*",
"self",
".",
"mu",
"+",
"g",
"*",
"v",
"mv",
"=",
"v",
"-",
"self",
".",
"mu",
"self",
".",
"Sigma",
"=",
"(",
"(",
"1.",
"-",
"g",
")",
"*",
"self",
".",
"Sigma",
"+",
"g",
"*",
"np",
".",
"dot",
"(",
"mv",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"mv",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
")",
")",
"try",
":",
"self",
".",
"L",
"=",
"cholesky",
"(",
"self",
".",
"Sigma",
",",
"lower",
"=",
"True",
")",
"except",
"LinAlgError",
":",
"self",
".",
"L",
"=",
"self",
".",
"L0"
] | Adds point v | [
"Adds",
"point",
"v"
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/mcmc.py#L161-L172 |
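A self-contained sketch of the vanishing-adaptation recursion in update. The class below is a toy, not the package's VanishCovTracker: the step size gamma(t) = (t+1)**(-alpha) is an assumed form, since gamma() is defined outside this excerpt:

import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import cholesky

class VanishCovSketch:
    def __init__(self, dim, alpha=0.6):
        self.t, self.alpha = 0, alpha
        self.mu = np.zeros(dim)
        self.Sigma = np.eye(dim)
        self.L0 = self.L = np.eye(dim)       # fallback Cholesky factor

    def update(self, v):
        self.t += 1
        g = (self.t + 1.) ** (-self.alpha)   # assumed vanishing step size
        self.mu = (1. - g) * self.mu + g * v
        mv = v - self.mu
        self.Sigma = (1. - g) * self.Sigma + g * np.outer(mv, mv)
        try:
            self.L = cholesky(self.Sigma, lower=True)
        except LinAlgError:
            self.L = self.L0                 # keep a valid factor if update degenerates

tracker = VanishCovSketch(dim=2)
for v in np.random.randn(500, 2):
    tracker.update(v)
print(tracker.Sigma)   # drifts towards the covariance of the stream (about identity here)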
251,794 | nchopin/particles | particles/utils.py | cartesian_lists | def cartesian_lists(d):
"""
turns a dict of lists into a list of dicts that represents
the cartesian product of the initial lists
Example
-------
cartesian_lists({'a':[0, 2], 'b':[3, 4, 5]})
returns
[ {'a':0, 'b':3}, {'a':0, 'b':4}, ... {'a':2, 'b':5} ]
"""
return [{k: v for k, v in zip(d.keys(), args)}
for args in itertools.product(*d.values())] | python | def cartesian_lists(d):
return [{k: v for k, v in zip(d.keys(), args)}
for args in itertools.product(*d.values())] | [
"def",
"cartesian_lists",
"(",
"d",
")",
":",
"return",
"[",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"d",
".",
"keys",
"(",
")",
",",
"args",
")",
"}",
"for",
"args",
"in",
"itertools",
".",
"product",
"(",
"*",
"d",
".",
"values",
"(",
")",
")",
"]"
] | turns a dict of lists into a list of dicts that represents
the cartesian product of the initial lists
Example
-------
cartesian_lists({'a':[0, 2], 'b':[3, 4, 5]})
returns
[ {'a':0, 'b':3}, {'a':0, 'b':4}, ... {'a':2, 'b':5} ] | [
"turns",
"a",
"dict",
"of",
"lists",
"into",
"a",
"list",
"of",
"dicts",
"that",
"represents",
"the",
"cartesian",
"product",
"of",
"the",
"initial",
"lists"
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L87-L100 |
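Running the docstring's own example end-to-end (self-contained copy of the function):

import itertools

def cartesian_lists(d):
    return [{k: v for k, v in zip(d.keys(), args)}
            for args in itertools.product(*d.values())]

print(cartesian_lists({'a': [0, 2], 'b': [3, 4, 5]}))
# [{'a': 0, 'b': 3}, {'a': 0, 'b': 4}, {'a': 0, 'b': 5},
#  {'a': 2, 'b': 3}, {'a': 2, 'b': 4}, {'a': 2, 'b': 5}]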
251,795 | nchopin/particles | particles/utils.py | cartesian_args | def cartesian_args(args, listargs, dictargs):
""" Compute a list of inputs and outputs for a function
with kw arguments.
args: dict
fixed arguments, e.g. {'x': 3}, then x=3 for all inputs
listargs: dict
arguments specified as a list; then the inputs
should be the Cartesian products of these lists
dictargs: dict
same as above, except the key will be used in the output
(see module doc for more explanation)
"""
ils = {k: [v, ] for k, v in args.items()}
ils.update(listargs)
ils.update({k: v.values() for k, v in dictargs.items()})
ols = listargs.copy()
ols.update({k: v.keys() for k, v in dictargs.items()})
return cartesian_lists(ils), cartesian_lists(ols) | python | def cartesian_args(args, listargs, dictargs):
ils = {k: [v, ] for k, v in args.items()}
ils.update(listargs)
ils.update({k: v.values() for k, v in dictargs.items()})
ols = listargs.copy()
ols.update({k: v.keys() for k, v in dictargs.items()})
return cartesian_lists(ils), cartesian_lists(ols) | [
"def",
"cartesian_args",
"(",
"args",
",",
"listargs",
",",
"dictargs",
")",
":",
"ils",
"=",
"{",
"k",
":",
"[",
"v",
",",
"]",
"for",
"k",
",",
"v",
"in",
"args",
".",
"items",
"(",
")",
"}",
"ils",
".",
"update",
"(",
"listargs",
")",
"ils",
".",
"update",
"(",
"{",
"k",
":",
"v",
".",
"values",
"(",
")",
"for",
"k",
",",
"v",
"in",
"dictargs",
".",
"items",
"(",
")",
"}",
")",
"ols",
"=",
"listargs",
".",
"copy",
"(",
")",
"ols",
".",
"update",
"(",
"{",
"k",
":",
"v",
".",
"keys",
"(",
")",
"for",
"k",
",",
"v",
"in",
"dictargs",
".",
"items",
"(",
")",
"}",
")",
"return",
"cartesian_lists",
"(",
"ils",
")",
",",
"cartesian_lists",
"(",
"ols",
")"
] | Compute a list of inputs and outputs for a function
with kw arguments.
args: dict
fixed arguments, e.g. {'x': 3}, then x=3 for all inputs
listargs: dict
arguments specified as a list; then the inputs
should be the Cartesian products of these lists
dictargs: dict
same as above, except the key will be used in the output
(see module doc for more explanation) | [
"Compute",
"a",
"list",
"of",
"inputs",
"and",
"outputs",
"for",
"a",
"function",
"with",
"kw",
"arguments",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L103-L122 |
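Concretely: a fixed argument is repeated in every input, a list argument is expanded, and a dict argument contributes its values to the inputs while its keys label the outputs. A runnable check (cartesian_lists re-declared so the snippet stands alone):

import itertools

def cartesian_lists(d):
    return [{k: v for k, v in zip(d.keys(), args)}
            for args in itertools.product(*d.values())]

def cartesian_args(args, listargs, dictargs):
    ils = {k: [v] for k, v in args.items()}
    ils.update(listargs)
    ils.update({k: v.values() for k, v in dictargs.items()})
    ols = listargs.copy()
    ols.update({k: v.keys() for k, v in dictargs.items()})
    return cartesian_lists(ils), cartesian_lists(ols)

ins, outs = cartesian_args({'x': 3}, {'n': [1, 2]}, {'scale': {'wide': 10.}})
print(ins)   # [{'x': 3, 'n': 1, 'scale': 10.0}, {'x': 3, 'n': 2, 'scale': 10.0}]
print(outs)  # [{'n': 1, 'scale': 'wide'}, {'n': 2, 'scale': 'wide'}]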
251,796 | nchopin/particles | particles/utils.py | worker | def worker(qin, qout, f):
"""Worker for muliprocessing.
A worker repeatedly picks a dict of arguments in the queue and computes
f for this set of arguments, until the input queue is empty.
"""
while not qin.empty():
i, args = qin.get()
qout.put((i, f(**args))) | python | def worker(qin, qout, f):
while not qin.empty():
i, args = qin.get()
qout.put((i, f(**args))) | [
"def",
"worker",
"(",
"qin",
",",
"qout",
",",
"f",
")",
":",
"while",
"not",
"qin",
".",
"empty",
"(",
")",
":",
"i",
",",
"args",
"=",
"qin",
".",
"get",
"(",
")",
"qout",
".",
"put",
"(",
"(",
"i",
",",
"f",
"(",
"*",
"*",
"args",
")",
")",
")"
] | Worker for multiprocessing.
A worker repeatedly picks a dict of arguments in the queue and computes
f for this set of arguments, until the input queue is empty. | [
"Worker",
"for",
"muliprocessing",
".",
"A",
"worker",
"repeatedly",
"picks",
"a",
"dict",
"of",
"arguments",
"in",
"the",
"queue",
"and",
"computes",
"f",
"for",
"this",
"set",
"of",
"arguments",
"until",
"the",
"input",
"queue",
"is",
"empty",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L133-L141 |
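A runnable sketch of the queue protocol worker expects: tasks are (index, kwargs) pairs, and results come back as (index, value) so the caller can restore order. Threads and a single worker are used here to sidestep the empty()/get() race that several competing processes could hit; the logic is otherwise the same:

import queue
import threading

def f(x, y=1):
    return x + y

def worker(qin, qout, f):
    while not qin.empty():
        i, args = qin.get()
        qout.put((i, f(**args)))

qin, qout = queue.Queue(), queue.Queue()
tasks = [{'x': 1}, {'x': 2, 'y': 10}, {'x': 3}]
for i, args in enumerate(tasks):       # fill the queue *before* starting the worker
    qin.put((i, args))
t = threading.Thread(target=worker, args=(qin, qout, f))
t.start()
t.join()
print(sorted(qout.get() for _ in tasks))   # [(0, 2), (1, 12), (2, 4)]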
251,797 | nchopin/particles | particles/utils.py | distinct_seeds | def distinct_seeds(k):
""" returns k distinct seeds for random number generation
"""
seeds = []
for _ in range(k):
while True:
s = random.randint(2**32 - 1)
if s not in seeds:
break
seeds.append(s)
return seeds | python | def distinct_seeds(k):
seeds = []
for _ in range(k):
while True:
s = random.randint(2**32 - 1)
if s not in seeds:
break
seeds.append(s)
return seeds | [
"def",
"distinct_seeds",
"(",
"k",
")",
":",
"seeds",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"k",
")",
":",
"while",
"True",
":",
"s",
"=",
"random",
".",
"randint",
"(",
"2",
"**",
"32",
"-",
"1",
")",
"if",
"s",
"not",
"in",
"seeds",
":",
"break",
"seeds",
".",
"append",
"(",
"s",
")",
"return",
"seeds"
] | returns k distinct seeds for random number generation | [
"returns",
"k",
"distinct",
"seeds",
"for",
"random",
"number",
"generation"
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L179-L189 |
251,798 | nchopin/particles | particles/utils.py | multiplexer | def multiplexer(f=None, nruns=1, nprocs=1, seeding=None, **args):
"""Evaluate a function for different parameters, optionally in parallel.
Parameters
----------
f: function
function f to evaluate, must take only kw arguments as inputs
nruns: int
number of evaluations of f for each set of arguments
nprocs: int
+ if <=0, set to actual number of physical processors plus nprocs
(i.e. -1 => number of cpus on your machine minus one)
Default is 1, which means no multiprocessing
seeding: bool (default: True if nruns > 1, False otherwise)
whether we need to provide different seeds for RNGs
**args:
keyword arguments for function f.
Note
----
see documentation of `utils`
"""
if not callable(f):
raise ValueError('multiplexer: function f missing, or not callable')
if seeding is None:
seeding = (nruns > 1)
# extra arguments (meant to be arguments for f)
fixedargs, listargs, dictargs = {}, {}, {}
listargs['run'] = list(range(nruns))
for k, v in args.items():
if isinstance(v, list):
listargs[k] = v
elif isinstance(v, dict):
dictargs[k] = v
else:
fixedargs[k] = v
# cartesian product
inputs, outputs = cartesian_args(fixedargs, listargs, dictargs)
for ip in inputs:
ip.pop('run') # run is not an argument of f, just an id for output
# distributing different seeds
if seeding:
seeds = distinct_seeds(len(inputs))
for ip, op, s in zip(inputs, outputs, seeds):
ip['seed'] = s
op['seed'] = s
# the actual work happens here
return distribute_work(f, inputs, outputs, nprocs=nprocs) | python | def multiplexer(f=None, nruns=1, nprocs=1, seeding=None, **args):
if not callable(f):
raise ValueError('multiplexer: function f missing, or not callable')
if seeding is None:
seeding = (nruns > 1)
# extra arguments (meant to be arguments for f)
fixedargs, listargs, dictargs = {}, {}, {}
listargs['run'] = list(range(nruns))
for k, v in args.items():
if isinstance(v, list):
listargs[k] = v
elif isinstance(v, dict):
dictargs[k] = v
else:
fixedargs[k] = v
# cartesian product
inputs, outputs = cartesian_args(fixedargs, listargs, dictargs)
for ip in inputs:
ip.pop('run') # run is not an argument of f, just an id for output
# distributing different seeds
if seeding:
seeds = distinct_seeds(len(inputs))
for ip, op, s in zip(inputs, outputs, seeds):
ip['seed'] = s
op['seed'] = s
# the actual work happens here
return distribute_work(f, inputs, outputs, nprocs=nprocs) | [
"def",
"multiplexer",
"(",
"f",
"=",
"None",
",",
"nruns",
"=",
"1",
",",
"nprocs",
"=",
"1",
",",
"seeding",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"if",
"not",
"callable",
"(",
"f",
")",
":",
"raise",
"ValueError",
"(",
"'multiplexer: function f missing, or not callable'",
")",
"if",
"seeding",
"is",
"None",
":",
"seeding",
"=",
"(",
"nruns",
">",
"1",
")",
"# extra arguments (meant to be arguments for f)",
"fixedargs",
",",
"listargs",
",",
"dictargs",
"=",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
"listargs",
"[",
"'run'",
"]",
"=",
"list",
"(",
"range",
"(",
"nruns",
")",
")",
"for",
"k",
",",
"v",
"in",
"args",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"listargs",
"[",
"k",
"]",
"=",
"v",
"elif",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"dictargs",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"fixedargs",
"[",
"k",
"]",
"=",
"v",
"# cartesian product",
"inputs",
",",
"outputs",
"=",
"cartesian_args",
"(",
"fixedargs",
",",
"listargs",
",",
"dictargs",
")",
"for",
"ip",
"in",
"inputs",
":",
"ip",
".",
"pop",
"(",
"'run'",
")",
"# run is not an argument of f, just an id for output",
"# distributing different seeds",
"if",
"seeding",
":",
"seeds",
"=",
"distinct_seeds",
"(",
"len",
"(",
"inputs",
")",
")",
"for",
"ip",
",",
"op",
",",
"s",
"in",
"zip",
"(",
"inputs",
",",
"outputs",
",",
"seeds",
")",
":",
"ip",
"[",
"'seed'",
"]",
"=",
"s",
"op",
"[",
"'seed'",
"]",
"=",
"s",
"# the actual work happens here",
"return",
"distribute_work",
"(",
"f",
",",
"inputs",
",",
"outputs",
",",
"nprocs",
"=",
"nprocs",
")"
] | Evaluate a function for different parameters, optionally in parallel.
Parameters
----------
f: function
function f to evaluate, must take only kw arguments as inputs
nruns: int
number of evaluations of f for each set of arguments
nprocs: int
+ if <=0, set to actual number of physical processors plus nprocs
(i.e. -1 => number of cpus on your machine minus one)
Default is 1, which means no multiprocessing
seeding: bool (default: True if nruns > 1, False otherwise)
whether we need to provide different seeds for RNGs
**args:
keyword arguments for function f.
Note
----
see documentation of `utils` | [
"Evaluate",
"a",
"function",
"for",
"different",
"parameters",
"optionally",
"in",
"parallel",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/utils.py#L192-L240 |
251,799 | nchopin/particles | particles/state_space_models.py | StateSpaceModel.simulate | def simulate(self, T):
"""Simulate state and observation processes.
Parameters
----------
T: int
processes are simulated from time 0 to time T-1
Returns
-------
x, y: lists
lists of length T
"""
x = []
for t in range(T):
law_x = self.PX0() if t == 0 else self.PX(t, x[-1])
x.append(law_x.rvs(size=1))
y = self.simulate_given_x(x)
return x, y | python | def simulate(self, T):
x = []
for t in range(T):
law_x = self.PX0() if t == 0 else self.PX(t, x[-1])
x.append(law_x.rvs(size=1))
y = self.simulate_given_x(x)
return x, y | [
"def",
"simulate",
"(",
"self",
",",
"T",
")",
":",
"x",
"=",
"[",
"]",
"for",
"t",
"in",
"range",
"(",
"T",
")",
":",
"law_x",
"=",
"self",
".",
"PX0",
"(",
")",
"if",
"t",
"==",
"0",
"else",
"self",
".",
"PX",
"(",
"t",
",",
"x",
"[",
"-",
"1",
"]",
")",
"x",
".",
"append",
"(",
"law_x",
".",
"rvs",
"(",
"size",
"=",
"1",
")",
")",
"y",
"=",
"self",
".",
"simulate_given_x",
"(",
"x",
")",
"return",
"x",
",",
"y"
] | Simulate state and observation processes.
Parameters
----------
T: int
processes are simulated from time 0 to time T-1
Returns
-------
x, y: lists
lists of length T | [
"Simulate",
"state",
"and",
"observation",
"processes",
"."
] | 3faa97a1073db45c5889eef3e015dd76ef350b52 | https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/state_space_models.py#L280-L298 |
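A usage sketch that mirrors the basic example from the particles documentation: subclass, declare the three distributions, then call simulate. It assumes the package is installed; sigma is picked up from default_params:

from particles import state_space_models as ssm
from particles import distributions as dists

class ToyModel(ssm.StateSpaceModel):
    default_params = {'sigma': 0.2}
    def PX0(self):            # X_0 ~ N(0, 1)
        return dists.Normal()
    def PX(self, t, xp):      # X_t | X_{t-1} = xp ~ N(xp, 1)
        return dists.Normal(loc=xp)
    def PY(self, t, xp, x):   # Y_t | X_t = x ~ N(x, sigma^2)
        return dists.Normal(loc=x, scale=self.sigma)

x, y = ToyModel().simulate(50)   # two lists of length 50, as documented above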