def test_all_cls_vars():
    """Test :func:`astropy.cosmology._utils.all_cls_vars`."""

    class ClassA:
        a = 1
        b = 2

    all_vars = all_cls_vars(ClassA)
    public_all_vars = {k: v for k, v in all_vars.items() if not k.startswith("_")}
    assert public_all_vars == {"a": 1, "b": 2}

    class ClassB(ClassA):
        c = 3

    all_vars = all_cls_vars(ClassB)
    public_all_vars = {k: v for k, v in all_vars.items() if not k.startswith("_")}
    assert public_all_vars == {"a": 1, "b": 2, "c": 3}
    assert "a" not in vars(ClassB)
    assert "b" not in vars(ClassB)
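For orientation, here is a minimal, hypothetical stand-in for the helper under test, assuming an implementation that merges ``vars()`` across the MRO; it is illustrative only and not the actual astropy source.

from collections import ChainMap


def _all_cls_vars_sketch(obj):
    """Hypothetical stand-in: collect class variables across the full MRO."""
    cls = obj if isinstance(obj, type) else type(obj)
    return dict(ChainMap(*map(vars, cls.__mro__)))


class Base:
    a = 1


class Child(Base):
    b = 2


merged = _all_cls_vars_sketch(Child)
assert merged["a"] == 1 and merged["b"] == 2  # inherited attributes are included
assert "a" not in vars(Child)                 # even though they are absent from vars(Child)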
def from_cosmology(cosmo, /, **kwargs):
    """Return the |Cosmology| unchanged.

    Parameters
    ----------
    cosmo : `~astropy.cosmology.Cosmology`
        The cosmology to return.
    **kwargs
        This argument is required for compatibility with the standard set of
        keyword arguments in format `~astropy.cosmology.Cosmology.from_format`,
        e.g. "cosmology". If "cosmology" is included and is not `None`,
        ``cosmo`` is checked for correctness.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance
        Just ``cosmo`` passed through.

    Raises
    ------
    TypeError
        If ``cosmo`` is not an instance of the ``cosmology`` class (when
        ``cosmology`` is not `None`).

    Examples
    --------
    >>> from astropy.cosmology import Cosmology, Planck18
    >>> print(Cosmology.from_format(Planck18))
    FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
    """
    # Check argument `cosmology`
    cosmology = kwargs.get("cosmology")
    if isinstance(cosmology, str):
        cosmology = _COSMOLOGY_CLASSES[cosmology]
    if cosmology is not None and not isinstance(cosmo, cosmology):
        raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")

    return cosmo
def to_cosmology(cosmo, *args):
    """Return the |Cosmology| unchanged.

    Parameters
    ----------
    cosmo : `~astropy.cosmology.Cosmology`
        The cosmology to return.
    *args
        Not used.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance
        Just ``cosmo`` passed through.

    Examples
    --------
    >>> from astropy.cosmology import Planck18
    >>> Planck18.to_format("astropy.cosmology") is Planck18
    True
    """
    return cosmo
def cosmology_identify(origin, format, *args, **kwargs):
    """Identify if object is a `~astropy.cosmology.Cosmology`.

    Returns
    -------
    bool
    """
    itis = False
    if origin == "read":
        itis = isinstance(args[1], Cosmology) and (
            format in (None, "astropy.cosmology")
        )
    return itis
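These three converters are wired into astropy's unified I/O machinery through the cosmology conversion registry. The sketch below shows that registration as it is assumed to appear at the bottom of the module; the ``convert_registry`` import path is an assumption based on recent astropy layouts.

from astropy.cosmology.connect import convert_registry  # assumed import location

convert_registry.register_reader("astropy.cosmology", Cosmology, from_cosmology)
convert_registry.register_writer("astropy.cosmology", Cosmology, to_cosmology)
convert_registry.register_identifier("astropy.cosmology", Cosmology, cosmology_identify)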
def read_ecsv( filename, index=None, *, move_to_meta=False, cosmology=None, rename=None, **kwargs ): r"""Read a `~astropy.cosmology.Cosmology` from an ECSV file. Parameters ---------- filename : path-like or file-like From where to read the Cosmology. index : int, str, or None, optional Needed to select the row in tables with multiple rows. ``index`` can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). rename : dict or None (optional keyword-only) A dictionary mapping column names to fields of the `~astropy.cosmology.Cosmology`. **kwargs Passed to ``QTable.read`` Returns ------- `~astropy.cosmology.Cosmology` subclass instance Examples -------- We assume the following setup: >>> from pathlib import Path >>> from tempfile import TemporaryDirectory >>> temp_dir = TemporaryDirectory() To see reading a Cosmology from an ECSV file, we first write a Cosmology to an ECSV file: >>> from astropy.cosmology import Cosmology, Planck18 >>> file = Path(temp_dir.name) / "file.ecsv" >>> Planck18.write(file) >>> with open(file) as f: print(f.read()) # %ECSV 1.0 # --- # datatype: # - {name: name, datatype: string} ... # meta: !!omap # - {Oc0: 0.2607} ... # schema: astropy-2.0 name H0 Om0 Tcmb0 Neff m_nu Ob0 Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897 <BLANKLINE> Now we can read the Cosmology from the ECSV file, constructing a new cosmological instance identical to the ``Planck18`` cosmology from which it was generated. >>> cosmo = Cosmology.read(file) >>> print(cosmo) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) >>> cosmo == Planck18 True The ``cosmology`` information (column or metadata) may be omitted if the cosmology class (or its string name) is passed as the ``cosmology`` keyword argument to |Cosmology.read|. Alternatively, specific cosmology classes can be used to parse the data. >>> from astropy.cosmology import FlatLambdaCDM >>> print(FlatLambdaCDM.read(file)) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) When using a specific cosmology class, the class' default parameter values are used to fill in any missing information. For files with multiple rows of cosmological parameters, the ``index`` argument is needed to select the correct row. The index can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. Here is an example where ``index`` is needed and can be either an integer (for the row number) or the name of one of the cosmologies, e.g. 'Planck15'. >>> from astropy.cosmology import Planck13, Planck15, Planck18 >>> from astropy.table import vstack >>> cts = vstack([c.to_format("astropy.table") ... 
for c in (Planck13, Planck15, Planck18)], ... metadata_conflicts='silent') >>> file = Path(temp_dir.name) / "file2.ecsv" >>> cts.write(file) >>> with open(file) as f: print(f.read()) # %ECSV 1.0 # --- # datatype: # - {name: name, datatype: string} ... # meta: !!omap # - {Oc0: 0.2607} ... # schema: astropy-2.0 name H0 Om0 Tcmb0 Neff m_nu Ob0 Planck13 67.77 0.30712 2.7255 3.046 [0.0,0.0,0.06] 0.048252 Planck15 67.74 0.3075 2.7255 3.046 [0.0,0.0,0.06] 0.0486 Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897 >>> cosmo = Cosmology.read(file, index="Planck15", format="ascii.ecsv") >>> cosmo == Planck15 True Fields of the table in the file can be renamed to match the `~astropy.cosmology.Cosmology` class' signature using the ``rename`` argument. This is useful when the files's column names do not match the class' parameter names. For this example we need to make a new file with renamed columns: >>> file = Path(temp_dir.name) / "file3.ecsv" >>> renamed_table = Planck18.to_format("astropy.table", rename={"H0": "Hubble"}) >>> renamed_table.write(file) >>> with open(file) as f: print(f.read()) # %ECSV 1.0 # --- # datatype: # - {name: name, datatype: string} ... # meta: !!omap # - {Oc0: 0.2607} ... # schema: astropy-2.0 name Hubble Om0 Tcmb0 Neff m_nu Ob0 Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897 Now we can read the Cosmology from the ECSV file, with the required renaming: >>> cosmo = Cosmology.read(file, rename={"Hubble": "H0"}) >>> cosmo == Planck18 True Additional keyword arguments are passed to ``QTable.read``. .. testcleanup:: >>> temp_dir.cleanup() """ kwargs["format"] = "ascii.ecsv" with u.add_enabled_units(cu): table = QTable.read(filename, **kwargs) # build cosmology from table return from_table( table, index=index, move_to_meta=move_to_meta, cosmology=cosmology, rename=rename, )
def write_ecsv( cosmology, file, *, overwrite=False, cls=QTable, cosmology_in_meta=True, rename=None, **kwargs, ): """Serialize the cosmology into a ECSV. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` The cosmology instance to convert to a mapping. file : path-like or file-like Location to save the serialized cosmology. overwrite : bool Whether to overwrite the file, if it exists. cls : type (optional, keyword-only) Astropy :class:`~astropy.table.Table` (sub)class to use when writing. Default is :class:`~astropy.table.QTable`. cosmology_in_meta : bool (optional, keyword-only) Whether to put the cosmology class in the Table metadata (if `True`, default) or as the first column (if `False`). rename : dict or None (optional keyword-only) A dictionary mapping fields of the `~astropy.cosmology.Cosmology` to columns of the table. **kwargs Passed to ``cls.write`` Raises ------ TypeError If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table` Examples -------- We assume the following setup: >>> from pathlib import Path >>> from tempfile import TemporaryDirectory >>> temp_dir = TemporaryDirectory() A Cosmology can be written to an ECSV file: >>> from astropy.cosmology import Cosmology, Planck18 >>> file = Path(temp_dir.name) / "file.ecsv" >>> Planck18.write(file) >>> with open(file) as f: print(f.read()) # %ECSV 1.0 # --- # datatype: # - {name: name, datatype: string} ... # meta: !!omap # - {Oc0: 0.2607} ... # schema: astropy-2.0 name H0 Om0 Tcmb0 Neff m_nu Ob0 Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897 <BLANKLINE> If a file already exists, attempting to write will raise an error unless ``overwrite=True``. >>> Planck18.write(file, overwrite=True) By default :class:`~astropy.cosmology.Cosmology` instances are written using `~astropy.table.QTable` as an intermediate representation (for details see |Cosmology.to_format|, with ``format="astropy.table"``). The `~astropy.table.Table` type can be changed using the ``cls`` keyword argument. >>> from astropy.table import Table >>> file = Path(temp_dir.name) / "file2.ecsv" >>> Planck18.write(file, cls=Table) For most use cases, the default ``cls`` of :class:`~astropy.table.QTable` is recommended and will be largely indistinguishable from other table types, as the ECSV format is agnostic to the table type. An example of a difference that might necessitate using a different table type is if a different ECSV schema is desired. By default the cosmology class is written to the Table metadata. This can be changed to a column of the table using the ``cosmology_in_meta`` keyword argument. >>> file = Path(temp_dir.name) / "file3.ecsv" >>> Planck18.write(file, cosmology_in_meta=False) >>> with open(file) as f: print(f.read()) # %ECSV 1.0 # --- # datatype: # - {name: cosmology, datatype: string} # - {name: name, datatype: string} ... # meta: !!omap # - {Oc0: 0.2607} ... # schema: astropy-2.0 cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0 FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897 <BLANKLINE> Fields of the Cosmology can be renamed to when writing to an ECSV file using the ``rename`` argument. >>> file = Path(temp_dir.name) / "file4.ecsv" >>> Planck18.write(file, rename={"H0": "Hubble"}) >>> with open(file) as f: print(f.read()) # %ECSV 1.0 # --- # datatype: # - {name: name, datatype: string} ... # meta: ... 
# schema: astropy-2.0 name Hubble Om0 Tcmb0 Neff m_nu Ob0 Planck18 67.66 0.30966 2.7255 3.046 [0.0,0.0,0.06] 0.04897 <BLANKLINE> Additional keyword arguments are passed to :attr:`astropy.table.QTable.write`. .. testcleanup:: >>> temp_dir.cleanup() """ table = to_table( cosmology, cls=cls, cosmology_in_meta=cosmology_in_meta, rename=rename ) kwargs["format"] = "ascii.ecsv" table.write(file, overwrite=overwrite, **kwargs)
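The long doctests above can be distilled into a compact round-trip check through the public interface; this sketch relies only on ``Cosmology.write`` / ``Cosmology.read`` with the format auto-identified from the ``.ecsv`` suffix.

from pathlib import Path
from tempfile import TemporaryDirectory

from astropy.cosmology import Cosmology, Planck18

with TemporaryDirectory() as tmp:
    path = Path(tmp) / "planck18.ecsv"
    Planck18.write(path)             # dispatched to write_ecsv by the registry
    restored = Cosmology.read(path)  # dispatched to read_ecsv
    assert restored == Planck18      # name and parameters compare equal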
def ecsv_identify(origin, filepath, fileobj, *args, **kwargs):
    """Identify if the object uses the ECSV format.

    Returns
    -------
    bool
    """
    return filepath is not None and filepath.endswith(".ecsv")
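The identifier only inspects the file name, which is all the unified I/O layer needs for format auto-detection:

assert ecsv_identify("write", "cosmo.ecsv", None) is True
assert ecsv_identify("read", "cosmo.txt", None) is False
assert ecsv_identify("read", None, None) is False  # no path given, no match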
def read_html_table(
    filename,
    index=None,
    *,
    move_to_meta=False,
    cosmology=None,
    latex_names=True,
    **kwargs,
):
    r"""Read a |Cosmology| from an HTML file.

    Parameters
    ----------
    filename : path-like or file-like
        From where to read the Cosmology.
    index : int or str or None, optional
        Needed to select the row in tables with multiple rows. ``index`` can be
        an integer for the row number or, if the table is indexed by a column,
        the value of that column. If the table is not indexed and ``index`` is a
        string, the "name" column is used as the indexing column.
    move_to_meta : bool, optional keyword-only
        Whether to move keyword arguments that are not in the Cosmology class'
        signature to the Cosmology's metadata. This will only be applied if the
        Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
        Arguments moved to the metadata will be merged with existing metadata,
        preferring specified metadata in the case of a merge conflict (e.g. for
        ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be
        ``{'key': 10}``).
    cosmology : str or |Cosmology| class or None, optional keyword-only
        The cosmology class (or string name thereof) to use when constructing
        the cosmology instance. The class also provides default parameter
        values, filling in any non-mandatory arguments missing in 'table'.
    latex_names : bool, optional keyword-only
        Whether the |Table| (might) have latex column names for the parameters
        that need to be mapped to the correct parameter name -- e.g. $$H_{0}$$
        to 'H0'. This is `True` by default, but can be turned off (set to
        `False`) if there is a known name conflict (e.g. both an 'H0' and
        '$$H_{0}$$' column) as this will raise an error. In this case, the
        correct name ('H0') is preferred.
    **kwargs : Any
        Passed to ``QTable.read``. ``format`` is set to 'ascii.html', regardless
        of input.

    Returns
    -------
    |Cosmology| subclass instance

    Raises
    ------
    ValueError
        If the keyword argument 'format' is given and is not "ascii.html".
    """
    # Check that the format is 'ascii.html' (or not specified)
    format = kwargs.pop("format", "ascii.html")
    if format != "ascii.html":
        raise ValueError(f"format must be 'ascii.html', not {format}")

    # Reading is handled by `QTable`.
    with u.add_enabled_units(cu):  # (cosmology units not turned on by default)
        table = QTable.read(filename, format="ascii.html", **kwargs)

    # Need to map the table's column names to Cosmology inputs (parameter
    # names).
    # TODO! move the `latex_names` into `from_table`
    if latex_names:
        table_columns = set(table.colnames)
        for name, latex in _FORMAT_TABLE.items():
            if latex in table_columns:
                table.rename_column(latex, name)

    # Build the cosmology from table, using the private backend.
    return from_table(
        table, index=index, move_to_meta=move_to_meta, cosmology=cosmology, rename=None
    )
def write_html_table( cosmology, file, *, overwrite=False, cls=QTable, latex_names=False, **kwargs ): r"""Serialize the |Cosmology| into a HTML table. Parameters ---------- cosmology : |Cosmology| subclass instance file : path-like or file-like Location to save the serialized cosmology. file : path-like or file-like Where to write the html table. overwrite : bool, optional keyword-only Whether to overwrite the file, if it exists. cls : |Table| class, optional keyword-only Astropy |Table| (sub)class to use when writing. Default is |QTable| class. latex_names : bool, optional keyword-only Whether to format the parameters (column) names to latex -- e.g. 'H0' to $$H_{0}$$. **kwargs : Any Passed to ``cls.write``. Raises ------ TypeError If the optional keyword-argument 'cls' is not a subclass of |Table|. ValueError If the keyword argument 'format' is given and is not "ascii.html". Examples -------- We assume the following setup: >>> from pathlib import Path >>> from tempfile import TemporaryDirectory >>> temp_dir = TemporaryDirectory() Writing a cosmology to a html file will produce a table with the cosmology's type, name, and parameters as columns. >>> from astropy.cosmology import Planck18 >>> file = Path(temp_dir.name) / "file.html" >>> Planck18.write(file, overwrite=True) >>> with open(file) as f: print(f.read()) <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>cosmology</th> <th>name</th> <th>H0</th> <th>Om0</th> <th>Tcmb0</th> <th>Neff</th> <th colspan="3">m_nu</th> <th>Ob0</th> </tr> </thead> <tr> <td>FlatLambdaCDM</td> <td>Planck18</td> <td>67.66</td> <td>0.30966</td> <td>2.7255</td> <td>3.046</td> <td>0.0</td> <td>0.0</td> <td>0.06</td> <td>0.04897</td> </tr> </table> </body> </html> <BLANKLINE> <BLANKLINE> The cosmology's metadata is not included in the file. To save the cosmology in an existing file, use ``overwrite=True``; otherwise, an error will be raised. >>> Planck18.write(file, overwrite=True) To use a different table class as the underlying writer, use the ``cls`` kwarg. For more information on the available table classes, see the documentation on Astropy's table classes and on ``Cosmology.to_format("astropy.table")``. By default the parameter names are not converted to LaTeX / MathJax format. To enable this, set ``latex_names=True``. >>> file = Path(temp_dir.name) / "file2.html" >>> Planck18.write(file, latex_names=True) >>> with open(file) as f: print(f.read()) <html> ... <thead> <tr> <th>cosmology</th> <th>name</th> <th>$$H_{0}$$</th> <th>$$\Omega_{m,0}$$</th> <th>$$T_{0}$$</th> <th>$$N_{eff}$$</th> <th colspan="3">$$m_{nu}$$</th> <th>$$\Omega_{b,0}$$</th> </tr> ... .. testcleanup:: >>> temp_dir.cleanup() Notes ----- A HTML file containing a Cosmology HTML table should have scripts enabling MathJax. .. 
code-block:: html <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script> <script type="text/javascript" id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"> </script> """ # Check that the format is 'ascii.html' (or not specified) format = kwargs.pop("format", "ascii.html") if format != "ascii.html": raise ValueError(f"format must be 'ascii.html', not {format}") # Set cosmology_in_meta as false for now since there is no metadata being kept table = to_table(cosmology, cls=cls, cosmology_in_meta=False) cosmo_cls = type(cosmology) for name, col in table.columns.items(): param = cosmo_cls.parameters.get(name) if not isinstance(param, Parameter) or param.unit in (None, u.one): continue # Replace column with unitless version table.replace_column(name, (col << param.unit).value, copy=False) if latex_names: new_names = [_FORMAT_TABLE.get(k, k) for k in cosmology.parameters] table.rename_columns(tuple(cosmology.parameters), new_names) # Write HTML, using table I/O table.write(file, overwrite=overwrite, format="ascii.html", **kwargs)
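A short usage sketch for the writer: with ``latex_names=True`` the parameter columns are renamed to their MathJax forms via the module's ``_FORMAT_TABLE`` before the table is written.

from pathlib import Path
from tempfile import TemporaryDirectory

from astropy.cosmology import Planck18

with TemporaryDirectory() as tmp:
    path = Path(tmp) / "planck18.html"
    Planck18.write(path, format="ascii.html", latex_names=True)
    print(path.read_text())  # column headers now read $$H_{0}$$, $$\Omega_{m,0}$$, ...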
def html_identify(origin, filepath, fileobj, *args, **kwargs):
    """Identify if an object uses the HTML Table format.

    Parameters
    ----------
    origin : Any
        Not used.
    filepath : str or Any
        From where to read the Cosmology.
    fileobj : Any
        Not used.
    *args : Any
        Not used.
    **kwargs : Any
        Not used.

    Returns
    -------
    bool
        If the filepath is a string ending with '.html'.
    """
    return isinstance(filepath, str) and filepath.endswith(".html")
def write_latex( cosmology, file, *, overwrite=False, cls=QTable, latex_names=True, **kwargs ): r"""Serialize the |Cosmology| into a LaTeX. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance The cosmology to serialize. file : path-like or file-like Location to save the serialized cosmology. overwrite : bool Whether to overwrite the file, if it exists. cls : type, optional keyword-only Astropy :class:`~astropy.table.Table` (sub)class to use when writing. Default is :class:`~astropy.table.QTable`. latex_names : bool, optional keyword-only Whether to use LaTeX names for the parameters. Default is `True`. **kwargs Passed to ``cls.write`` Raises ------ TypeError If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table` Examples -------- We assume the following setup: >>> from pathlib import Path >>> from tempfile import TemporaryDirectory >>> temp_dir = TemporaryDirectory() Writing a cosmology to a LaTeX file will produce a table with the cosmology's type, name, and parameters as columns. >>> from astropy.cosmology import Planck18 >>> file = Path(temp_dir.name) / "file.tex" >>> Planck18.write(file, format="ascii.latex") >>> with open(file) as f: print(f.read()) \begin{table} \begin{tabular}{cccccccc} cosmology & name & $H_0$ & $\Omega_{m,0}$ & $T_{0}$ & $N_{eff}$ & $m_{nu}$ & $\Omega_{b,0}$ \\ & & $\mathrm{km\,Mpc^{-1}\,s^{-1}}$ & & $\mathrm{K}$ & & $\mathrm{eV}$ & \\ FlatLambdaCDM & Planck18 & 67.66 & 0.30966 & 2.7255 & 3.046 & 0.0 .. 0.06 & 0.04897 \\ \end{tabular} \end{table} <BLANKLINE> The cosmology's metadata is not included in the table. To save the cosmology in an existing file, use ``overwrite=True``; otherwise, an error will be raised. >>> Planck18.write(file, format="ascii.latex", overwrite=True) To use a different table class as the underlying writer, use the ``cls`` kwarg. For more information on the available table classes, see the documentation on Astropy's table classes and on ``Cosmology.to_format("astropy.table")``. By default the parameter names are converted to LaTeX format. To disable this, set ``latex_names=False``. >>> file = Path(temp_dir.name) / "file2.tex" >>> Planck18.write(file, format="ascii.latex", latex_names=False) >>> with open(file) as f: print(f.read()) \begin{table} \begin{tabular}{cccccccc} cosmology & name & H0 & Om0 & Tcmb0 & Neff & m_nu & Ob0 \\ & & $\mathrm{km\,Mpc^{-1}\,s^{-1}}$ & & $\mathrm{K}$ & & $\mathrm{eV}$ & \\ FlatLambdaCDM & Planck18 & 67.66 & 0.30966 & 2.7255 & 3.046 & 0.0 .. 0.06 & 0.04897 \\ \end{tabular} \end{table} <BLANKLINE> .. testcleanup:: >>> temp_dir.cleanup() """ # Check that the format is 'latex', 'ascii.latex' (or not specified) fmt = kwargs.pop("format", "ascii.latex") if fmt != "ascii.latex": raise ValueError(f"format must be 'ascii.latex', not {fmt}") # Set cosmology_in_meta as false for now since there is no metadata being kept table = to_table(cosmology, cls=cls, cosmology_in_meta=False) cosmo_cls = type(cosmology) for name in table.columns.keys(): param = cosmo_cls.parameters.get(name) if not isinstance(param, Parameter) or param.unit in (None, u.one): continue # Get column to correct unit table[name] <<= param.unit # Convert parameter names to LaTeX format if latex_names: new_names = [_FORMAT_TABLE.get(k, k) for k in cosmology.parameters] table.rename_columns(tuple(cosmology.parameters), new_names) table.write(file, overwrite=overwrite, format="ascii.latex", **kwargs)
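Because extra keyword arguments fall through to the underlying ``ascii.latex`` table writer, its options can be used directly; ``caption`` below is an option of astropy's ``ascii.latex`` writer rather than of ``write_latex`` itself, so treat this as a sketch of that pass-through.

from pathlib import Path
from tempfile import TemporaryDirectory

from astropy.cosmology import Planck18

with TemporaryDirectory() as tmp:
    path = Path(tmp) / "planck18.tex"
    Planck18.write(path, format="ascii.latex", caption="Planck 2018 parameters")
    print(path.read_text())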
def latex_identify(origin, filepath, fileobj, *args, **kwargs):
    """Identify if the object uses the LaTeX format.

    Returns
    -------
    bool
    """
    return filepath is not None and filepath.endswith(".tex")
def _rename_map(map, /, renames):
    """Apply the ``renames`` mapping to the keys of ``map``."""
    if common_names := set(renames.values()).intersection(map):
        raise ValueError(
            "'renames' values must be disjoint from 'map' keys, "
            f"the common keys are: {common_names}"
        )
    return {renames.get(k, k): v for k, v in map.items()}
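Behaviour sketch for this helper: keys named in ``renames`` are replaced, all other keys pass through unchanged, and a rename that collides with an existing key raises.

m = {"H0": 67.0, "name": "example"}
print(_rename_map(m, renames={"H0": "Hubble"}))  # {'Hubble': 67.0, 'name': 'example'}

try:
    _rename_map(m, renames={"H0": "name"})  # "name" is already a key of m
except ValueError as err:
    print(err)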
def from_mapping(mapping, /, *, move_to_meta=False, cosmology=None, rename=None): """Load `~astropy.cosmology.Cosmology` from mapping object. Parameters ---------- mapping : Mapping Arguments into the class -- like "name" or "meta". If 'cosmology' is None, must have field "cosmology" which can be either the string name of the cosmology class (e.g. "FlatLambdaCDM") or the class itself. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'map'. rename : dict or None (optional, keyword-only) A dictionary mapping keys in ``map`` to fields of the `~astropy.cosmology.Cosmology`. Returns ------- `~astropy.cosmology.Cosmology` subclass instance Examples -------- To see loading a `~astropy.cosmology.Cosmology` from a dictionary with ``from_mapping``, we will first make a mapping using :meth:`~astropy.cosmology.Cosmology.to_format`. >>> from astropy.cosmology import Cosmology, Planck18 >>> cm = Planck18.to_format('mapping') >>> cm {'cosmology': <class 'astropy.cosmology...FlatLambdaCDM'>, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, 'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046, 'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897, 'meta': ... Now this dict can be used to load a new cosmological instance identical to the |Planck18| cosmology from which it was generated. >>> cosmo = Cosmology.from_format(cm, format="mapping") >>> cosmo FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) The ``cosmology`` field can be omitted if the cosmology class (or its string name) is passed as the ``cosmology`` keyword argument to |Cosmology.from_format|. >>> del cm["cosmology"] # remove cosmology class >>> Cosmology.from_format(cm, cosmology="FlatLambdaCDM") FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) Alternatively, specific cosmology classes can be used to parse the data. >>> from astropy.cosmology import FlatLambdaCDM >>> FlatLambdaCDM.from_format(cm) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) When using a specific cosmology class, the class' default parameter values are used to fill in any missing information. >>> del cm["Tcmb0"] # show FlatLambdaCDM provides default >>> FlatLambdaCDM.from_format(cm) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) The ``move_to_meta`` keyword argument can be used to move fields that are not in the Cosmology constructor to the Cosmology's metadata. This is useful when the dictionary contains extra information that is not part of the Cosmology. 
>>> cm2 = cm | {"extra": 42, "cosmology": "FlatLambdaCDM"} >>> cosmo = Cosmology.from_format(cm2, move_to_meta=True) >>> cosmo.meta OrderedDict([('extra', 42), ...]) The ``rename`` keyword argument can be used to rename keys in the mapping to fields of the |Cosmology|. This is crucial when the mapping has keys that are not valid arguments to the |Cosmology| constructor. >>> cm3 = dict(cm) # copy >>> cm3["cosmo_cls"] = "FlatLambdaCDM" >>> cm3["cosmo_name"] = cm3.pop("name") >>> rename = {'cosmo_cls': 'cosmology', 'cosmo_name': 'name'} >>> Cosmology.from_format(cm3, rename=rename) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) """ # Rename keys, if given a ``renames`` dict. # Also, make a copy of the mapping, so we can pop from it. params = _rename_map(dict(mapping), renames=rename or {}) # Get cosmology class cosmology = _get_cosmology_class(cosmology, params) # select arguments from mapping that are in the cosmo's signature. sig = inspect.signature(cosmology) ba = sig.bind_partial() # blank set of args ba.apply_defaults() # fill in the defaults for k in sig.parameters.keys(): if k in params: # transfer argument, if in params ba.arguments[k] = params.pop(k) # deal with remaining params. If there is a **kwargs use that, else # allow to transfer to metadata. Raise TypeError if can't. lastp = next(reversed(sig.parameters.values())) if lastp.kind == 4: # variable keyword-only ba.arguments[lastp.name] = params elif move_to_meta: # prefers current meta, which was explicitly set meta = ba.arguments["meta"] or {} # (None -> dict) ba.arguments["meta"] = {**params, **meta} elif params: raise TypeError(f"there are unused parameters {params}.") # else: pass # no kwargs, no move-to-meta, and all the params are used return cosmology(*ba.args, **ba.kwargs)
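Beyond the doctests above, the mapping does not have to come from ``to_format``; a hand-written dict containing just the mandatory parameters is enough once a concrete cosmology class is supplied.

from astropy.cosmology import Cosmology

cosmo = Cosmology.from_format(
    {"H0": 67.66, "Om0": 0.30966, "name": "example"},
    format="mapping",
    cosmology="FlatLambdaCDM",
)
print(cosmo)  # H0 is interpreted in the class' default unit, km / (Mpc s)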
def to_mapping( cosmology, *args, cls=dict, cosmology_as_str=False, move_from_meta=False, rename=None, ): """Return the cosmology class, parameters, and metadata as a `dict`. Parameters ---------- cosmology : :class:`~astropy.cosmology.Cosmology` The cosmology instance to convert to a mapping. *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` cls : type (optional, keyword-only) `dict` or `collections.Mapping` subclass. The mapping type to return. Default is `dict`. cosmology_as_str : bool (optional, keyword-only) Whether the cosmology value is the class (if `False`, default) or the semi-qualified name (if `True`). move_from_meta : bool (optional, keyword-only) Whether to add the Cosmology's metadata as an item to the mapping (if `False`, default) or to merge with the rest of the mapping, preferring the original values (if `True`) rename : dict or None (optional, keyword-only) A `dict` mapping fields of the :class:`~astropy.cosmology.Cosmology` to keys in the map. Returns ------- Mapping A mapping of type ``cls``, by default a `dict`. Has key-values for the cosmology parameters and also: - 'cosmology' : the class - 'meta' : the contents of the cosmology's metadata attribute. If ``move_from_meta`` is `True`, this key is missing and the contained metadata are added to the main `dict`. Examples -------- A Cosmology as a mapping will have the cosmology's name and parameters as items, and the metadata as a nested dictionary. >>> from astropy.cosmology import Planck18 >>> Planck18.to_format('mapping') {'cosmology': <class 'astropy.cosmology...FlatLambdaCDM'>, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, 'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046, 'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897, 'meta': ... The dictionary type may be changed with the ``cls`` keyword argument: >>> from collections import OrderedDict >>> Planck18.to_format('mapping', cls=OrderedDict) OrderedDict([('cosmology', <class 'astropy.cosmology...FlatLambdaCDM'>), ('name', 'Planck18'), ('H0', <Quantity 67.66 km / (Mpc s)>), ('Om0', 0.30966), ('Tcmb0', <Quantity 2.7255 K>), ('Neff', 3.046), ('m_nu', <Quantity [0. , 0. , 0.06] eV>), ('Ob0', 0.04897), ('meta', ... Sometimes it is more useful to have the name of the cosmology class, not the type itself. The keyword argument ``cosmology_as_str`` may be used: >>> Planck18.to_format('mapping', cosmology_as_str=True) {'cosmology': 'FlatLambdaCDM', ... The metadata is normally included as a nested mapping. To move the metadata into the main mapping, use the keyword argument ``move_from_meta``. This kwarg inverts ``move_to_meta`` in ``Cosmology.to_format("mapping", move_to_meta=...)`` where extra items are moved to the metadata (if the cosmology constructor does not have a variable keyword-only argument -- ``**kwargs``). >>> from astropy.cosmology import Planck18 >>> Planck18.to_format('mapping', move_from_meta=True) {'cosmology': <class 'astropy.cosmology...FlatLambdaCDM'>, 'name': 'Planck18', 'Oc0': 0.2607, 'n': 0.9665, 'sigma8': 0.8102, ... Lastly, the keys in the mapping may be renamed with the ``rename`` keyword. >>> rename = {'cosmology': 'cosmo_cls', 'name': 'cosmo_name'} >>> Planck18.to_format('mapping', rename=rename) {'cosmo_cls': <class 'astropy.cosmology...FlatLambdaCDM'>, 'cosmo_name': 'Planck18', ... 
""" if not issubclass(cls, (dict, Mapping)): raise TypeError(f"'cls' must be a (sub)class of dict or Mapping, not {cls}") m = cls() # start with the cosmology class & name m["cosmology"] = ( cosmology.__class__.__qualname__ if cosmology_as_str else cosmology.__class__ ) m["name"] = cosmology.name # here only for dict ordering meta = copy.deepcopy(cosmology.meta) # metadata (mutable) if move_from_meta: # Merge the mutable metadata. Since params are added later they will # be preferred in cases of overlapping keys. Likewise, need to pop # cosmology and name from meta. meta.pop("cosmology", None) meta.pop("name", None) m.update(meta) # Add all the immutable inputs m.update(cosmology.parameters) # Lastly, add the metadata, if haven't already (above) if not move_from_meta: m["meta"] = meta # TODO? should meta be type(cls) # Rename keys return m if rename is None else _rename_map(m, rename)
Identify if object uses the mapping format. Returns ------- bool
def mapping_identify(origin, format, *args, **kwargs): """Identify if object uses the mapping format. Returns ------- bool """ itis = False if origin == "read": itis = isinstance(args[1], Mapping) and (format in (None, "mapping")) return itis
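For context, ``mapping_identify`` (like the other ``*_identify`` helpers in this module) only takes effect once it is wired into a unified I/O registry alongside the corresponding reader and writer. The sketch below shows that wiring, assuming the module-level ``convert_registry`` referenced by the tests later in this file and the ``from_mapping`` counterpart of ``to_mapping``.

from astropy.cosmology import Cosmology

convert_registry.register_reader("mapping", Cosmology, from_mapping)
convert_registry.register_writer("mapping", Cosmology, to_mapping)
convert_registry.register_identifier("mapping", Cosmology, mapping_identify)

# With the identifier registered, the "mapping" format can be auto-detected,
# e.g. Cosmology.from_format({"cosmology": "FlatLambdaCDM", ...}) dispatches here.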
Load |Cosmology| from `~astropy.modeling.Model` object.

Parameters
----------
model : `_CosmologyModel` subclass instance
    See ``Cosmology.to_format.help("astropy.model")`` for details.

Returns
-------
`~astropy.cosmology.Cosmology` subclass instance

Examples
--------
>>> from astropy.cosmology import Cosmology, Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> print(Cosmology.from_format(model))
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
              Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
def from_model(model):
    """Load |Cosmology| from `~astropy.modeling.Model` object.

    Parameters
    ----------
    model : `_CosmologyModel` subclass instance
        See ``Cosmology.to_format.help("astropy.model")`` for details.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance

    Examples
    --------
    >>> from astropy.cosmology import Cosmology, Planck18
    >>> model = Planck18.to_format("astropy.model", method="lookback_time")
    >>> print(Cosmology.from_format(model))
    FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
                  Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
    """
    cosmo = model.cosmology

    # assemble the metadata
    meta = copy.deepcopy(model.meta)
    for n in model.param_names:
        p = getattr(model, n)
        meta[p.name] = {
            n: getattr(p, n)
            for n in dir(p)
            if not (n.startswith("_") or callable(getattr(p, n)))
        }

    return replace(cosmo, meta=meta)
Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance method : str, keyword-only The name of the method on the ``cosmology``. Returns ------- `_CosmologyModel` subclass instance The Model wraps the |Cosmology| method, converting each non-`None` :class:`~astropy.cosmology.Parameter` to a :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter` and the method to the model's ``__call__ / evaluate``. Examples -------- >>> from astropy.cosmology import Planck18 >>> model = Planck18.to_format("astropy.model", method="lookback_time") >>> model <FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. , 0. , 0.06] eV, Ob0=0.04897, name='Planck18')>
def to_model(cosmology, *_, method): """Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance method : str, keyword-only The name of the method on the ``cosmology``. Returns ------- `_CosmologyModel` subclass instance The Model wraps the |Cosmology| method, converting each non-`None` :class:`~astropy.cosmology.Parameter` to a :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter` and the method to the model's ``__call__ / evaluate``. Examples -------- >>> from astropy.cosmology import Planck18 >>> model = Planck18.to_format("astropy.model", method="lookback_time") >>> model <FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. , 0. , 0.06] eV, Ob0=0.04897, name='Planck18')> """ cosmo_cls = cosmology.__class__ # get bound method & sig from cosmology (unbound if class). if not hasattr(cosmology, method): raise AttributeError(f"{method} is not a method on {cosmology.__class__}.") func = getattr(cosmology, method) if not callable(func): raise ValueError(f"{cosmology.__class__}.{method} is not callable.") msig = inspect.signature(func) # introspect for number of positional inputs, ignoring "self" n_inputs = len([p for p in tuple(msig.parameters.values()) if (p.kind in (0, 1))]) attrs = {} # class attributes attrs["_cosmology_class"] = cosmo_cls attrs["_method_name"] = method attrs["n_inputs"] = n_inputs attrs["n_outputs"] = 1 params = { k: convert_parameter_to_model_parameter( cosmo_cls.parameters[k], v, meta=cosmology.meta.get(k) ) for k, v in cosmology.parameters.items() if v is not None } # class name is cosmology name + Cosmology + method name + Model clsname = ( cosmo_cls.__qualname__.replace(".", "_") + "Cosmology" + method.replace("_", " ").title().replace(" ", "") + "Model" ) # make Model class CosmoModel = type(clsname, (_CosmologyModel,), {**attrs, **params}) # override __signature__ and format the doc. CosmoModel.evaluate.__signature__ = msig CosmoModel.evaluate.__doc__ = CosmoModel.evaluate.__doc__.format( cosmo_cls=cosmo_cls.__qualname__, method=method ) # instantiate class using default values model = CosmoModel( **cosmology.parameters, name=cosmology.name, meta=copy.deepcopy(cosmology.meta) ) return model
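A short usage sketch: the generated model evaluates the wrapped method, so calling it with a redshift reproduces the cosmology method it was built from (values shown are approximate).

from astropy.cosmology import Planck18

model = Planck18.to_format("astropy.model", method="lookback_time")
print(model(1.0))                   # same as the line below, roughly 7.9 Gyr
print(Planck18.lookback_time(1.0))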
Identify if object uses the :class:`~astropy.modeling.Model` format. Returns ------- bool
def model_identify(origin, format, *args, **kwargs): """Identify if object uses the :class:`~astropy.modeling.Model` format. Returns ------- bool """ itis = False if origin == "read": itis = isinstance(args[1], Model) and (format in (None, "astropy.model")) return itis
Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`. Parameters ---------- row : `~astropy.table.Row` The object containing the Cosmology information. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str, type, or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'table'. rename : dict or None (optional, keyword-only) A dictionary mapping columns in the row to fields of the `~astropy.cosmology.Cosmology`. Returns ------- `~astropy.cosmology.Cosmology` Examples -------- To see loading a `~astropy.cosmology.Cosmology` from a Row with ``from_row``, we will first make a `~astropy.table.Row` using :func:`~astropy.cosmology.Cosmology.to_format`. >>> from astropy.cosmology import Cosmology, Planck18 >>> cr = Planck18.to_format("astropy.row") >>> cr <Row index=0> cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Now this row can be used to load a new cosmological instance identical to the ``Planck18`` cosmology from which it was generated. >>> cosmo = Cosmology.from_format(cr, format="astropy.row") >>> cosmo FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) The ``cosmology`` information (column or metadata) may be omitted if the cosmology class (or its string name) is passed as the ``cosmology`` keyword argument to |Cosmology.from_format|. >>> del cr.columns["cosmology"] # remove cosmology from metadata >>> Cosmology.from_format(cr, cosmology="FlatLambdaCDM") FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) Alternatively, specific cosmology classes can be used to parse the data. >>> from astropy.cosmology import FlatLambdaCDM >>> FlatLambdaCDM.from_format(cr) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) When using a specific cosmology class, the class' default parameter values are used to fill in any missing information. >>> del cr.columns["Tcmb0"] # show FlatLambdaCDM provides default >>> FlatLambdaCDM.from_format(cr) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) If a `~astropy.table.Row` object has columns that do not match the fields of the `~astropy.cosmology.Cosmology` class, they can be mapped using the ``rename`` keyword argument. 
>>> renamed = Planck18.to_format("astropy.row", rename={"H0": "Hubble"}) >>> renamed <Row index=0> cosmology name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 >>> cosmo = Cosmology.from_format(renamed, format="astropy.row", ... rename={"Hubble": "H0"}) >>> cosmo == Planck18 True
def from_row(row, *, move_to_meta=False, cosmology=None, rename=None): """Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`. Parameters ---------- row : `~astropy.table.Row` The object containing the Cosmology information. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str, type, or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'table'. rename : dict or None (optional, keyword-only) A dictionary mapping columns in the row to fields of the `~astropy.cosmology.Cosmology`. Returns ------- `~astropy.cosmology.Cosmology` Examples -------- To see loading a `~astropy.cosmology.Cosmology` from a Row with ``from_row``, we will first make a `~astropy.table.Row` using :func:`~astropy.cosmology.Cosmology.to_format`. >>> from astropy.cosmology import Cosmology, Planck18 >>> cr = Planck18.to_format("astropy.row") >>> cr <Row index=0> cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Now this row can be used to load a new cosmological instance identical to the ``Planck18`` cosmology from which it was generated. >>> cosmo = Cosmology.from_format(cr, format="astropy.row") >>> cosmo FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) The ``cosmology`` information (column or metadata) may be omitted if the cosmology class (or its string name) is passed as the ``cosmology`` keyword argument to |Cosmology.from_format|. >>> del cr.columns["cosmology"] # remove cosmology from metadata >>> Cosmology.from_format(cr, cosmology="FlatLambdaCDM") FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) Alternatively, specific cosmology classes can be used to parse the data. >>> from astropy.cosmology import FlatLambdaCDM >>> FlatLambdaCDM.from_format(cr) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) When using a specific cosmology class, the class' default parameter values are used to fill in any missing information. >>> del cr.columns["Tcmb0"] # show FlatLambdaCDM provides default >>> FlatLambdaCDM.from_format(cr) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) If a `~astropy.table.Row` object has columns that do not match the fields of the `~astropy.cosmology.Cosmology` class, they can be mapped using the ``rename`` keyword argument. 
>>> renamed = Planck18.to_format("astropy.row", rename={"H0": "Hubble"}) >>> renamed <Row index=0> cosmology name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 >>> cosmo = Cosmology.from_format(renamed, format="astropy.row", ... rename={"Hubble": "H0"}) >>> cosmo == Planck18 True """ inv_rename = {v: k for k, v in rename.items()} if rename is not None else {} kname = inv_rename.get("name", "name") kmeta = inv_rename.get("meta", "meta") kcosmo = inv_rename.get("cosmology", "cosmology") # special values name = row.get(kname) meta = defaultdict(dict, copy.deepcopy(row.meta)) # Now need to add the Columnar metadata. This is only available on the # parent table. If Row is ever separated from Table, this should be moved # to ``to_table``. for col in row._table.itercols(): if col.info.meta: # Only add metadata if not empty meta[col.name].update(col.info.meta) # turn row into mapping, filling cosmo if not in a column mapping = dict(row) mapping[kname] = name mapping.setdefault(kcosmo, meta.pop(kcosmo, None)) mapping[kmeta] = dict(meta) # build cosmology from map return from_mapping( mapping, move_to_meta=move_to_meta, cosmology=cosmology, rename=rename )
Serialize the cosmology into a `~astropy.table.Row`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` The cosmology instance to convert to a mapping. *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` table_cls : type (optional, keyword-only) Astropy :class:`~astropy.table.Table` class or subclass type to use. Default is :class:`~astropy.table.QTable`. cosmology_in_meta : bool Whether to put the cosmology class in the Table metadata (if `True`) or as the first column (if `False`, default). Returns ------- `~astropy.table.Row` With columns for the cosmology parameters, and metadata in the Table's ``meta`` attribute. The cosmology class name will either be a column or in ``meta``, depending on 'cosmology_in_meta'. Examples -------- A `~astropy.cosmology.Cosmology` as a `~astropy.table.Row` will have the cosmology's name and parameters as columns. >>> from astropy.cosmology import Planck18 >>> cr = Planck18.to_format("astropy.row") >>> cr <Row index=0> cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 The cosmological class and other metadata, e.g. a paper reference, are in the Table's metadata. >>> cr.meta OrderedDict([('Oc0', 0.2607), ('n', 0.9665), ...]) To move the cosmology class from a column to the Table's metadata, set the ``cosmology_in_meta`` argument to `True`: >>> Planck18.to_format("astropy.table", cosmology_in_meta=True) <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 In Astropy, Row objects are always part of a Table. :class:`~astropy.table.QTable` is recommended for tables with `~astropy.units.Quantity` columns. However the returned type may be overridden using the ``cls`` argument: >>> from astropy.table import Table >>> Planck18.to_format("astropy.table", cls=Table) <Table length=1> ... The columns can be renamed using the ``rename`` keyword argument. >>> renamed = Planck18.to_format("astropy.row", rename={"H0": "Hubble"}) >>> renamed <Row index=0> cosmology name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
def to_row(cosmology, *args, cosmology_in_meta=False, table_cls=QTable, rename=None): """Serialize the cosmology into a `~astropy.table.Row`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` The cosmology instance to convert to a mapping. *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` table_cls : type (optional, keyword-only) Astropy :class:`~astropy.table.Table` class or subclass type to use. Default is :class:`~astropy.table.QTable`. cosmology_in_meta : bool Whether to put the cosmology class in the Table metadata (if `True`) or as the first column (if `False`, default). Returns ------- `~astropy.table.Row` With columns for the cosmology parameters, and metadata in the Table's ``meta`` attribute. The cosmology class name will either be a column or in ``meta``, depending on 'cosmology_in_meta'. Examples -------- A `~astropy.cosmology.Cosmology` as a `~astropy.table.Row` will have the cosmology's name and parameters as columns. >>> from astropy.cosmology import Planck18 >>> cr = Planck18.to_format("astropy.row") >>> cr <Row index=0> cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 The cosmological class and other metadata, e.g. a paper reference, are in the Table's metadata. >>> cr.meta OrderedDict([('Oc0', 0.2607), ('n', 0.9665), ...]) To move the cosmology class from a column to the Table's metadata, set the ``cosmology_in_meta`` argument to `True`: >>> Planck18.to_format("astropy.table", cosmology_in_meta=True) <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 In Astropy, Row objects are always part of a Table. :class:`~astropy.table.QTable` is recommended for tables with `~astropy.units.Quantity` columns. However the returned type may be overridden using the ``cls`` argument: >>> from astropy.table import Table >>> Planck18.to_format("astropy.table", cls=Table) <Table length=1> ... The columns can be renamed using the ``rename`` keyword argument. >>> renamed = Planck18.to_format("astropy.row", rename={"H0": "Hubble"}) >>> renamed <Row index=0> cosmology name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 """ from .table import to_table table = to_table( cosmology, cls=table_cls, cosmology_in_meta=cosmology_in_meta, rename=rename ) return table[0]
Identify if object uses the `~astropy.table.Row` format. Returns ------- bool
def row_identify(origin, format, *args, **kwargs): """Identify if object uses the `~astropy.table.Row` format. Returns ------- bool """ itis = False if origin == "read": itis = isinstance(args[1], Row) and (format in (None, "astropy.row")) return itis
Instantiate a `~astropy.cosmology.Cosmology` from a |QTable|. Parameters ---------- table : `~astropy.table.Table` The object to parse into a |Cosmology|. index : int, str, or None, optional Needed to select the row in tables with multiple rows. ``index`` can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str or type or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'table'. rename : dict or None (optional, keyword-only) A dictionary mapping columns in 'table' to fields of the `~astropy.cosmology.Cosmology` class. Returns ------- `~astropy.cosmology.Cosmology` Examples -------- To see loading a `~astropy.cosmology.Cosmology` from a Table with ``from_table``, we will first make a |QTable| using :func:`~astropy.cosmology.Cosmology.to_format`. >>> from astropy.cosmology import Cosmology, Planck18 >>> ct = Planck18.to_format("astropy.table") >>> ct <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Now this table can be used to load a new cosmological instance identical to the ``Planck18`` cosmology from which it was generated. >>> cosmo = Cosmology.from_format(ct, format="astropy.table") >>> cosmo FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) The ``cosmology`` information (column or metadata) may be omitted if the cosmology class (or its string name) is passed as the ``cosmology`` keyword argument to |Cosmology.from_format|. >>> del ct.meta["cosmology"] # remove cosmology from metadata >>> Cosmology.from_format(ct, cosmology="FlatLambdaCDM") FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) Alternatively, specific cosmology classes can be used to parse the data. >>> from astropy.cosmology import FlatLambdaCDM >>> FlatLambdaCDM.from_format(ct) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) When using a specific cosmology class, the class' default parameter values are used to fill in any missing information. >>> del ct["Tcmb0"] # show FlatLambdaCDM provides default >>> FlatLambdaCDM.from_format(ct) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) For tables with multiple rows of cosmological parameters, the ``index`` argument is needed to select the correct row. The index can be an integer for the row number or, if the table is indexed by a column, the value of that column. 
If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. Here is an example where ``index`` is needed and can be either an integer (for the row number) or the name of one of the cosmologies, e.g. 'Planck15'. >>> from astropy.cosmology import Planck13, Planck15, Planck18 >>> from astropy.table import vstack >>> cts = vstack([c.to_format("astropy.table") ... for c in (Planck13, Planck15, Planck18)], ... metadata_conflicts='silent') >>> cts <QTable length=3> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- -------- Planck13 67.77 0.30712 2.7255 3.046 0.0 .. 0.06 0.048252 Planck15 67.74 0.3075 2.7255 3.046 0.0 .. 0.06 0.0486 Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 >>> cosmo = Cosmology.from_format(cts, index="Planck15", format="astropy.table") >>> cosmo == Planck15 True Fields in the table can be renamed to match the `~astropy.cosmology.Cosmology` class' signature using the ``rename`` argument. This is useful when the table's column names do not match the class' parameter names. >>> renamed_table = Planck18.to_format("astropy.table", rename={"H0": "Hubble"}) >>> renamed_table <QTable length=1> name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 >>> cosmo = Cosmology.from_format(renamed_table, format="astropy.table", ... rename={"Hubble": "H0"}) >>> cosmo == Planck18 True For further examples, see :doc:`astropy:cosmology/io`.
def from_table(table, index=None, *, move_to_meta=False, cosmology=None, rename=None): """Instantiate a `~astropy.cosmology.Cosmology` from a |QTable|. Parameters ---------- table : `~astropy.table.Table` The object to parse into a |Cosmology|. index : int, str, or None, optional Needed to select the row in tables with multiple rows. ``index`` can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. move_to_meta : bool (optional, keyword-only) Whether to move keyword arguments that are not in the Cosmology class' signature to the Cosmology's metadata. This will only be applied if the Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``). Arguments moved to the metadata will be merged with existing metadata, preferring specified metadata in the case of a merge conflict (e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be ``{'key': 10}``). cosmology : str or type or None (optional, keyword-only) The cosmology class (or string name thereof) to use when constructing the cosmology instance. The class also provides default parameter values, filling in any non-mandatory arguments missing in 'table'. rename : dict or None (optional, keyword-only) A dictionary mapping columns in 'table' to fields of the `~astropy.cosmology.Cosmology` class. Returns ------- `~astropy.cosmology.Cosmology` Examples -------- To see loading a `~astropy.cosmology.Cosmology` from a Table with ``from_table``, we will first make a |QTable| using :func:`~astropy.cosmology.Cosmology.to_format`. >>> from astropy.cosmology import Cosmology, Planck18 >>> ct = Planck18.to_format("astropy.table") >>> ct <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Now this table can be used to load a new cosmological instance identical to the ``Planck18`` cosmology from which it was generated. >>> cosmo = Cosmology.from_format(ct, format="astropy.table") >>> cosmo FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) The ``cosmology`` information (column or metadata) may be omitted if the cosmology class (or its string name) is passed as the ``cosmology`` keyword argument to |Cosmology.from_format|. >>> del ct.meta["cosmology"] # remove cosmology from metadata >>> Cosmology.from_format(ct, cosmology="FlatLambdaCDM") FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) Alternatively, specific cosmology classes can be used to parse the data. >>> from astropy.cosmology import FlatLambdaCDM >>> FlatLambdaCDM.from_format(ct) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) When using a specific cosmology class, the class' default parameter values are used to fill in any missing information. >>> del ct["Tcmb0"] # show FlatLambdaCDM provides default >>> FlatLambdaCDM.from_format(ct) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897) For tables with multiple rows of cosmological parameters, the ``index`` argument is needed to select the correct row. 
The index can be an integer for the row number or, if the table is indexed by a column, the value of that column. If the table is not indexed and ``index`` is a string, the "name" column is used as the indexing column. Here is an example where ``index`` is needed and can be either an integer (for the row number) or the name of one of the cosmologies, e.g. 'Planck15'. >>> from astropy.cosmology import Planck13, Planck15, Planck18 >>> from astropy.table import vstack >>> cts = vstack([c.to_format("astropy.table") ... for c in (Planck13, Planck15, Planck18)], ... metadata_conflicts='silent') >>> cts <QTable length=3> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- -------- Planck13 67.77 0.30712 2.7255 3.046 0.0 .. 0.06 0.048252 Planck15 67.74 0.3075 2.7255 3.046 0.0 .. 0.06 0.0486 Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 >>> cosmo = Cosmology.from_format(cts, index="Planck15", format="astropy.table") >>> cosmo == Planck15 True Fields in the table can be renamed to match the `~astropy.cosmology.Cosmology` class' signature using the ``rename`` argument. This is useful when the table's column names do not match the class' parameter names. >>> renamed_table = Planck18.to_format("astropy.table", rename={"H0": "Hubble"}) >>> renamed_table <QTable length=1> name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 >>> cosmo = Cosmology.from_format(renamed_table, format="astropy.table", ... rename={"Hubble": "H0"}) >>> cosmo == Planck18 True For further examples, see :doc:`astropy:cosmology/io`. """ # Get row from table # string index uses the indexed column on the table to find the row index. if isinstance(index, str): if not table.indices: # no indexing column, find by string match nc = "name" # default name column if rename is not None: # from inverted `rename` for key, value in rename.items(): if value == "name": nc = key break indices = np.where(table[nc] == index)[0] else: # has indexing column indices = table.loc_indices[index] # need to convert to row index (int) if isinstance(indices, (int, np.integer)): # loc_indices index = indices elif len(indices) == 1: # only happens w/ np.where index = indices[0] elif len(indices) == 0: # matches from loc_indices raise KeyError(f"No matches found for key {indices}") else: # like the Highlander, there can be only 1 Cosmology raise ValueError(f"more than one cosmology found for key {indices}") # no index is needed for a 1-row table. For a multi-row table... if index is None: if len(table) != 1: # multi-row table and no index raise ValueError( "need to select a specific row (e.g. index=1) when " "constructing a Cosmology from a multi-row table." ) else: # single-row table index = 0 row = table[index] # index is now the row index (int) # parse row to cosmo return from_row(row, move_to_meta=move_to_meta, cosmology=cosmology, rename=rename)
Serialize the cosmology into a `~astropy.table.QTable`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` The cosmology instance to convert to a table. *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` cls : type (optional, keyword-only) Astropy :class:`~astropy.table.Table` class or subclass type to return. Default is :class:`~astropy.table.QTable`. cosmology_in_meta : bool (optional, keyword-only) Whether to put the cosmology class in the Table metadata (if `True`, default) or as the first column (if `False`). Returns ------- `~astropy.table.QTable` With columns for the cosmology parameters, and metadata and cosmology class name in the Table's ``meta`` attribute Raises ------ TypeError If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table` Examples -------- A Cosmology as a `~astropy.table.QTable` will have the cosmology's name and parameters as columns. >>> from astropy.cosmology import Planck18 >>> ct = Planck18.to_format("astropy.table") >>> ct <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 The cosmological class and other metadata, e.g. a paper reference, are in the Table's metadata. >>> ct.meta OrderedDict([..., ('cosmology', 'FlatLambdaCDM')]) To move the cosmology class from the metadata to a Table column, set the ``cosmology_in_meta`` argument to `False`: >>> Planck18.to_format("astropy.table", cosmology_in_meta=False) <QTable length=1> cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Astropy recommends `~astropy.table.QTable` for tables with `~astropy.units.Quantity` columns. However the returned type may be overridden using the ``cls`` argument: >>> from astropy.table import Table >>> Planck18.to_format("astropy.table", cls=Table) <Table length=1> ... Fields of the cosmology may be renamed using the ``rename`` argument. >>> Planck18.to_format("astropy.table", rename={"H0": "Hubble"}) <QTable length=1> name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
def to_table(cosmology, *args, cls=QTable, cosmology_in_meta=True, rename=None): """Serialize the cosmology into a `~astropy.table.QTable`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` The cosmology instance to convert to a table. *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` cls : type (optional, keyword-only) Astropy :class:`~astropy.table.Table` class or subclass type to return. Default is :class:`~astropy.table.QTable`. cosmology_in_meta : bool (optional, keyword-only) Whether to put the cosmology class in the Table metadata (if `True`, default) or as the first column (if `False`). Returns ------- `~astropy.table.QTable` With columns for the cosmology parameters, and metadata and cosmology class name in the Table's ``meta`` attribute Raises ------ TypeError If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table` Examples -------- A Cosmology as a `~astropy.table.QTable` will have the cosmology's name and parameters as columns. >>> from astropy.cosmology import Planck18 >>> ct = Planck18.to_format("astropy.table") >>> ct <QTable length=1> name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 The cosmological class and other metadata, e.g. a paper reference, are in the Table's metadata. >>> ct.meta OrderedDict([..., ('cosmology', 'FlatLambdaCDM')]) To move the cosmology class from the metadata to a Table column, set the ``cosmology_in_meta`` argument to `False`: >>> Planck18.to_format("astropy.table", cosmology_in_meta=False) <QTable length=1> cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str13 str8 float64 float64 float64 float64 float64[3] float64 ------------- -------- ------------ ------- ------- ------- ----------- ------- FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 Astropy recommends `~astropy.table.QTable` for tables with `~astropy.units.Quantity` columns. However the returned type may be overridden using the ``cls`` argument: >>> from astropy.table import Table >>> Planck18.to_format("astropy.table", cls=Table) <Table length=1> ... Fields of the cosmology may be renamed using the ``rename`` argument. >>> Planck18.to_format("astropy.table", rename={"H0": "Hubble"}) <QTable length=1> name Hubble Om0 Tcmb0 Neff m_nu Ob0 km / (Mpc s) K eV str8 float64 float64 float64 float64 float64[3] float64 -------- ------------ ------- ------- ------- ----------- ------- Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897 """ if not issubclass(cls, Table): raise TypeError(f"'cls' must be a (sub)class of Table, not {type(cls)}") # Start by getting a map representation. data = to_mapping(cosmology) data["cosmology"] = data["cosmology"].__qualname__ # change to str # Metadata meta = data.pop("meta") # remove the meta if cosmology_in_meta: meta["cosmology"] = data.pop("cosmology") # Need to turn everything into something Table can process: # - Column for Parameter # - list for anything else cosmo_cls = cosmology.__class__ for k, v in data.items(): if k in cosmology.parameters: col = convert_parameter_to_column( cosmo_cls.parameters[k], v, cosmology.meta.get(k) ) else: col = Column([v]) data[k] = col tbl = cls(data, meta=meta) # Renames renames = rename or {} for name in tbl.colnames: tbl.rename_column(name, renames.get(name, name)) # Add index tbl.add_index(renames.get("name", "name"), unique=True) return tbl
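Because ``to_table`` adds a unique index on the (possibly renamed) "name" column, the resulting table supports label-based row access, which is what ``from_table`` relies on when ``index`` is a string. A small illustrative sketch:

from astropy.cosmology import Planck18

ct = Planck18.to_format("astropy.table")
row = ct.loc["Planck18"]   # label-based lookup through the "name" index
print(row["H0"])           # 67.66 km / (Mpc s)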
Identify if object uses the Table format. Returns ------- bool
def table_identify(origin, format, *args, **kwargs): """Identify if object uses the Table format. Returns ------- bool """ itis = False if origin == "read": itis = isinstance(args[1], Table) and (format in (None, "astropy.table")) return itis
Convert a |Cosmology| Parameter to a Table |Column|. Parameters ---------- parameter : `astropy.cosmology.parameter.Parameter` value : Any meta : dict or None, optional Information from the Cosmology's metadata. Returns ------- `astropy.table.Column`
def convert_parameter_to_column(parameter, value, meta=None): """Convert a |Cosmology| Parameter to a Table |Column|. Parameters ---------- parameter : `astropy.cosmology.parameter.Parameter` value : Any meta : dict or None, optional Information from the Cosmology's metadata. Returns ------- `astropy.table.Column` """ shape = (1,) + np.shape(value) # minimum of 1d col = Column( data=np.reshape(value, shape), name=parameter.name, dtype=None, # inferred from the data description=parameter.__doc__, format=None, meta=meta, ) return col
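An illustrative sketch of the helper above, assuming the class-level ``parameters`` mapping of Parameter descriptors used elsewhere in this module (e.g. in ``to_model``). The value is wrapped into a length-1 column so it can sit in a one-row table.

from astropy.cosmology import FlatLambdaCDM, Planck18

col = convert_parameter_to_column(FlatLambdaCDM.parameters["Om0"], Planck18.Om0)
print(col.name, col.shape)   # Om0 (1,)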
Convert a Cosmology Parameter to a Model Parameter. Parameters ---------- parameter : `astropy.cosmology.parameter.Parameter` value : Any meta : dict or None, optional Information from the Cosmology's metadata. This function will use any of: 'getter', 'setter', 'fixed', 'tied', 'min', 'max', 'bounds', 'prior', 'posterior'. Returns ------- `astropy.modeling.Parameter`
def convert_parameter_to_model_parameter(parameter, value, meta=None): """Convert a Cosmology Parameter to a Model Parameter. Parameters ---------- parameter : `astropy.cosmology.parameter.Parameter` value : Any meta : dict or None, optional Information from the Cosmology's metadata. This function will use any of: 'getter', 'setter', 'fixed', 'tied', 'min', 'max', 'bounds', 'prior', 'posterior'. Returns ------- `astropy.modeling.Parameter` """ # Get from meta information relevant to Model attrs = ( "getter", "setter", "fixed", "tied", "min", "max", "bounds", "prior", "posterior", ) extra = {k: v for k, v in (meta or {}).items() if k in attrs} return ModelParameter( description=parameter.__doc__, default=value, unit=getattr(value, "unit", None), **extra, )
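An illustrative sketch of this conversion, again assuming the class-level ``parameters`` mapping; here the metadata marks the resulting model parameter as fixed.

from astropy.cosmology import FlatLambdaCDM, Planck18

mp = convert_parameter_to_model_parameter(
    FlatLambdaCDM.parameters["H0"], Planck18.H0, meta={"fixed": True}
)
print(mp.fixed, mp.unit)   # True km / (Mpc s)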
`yaml <https://yaml.org>`_ representation of |Cosmology| object. Parameters ---------- tag : str The class tag, e.g. '!astropy.cosmology.LambdaCDM' Returns ------- representer : callable[[`~astropy.io.misc.yaml.AstropyDumper`, |Cosmology|], str] Function to construct :mod:`yaml` representation of |Cosmology| object.
def yaml_representer(tag): """`yaml <https://yaml.org>`_ representation of |Cosmology| object. Parameters ---------- tag : str The class tag, e.g. '!astropy.cosmology.LambdaCDM' Returns ------- representer : callable[[`~astropy.io.misc.yaml.AstropyDumper`, |Cosmology|], str] Function to construct :mod:`yaml` representation of |Cosmology| object. """ def representer(dumper, obj): """Cosmology yaml representer function for {}. Parameters ---------- dumper : `~astropy.io.misc.yaml.AstropyDumper` obj : `~astropy.cosmology.Cosmology` Returns ------- str :mod:`yaml` representation of |Cosmology| object. """ # convert to mapping map = obj.to_format("mapping") # remove the cosmology class info. It's already recorded in `tag` map.pop("cosmology") # make the metadata serializable in an order-preserving way. map["meta"] = tuple(map["meta"].items()) return dumper.represent_mapping(tag, map) representer.__doc__ = representer.__doc__.format(tag) return representer
|Cosmology| object from :mod:`yaml` representation.

Parameters
----------
cls : type
    The class type, e.g. `~astropy.cosmology.LambdaCDM`.

Returns
-------
constructor : callable
    Function to construct |Cosmology| object from :mod:`yaml` representation.
def yaml_constructor(cls):
    """|Cosmology| object from :mod:`yaml` representation.

    Parameters
    ----------
    cls : type
        The class type, e.g. `~astropy.cosmology.LambdaCDM`.

    Returns
    -------
    constructor : callable
        Function to construct |Cosmology| object from :mod:`yaml` representation.
    """

    def constructor(loader, node):
        """Cosmology yaml constructor function.

        Parameters
        ----------
        loader : `~astropy.io.misc.yaml.AstropyLoader`
        node : `yaml.nodes.MappingNode`
            yaml representation of |Cosmology| object.

        Returns
        -------
        `~astropy.cosmology.Cosmology` subclass instance
        """
        # create mapping from YAML node
        map = loader.construct_mapping(node)
        # restore metadata to dict
        map["meta"] = dict(map["meta"])
        # get cosmology class qualified name from node
        cosmology = str(node.tag).split(".")[-1]
        # create Cosmology from mapping
        return from_mapping(map, move_to_meta=False, cosmology=cosmology)

    return constructor
Register :mod:`yaml` for Cosmology class. Parameters ---------- cosmo_cls : `~astropy.cosmology.Cosmology` class
def register_cosmology_yaml(cosmo_cls): """Register :mod:`yaml` for Cosmology class. Parameters ---------- cosmo_cls : `~astropy.cosmology.Cosmology` class """ fqn = f"{cosmo_cls.__module__}.{cosmo_cls.__qualname__}" tag = "!" + QNS.get( fqn, fqn ) # Possibly sub fully qualified name for a preferred path AstropyDumper.add_representer(cosmo_cls, yaml_representer(tag)) AstropyLoader.add_constructor(tag, yaml_constructor(cosmo_cls))
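The real module calls ``register_cosmology_yaml`` at import time for every concrete |Cosmology| subclass; the sketch below only shows the call pattern and the resulting round trip through the module's ``dump``/``load`` helpers (from ``astropy.io.misc.yaml``). Re-registering an already-registered class is harmless, it simply overwrites the entry.

import astropy.units as u
import astropy.cosmology.units as cu
from astropy.cosmology import FlatLambdaCDM, LambdaCDM, Planck18

for cls in (LambdaCDM, FlatLambdaCDM):
    register_cosmology_yaml(cls)

yml = dump(Planck18)                 # uses the registered representer
with u.add_enabled_units(cu):        # redshift units needed on load
    assert load(yml) == Planck18     # uses the registered constructor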
Load `~astropy.cosmology.Cosmology` from :mod:`yaml` object.

Parameters
----------
yml : str
    :mod:`yaml` representation of |Cosmology| object
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
    The expected cosmology class (or string name thereof). This argument is
    only checked for correctness if not `None`.

Returns
-------
`~astropy.cosmology.Cosmology` subclass instance

Raises
------
TypeError
    If the |Cosmology| object loaded from ``yml`` is not an instance of the
    ``cosmology`` (and ``cosmology`` is not `None`).

Examples
--------
>>> from astropy.cosmology import Cosmology, Planck18
>>> yml = Planck18.to_format("yaml")
>>> print(Cosmology.from_format(yml, format="yaml"))
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
              Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
def from_yaml(yml, *, cosmology=None):
    """Load `~astropy.cosmology.Cosmology` from :mod:`yaml` object.

    Parameters
    ----------
    yml : str
        :mod:`yaml` representation of |Cosmology| object
    cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
        The expected cosmology class (or string name thereof). This argument
        is only checked for correctness if not `None`.

    Returns
    -------
    `~astropy.cosmology.Cosmology` subclass instance

    Raises
    ------
    TypeError
        If the |Cosmology| object loaded from ``yml`` is not an instance of
        the ``cosmology`` (and ``cosmology`` is not `None`).

    Examples
    --------
    >>> from astropy.cosmology import Cosmology, Planck18
    >>> yml = Planck18.to_format("yaml")
    >>> print(Cosmology.from_format(yml, format="yaml"))
    FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
                  Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
    """
    with u.add_enabled_units(cu):
        cosmo = load(yml)

    # Check argument `cosmology`, if not None
    # This kwarg is required for compatibility with |Cosmology.from_format|
    if isinstance(cosmology, str):
        cosmology = _COSMOLOGY_CLASSES[cosmology]
    if cosmology is not None and not isinstance(cosmo, cosmology):
        raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")

    return cosmo
Return the cosmology class, parameters, and metadata as a :mod:`yaml` object. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance The cosmology to serialize. *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` Returns ------- str :mod:`yaml` representation of |Cosmology| object Examples -------- >>> from astropy.cosmology import Planck18 >>> Planck18.to_format("yaml") "!astropy.cosmology...FlatLambdaCDM\nH0: !astropy.units.Quantity...
def to_yaml(cosmology, *args): r"""Return the cosmology class, parameters, and metadata as a :mod:`yaml` object. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance The cosmology to serialize. *args Not used. Needed for compatibility with `~astropy.io.registry.UnifiedReadWriteMethod` Returns ------- str :mod:`yaml` representation of |Cosmology| object Examples -------- >>> from astropy.cosmology import Planck18 >>> Planck18.to_format("yaml") "!astropy.cosmology...FlatLambdaCDM\nH0: !astropy.units.Quantity... """ return dump(cosmology)
Test that ONLY the expected I/O is registered.
def test_expected_readwrite_io(): """Test that ONLY the expected I/O is registered.""" got = {k for k, _ in readwrite_registry._readers.keys()} expected = {"ascii.ecsv", "ascii.html"} assert got == expected
Test that ONLY the expected I/O is registered.
def test_expected_convert_io(): """Test that ONLY the expected I/O is registered.""" got = {k for k, _ in convert_registry._readers.keys()} expected = { "astropy.cosmology", "mapping", "astropy.model", "astropy.row", "astropy.table", "yaml", } assert got == expected
Read JSON. Parameters ---------- filename : str **kwargs Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format` Returns ------- `~astropy.cosmology.Cosmology` instance
def read_json(filename, **kwargs): """Read JSON. Parameters ---------- filename : str **kwargs Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format` Returns ------- `~astropy.cosmology.Cosmology` instance """ # read if isinstance(filename, (str, bytes, os.PathLike)): with open(filename) as file: data = file.read() else: # file-like : this also handles errors in dumping data = filename.read() mapping = json.loads(data) # parse json mappable to dict # deserialize Quantity with u.add_enabled_units(cu.redshift): for k, v in mapping.items(): if isinstance(v, dict) and "value" in v and "unit" in v: mapping[k] = u.Quantity(v["value"], v["unit"]) for k, v in mapping.get("meta", {}).items(): # also the metadata if isinstance(v, dict) and "value" in v and "unit" in v: mapping["meta"][k] = u.Quantity(v["value"], v["unit"]) return Cosmology.from_format(mapping, format="mapping", **kwargs)
Write Cosmology to JSON. Parameters ---------- cosmology : `astropy.cosmology.Cosmology` subclass instance file : path-like or file-like overwrite : bool (optional, keyword-only)
def write_json(cosmology, file, *, overwrite=False): """Write Cosmology to JSON. Parameters ---------- cosmology : `astropy.cosmology.Cosmology` subclass instance file : path-like or file-like overwrite : bool (optional, keyword-only) """ data = cosmology.to_format("mapping") # start by turning into dict data["cosmology"] = data["cosmology"].__qualname__ # serialize Quantity for k, v in data.items(): if isinstance(v, u.Quantity): data[k] = {"value": v.value.tolist(), "unit": str(v.unit)} for k, v in data.get("meta", {}).items(): # also serialize the metadata if isinstance(v, u.Quantity): data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)} # check that file exists and whether to overwrite. if os.path.exists(file) and not overwrite: raise OSError(f"{file} exists. Set 'overwrite' to write over.") with open(file, "w") as write_file: json.dump(data, write_file)
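``read_json`` and ``write_json`` follow the pattern for user-defined formats: to be usable through ``Cosmology.read``/``Cosmology.write`` they must be registered with the ``readwrite_registry`` checked by the tests earlier in this file. The sketch below shows that registration; ``json_identify`` is a hypothetical helper added here only so the format can be auto-detected from the ``.json`` extension.

from astropy.cosmology import Cosmology, Planck18


def json_identify(origin, filepath, fileobj, *args, **kwargs):
    """Identify JSON files by their extension (hypothetical helper)."""
    return filepath is not None and filepath.endswith(".json")


readwrite_registry.register_reader("json", Cosmology, read_json)
readwrite_registry.register_writer("json", Cosmology, write_json)
readwrite_registry.register_identifier("json", Cosmology, json_identify)

# Planck18.write("planck18.json", format="json")
# cosmo = Cosmology.read("planck18.json", format="json")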
Test :func:`~astropy.cosmology._io.yaml.yaml_representer`.
def test_yaml_representer(): """Test :func:`~astropy.cosmology._io.yaml.yaml_representer`.""" # test function `representer` representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM") assert callable(representer) # test the normal method of dumping to YAML yml = dump(Planck18) assert isinstance(yml, str) assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")
Test :func:`~astropy.cosmology._io.yaml.yaml_constructor`.
def test_yaml_constructor(): """Test :func:`~astropy.cosmology._io.yaml.yaml_constructor`.""" # test function `constructor` constructor = yaml_constructor(FlatLambdaCDM) assert callable(constructor) # it's too hard to manually construct a node, so we only test dump/load # this is also a good round-trip test yml = dump(Planck18) with u.add_enabled_units(cu): # needed for redshift units cosmo = load(yml) assert isinstance(cosmo, FlatLambdaCDM) assert cosmo == Planck18 assert cosmo.meta == Planck18.meta
Calculate the Julian day based on the year, week of the year, and day of the week, with week_starts_Mon indicating whether the week of the year assumes the week starts on Monday (True) or Sunday (False).
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
    """Calculate the Julian day based on the year, week of the year, and day
    of the week, with week_starts_Mon indicating whether the week of the year
    assumes the week starts on Monday (True) or Sunday (False)."""

    first_weekday = datetime_date(year, 1, 1).weekday()
    # If we are dealing with the %U directive (week starts on Sunday), it's
    # easier to just shift the view to Sunday being the first day of the
    # week.
    if not week_starts_Mon:
        first_weekday = (first_weekday + 1) % 7
        day_of_week = (day_of_week + 1) % 7
    # Need to watch out for a week 0 (when the first day of the year is not
    # the same as that specified by %U or %W).
    week_0_length = (7 - first_weekday) % 7
    if week_of_year == 0:
        return 1 + day_of_week - first_weekday
    else:
        days_to_week = week_0_length + (7 * (week_of_year - 1))
        return 1 + days_to_week + day_of_week
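A quick worked example: January 1st 2003 was a Wednesday, so under the %W convention (weeks start on Monday) week 0 covers January 1-5 and week 1 starts on Monday January 6, which is day-of-year 6.

import datetime

jday = _calc_julian_from_U_or_W(2003, 1, 0, week_starts_Mon=True)
print(jday)                                             # 6
print(datetime.date(2003, 1, 6).timetuple().tm_yday)    # 6, i.e. the same day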
Return a 2-tuple consisting of a time struct and an int containing the number of microseconds based on the input string and the format string.
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a 2-tuple consisting of a time struct and an int containing the number of microseconds based on the input string and the format string.""" for index, arg in enumerate([data_string, format]): if not isinstance(arg, str): msg = "strptime() argument {} must be str, not {}" raise TypeError(msg.format(index, type(arg))) global _TimeRE_cache, _regex_cache with _cache_lock: locale_time = _TimeRE_cache.locale_time if (_getlang() != locale_time.lang or time.tzname != locale_time.tzname or time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() format_regex = _regex_cache.get(format) if not format_regex: try: format_regex = _TimeRE_cache.compile(format) # KeyError raised when a bad format is found; can be specified as # \\, in which case it was a stray % but with a space after it except KeyError as err: bad_directive = err.args[0] if bad_directive == "\\": bad_directive = "%" del err raise ValueError("'%s' is a bad directive in format '%s'" % (bad_directive, format)) from None # IndexError only occurs when the format string is "%" except IndexError: raise ValueError("stray %% in format '%s'" % format) from None _regex_cache[format] = format_regex found = format_regex.match(data_string) if not found: raise ValueError("time data %r does not match format %r" % (data_string, format)) if len(data_string) != found.end(): raise ValueError("unconverted data remains: %s" % data_string[found.end():]) year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 tzoffset = None # Default to -1 to signify that values not known; not critical to have, # though week_of_year = -1 week_of_year_start = -1 # weekday and julian defaulted to None so as to signal need to calculate # values weekday = julian = None found_dict = found.groupdict() for group_key in found_dict.keys(): # Directives not explicitly handled below: # c, x, X # handled by making out of other directives # U, W # worthless without day of the week if group_key == 'y': year = int(found_dict['y']) # Open Group specification for strptime() states that a %y #value in the range of [00, 68] is in the century 2000, while #[69,99] is in the century 1900 if year <= 68: year += 2000 else: year += 1900 elif group_key == 'Y': year = int(found_dict['Y']) elif group_key == 'm': month = int(found_dict['m']) elif group_key == 'B': month = locale_time.f_month.index(found_dict['B'].lower()) elif group_key == 'b': month = locale_time.a_month.index(found_dict['b'].lower()) elif group_key == 'd': day = int(found_dict['d']) elif group_key == 'H': hour = int(found_dict['H']) elif group_key == 'I': hour = int(found_dict['I']) ampm = found_dict.get('p', '').lower() # If there was no AM/PM indicator, we'll treat this like AM if ampm in ('', locale_time.am_pm[0]): # We're in AM so the hour is correct unless we're # looking at 12 midnight. # 12 midnight == 12 AM == hour 0 if hour == 12: hour = 0 elif ampm == locale_time.am_pm[1]: # We're in PM so we need to add 12 to the hour unless # we're looking at 12 noon. # 12 noon == 12 PM == hour 12 if hour != 12: hour += 12 elif group_key == 'M': minute = int(found_dict['M']) elif group_key == 'S': second = int(found_dict['S']) elif group_key == 'f': s = found_dict['f'] # Pad to always return microseconds. 
s += "0" * (6 - len(s)) fraction = int(s) elif group_key == 'A': weekday = locale_time.f_weekday.index(found_dict['A'].lower()) elif group_key == 'a': weekday = locale_time.a_weekday.index(found_dict['a'].lower()) elif group_key == 'w': weekday = int(found_dict['w']) if weekday == 0: weekday = 6 else: weekday -= 1 elif group_key == 'j': julian = int(found_dict['j']) elif group_key in ('U', 'W'): week_of_year = int(found_dict[group_key]) if group_key == 'U': # U starts week on Sunday. week_of_year_start = 6 else: # W starts week on Monday. week_of_year_start = 0 elif group_key == 'z': z = found_dict['z'] tzoffset = int(z[1:3]) * 60 + int(z[3:5]) if z.startswith("-"): tzoffset = -tzoffset elif group_key == 'Z': # Since -1 is default value only need to worry about setting tz if # it can be something other than -1. found_zone = found_dict['Z'].lower() for value, tz_values in enumerate(locale_time.timezone): if found_zone in tz_values: # Deal with bad locale setup where timezone names are the # same and yet time.daylight is true; too ambiguous to # be able to tell what timezone has daylight savings if (time.tzname[0] == time.tzname[1] and time.daylight and found_zone not in ("utc", "gmt")): break else: tz = value break leap_year_fix = False if year is None and month == 2 and day == 29: year = 1904 # 1904 is first leap year of 20th century leap_year_fix = True elif year is None: year = 1900 # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian is None and week_of_year != -1 and weekday is not None: week_starts_Mon = True if week_of_year_start == 0 else False julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) if julian <= 0: year -= 1 yday = 366 if calendar.isleap(year) else 365 julian += yday # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the week # calculation. if julian is None: # Need to add 1 to result since first day of the year is 1, not 0. julian = datetime_date(year, month, day).toordinal() - \ datetime_date(year, 1, 1).toordinal() + 1 else: # Assume that if they bothered to include Julian day it will # be accurate. datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal()) year = datetime_result.year month = datetime_result.month day = datetime_result.day if weekday is None: weekday = datetime_date(year, month, day).weekday() # Add timezone info tzname = found_dict.get("Z") if tzoffset is not None: gmtoff = tzoffset * 60 else: gmtoff = None if leap_year_fix: # the caller didn't supply a year but asked for Feb 29th. We couldn't # use the default of 1900 for computations. We set it back to ensure # that February 29th is smaller than March 1st. year = 1900 return (year, month, day, hour, minute, second, weekday, julian, tz, tzname, gmtoff), fraction
Return a time struct based on the input string and the format string.
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a time struct based on the input string and the format string.""" tt = _strptime(data_string, format)[0] return time.struct_time(tt[:time._STRUCT_TM_ITEMS])
Return a class cls instance based on the input string and the format string.
def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"): """Return a class cls instance based on the input string and the format string.""" tt, fraction = _strptime(data_string, format) tzname, gmtoff = tt[-2:] args = tt[:6] + (fraction,) if gmtoff is not None: tzdelta = datetime_timedelta(seconds=gmtoff) if tzname: tz = datetime_timezone(tzdelta, tzname) else: tz = datetime_timezone(tzdelta) args += (tz,) return cls(*args)
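In CPython these two wrappers back the public time.strptime and datetime.datetime.strptime entry points; a minimal usage sketch of that behaviour (assuming an English locale for the weekday and month names):

import time
import datetime

tt = time.strptime("Mon Jan 02 08:30:00 2023")   # default "%a %b %d %H:%M:%S %Y" format
dt = datetime.datetime.strptime("2023-01-02 08:30:00.250", "%Y-%m-%d %H:%M:%S.%f")
print(tt.tm_yday)        # 2
print(dt.microsecond)    # 250000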
An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*.
def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple:: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. """ if levels is None: # first time called levels = [] results = [] if res == True: return sorted(results) if res == False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return sorted(results) for (key, val) in list(res.items()): if val == True: continue if isinstance(cfg.get(key), Mapping): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return sorted(results)
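A hedged usage sketch (assumes the configobj and validate packages are available; the file names and spec are placeholders):

from configobj import ConfigObj
from validate import Validator

cfg = ConfigObj('app.ini', configspec='app_spec.ini')   # placeholder file names
results = cfg.validate(Validator(), preserve_errors=True)
for sections, key, error in flatten_errors(cfg, results):
    # key is None for a missing/mismatched section; error is False or an exception
    print('%s -> %s: %s' % ('.'.join(sections) or '(top level)', key, error))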
Find all the values and sections not in the configspec from a validated ConfigObj. ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list.
def get_extra_values(conf, _prepend=()): """ Find all the values and sections not in the configspec from a validated ConfigObj. ``get_extra_values`` returns a list of tuples where each tuple represents either an extra section, or an extra value. The tuples contain two values, a tuple representing the section the value is in and the name of the extra values. For extra values in the top level section the first member will be an empty tuple. For values in the 'foo' section the first member will be ``('foo',)``. For members in the 'bar' subsection of the 'foo' section the first member will be ``('foo', 'bar')``. NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't been validated it will return an empty list. """ out = [] out.extend([(_prepend, name) for name in conf.extra_values]) for name in conf.sections: if name not in conf.extra_values: out.extend(get_extra_values(conf[name], _prepend + (name,))) return out
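Continuing the sketch above, entries that were not covered by the configspec can then be reported:

for section_path, name in get_extra_values(cfg):
    print('extra value/section %r under sections %r' % (name, section_path))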
Convert decimal dotted quad string to long integer >>> int(dottedQuadToNum('1 ')) 1 >>> int(dottedQuadToNum(' 1.2')) 16777218 >>> int(dottedQuadToNum(' 1.2.3 ')) 16908291 >>> int(dottedQuadToNum('1.2.3.4')) 16909060 >>> dottedQuadToNum('255.255.255.255') 4294967295 >>> dottedQuadToNum('255.255.255.256') Traceback (most recent call last): ValueError: Not a good dotted-quad IP: 255.255.255.256
def dottedQuadToNum(ip):
    """
    Convert decimal dotted quad string to long integer

    >>> int(dottedQuadToNum('1 '))
    1
    >>> int(dottedQuadToNum(' 1.2'))
    16777218
    >>> int(dottedQuadToNum(' 1.2.3 '))
    16908291
    >>> int(dottedQuadToNum('1.2.3.4'))
    16909060
    >>> dottedQuadToNum('255.255.255.255')
    4294967295
    >>> dottedQuadToNum('255.255.255.256')
    Traceback (most recent call last):
    ValueError: Not a good dotted-quad IP: 255.255.255.256
    """
    # import here to avoid it when ip_addr values are not used
    import socket, struct
    try:
        return struct.unpack('!L',
            socket.inet_aton(ip.strip()))[0]
    except socket.error:
        raise ValueError('Not a good dotted-quad IP: %s' % ip)
Convert int or long int to dotted quad string >>> numToDottedQuad(long(-1)) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(long(1)) '0.0.0.1' >>> numToDottedQuad(long(16777218)) '1.0.0.2' >>> numToDottedQuad(long(16908291)) '1.2.0.3' >>> numToDottedQuad(long(16909060)) '1.2.3.4' >>> numToDottedQuad(long(4294967295)) '255.255.255.255' >>> numToDottedQuad(long(4294967296)) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296 >>> numToDottedQuad(-1) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(1) '0.0.0.1' >>> numToDottedQuad(16777218) '1.0.0.2' >>> numToDottedQuad(16908291) '1.2.0.3' >>> numToDottedQuad(16909060) '1.2.3.4' >>> numToDottedQuad(4294967295) '255.255.255.255' >>> numToDottedQuad(4294967296) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296
def numToDottedQuad(num): """ Convert int or long int to dotted quad string >>> numToDottedQuad(long(-1)) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(long(1)) '0.0.0.1' >>> numToDottedQuad(long(16777218)) '1.0.0.2' >>> numToDottedQuad(long(16908291)) '1.2.0.3' >>> numToDottedQuad(long(16909060)) '1.2.3.4' >>> numToDottedQuad(long(4294967295)) '255.255.255.255' >>> numToDottedQuad(long(4294967296)) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296 >>> numToDottedQuad(-1) Traceback (most recent call last): ValueError: Not a good numeric IP: -1 >>> numToDottedQuad(1) '0.0.0.1' >>> numToDottedQuad(16777218) '1.0.0.2' >>> numToDottedQuad(16908291) '1.2.0.3' >>> numToDottedQuad(16909060) '1.2.3.4' >>> numToDottedQuad(4294967295) '255.255.255.255' >>> numToDottedQuad(4294967296) Traceback (most recent call last): ValueError: Not a good numeric IP: 4294967296 """ # import here to avoid it when ip_addr values are not used import socket, struct # no need to intercept here, 4294967295L is fine if num > long(4294967295) or num < 0: raise ValueError('Not a good numeric IP: %s' % num) try: return socket.inet_ntoa( struct.pack('!L', long(num))) except (socket.error, struct.error, OverflowError): raise ValueError('Not a good numeric IP: %s' % num)
Return numbers from inputs or raise VdtParamError. Lets ``None`` pass through. Pass in keyword argument ``to_float=True`` to use float for the conversion rather than int. >>> _is_num_param(('', ''), (0, 1.0)) [0, 1] >>> _is_num_param(('', ''), (0, 1.0), to_float=True) [0.0, 1.0] >>> _is_num_param(('a'), ('a')) Traceback (most recent call last): VdtParamError: passed an incorrect value "a" for parameter "a".
def _is_num_param(names, values, to_float=False): """ Return numbers from inputs or raise VdtParamError. Lets ``None`` pass through. Pass in keyword argument ``to_float=True`` to use float for the conversion rather than int. >>> _is_num_param(('', ''), (0, 1.0)) [0, 1] >>> _is_num_param(('', ''), (0, 1.0), to_float=True) [0.0, 1.0] >>> _is_num_param(('a'), ('a')) Traceback (most recent call last): VdtParamError: passed an incorrect value "a" for parameter "a". """ fun = to_float and float or int out_params = [] for (name, val) in zip(names, values): if val is None: out_params.append(val) elif isinstance(val, (int, long, float, string_type)): try: out_params.append(fun(val)) except ValueError as e: raise VdtParamError(name, val) else: raise VdtParamError(name, val) return out_params
A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('integer', '2.2') Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0
def is_integer(value, min=None, max=None): """ A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('integer', '2.2') Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0 """ (min_val, max_val) = _is_num_param(('min', 'max'), (min, max)) if not isinstance(value, (int, long, string_type)): raise VdtTypeError(value) if isinstance(value, string_type): # if it's a string - does it represent an integer ? try: value = int(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value
A check that tests that a given value is a float (an integer will be accepted), and optionally - that it is between bounds. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. This can accept negative values. >>> vtor.check('float', '2') 2.0 From now on we multiply the value to avoid comparing decimals >>> vtor.check('float', '-6.8') * 10 -68.0 >>> vtor.check('float', '12.2') * 10 122.0 >>> vtor.check('float', 8.4) * 10 84.0 >>> vtor.check('float', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('float(10.1)', '10.2') * 10 102.0 >>> vtor.check('float(max=20.2)', '15.1') * 10 151.0 >>> vtor.check('float(10.0)', '9.0') Traceback (most recent call last): VdtValueTooSmallError: the value "9.0" is too small. >>> vtor.check('float(max=20.0)', '35.0') Traceback (most recent call last): VdtValueTooBigError: the value "35.0" is too big.
def is_float(value, min=None, max=None): """ A check that tests that a given value is a float (an integer will be accepted), and optionally - that it is between bounds. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. This can accept negative values. >>> vtor.check('float', '2') 2.0 From now on we multiply the value to avoid comparing decimals >>> vtor.check('float', '-6.8') * 10 -68.0 >>> vtor.check('float', '12.2') * 10 122.0 >>> vtor.check('float', 8.4) * 10 84.0 >>> vtor.check('float', 'a') Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('float(10.1)', '10.2') * 10 102.0 >>> vtor.check('float(max=20.2)', '15.1') * 10 151.0 >>> vtor.check('float(10.0)', '9.0') Traceback (most recent call last): VdtValueTooSmallError: the value "9.0" is too small. >>> vtor.check('float(max=20.0)', '35.0') Traceback (most recent call last): VdtValueTooBigError: the value "35.0" is too big. """ (min_val, max_val) = _is_num_param( ('min', 'max'), (min, max), to_float=True) if not isinstance(value, (int, long, float, string_type)): raise VdtTypeError(value) if not isinstance(value, float): # if it's a string - does it represent a float ? try: value = float(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value
Check if the value represents a boolean. >>> vtor.check('boolean', 0) 0 >>> vtor.check('boolean', False) 0 >>> vtor.check('boolean', '0') 0 >>> vtor.check('boolean', 'off') 0 >>> vtor.check('boolean', 'false') 0 >>> vtor.check('boolean', 'no') 0 >>> vtor.check('boolean', 'nO') 0 >>> vtor.check('boolean', 'NO') 0 >>> vtor.check('boolean', 1) 1 >>> vtor.check('boolean', True) 1 >>> vtor.check('boolean', '1') 1 >>> vtor.check('boolean', 'on') 1 >>> vtor.check('boolean', 'true') 1 >>> vtor.check('boolean', 'yes') 1 >>> vtor.check('boolean', 'Yes') 1 >>> vtor.check('boolean', 'YES') 1 >>> vtor.check('boolean', '') Traceback (most recent call last): VdtTypeError: the value "" is of the wrong type. >>> vtor.check('boolean', 'up') Traceback (most recent call last): VdtTypeError: the value "up" is of the wrong type.
def is_boolean(value): """ Check if the value represents a boolean. >>> vtor.check('boolean', 0) 0 >>> vtor.check('boolean', False) 0 >>> vtor.check('boolean', '0') 0 >>> vtor.check('boolean', 'off') 0 >>> vtor.check('boolean', 'false') 0 >>> vtor.check('boolean', 'no') 0 >>> vtor.check('boolean', 'nO') 0 >>> vtor.check('boolean', 'NO') 0 >>> vtor.check('boolean', 1) 1 >>> vtor.check('boolean', True) 1 >>> vtor.check('boolean', '1') 1 >>> vtor.check('boolean', 'on') 1 >>> vtor.check('boolean', 'true') 1 >>> vtor.check('boolean', 'yes') 1 >>> vtor.check('boolean', 'Yes') 1 >>> vtor.check('boolean', 'YES') 1 >>> vtor.check('boolean', '') Traceback (most recent call last): VdtTypeError: the value "" is of the wrong type. >>> vtor.check('boolean', 'up') Traceback (most recent call last): VdtTypeError: the value "up" is of the wrong type. """ if isinstance(value, string_type): try: return bool_dict[value.lower()] except KeyError: raise VdtTypeError(value) # we do an equality test rather than an identity test # this ensures Python 2.2 compatibilty # and allows 0 and 1 to represent True and False if value == False: return False elif value == True: return True else: raise VdtTypeError(value)
Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type.
def is_ip_addr(value): """ Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, string_type): raise VdtTypeError(value) value = value.strip() try: dottedQuadToNum(value) except ValueError: raise VdtValueError(value) return value
Check that the value is a list of values. You can optionally specify the minimum and maximum number of members. It does no check on list members. >>> vtor.check('list', ()) [] >>> vtor.check('list', []) [] >>> vtor.check('list', (1, 2)) [1, 2] >>> vtor.check('list', [1, 2]) [1, 2] >>> vtor.check('list(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) [1, 2, 3, 4] >>> vtor.check('list', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('list', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type.
def is_list(value, min=None, max=None): """ Check that the value is a list of values. You can optionally specify the minimum and maximum number of members. It does no check on list members. >>> vtor.check('list', ()) [] >>> vtor.check('list', []) [] >>> vtor.check('list', (1, 2)) [1, 2] >>> vtor.check('list', [1, 2]) [1, 2] >>> vtor.check('list(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4)) [1, 2, 3, 4] >>> vtor.check('list', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('list', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) if isinstance(value, string_type): raise VdtTypeError(value) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return list(value)
Check that the value is a tuple of values. You can optionally specify the minimum and maximum number of members. It does no check on members. >>> vtor.check('tuple', ()) () >>> vtor.check('tuple', []) () >>> vtor.check('tuple', (1, 2)) (1, 2) >>> vtor.check('tuple', [1, 2]) (1, 2) >>> vtor.check('tuple(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4)) (1, 2, 3, 4) >>> vtor.check('tuple', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('tuple', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type.
def is_tuple(value, min=None, max=None): """ Check that the value is a tuple of values. You can optionally specify the minimum and maximum number of members. It does no check on members. >>> vtor.check('tuple', ()) () >>> vtor.check('tuple', []) () >>> vtor.check('tuple', (1, 2)) (1, 2) >>> vtor.check('tuple', [1, 2]) (1, 2) >>> vtor.check('tuple(3)', (1, 2)) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2)" is too short. >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6)) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long. >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4)) (1, 2, 3, 4) >>> vtor.check('tuple', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('tuple', '12') Traceback (most recent call last): VdtTypeError: the value "12" is of the wrong type. """ return tuple(is_list(value, min, max))
Check that the supplied value is a string. You can optionally specify the minimum and maximum number of members. >>> vtor.check('string', '0') '0' >>> vtor.check('string', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('string(2)', '12') '12' >>> vtor.check('string(2)', '1') Traceback (most recent call last): VdtValueTooShortError: the value "1" is too short. >>> vtor.check('string(min=2, max=3)', '123') '123' >>> vtor.check('string(min=2, max=3)', '1234') Traceback (most recent call last): VdtValueTooLongError: the value "1234" is too long.
def is_string(value, min=None, max=None): """ Check that the supplied value is a string. You can optionally specify the minimum and maximum number of members. >>> vtor.check('string', '0') '0' >>> vtor.check('string', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('string(2)', '12') '12' >>> vtor.check('string(2)', '1') Traceback (most recent call last): VdtValueTooShortError: the value "1" is too short. >>> vtor.check('string(min=2, max=3)', '123') '123' >>> vtor.check('string(min=2, max=3)', '1234') Traceback (most recent call last): VdtValueTooLongError: the value "1234" is too long. """ if not isinstance(value, string_type): raise VdtTypeError(value) (min_len, max_len) = _is_num_param(('min', 'max'), (min, max)) try: num_members = len(value) except TypeError: raise VdtTypeError(value) if min_len is not None and num_members < min_len: raise VdtValueTooShortError(value) if max_len is not None and num_members > max_len: raise VdtValueTooLongError(value) return value
Check that the value is a list of integers. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an integer. >>> vtor.check('int_list', ()) [] >>> vtor.check('int_list', []) [] >>> vtor.check('int_list', (1, 2)) [1, 2] >>> vtor.check('int_list', [1, 2]) [1, 2] >>> vtor.check('int_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type.
def is_int_list(value, min=None, max=None): """ Check that the value is a list of integers. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an integer. >>> vtor.check('int_list', ()) [] >>> vtor.check('int_list', []) [] >>> vtor.check('int_list', (1, 2)) [1, 2] >>> vtor.check('int_list', [1, 2]) [1, 2] >>> vtor.check('int_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_integer(mem) for mem in is_list(value, min, max)]
Check that the value is a list of booleans. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a boolean. >>> vtor.check('bool_list', ()) [] >>> vtor.check('bool_list', []) [] >>> check_res = vtor.check('bool_list', (True, False)) >>> check_res == [True, False] 1 >>> check_res = vtor.check('bool_list', [True, False]) >>> check_res == [True, False] 1 >>> vtor.check('bool_list', [True, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type.
def is_bool_list(value, min=None, max=None): """ Check that the value is a list of booleans. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a boolean. >>> vtor.check('bool_list', ()) [] >>> vtor.check('bool_list', []) [] >>> check_res = vtor.check('bool_list', (True, False)) >>> check_res == [True, False] 1 >>> check_res = vtor.check('bool_list', [True, False]) >>> check_res == [True, False] 1 >>> vtor.check('bool_list', [True, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_boolean(mem) for mem in is_list(value, min, max)]
Check that the value is a list of floats. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a float. >>> vtor.check('float_list', ()) [] >>> vtor.check('float_list', []) [] >>> vtor.check('float_list', (1, 2.0)) [1.0, 2.0] >>> vtor.check('float_list', [1, 2.0]) [1.0, 2.0] >>> vtor.check('float_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type.
def is_float_list(value, min=None, max=None): """ Check that the value is a list of floats. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a float. >>> vtor.check('float_list', ()) [] >>> vtor.check('float_list', []) [] >>> vtor.check('float_list', (1, 2.0)) [1.0, 2.0] >>> vtor.check('float_list', [1, 2.0]) [1.0, 2.0] >>> vtor.check('float_list', [1, 'a']) Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. """ return [is_float(mem) for mem in is_list(value, min, max)]
Check that the value is a list of strings. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a string. >>> vtor.check('string_list', ()) [] >>> vtor.check('string_list', []) [] >>> vtor.check('string_list', ('a', 'b')) ['a', 'b'] >>> vtor.check('string_list', ['a', 1]) Traceback (most recent call last): VdtTypeError: the value "1" is of the wrong type. >>> vtor.check('string_list', 'hello') Traceback (most recent call last): VdtTypeError: the value "hello" is of the wrong type.
def is_string_list(value, min=None, max=None): """ Check that the value is a list of strings. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is a string. >>> vtor.check('string_list', ()) [] >>> vtor.check('string_list', []) [] >>> vtor.check('string_list', ('a', 'b')) ['a', 'b'] >>> vtor.check('string_list', ['a', 1]) Traceback (most recent call last): VdtTypeError: the value "1" is of the wrong type. >>> vtor.check('string_list', 'hello') Traceback (most recent call last): VdtTypeError: the value "hello" is of the wrong type. """ if isinstance(value, string_type): raise VdtTypeError(value) return [is_string(mem) for mem in is_list(value, min, max)]
Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) Traceback (most recent call last): VdtValueError: the value "a" is unacceptable.
def is_ip_addr_list(value, min=None, max=None): """ Check that the value is a list of IP addresses. You can optionally specify the minimum and maximum number of members. Each list member is checked that it is an IP address. >>> vtor.check('ip_addr_list', ()) [] >>> vtor.check('ip_addr_list', []) [] >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8')) ['1.2.3.4', '5.6.7.8'] >>> vtor.check('ip_addr_list', ['a']) Traceback (most recent call last): VdtValueError: the value "a" is unacceptable. """ return [is_ip_addr(mem) for mem in is_list(value, min, max)]
Check that a value is a list, coercing strings into a list with one member. Useful where users forget the trailing comma that turns a single value into a list. You can optionally specify the minimum and maximum number of members. A minimum greater than one will fail if the user only supplies a string. >>> vtor.check('force_list', ()) [] >>> vtor.check('force_list', []) [] >>> vtor.check('force_list', 'hello') ['hello']
def force_list(value, min=None, max=None):
    """
    Check that a value is a list, coercing strings into
    a list with one member. Useful where users forget the
    trailing comma that turns a single value into a list.

    You can optionally specify the minimum and maximum number of members.
    A minimum greater than one will fail if the user only supplies a string.

    >>> vtor.check('force_list', ())
    []
    >>> vtor.check('force_list', [])
    []
    >>> vtor.check('force_list', 'hello')
    ['hello']
    """
    if not isinstance(value, (list, tuple)):
        value = [value]
    return is_list(value, min, max)
Check that the value is a list. Allow specifying the type of each member. Work on lists of specific lengths. You specify each member as a positional argument specifying type Each type should be one of the following strings : 'integer', 'float', 'ip_addr', 'string', 'boolean' So you can specify a list of two strings, followed by two integers as : mixed_list('string', 'string', 'integer', 'integer') The length of the list must match the number of positional arguments you supply. >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')" >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True)) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True')) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True)) Traceback (most recent call last): VdtTypeError: the value "b" is of the wrong type. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a')) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b')) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long. >>> vtor.check(mix_str, 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('mixed_list("yoda")', ('a')) Traceback (most recent call last): VdtParamError: passed an incorrect value "KeyError('yoda',)" for parameter "'mixed_list'"
def is_mixed_list(value, *args): """ Check that the value is a list. Allow specifying the type of each member. Work on lists of specific lengths. You specify each member as a positional argument specifying type Each type should be one of the following strings : 'integer', 'float', 'ip_addr', 'string', 'boolean' So you can specify a list of two strings, followed by two integers as : mixed_list('string', 'string', 'integer', 'integer') The length of the list must match the number of positional arguments you supply. >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')" >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True)) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True')) >>> check_res == [1, 2.0, '1.2.3.4', 'a', True] 1 >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True)) Traceback (most recent call last): VdtTypeError: the value "b" is of the wrong type. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a')) Traceback (most recent call last): VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short. >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b')) Traceback (most recent call last): VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long. >>> vtor.check(mix_str, 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. >>> vtor.check('mixed_list("yoda")', ('a')) Traceback (most recent call last): VdtParamError: passed an incorrect value "KeyError('yoda',)" for parameter "'mixed_list'" """ try: length = len(value) except TypeError: raise VdtTypeError(value) if length < len(args): raise VdtValueTooShortError(value) elif length > len(args): raise VdtValueTooLongError(value) try: return [fun_dict[arg](val) for arg, val in zip(args, value)] except KeyError as e: raise VdtParamError('mixed_list', e)
This check matches the value to any of a set of options. >>> vtor.check('option("yoda", "jedi")', 'yoda') 'yoda' >>> vtor.check('option("yoda", "jedi")', 'jed') Traceback (most recent call last): VdtValueError: the value "jed" is unacceptable. >>> vtor.check('option("yoda", "jedi")', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type.
def is_option(value, *options): """ This check matches the value to any of a set of options. >>> vtor.check('option("yoda", "jedi")', 'yoda') 'yoda' >>> vtor.check('option("yoda", "jedi")', 'jed') Traceback (most recent call last): VdtValueError: the value "jed" is unacceptable. >>> vtor.check('option("yoda", "jedi")', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, string_type): raise VdtTypeError(value) if not value in options: raise VdtValueError(value) return value
A function that exists for test purposes. >>> checks = [ ... '3, 6, min=1, max=3, test=list(a, b, c)', ... '3', ... '3, 6', ... '3,', ... 'min=1, test="a b c"', ... 'min=5, test="a, b, c"', ... 'min=1, max=3, test="a, b, c"', ... 'min=-100, test=-99', ... 'min=1, max=3', ... '3, 6, test="36"', ... '3, 6, test="a, b, c"', ... '3, max=3, test=list("a", "b", "c")', ... '''3, max=3, test=list("'a'", 'b', "x=(c)")''', ... "test='x=fish(3)'", ... ] >>> v = Validator({'test': _test}) >>> for entry in checks: ... pprint(v.check(('test(%s)' % entry), 3)) (3, ('3', '6'), {'max': '3', 'min': '1', 'test': ['a', 'b', 'c']}) (3, ('3',), {}) (3, ('3', '6'), {}) (3, ('3',), {}) (3, (), {'min': '1', 'test': 'a b c'}) (3, (), {'min': '5', 'test': 'a, b, c'}) (3, (), {'max': '3', 'min': '1', 'test': 'a, b, c'}) (3, (), {'min': '-100', 'test': '-99'}) (3, (), {'max': '3', 'min': '1'}) (3, ('3', '6'), {'test': '36'}) (3, ('3', '6'), {'test': 'a, b, c'}) (3, ('3',), {'max': '3', 'test': ['a', 'b', 'c']}) (3, ('3',), {'max': '3', 'test': ["'a'", 'b', 'x=(c)']}) (3, (), {'test': 'x=fish(3)'}) >>> v = Validator() >>> v.check('integer(default=6)', '3') 3 >>> v.check('integer(default=6)', None, True) 6 >>> v.get_default_value('integer(default=6)') 6 >>> v.get_default_value('float(default=6)') 6.0 >>> v.get_default_value('pass(default=None)') >>> v.get_default_value("string(default='None')") 'None' >>> v.get_default_value('pass') Traceback (most recent call last): KeyError: 'Check "pass" has no default value.' >>> v.get_default_value('pass(default=list(1, 2, 3, 4))') ['1', '2', '3', '4'] >>> v = Validator() >>> v.check("pass(default=None)", None, True) >>> v.check("pass(default='None')", None, True) 'None' >>> v.check('pass(default="None")', None, True) 'None' >>> v.check('pass(default=list(1, 2, 3, 4))', None, True) ['1', '2', '3', '4'] Bug test for unicode arguments >>> v = Validator() >>> v.check(unicode('string(min=4)'), unicode('test')) == unicode('test') True >>> v = Validator() >>> v.get_default_value(unicode('string(min=4, default="1234")')) == unicode('1234') True >>> v.check(unicode('string(min=4, default="1234")'), unicode('test')) == unicode('test') True >>> v = Validator() >>> default = v.get_default_value('string(default=None)') >>> default == None 1
def _test(value, *args, **keywargs): """ A function that exists for test purposes. >>> checks = [ ... '3, 6, min=1, max=3, test=list(a, b, c)', ... '3', ... '3, 6', ... '3,', ... 'min=1, test="a b c"', ... 'min=5, test="a, b, c"', ... 'min=1, max=3, test="a, b, c"', ... 'min=-100, test=-99', ... 'min=1, max=3', ... '3, 6, test="36"', ... '3, 6, test="a, b, c"', ... '3, max=3, test=list("a", "b", "c")', ... '''3, max=3, test=list("'a'", 'b', "x=(c)")''', ... "test='x=fish(3)'", ... ] >>> v = Validator({'test': _test}) >>> for entry in checks: ... pprint(v.check(('test(%s)' % entry), 3)) (3, ('3', '6'), {'max': '3', 'min': '1', 'test': ['a', 'b', 'c']}) (3, ('3',), {}) (3, ('3', '6'), {}) (3, ('3',), {}) (3, (), {'min': '1', 'test': 'a b c'}) (3, (), {'min': '5', 'test': 'a, b, c'}) (3, (), {'max': '3', 'min': '1', 'test': 'a, b, c'}) (3, (), {'min': '-100', 'test': '-99'}) (3, (), {'max': '3', 'min': '1'}) (3, ('3', '6'), {'test': '36'}) (3, ('3', '6'), {'test': 'a, b, c'}) (3, ('3',), {'max': '3', 'test': ['a', 'b', 'c']}) (3, ('3',), {'max': '3', 'test': ["'a'", 'b', 'x=(c)']}) (3, (), {'test': 'x=fish(3)'}) >>> v = Validator() >>> v.check('integer(default=6)', '3') 3 >>> v.check('integer(default=6)', None, True) 6 >>> v.get_default_value('integer(default=6)') 6 >>> v.get_default_value('float(default=6)') 6.0 >>> v.get_default_value('pass(default=None)') >>> v.get_default_value("string(default='None')") 'None' >>> v.get_default_value('pass') Traceback (most recent call last): KeyError: 'Check "pass" has no default value.' >>> v.get_default_value('pass(default=list(1, 2, 3, 4))') ['1', '2', '3', '4'] >>> v = Validator() >>> v.check("pass(default=None)", None, True) >>> v.check("pass(default='None')", None, True) 'None' >>> v.check('pass(default="None")', None, True) 'None' >>> v.check('pass(default=list(1, 2, 3, 4))', None, True) ['1', '2', '3', '4'] Bug test for unicode arguments >>> v = Validator() >>> v.check(unicode('string(min=4)'), unicode('test')) == unicode('test') True >>> v = Validator() >>> v.get_default_value(unicode('string(min=4, default="1234")')) == unicode('1234') True >>> v.check(unicode('string(min=4, default="1234")'), unicode('test')) == unicode('test') True >>> v = Validator() >>> default = v.get_default_value('string(default=None)') >>> default == None 1 """ return (value, args, keywargs)
>>> >>> v = Validator() >>> v.get_default_value('string(default="#ff00dd")') '#ff00dd' >>> v.get_default_value('integer(default=3) # comment') 3
def _test2(): """ >>> >>> v = Validator() >>> v.get_default_value('string(default="#ff00dd")') '#ff00dd' >>> v.get_default_value('integer(default=3) # comment') 3 """
>>> vtor.check('string(default="")', '', missing=True) '' >>> vtor.check('string(default="\n")', '', missing=True) '\n' >>> print(vtor.check('string(default="\n")', '', missing=True)) <BLANKLINE> <BLANKLINE> >>> vtor.check('string()', '\n') '\n' >>> vtor.check('string(default="\n\n\n")', '', missing=True) '\n\n\n' >>> vtor.check('string()', 'random \n text goes here\n\n') 'random \n text goes here\n\n' >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")', ... '', missing=True) ' \nrandom text\ngoes \n here\n\n ' >>> vtor.check("string(default='\n\n\n')", '', missing=True) '\n\n\n' >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True) '\n' >>> vtor.check("string_list()", ['foo', '\n', 'bar']) ['foo', '\n', 'bar'] >>> vtor.check("string_list(default=list('\n'))", '', missing=True) ['\n']
def _test3(): r""" >>> vtor.check('string(default="")', '', missing=True) '' >>> vtor.check('string(default="\n")', '', missing=True) '\n' >>> print(vtor.check('string(default="\n")', '', missing=True)) <BLANKLINE> <BLANKLINE> >>> vtor.check('string()', '\n') '\n' >>> vtor.check('string(default="\n\n\n")', '', missing=True) '\n\n\n' >>> vtor.check('string()', 'random \n text goes here\n\n') 'random \n text goes here\n\n' >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")', ... '', missing=True) ' \nrandom text\ngoes \n here\n\n ' >>> vtor.check("string(default='\n\n\n')", '', missing=True) '\n\n\n' >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True) '\n' >>> vtor.check("string_list()", ['foo', '\n', 'bar']) ['foo', '\n', 'bar'] >>> vtor.check("string_list(default=list('\n'))", '', missing=True) ['\n'] """
\s+
def t_CPP_WS(t): r'\s+' t.lexer.lineno += t.value.count("\n") return t
(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)
def CPP_INTEGER(t): r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)' return t
\"([^\\\n]|(\\(.|\n)))*?\"
def t_CPP_STRING(t): r'\"([^\\\n]|(\\(.|\n)))*?\"' t.lexer.lineno += t.value.count("\n") return t
(L)?\'([^\\\n]|(\\(.|\n)))*?\'
def t_CPP_CHAR(t): r'(L)?\'([^\\\n]|(\\(.|\n)))*?\'' t.lexer.lineno += t.value.count("\n") return t
(/\*(.|\n)*?\*/)
def t_CPP_COMMENT1(t): r'(/\*(.|\n)*?\*/)' ncr = t.value.count("\n") t.lexer.lineno += ncr # replace with one space or a number of '\n' t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' ' return t
(//.*?(\n|$))
def t_CPP_COMMENT2(t): r'(//.*?(\n|$))' # replace with '\n' t.type = 'CPP_WS'; t.value = '\n' return t
/\*(.|\n)*?\*/
def t_COMMENT(t): r'/\*(.|\n)*?\*/' t.lexer.lineno += t.value.count('\n') return t
//.*\n
def t_CPPCOMMENT(t): r'//.*\n' t.lexer.lineno += 1 return t
Check that ``table`` has only columns with ndim <= ``max_ndim``. Currently ECSV is the only built-in format that supports output of arbitrary N-d columns, but HTML supports 2-d.
def _check_multidim_table(table, max_ndim): """Check that ``table`` has only columns with ndim <= ``max_ndim``. Currently ECSV is the only built-in format that supports output of arbitrary N-d columns, but HTML supports 2-d. """ # No limit? if max_ndim is None: return # Check for N-d columns nd_names = [col.info.name for col in table.itercols() if len(col.shape) > max_ndim] if nd_names: raise ValueError( f"column(s) with dimension > {max_ndim} " "cannot be written with this format, try using 'ecsv' " "(Enhanced CSV) format" )
Replace tabs with spaces in given string, preserving quoted substrings. Parameters ---------- line : str String containing tabs to be replaced with spaces. escapechar : str Character in ``line`` used to escape special characters. quotechar : str Character in ``line`` indicating the start/end of a substring. Returns ------- line : str A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
def _replace_tab_with_space(line, escapechar, quotechar): """Replace tabs with spaces in given string, preserving quoted substrings. Parameters ---------- line : str String containing tabs to be replaced with spaces. escapechar : str Character in ``line`` used to escape special characters. quotechar : str Character in ``line`` indicating the start/end of a substring. Returns ------- line : str A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings. """ newline = [] in_quote = False lastchar = "NONE" for char in line: if char == quotechar and lastchar != escapechar: in_quote = not in_quote if char == "\t" and not in_quote: char = " " lastchar = char newline.append(char) return "".join(newline)
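For example, tabs outside quoted substrings become spaces while tabs inside quotes are preserved:

line = 'a\tb\t"c\td"'
print(repr(_replace_tab_with_space(line, escapechar='\\', quotechar='"')))
# 'a b "c\td"'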
Return the appropriate line index, depending on ``line_or_func`` which can be either a function, a positive or negative int, or None.
def _get_line_index(line_or_func, lines): """Return the appropriate line index, depending on ``line_or_func`` which can be either a function, a positive or negative int, or None. """ if callable(line_or_func): return line_or_func(lines) elif line_or_func: if line_or_func >= 0: return line_or_func else: n_lines = sum(1 for line in lines) return n_lines + line_or_func else: return line_or_func
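A small illustration of the accepted forms (callable, non-negative int, negative int counted from the end of ``lines``, or None):

lines = ['# comment', 'a b', '1 2', '3 4']
print(_get_line_index(1, lines))                        # 1
print(_get_line_index(-1, lines))                       # 3 (last line)
print(_get_line_index(lambda ls: len(ls) - 2, lines))   # 2
print(_get_line_index(None, lines))                     # None (passed through)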
Return a tuple containing a function which converts a list into a numpy array and the type produced by the converter function. Parameters ---------- numpy_type : numpy data-type The numpy type required of an array returned by ``converter``. Must be a valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_ (e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python type covered by a numpy type (e.g., int, float, str, bool). Returns ------- converter : callable ``converter`` is a function which accepts a list and converts it to a numpy array of type ``numpy_type``. converter_type : type ``converter_type`` tracks the generic data type produced by the converter function. Raises ------ ValueError Raised by ``converter`` if the list elements could not be converted to the required type.
def convert_numpy(numpy_type): """Return a tuple containing a function which converts a list into a numpy array and the type produced by the converter function. Parameters ---------- numpy_type : numpy data-type The numpy type required of an array returned by ``converter``. Must be a valid `numpy type <https://numpy.org/doc/stable/user/basics.types.html>`_ (e.g., numpy.uint, numpy.int8, numpy.int64, numpy.float64) or a python type covered by a numpy type (e.g., int, float, str, bool). Returns ------- converter : callable ``converter`` is a function which accepts a list and converts it to a numpy array of type ``numpy_type``. converter_type : type ``converter_type`` tracks the generic data type produced by the converter function. Raises ------ ValueError Raised by ``converter`` if the list elements could not be converted to the required type. """ # Infer converter type from an instance of numpy_type. type_name = np.array([], dtype=numpy_type).dtype.name if "int" in type_name: converter_type = IntType elif "float" in type_name: converter_type = FloatType elif "bool" in type_name: converter_type = BoolType elif "str" in type_name: converter_type = StrType else: converter_type = AllType def bool_converter(vals): """ Convert values "False" and "True" to bools. Raise an exception for any other string values. """ if len(vals) == 0: return np.array([], dtype=bool) # Try a smaller subset first for a long array if len(vals) > 10000: svals = np.asarray(vals[:1000]) if not np.all( (svals == "False") | (svals == "True") | (svals == "0") | (svals == "1") ): raise ValueError('bool input strings must be False, True, 0, 1, or ""') vals = np.asarray(vals) trues = (vals == "True") | (vals == "1") falses = (vals == "False") | (vals == "0") if not np.all(trues | falses): raise ValueError('bool input strings must be only False, True, 0, 1, or ""') return trues def generic_converter(vals): return np.array(vals, numpy_type) converter = bool_converter if converter_type is BoolType else generic_converter return converter, converter_type
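A usage sketch (assumes numpy is importable; the expected outputs in the comments use numpy's default array printing):

import numpy as np

float_conv, float_type = convert_numpy(np.float64)
print(float_conv(['1', '2.5', '3']))        # [1.  2.5 3. ]

bool_conv, bool_type = convert_numpy(bool)
print(bool_conv(['True', '0', '1']))        # [ True False  True]
try:
    bool_conv(['maybe'])
except ValueError as err:
    print(err)                              # only the strings False/True/0/1/"" are accepted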
Ensure there are no duplicates in ``names``. This is done by iteratively adding ``_<N>`` to the name for increasing N until the name is unique.
def _deduplicate_names(names): """Ensure there are no duplicates in ``names``. This is done by iteratively adding ``_<N>`` to the name for increasing N until the name is unique. """ new_names = [] existing_names = set() for name in names: base_name = name + "_" i = 1 while name in existing_names: # Iterate until a unique name is found name = base_name + str(i) i += 1 new_names.append(name) existing_names.add(name) return new_names
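For instance:

print(_deduplicate_names(['a', 'b', 'a', 'a']))
# ['a', 'b', 'a_1', 'a_2']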
Apply names, include_names and exclude_names to a table or BaseHeader. For the latter this relies on BaseHeader implementing ``colnames``, ``rename_column``, and ``remove_columns``. Parameters ---------- table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader` Input table or BaseHeader subclass instance names : list List of names to override those in table (set to None to use existing names) include_names : list List of names to include in output exclude_names : list List of names to exclude from output (applied after ``include_names``)
def _apply_include_exclude_names(table, names, include_names, exclude_names): """ Apply names, include_names and exclude_names to a table or BaseHeader. For the latter this relies on BaseHeader implementing ``colnames``, ``rename_column``, and ``remove_columns``. Parameters ---------- table : `~astropy.table.Table`, `~astropy.io.ascii.BaseHeader` Input table or BaseHeader subclass instance names : list List of names to override those in table (set to None to use existing names) include_names : list List of names to include in output exclude_names : list List of names to exclude from output (applied after ``include_names``) """ def rename_columns(table, names): # Rename table column names to those passed by user # Temporarily rename with names that are not in `names` or `table.colnames`. # This ensures that rename succeeds regardless of existing names. xxxs = "x" * max(len(name) for name in list(names) + list(table.colnames)) for ii, colname in enumerate(table.colnames): table.rename_column(colname, xxxs + str(ii)) for ii, name in enumerate(names): table.rename_column(xxxs + str(ii), name) if names is not None: rename_columns(table, names) else: colnames_uniq = _deduplicate_names(table.colnames) if colnames_uniq != list(table.colnames): rename_columns(table, colnames_uniq) names_set = set(table.colnames) if include_names is not None: names_set.intersection_update(include_names) if exclude_names is not None: names_set.difference_update(exclude_names) if names_set != set(table.colnames): remove_names = set(table.colnames) - names_set table.remove_columns(remove_names)
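A short sketch with an astropy Table (``include_names`` is applied before ``exclude_names``):

from astropy.table import Table

t = Table({'a': [1], 'b': [2], 'c': [3]})
_apply_include_exclude_names(t, names=None, include_names=['a', 'c'], exclude_names=['c'])
print(t.colnames)   # ['a']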
Initialize a table reader allowing for common customizations. See ui.get_reader() for param docs. This routine is for internal (package) use only and is useful because it depends only on the "core" module.
def _get_reader(reader_cls, inputter_cls=None, outputter_cls=None, **kwargs): """Initialize a table reader allowing for common customizations. See ui.get_reader() for param docs. This routine is for internal (package) use only and is useful because it depends only on the "core" module. """ from .fastbasic import FastBasic if issubclass(reader_cls, FastBasic): # Fast readers handle args separately if inputter_cls is not None: kwargs["inputter_cls"] = inputter_cls return reader_cls(**kwargs) # If user explicitly passed a fast reader with enable='force' # (e.g. by passing non-default options), raise an error for slow readers if "fast_reader" in kwargs: if kwargs["fast_reader"]["enable"] == "force": raise ParameterError( "fast_reader required with " "{}, but this is not a fast C reader: {}".format( kwargs["fast_reader"], reader_cls ) ) else: del kwargs["fast_reader"] # Otherwise ignore fast_reader parameter reader_kwargs = {k: v for k, v in kwargs.items() if k not in extra_reader_pars} reader = reader_cls(**reader_kwargs) if inputter_cls is not None: reader.inputter = inputter_cls() if outputter_cls is not None: reader.outputter = outputter_cls() # Issue #855 suggested to set data_start to header_start + default_header_length # Thus, we need to retrieve this from the class definition before resetting these numbers. try: default_header_length = reader.data.start_line - reader.header.start_line except TypeError: # Start line could be None or an instancemethod default_header_length = None # csv.reader is hard-coded to recognise either '\r' or '\n' as end-of-line, # therefore DefaultSplitter cannot handle these as delimiters. if "delimiter" in kwargs: if kwargs["delimiter"] in ("\n", "\r", "\r\n"): reader.header.splitter = BaseSplitter() reader.data.splitter = BaseSplitter() reader.header.splitter.delimiter = kwargs["delimiter"] reader.data.splitter.delimiter = kwargs["delimiter"] if "comment" in kwargs: reader.header.comment = kwargs["comment"] reader.data.comment = kwargs["comment"] if "quotechar" in kwargs: reader.header.splitter.quotechar = kwargs["quotechar"] reader.data.splitter.quotechar = kwargs["quotechar"] if "data_start" in kwargs: reader.data.start_line = kwargs["data_start"] if "data_end" in kwargs: reader.data.end_line = kwargs["data_end"] if "header_start" in kwargs: if reader.header.start_line is not None: reader.header.start_line = kwargs["header_start"] # For FixedWidthTwoLine the data_start is calculated relative to the position line. # However, position_line is given as absolute number and not relative to header_start. # So, ignore this Reader here. 
if ( ("data_start" not in kwargs) and (default_header_length is not None) and reader._format_name not in ["fixed_width_two_line", "commented_header"] ): reader.data.start_line = ( reader.header.start_line + default_header_length ) elif kwargs["header_start"] is not None: # User trying to set a None header start to some value other than None raise ValueError("header_start cannot be modified for this Reader") if "converters" in kwargs: reader.outputter.converters = kwargs["converters"] if "data_splitter_cls" in kwargs: reader.data.splitter = kwargs["data_splitter_cls"]() if "header_splitter_cls" in kwargs: reader.header.splitter = kwargs["header_splitter_cls"]() if "names" in kwargs: reader.names = kwargs["names"] if None in reader.names: raise TypeError("Cannot have None for column name") if len(set(reader.names)) != len(reader.names): raise ValueError("Duplicate column names") if "include_names" in kwargs: reader.include_names = kwargs["include_names"] if "exclude_names" in kwargs: reader.exclude_names = kwargs["exclude_names"] # Strict names is normally set only within the guessing process to # indicate that column names cannot be numeric or have certain # characters at the beginning or end. It gets used in # BaseHeader.check_column_names(). if "strict_names" in kwargs: reader.strict_names = kwargs["strict_names"] if "fill_values" in kwargs: reader.data.fill_values = kwargs["fill_values"] if "fill_include_names" in kwargs: reader.data.fill_include_names = kwargs["fill_include_names"] if "fill_exclude_names" in kwargs: reader.data.fill_exclude_names = kwargs["fill_exclude_names"] if "encoding" in kwargs: reader.encoding = kwargs["encoding"] reader.inputter.encoding = kwargs["encoding"] return reader
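These keyword hooks are normally exercised through the public interface rather than by calling this helper directly; a hedged illustration via ascii.read (the table string and options are arbitrary):

from astropy.io import ascii

tbl = ascii.read('a|b\n1|2\n3|4', format='basic', delimiter='|', comment='#')
print(tbl.colnames)    # ['a', 'b']
print(len(tbl))        # 2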
Initialize a table writer allowing for common customizations. This routine is for internal (package) use only and is useful because it depends only on the "core" module.
def _get_writer(writer_cls, fast_writer, **kwargs): """Initialize a table writer allowing for common customizations. This routine is for internal (package) use only and is useful because it depends only on the "core" module. """ from .fastbasic import FastBasic # A value of None for fill_values imply getting the default string # representation of masked values (depending on the writer class), but the # machinery expects a list. The easiest here is to just pop the value off, # i.e. fill_values=None is the same as not providing it at all. if "fill_values" in kwargs and kwargs["fill_values"] is None: del kwargs["fill_values"] if issubclass(writer_cls, FastBasic): # Fast writers handle args separately return writer_cls(**kwargs) elif fast_writer and f"fast_{writer_cls._format_name}" in FAST_CLASSES: # Switch to fast writer kwargs["fast_writer"] = fast_writer return FAST_CLASSES[f"fast_{writer_cls._format_name}"](**kwargs) writer_kwargs = {k: v for k, v in kwargs.items() if k not in extra_writer_pars} writer = writer_cls(**writer_kwargs) if "delimiter" in kwargs: writer.header.splitter.delimiter = kwargs["delimiter"] writer.data.splitter.delimiter = kwargs["delimiter"] if "comment" in kwargs: writer.header.write_comment = kwargs["comment"] writer.data.write_comment = kwargs["comment"] if "quotechar" in kwargs: writer.header.splitter.quotechar = kwargs["quotechar"] writer.data.splitter.quotechar = kwargs["quotechar"] if "formats" in kwargs: writer.data.formats = kwargs["formats"] if "strip_whitespace" in kwargs: if kwargs["strip_whitespace"]: # Restore the default SplitterClass process_val method which strips # whitespace. This may have been changed in the Writer # initialization (e.g. Rdb and Tab) writer.data.splitter.process_val = operator.methodcaller("strip", " \t") else: writer.data.splitter.process_val = None if "names" in kwargs: writer.header.names = kwargs["names"] if "include_names" in kwargs: writer.include_names = kwargs["include_names"] if "exclude_names" in kwargs: writer.exclude_names = kwargs["exclude_names"] if "fill_values" in kwargs: # Prepend user-specified values to the class default. with suppress(TypeError, IndexError): # Test if it looks like (match, replace_string, optional_colname), # in which case make it a list kwargs["fill_values"][1] + "" kwargs["fill_values"] = [kwargs["fill_values"]] writer.data.fill_values = kwargs["fill_values"] + writer.data.fill_values if "fill_include_names" in kwargs: writer.data.fill_include_names = kwargs["fill_include_names"] if "fill_exclude_names" in kwargs: writer.data.fill_exclude_names = kwargs["fill_exclude_names"] return writer
Checks whether the given BeautifulSoup tag is the table the user intends to process.
def identify_table(soup, htmldict, numtable): """ Checks whether the given BeautifulSoup tag is the table the user intends to process. """ if soup is None or soup.name != "table": return False # Tag is not a <table> elif "table_id" not in htmldict: return numtable == 1 table_id = htmldict["table_id"] if isinstance(table_id, str): return "id" in soup.attrs and soup["id"] == table_id elif isinstance(table_id, int): return table_id == numtable # Return False if an invalid parameter is given return False
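A small sketch of the matching logic, assuming BeautifulSoup (bs4) is available; the HTML snippet is invented:

from bs4 import BeautifulSoup

html = '<table id="obs"><tr><td>1</td></tr></table>'
soup = BeautifulSoup(html, "html.parser").table

identify_table(soup, {"table_id": "obs"}, 1)   # True: matches by id
identify_table(soup, {"table_id": 2}, 1)       # False: this is table number 1
identify_table(soup, {}, 1)                    # True: no table_id, first table wins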
Add a value from a dictionary to a list. Parameters ---------- adict : dictionary key : hashable alist : list List where value should be added
def add_dictval_to_list(adict, key, alist): """ Add a value from a dictionary to a list. Parameters ---------- adict : dictionary key : hashable alist : list List where value should be added """ if key in adict: if isinstance(adict[key], str): alist.append(adict[key]) else: alist.extend(adict[key])
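For example, this is how both scalar and list-valued metadata end up appended to a flat list of comment lines (values invented):

comments = []
add_dictval_to_list({"comments": ["first", "second"]}, "comments", comments)
add_dictval_to_list({"comments": "third"}, "comments", comments)
add_dictval_to_list({}, "comments", comments)   # missing key: no-op
# comments == ['first', 'second', 'third']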
Find the first line which matches a pattern. Parameters ---------- lines : list List of strings latex : str Search pattern Returns ------- line_num : int, None Line number. Returns None, if no match was found
def find_latex_line(lines, latex): """ Find the first line which matches a pattern. Parameters ---------- lines : list List of strings latex : str Search pattern Returns ------- line_num : int, None Line number. Returns None, if no match was found """ re_string = re.compile(latex.replace("\\", "\\\\")) for i, line in enumerate(lines): if re_string.match(line): return i return None
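A short usage sketch (lines invented); note that backslashes in the pattern are escaped internally, so the LaTeX markup can be passed as-is:

lines = [r"\documentclass{article}",
         r"\begin{tabular}{cc}",
         r"1 & 2 \\"]
find_latex_line(lines, r"\begin{tabular}")   # -> 1
find_latex_line(lines, r"\end{tabular}")     # -> None (no match)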
Find the first index position for which the callable pred returns True.
def first_true_index(iterable, pred=None, default=None):
    """Find the first index position for which the callable pred returns True."""
    if pred is None:
        func = operator.itemgetter(1)
    else:
        func = lambda x: pred(x[1])
    # either the first matching (index, item) pair or None when nothing matches;
    # compare against None so that a truthy ``default`` is returned unchanged
    ii = next(filter(func, enumerate(iterable)), None)
    return default if ii is None else ii[0]
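A couple of illustrative calls (values invented):

first_true_index([0, 0, 3, 5], pred=lambda x: x > 2)   # -> 2
first_true_index([0, 0], pred=lambda x: x > 2)         # -> None (the default)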
Find the first index position for which the callable pred returns False.
def first_false_index(iterable, pred=None, default=None):
    """Find the first index position for which the callable pred returns False."""
    if pred is None:
        func = operator.not_
    else:
        func = lambda x: not pred(x)
    return first_true_index(iterable, func, default)
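And the complementary helper in action (values invented):

first_false_index("xy z", pred=str.isalpha)   # -> 2 (the space)
first_false_index([1, 2, 3], pred=bool)       # -> None: every item is truthy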
Sorts any number of lists according to: optionally given item sorting key function(s) and/or a global sorting key function. Parameters ---------- One or more lists Keywords -------- globalkey : None revert to sorting by key function globalkey : callable Sort by evaluated value for all items in the lists (call signature of this function needs to be such that it accepts an argument tuple of items from each list. eg.: ``globalkey = lambda *l: sum(l)`` will order all the lists by the sum of the items from each list if key: None sorting done by value of first input list (in this case the objects in the first iterable need the comparison methods __lt__ etc...) if key: callable sorting done by value of key(item) for items in first iterable if key: tuple sorting done by value of (key(item_0), ..., key(item_n)) for items in the first n iterables (where n is the length of the key tuple) i.e. the first callable is the primary sorting criterion, and the rest act as tie-breakers. Returns ------- Sorted lists Examples -------- Capture sorting indices:: l = list('CharacterS') In [1]: sortmore( l, range(len(l)) ) Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'], [0, 9, 2, 4, 5, 7, 1, 3, 8, 6]) In [2]: sortmore( l, range(len(l)), key=str.lower ) Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'], [2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
def sortmore(*args, **kw):
    """
    Sorts any number of lists according to:
    optionally given item sorting key function(s) and/or a global sorting key
    function.

    Parameters
    ----------
    One or more lists

    Keywords
    --------
    globalkey : None
        revert to sorting by key function
    globalkey : callable
        Sort by evaluated value for all items in the lists
        (call signature of this function needs to be such that it accepts an
        argument tuple of items from each list.
        eg.: ``globalkey = lambda *l: sum(l)`` will order all the lists by the
        sum of the items from each list
    if key: None
        sorting done by value of first input list
        (in this case the objects in the first iterable need the comparison
        methods __lt__ etc...)
    if key: callable
        sorting done by value of key(item) for items in first iterable
    if key: tuple
        sorting done by value of (key(item_0), ..., key(item_n)) for items in
        the first n iterables (where n is the length of the key tuple)
        i.e. the first callable is the primary sorting criterion, and the
        rest act as tie-breakers.

    Returns
    -------
    Sorted lists

    Examples
    --------
    Capture sorting indices::

        l = list('CharacterS')
        In [1]: sortmore( l, range(len(l)) )
        Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'], [0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
        In [2]: sortmore( l, range(len(l)), key=str.lower )
        Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'], [2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
    """
    first = list(args[0])
    if not len(first):
        return args

    globalkey = kw.get("globalkey")
    key = kw.get("key")
    if key is None:
        if globalkey:
            # if global sort function given and no local (secondary) key given, ==> no tiebreakers
            key = lambda x: 0
        else:
            # if no global sort and no local sort keys given, sort by item values
            key = lambda x: x
    if globalkey is None:
        globalkey = lambda *x: 0

    if not isinstance(globalkey, collections.abc.Callable):
        raise ValueError("globalkey needs to be callable")

    if isinstance(key, collections.abc.Callable):
        k = lambda x: (globalkey(*x), key(x[0]))
    elif isinstance(key, tuple):
        # Materialize the per-list key functions in a tuple so they can be
        # reused for every row; a generator expression here would be exhausted
        # after the first call to ``k`` and silently drop the tie-breakers.
        key = tuple((k if k else (lambda x: 0)) for k in key)
        k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
    else:
        raise KeyError(
            "kw arg 'key' should be None, callable, or a sequence of callables, "
            f"not {type(key)}"
        )

    res = sorted(zip(*args), key=k)
    if "order" in kw:
        if kw["order"].startswith(("descend", "reverse")):
            res = reversed(res)

    return tuple(map(list, zip(*res)))
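To complement the docstring examples, a sketch of the tuple-of-keys form, where the second callable acts as a tie-breaker applied to the second list (sample data invented):

names = ["b", "a", "a"]
vals = [2, 3, 1]
sortmore(names, vals, key=(str.lower, lambda v: v))
# -> (['a', 'a', 'b'], [1, 3, 2]): ties in `names` are broken by `vals`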
Extends the itertools.groupby functionality to an arbitrary number of iterators.
def groupmore(func=None, *its):
    """Extends the itertools.groupby functionality to an arbitrary number of iterators."""
    if not func:
        func = lambda x: x
    its = sortmore(*its, key=func)
    nfunc = lambda x: func(x[0])
    zipper = itertools.groupby(zip(*its), nfunc)
    unzipper = ((key, zip(*groups)) for key, groups in zipper)
    return unzipper
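A small sketch grouping two parallel lists by the first letter of the words (data invented); each group yields the grouped items from every input list:

words = ["apple", "bat", "ant", "bee"]
counts = [1, 2, 3, 4]

for letter, groups in groupmore(lambda w: w[0], words, counts):
    grouped_words, grouped_counts = groups
    print(letter, list(grouped_words), list(grouped_counts))
# a ['apple', 'ant'] [1, 3]
# b ['bat', 'bee'] [2, 4]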
Interpret a QDP file line. Parameters ---------- line : str a single line of the file Returns ------- type : str Line type: "comment", "command", or "data" Examples -------- >>> _line_type("READ SERR 3") 'command' >>> _line_type(" \n !some gibberish") 'comment' >>> _line_type(" ") 'comment' >>> _line_type(" 21345.45") 'data,1' >>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan") 'data,6' >>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',') 'data,6' >>> _line_type(" 21345.45 ! a comment to disturb") 'data,1' >>> _line_type("NO NO NO NO NO") 'new' >>> _line_type("NO,NO,NO,NO,NO", delimiter=',') 'new' >>> _line_type("N O N NOON OON O") Traceback (most recent call last): ... ValueError: Unrecognized QDP line... >>> _line_type(" some non-comment gibberish") Traceback (most recent call last): ... ValueError: Unrecognized QDP line...
def _line_type(line, delimiter=None): """Interpret a QDP file line. Parameters ---------- line : str a single line of the file Returns ------- type : str Line type: "comment", "command", or "data" Examples -------- >>> _line_type("READ SERR 3") 'command' >>> _line_type(" \\n !some gibberish") 'comment' >>> _line_type(" ") 'comment' >>> _line_type(" 21345.45") 'data,1' >>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan") 'data,6' >>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',') 'data,6' >>> _line_type(" 21345.45 ! a comment to disturb") 'data,1' >>> _line_type("NO NO NO NO NO") 'new' >>> _line_type("NO,NO,NO,NO,NO", delimiter=',') 'new' >>> _line_type("N O N NOON OON O") Traceback (most recent call last): ... ValueError: Unrecognized QDP line... >>> _line_type(" some non-comment gibberish") Traceback (most recent call last): ... ValueError: Unrecognized QDP line... """ _decimal_re = r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?" _command_re = r"READ [TS]ERR(\s+[0-9]+)+" sep = delimiter if delimiter is None: sep = r"\s+" _new_re = rf"NO({sep}NO)+" _data_re = rf"({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)" _type_re = rf"^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$" _line_type_re = re.compile(_type_re, re.IGNORECASE) line = line.strip() if not line: return "comment" match = _line_type_re.match(line) if match is None: raise ValueError(f"Unrecognized QDP line: {line}") for type_, val in match.groupdict().items(): if val is None: continue if type_ == "data": return f"data,{len(val.split(sep=delimiter))}" else: return type_
Read through the list of QDP file lines and label each line by type.

Parameters
----------
lines : list
    List containing one file line in each entry

Returns
-------
contents : list
    List containing the type for each line (see `_line_type`)
ncol : int
    The number of columns in the data lines. Must be the same throughout
    the file

Examples
--------
>>> line0 = "! A comment"
>>> line1 = "543 12 456.0"
>>> lines = [line0, line1]
>>> types, ncol = _get_type_from_list_of_lines(lines)
>>> types[0]
'comment'
>>> types[1]
'data,3'
>>> ncol
3
>>> lines.append("23")
>>> _get_type_from_list_of_lines(lines)
Traceback (most recent call last):
    ...
ValueError: Inconsistent number of columns
def _get_type_from_list_of_lines(lines, delimiter=None):
    """Read through the list of QDP file lines and label each line by type.

    Parameters
    ----------
    lines : list
        List containing one file line in each entry

    Returns
    -------
    contents : list
        List containing the type for each line (see `_line_type`)
    ncol : int
        The number of columns in the data lines. Must be the same throughout
        the file

    Examples
    --------
    >>> line0 = "! A comment"
    >>> line1 = "543 12 456.0"
    >>> lines = [line0, line1]
    >>> types, ncol = _get_type_from_list_of_lines(lines)
    >>> types[0]
    'comment'
    >>> types[1]
    'data,3'
    >>> ncol
    3
    >>> lines.append("23")
    >>> _get_type_from_list_of_lines(lines)
    Traceback (most recent call last):
        ...
    ValueError: Inconsistent number of columns
    """
    types = [_line_type(line, delimiter=delimiter) for line in lines]
    current_ncol = None
    for type_ in types:
        if type_.startswith("data,"):
            ncol = int(type_[5:])
            if current_ncol is None:
                current_ncol = ncol
            elif ncol != current_ncol:
                raise ValueError("Inconsistent number of columns")

    return types, current_ncol
Give list of column names from the READ SERR and TERR commands. Parameters ---------- err_specs : dict ``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}`` Error specifications for symmetric and two-sided errors ncols : int Number of data columns Other Parameters ---------------- names : list of str Name of data columns (defaults to ['col1', 'col2', ...]), _not_ including error columns. Returns ------- colnames : list List containing the column names. Error columns will have the name of the main column plus ``_err`` for symmetric errors, and ``_perr`` and ``_nerr`` for positive and negative errors respectively Examples -------- >>> col_in = ['MJD', 'Rate'] >>> cols = _interpret_err_lines(None, 2, names=col_in) >>> cols[0] 'MJD' >>> err_specs = {'terr': [1], 'serr': [2]} >>> ncols = 5 >>> cols = _interpret_err_lines(err_specs, ncols, names=col_in) >>> cols[0] 'MJD' >>> cols[2] 'MJD_nerr' >>> cols[4] 'Rate_err' >>> _interpret_err_lines(err_specs, 6, names=col_in) Traceback (most recent call last): ... ValueError: Inconsistent number of input colnames
def _interpret_err_lines(err_specs, ncols, names=None): """Give list of column names from the READ SERR and TERR commands. Parameters ---------- err_specs : dict ``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}`` Error specifications for symmetric and two-sided errors ncols : int Number of data columns Other Parameters ---------------- names : list of str Name of data columns (defaults to ['col1', 'col2', ...]), _not_ including error columns. Returns ------- colnames : list List containing the column names. Error columns will have the name of the main column plus ``_err`` for symmetric errors, and ``_perr`` and ``_nerr`` for positive and negative errors respectively Examples -------- >>> col_in = ['MJD', 'Rate'] >>> cols = _interpret_err_lines(None, 2, names=col_in) >>> cols[0] 'MJD' >>> err_specs = {'terr': [1], 'serr': [2]} >>> ncols = 5 >>> cols = _interpret_err_lines(err_specs, ncols, names=col_in) >>> cols[0] 'MJD' >>> cols[2] 'MJD_nerr' >>> cols[4] 'Rate_err' >>> _interpret_err_lines(err_specs, 6, names=col_in) Traceback (most recent call last): ... ValueError: Inconsistent number of input colnames """ colnames = ["" for i in range(ncols)] if err_specs is None: serr_cols = terr_cols = [] else: # I don't want to empty the original one when using `pop` below err_specs = copy.deepcopy(err_specs) serr_cols = err_specs.pop("serr", []) terr_cols = err_specs.pop("terr", []) if names is not None: all_error_cols = len(serr_cols) + len(terr_cols) * 2 if all_error_cols + len(names) != ncols: raise ValueError("Inconsistent number of input colnames") shift = 0 for i in range(ncols): col_num = i + 1 - shift if colnames[i] != "": continue colname_root = f"col{col_num}" if names is not None: colname_root = names[col_num - 1] colnames[i] = f"{colname_root}" if col_num in serr_cols: colnames[i + 1] = f"{colname_root}_err" shift += 1 continue if col_num in terr_cols: colnames[i + 1] = f"{colname_root}_perr" colnames[i + 2] = f"{colname_root}_nerr" shift += 2 continue assert not np.any([c == "" for c in colnames]) return colnames
Get all tables from a QDP file. Parameters ---------- qdp_file : str Input QDP file name Other Parameters ---------------- input_colnames : list of str Name of data columns (defaults to ['col1', 'col2', ...]), _not_ including error columns. delimiter : str Delimiter for the values in the table. Returns ------- list of `~astropy.table.Table` List containing all the tables present inside the QDP file
def _get_tables_from_qdp_file(qdp_file, input_colnames=None, delimiter=None): """Get all tables from a QDP file. Parameters ---------- qdp_file : str Input QDP file name Other Parameters ---------------- input_colnames : list of str Name of data columns (defaults to ['col1', 'col2', ...]), _not_ including error columns. delimiter : str Delimiter for the values in the table. Returns ------- list of `~astropy.table.Table` List containing all the tables present inside the QDP file """ lines = _get_lines_from_file(qdp_file) contents, ncol = _get_type_from_list_of_lines(lines, delimiter=delimiter) table_list = [] err_specs = {} colnames = None comment_text = "" initial_comments = "" command_lines = "" current_rows = None for line, datatype in zip(lines, contents): line = line.strip().lstrip("!") # Is this a comment? if datatype == "comment": comment_text += line + "\n" continue if datatype == "command": # The first time I find commands, I save whatever comments into # The initial comments. if command_lines == "": initial_comments = comment_text comment_text = "" if err_specs != {}: warnings.warn( "This file contains multiple command blocks. Please verify", AstropyUserWarning, ) command_lines += line + "\n" continue if datatype.startswith("data"): # The first time I find data, I define err_specs if err_specs == {} and command_lines != "": for cline in command_lines.strip().split("\n"): command = cline.strip().split() # This should never happen, but just in case. if len(command) < 3: continue err_specs[command[1].lower()] = [int(c) for c in command[2:]] if colnames is None: colnames = _interpret_err_lines(err_specs, ncol, names=input_colnames) if current_rows is None: current_rows = [] values = [] for v in line.split(delimiter): if v.upper() == "NO": values.append(np.ma.masked) else: # Understand if number is int or float try: values.append(int(v)) except ValueError: values.append(float(v)) current_rows.append(values) continue if datatype == "new": # Save table to table_list and reset if current_rows is not None: new_table = Table(names=colnames, rows=current_rows) new_table.meta["initial_comments"] = initial_comments.strip().split( "\n" ) new_table.meta["comments"] = comment_text.strip().split("\n") # Reset comments comment_text = "" table_list.append(new_table) current_rows = None continue # At the very end, if there is still a table being written, let's save # it to the table_list if current_rows is not None: new_table = Table(names=colnames, rows=current_rows) new_table.meta["initial_comments"] = initial_comments.strip().split("\n") new_table.meta["comments"] = comment_text.strip().split("\n") table_list.append(new_table) return table_list
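As a sketch, the helper can be exercised directly on in-memory content; this assumes, as in the current implementation, that the module-level `_get_lines_from_file` helper (not shown here) also accepts an already-split list of lines. The data are invented:

qdp_lines = [
    "READ SERR 2",
    "! A table with a symmetric error on the rate column",
    "53000.5 2.5 0.1",
    "53001.5 3.1 0.2",
    "NO NO NO",
    "53002.5 2.9 0.1",
]
tables = _get_tables_from_qdp_file(qdp_lines, input_colnames=["MJD", "Rate"])
len(tables)           # -> 2 (the "NO NO NO" line starts a new table)
tables[0].colnames    # -> ['MJD', 'Rate', 'Rate_err']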
Get which column names are error columns. Examples -------- >>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr'] >>> serr, terr = _understand_err_col(colnames) >>> np.allclose(serr, [1]) True >>> np.allclose(terr, [2]) True >>> serr, terr = _understand_err_col(['a', 'a_nerr']) Traceback (most recent call last): ... ValueError: Missing positive error... >>> serr, terr = _understand_err_col(['a', 'a_perr']) Traceback (most recent call last): ... ValueError: Missing negative error...
def _understand_err_col(colnames): """Get which column names are error columns. Examples -------- >>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr'] >>> serr, terr = _understand_err_col(colnames) >>> np.allclose(serr, [1]) True >>> np.allclose(terr, [2]) True >>> serr, terr = _understand_err_col(['a', 'a_nerr']) Traceback (most recent call last): ... ValueError: Missing positive error... >>> serr, terr = _understand_err_col(['a', 'a_perr']) Traceback (most recent call last): ... ValueError: Missing negative error... """ shift = 0 serr = [] terr = [] for i, col in enumerate(colnames): if col.endswith("_err"): # The previous column, but they're numbered from 1! # Plus, take shift into account serr.append(i - shift) shift += 1 elif col.endswith("_perr"): terr.append(i - shift) if len(colnames) == i + 1 or not colnames[i + 1].endswith("_nerr"): raise ValueError("Missing negative error") shift += 2 elif col.endswith("_nerr") and not colnames[i - 1].endswith("_perr"): raise ValueError("Missing positive error") return serr, terr
Read a table from a QDP file.

Parameters
----------
qdp_file : str
    Input QDP file name

Other Parameters
----------------
names : list of str
    Name of data columns (defaults to ['col1', 'col2', ...]), _not_
    including error columns.
table_id : int, default 0
    Number of the table to be read from the QDP file. This is useful when
    multiple tables are present in the file. By default, the first table is
    read.
delimiter : str
    Any delimiter accepted by the `sep` argument of str.split()

Returns
-------
table : `~astropy.table.Table`
    The table requested through ``table_id`` (the first one by default)
def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
    """Read a table from a QDP file.

    Parameters
    ----------
    qdp_file : str
        Input QDP file name

    Other Parameters
    ----------------
    names : list of str
        Name of data columns (defaults to ['col1', 'col2', ...]), _not_
        including error columns.
    table_id : int, default 0
        Number of the table to be read from the QDP file. This is useful when
        multiple tables are present in the file. By default, the first table
        is read.
    delimiter : str
        Any delimiter accepted by the `sep` argument of str.split()

    Returns
    -------
    table : `~astropy.table.Table`
        The table requested through ``table_id`` (the first one by default)
    """
    if table_id is None:
        warnings.warn(
            "table_id not specified. Reading the first available table",
            AstropyUserWarning,
        )
        table_id = 0

    tables = _get_tables_from_qdp_file(
        qdp_file, input_colnames=names, delimiter=delimiter
    )

    return tables[table_id]
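In practice this is reached through the registered ``ascii.qdp`` format; a minimal sketch with an invented file name and column names:

from astropy.table import Table

# Reads the second table in the file and names the data columns explicitly
t = Table.read("spectra.qdp", format="ascii.qdp", table_id=1,
               names=["Energy", "Flux"])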
Write a table to a QDP file. Parameters ---------- table : :class:`~astropy.table.Table` Input table to be written filename : str Output QDP file name Other Parameters ---------------- err_specs : dict Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying which columns have symmetric and two-sided errors (see QDP format specification)
def _write_table_qdp(table, filename=None, err_specs=None): """Write a table to a QDP file. Parameters ---------- table : :class:`~astropy.table.Table` Input table to be written filename : str Output QDP file name Other Parameters ---------------- err_specs : dict Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying which columns have symmetric and two-sided errors (see QDP format specification) """ import io fobj = io.StringIO() if "initial_comments" in table.meta and table.meta["initial_comments"] != []: for line in table.meta["initial_comments"]: line = line.strip() if not line.startswith("!"): line = "!" + line print(line, file=fobj) if err_specs is None: serr_cols, terr_cols = _understand_err_col(table.colnames) else: serr_cols = err_specs.pop("serr", []) terr_cols = err_specs.pop("terr", []) if serr_cols != []: col_string = " ".join([str(val) for val in serr_cols]) print(f"READ SERR {col_string}", file=fobj) if terr_cols != []: col_string = " ".join([str(val) for val in terr_cols]) print(f"READ TERR {col_string}", file=fobj) if "comments" in table.meta and table.meta["comments"] != []: for line in table.meta["comments"]: line = line.strip() if not line.startswith("!"): line = "!" + line print(line, file=fobj) colnames = table.colnames print("!" + " ".join(colnames), file=fobj) for row in table: values = [] for val in row: if not np.ma.is_masked(val): rep = str(val) else: rep = "NO" values.append(rep) print(" ".join(values), file=fobj) full_string = fobj.getvalue() fobj.close() if filename is not None: with open(filename, "w") as fobj: print(full_string, file=fobj) return full_string.split("\n")
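A minimal sketch calling the helper directly (column names and values are invented); masked entries come out as ``NO``:

from astropy.table import Table, MaskedColumn

t = Table()
t["MJD"] = [53000.5, 53001.5]
t["Rate"] = MaskedColumn([2.5, 3.1], mask=[False, True])
t["Rate_err"] = [0.1, 0.2]

lines = _write_table_qdp(t, err_specs={"serr": [2]})
# Expected content, roughly:
#   READ SERR 2
#   !MJD Rate Rate_err
#   53000.5 2.5 0.1
#   53001.5 NO 0.2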
Determine if ``table`` probably contains HTML content. See PR #3693 and issue #3691 for context.
def _probably_html(table, maxchars=100000): """ Determine if ``table`` probably contains HTML content. See PR #3693 and issue #3691 for context. """ if not isinstance(table, str): try: # If table is an iterable (list of strings) then take the first # maxchars of these. Make sure this is something with random # access to exclude a file-like object table[0] table[:1] size = 0 for i, line in enumerate(table): size += len(line) if size > maxchars: table = table[: i + 1] break table = os.linesep.join(table) except Exception: pass if isinstance(table, str): # Look for signs of an HTML table in the first maxchars characters table = table[:maxchars] # URL ending in .htm or .html if re.match( r"( http[s]? | ftp | file ) :// .+ \.htm[l]?$", table, re.IGNORECASE | re.VERBOSE, ): return True # Filename ending in .htm or .html which exists if re.search(r"\.htm[l]?$", table[-5:], re.IGNORECASE) and os.path.exists( os.path.expanduser(table) ): return True # Table starts with HTML document type declaration if re.match(r"\s* <! \s* DOCTYPE \s* HTML", table, re.IGNORECASE | re.VERBOSE): return True # Look for <TABLE .. >, <TR .. >, <TD .. > tag openers. if all( re.search(rf"< \s* {element} [^>]* >", table, re.IGNORECASE | re.VERBOSE) for element in ("table", "tr", "td") ): return True return False
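A few illustrative calls (strings invented):

_probably_html("<table><tr><td>1</td></tr></table>")   # True: has table/tr/td tags
_probably_html("https://example.com/catalog.html")     # True: URL ending in .html
_probably_html("a b c\n1 2 3")                          # False: plain ASCII table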
Set the default value of the ``guess`` parameter for read(). Parameters ---------- guess : bool New default ``guess`` value (e.g., True or False)
def set_guess(guess): """ Set the default value of the ``guess`` parameter for read(). Parameters ---------- guess : bool New default ``guess`` value (e.g., True or False) """ global _GUESS _GUESS = guess
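For example, to turn format guessing off globally for subsequent reads:

from astropy.io import ascii

ascii.set_guess(False)
# subsequent ascii.read() calls will no longer try to guess the format
# unless guess=True is passed explicitly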
Initialize a table reader allowing for common customizations. Most of the default behavior for various parameters is determined by the Reader class specified by ``reader_cls``. Parameters ---------- reader_cls : `~astropy.io.ascii.BaseReader` Reader class. Default is :class:`Basic`. inputter_cls : `~astropy.io.ascii.BaseInputter` Inputter class outputter_cls : `~astropy.io.ascii.BaseOutputter` Outputter class delimiter : str Column delimiter string comment : str Regular expression defining a comment line in table quotechar : str One-character string to quote fields containing special characters header_start : int Line index for the header line not counting comment or blank lines. A line with only whitespace is considered blank. data_start : int Line index for the start of data not counting comment or blank lines. A line with only whitespace is considered blank. data_end : int Line index for the end of data not counting comment or blank lines. This value can be negative to count from the end. converters : dict Dict of converters. data_splitter_cls : `~astropy.io.ascii.BaseSplitter` Splitter class to split data columns. header_splitter_cls : `~astropy.io.ascii.BaseSplitter` Splitter class to split header columns. names : list List of names corresponding to each data column. include_names : list, optional List of names to include in output. exclude_names : list List of names to exclude from output (applied after ``include_names``). fill_values : tuple, list of tuple Specification of fill values for bad or missing table values. fill_include_names : list List of names to include in fill_values. fill_exclude_names : list List of names to exclude from fill_values (applied after ``fill_include_names``). Returns ------- reader : `~astropy.io.ascii.BaseReader` subclass ASCII format reader instance
def get_reader(reader_cls=None, inputter_cls=None, outputter_cls=None, **kwargs): """ Initialize a table reader allowing for common customizations. Most of the default behavior for various parameters is determined by the Reader class specified by ``reader_cls``. Parameters ---------- reader_cls : `~astropy.io.ascii.BaseReader` Reader class. Default is :class:`Basic`. inputter_cls : `~astropy.io.ascii.BaseInputter` Inputter class outputter_cls : `~astropy.io.ascii.BaseOutputter` Outputter class delimiter : str Column delimiter string comment : str Regular expression defining a comment line in table quotechar : str One-character string to quote fields containing special characters header_start : int Line index for the header line not counting comment or blank lines. A line with only whitespace is considered blank. data_start : int Line index for the start of data not counting comment or blank lines. A line with only whitespace is considered blank. data_end : int Line index for the end of data not counting comment or blank lines. This value can be negative to count from the end. converters : dict Dict of converters. data_splitter_cls : `~astropy.io.ascii.BaseSplitter` Splitter class to split data columns. header_splitter_cls : `~astropy.io.ascii.BaseSplitter` Splitter class to split header columns. names : list List of names corresponding to each data column. include_names : list, optional List of names to include in output. exclude_names : list List of names to exclude from output (applied after ``include_names``). fill_values : tuple, list of tuple Specification of fill values for bad or missing table values. fill_include_names : list List of names to include in fill_values. fill_exclude_names : list List of names to exclude from fill_values (applied after ``fill_include_names``). Returns ------- reader : `~astropy.io.ascii.BaseReader` subclass ASCII format reader instance """ # This function is a light wrapper around core._get_reader to provide a # public interface with a default Reader. if reader_cls is None: # Default reader is Basic unless fast reader is forced fast_reader = _get_fast_reader_dict(kwargs) if fast_reader["enable"] == "force": reader_cls = fastbasic.FastBasic else: reader_cls = basic.Basic reader = core._get_reader( reader_cls, inputter_cls=inputter_cls, outputter_cls=outputter_cls, **kwargs ) return reader
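A small usage sketch of this entry point, with the table supplied inline as a string:

from astropy.io import ascii

reader = ascii.get_reader(reader_cls=ascii.Basic, delimiter="|")
tbl = reader.read("a|b\n1|2\n3|4")
print(tbl.colnames)   # expected: ['a', 'b']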
Convert 'fast_reader' key in kwargs into a dict if not already and make sure 'enable' key is available.
def _get_fast_reader_dict(kwargs): """Convert 'fast_reader' key in kwargs into a dict if not already and make sure 'enable' key is available. """ fast_reader = copy.deepcopy(kwargs.get("fast_reader", True)) if isinstance(fast_reader, dict): fast_reader.setdefault("enable", "force") else: fast_reader = {"enable": fast_reader} return fast_reader
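Concretely, the normalization behaves like this:

_get_fast_reader_dict({})                       # {'enable': True}
_get_fast_reader_dict({"fast_reader": False})   # {'enable': False}
_get_fast_reader_dict({"fast_reader": {"use_fast_converter": True}})
# -> {'use_fast_converter': True, 'enable': 'force'}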
Validate types of keyword arg inputs to read() or write().
def _validate_read_write_kwargs(read_write, **kwargs):
    """Validate types of keyword arg inputs to read() or write()."""

    def is_ducktype(val, cls):
        """Check if ``val`` is an instance of ``cls`` or "seems" like one:
        ``cls(val) == val`` does not raise an exception and is `True`. In
        this way you can pass in ``np.int16(2)`` and have that count as `int`.

        This has a special-case of ``cls`` being 'list-like', meaning it is
        an iterable but not a string.
        """
        if cls == "list-like":
            ok = not isinstance(val, str) and isinstance(val, collections.abc.Iterable)
        else:
            ok = isinstance(val, cls)
            if not ok:
                # See if ``val`` walks and quacks like a ``cls``.
                try:
                    new_val = cls(val)
                    assert new_val == val
                except Exception:
                    ok = False
                else:
                    ok = True
        return ok

    kwarg_types = READ_KWARG_TYPES if read_write == "read" else WRITE_KWARG_TYPES

    for arg, val in kwargs.items():
        # Kwarg type checking is opt-in, so kwargs not in the list are considered OK.
        # This reflects that some readers allow additional arguments that may not
        # be well-specified, e.g. ``__init__(self, **kwargs)`` is an option.
        if arg not in kwarg_types or val is None:
            continue

        # Single type or tuple of types for this arg (like isinstance())
        types = kwarg_types[arg]
        err_msg = (
            f"{read_write}() argument '{arg}' must be a "
            f"{types} object, got {type(val)} instead"
        )

        # Force `types` to be a tuple for the any() check below
        if not isinstance(types, tuple):
            types = (types,)

        if not any(is_ducktype(val, cls) for cls in types):
            raise TypeError(err_msg)
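A sketch of the duck-typing check, assuming (as in the accompanying lookup tables, which are not shown here) that 'delimiter' is registered as ``str`` and 'header_start' as ``int`` for read():

import numpy as np

# np.int16 "quacks like" int, so this passes silently
_validate_read_write_kwargs("read", delimiter="|", header_start=np.int16(1))

# A non-string delimiter raises, e.g.:
#   TypeError: read() argument 'delimiter' must be a <class 'str'> object, ...
_validate_read_write_kwargs("read", delimiter=1)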