def _guess(table, read_kwargs, format, fast_reader):
    """
    Try to read the table using various sets of keyword args.  Start with the
    standard guess list and filter to make it unique and consistent with
    user-supplied read keyword args.  Finally, if none of those work then try
    the original user-supplied keyword args.

    Parameters
    ----------
    table : str, file-like, list
        Input table as a file name, file-like object, list of strings, or
        single newline-separated string.
    read_kwargs : dict
        Keyword arguments from user to be supplied to reader
    format : str
        Table format
    fast_reader : dict
        Options for the C engine fast reader.  See read() function for details.

    Returns
    -------
    dat : `~astropy.table.Table` or None
        Output table or None if only one guess format was available
    """
    # Keep a trace of all failed guesses kwarg
    failed_kwargs = []

    # Get an ordered list of read() keyword arg dicts that will be cycled
    # through in order to guess the format.
    full_list_guess = _get_guess_kwargs_list(read_kwargs)

    # If a fast version of the reader is available, try that before the slow version
    if (
        fast_reader["enable"]
        and format is not None
        and f"fast_{format}" in core.FAST_CLASSES
    ):
        fast_kwargs = copy.deepcopy(read_kwargs)
        fast_kwargs["reader_cls"] = core.FAST_CLASSES[f"fast_{format}"]
        full_list_guess = [fast_kwargs] + full_list_guess
    else:
        fast_kwargs = None

    # Filter the full guess list so that each entry is consistent with user kwarg inputs.
    # This also removes any duplicates from the list.
    filtered_guess_kwargs = []
    fast_reader = read_kwargs.get("fast_reader")

    for guess_kwargs in full_list_guess:
        # If user specified slow reader then skip all fast readers
        if (
            fast_reader["enable"] is False
            and guess_kwargs["reader_cls"] in core.FAST_CLASSES.values()
        ):
            _read_trace.append(
                {
                    "kwargs": copy.deepcopy(guess_kwargs),
                    "reader_cls": guess_kwargs["reader_cls"].__class__,
                    "status": "Disabled: reader only available in fast version",
                    "dt": f"{0.0:.3f} ms",
                }
            )
            continue

        # If user required a fast reader then skip all non-fast readers
        if (
            fast_reader["enable"] == "force"
            and guess_kwargs["reader_cls"] not in core.FAST_CLASSES.values()
        ):
            _read_trace.append(
                {
                    "kwargs": copy.deepcopy(guess_kwargs),
                    "reader_cls": guess_kwargs["reader_cls"].__class__,
                    "status": "Disabled: no fast version of reader available",
                    "dt": f"{0.0:.3f} ms",
                }
            )
            continue

        guess_kwargs_ok = True  # guess_kwargs are consistent with user_kwargs?
        for key, val in read_kwargs.items():
            # Do guess_kwargs.update(read_kwargs) except that if guess_args has
            # a conflicting key/val pair then skip this guess entirely.
            if key not in guess_kwargs:
                guess_kwargs[key] = copy.deepcopy(val)
            elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
                guess_kwargs_ok = False
                break

        if not guess_kwargs_ok:
            # User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
            # user supplies delimiter="|" but the guess wants to try delimiter=" ",
            # so skip the guess entirely.
            continue

        # Add the guess_kwargs to filtered list only if it is not already there.
        if guess_kwargs not in filtered_guess_kwargs:
            filtered_guess_kwargs.append(guess_kwargs)

    # If there are not at least two formats to guess then return no table
    # (None) to indicate that guessing did not occur.  In that case the
    # non-guess read() will occur and any problems will result in a more useful
    # traceback.
    if len(filtered_guess_kwargs) <= 1:
        return None

    # Define whitelist of exceptions that are expected from readers when
    # processing invalid inputs.  Note that OSError must fall through here
    # so one cannot simply catch any exception.
    guess_exception_classes = (
        core.InconsistentTableError,
        ValueError,
        TypeError,
        AttributeError,
        core.OptionalTableImportError,
        core.ParameterError,
        cparser.CParserError,
    )

    # Now cycle through each possible reader and associated keyword arguments.
    # Try to read the table using those args, and if an exception occurs then
    # keep track of the failed guess and move on.
    for guess_kwargs in filtered_guess_kwargs:
        t0 = time.time()
        try:
            # If guessing will try all Readers then use strict req'ts on column names
            if "reader_cls" not in read_kwargs:
                guess_kwargs["strict_names"] = True

            reader = get_reader(**guess_kwargs)

            reader.guessing = True
            dat = reader.read(table)
            _read_trace.append(
                {
                    "kwargs": copy.deepcopy(guess_kwargs),
                    "reader_cls": reader.__class__,
                    "status": "Success (guessing)",
                    "dt": f"{(time.time() - t0) * 1000:.3f} ms",
                }
            )
            return dat

        except guess_exception_classes as err:
            _read_trace.append(
                {
                    "kwargs": copy.deepcopy(guess_kwargs),
                    "status": f"{err.__class__.__name__}: {str(err)}",
                    "dt": f"{(time.time() - t0) * 1000:.3f} ms",
                }
            )
            failed_kwargs.append(guess_kwargs)

    # Failed all guesses, try the original read_kwargs without column requirements
    try:
        reader = get_reader(**read_kwargs)
        dat = reader.read(table)
        _read_trace.append(
            {
                "kwargs": copy.deepcopy(read_kwargs),
                "reader_cls": reader.__class__,
                "status": (
                    "Success with original kwargs without strict_names (guessing)"
                ),
            }
        )
        return dat

    except guess_exception_classes as err:
        _read_trace.append(
            {
                "kwargs": copy.deepcopy(read_kwargs),
                "status": f"{err.__class__.__name__}: {str(err)}",
            }
        )
        failed_kwargs.append(read_kwargs)

    lines = ["\nERROR: Unable to guess table format with the guesses listed below:"]
    for kwargs in failed_kwargs:
        sorted_keys = sorted(
            x for x in sorted(kwargs) if x not in ("reader_cls", "outputter_cls")
        )
        reader_repr = repr(kwargs.get("reader_cls", basic.Basic))
        keys_vals = ["reader_cls:" + re.search(r"\.(\w+)'>", reader_repr).group(1)]
        kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
        keys_vals.extend([f"{key}: {val!r}" for key, val in kwargs_sorted])
        lines.append(" ".join(keys_vals))

    msg = [
        "",
        "************************************************************************",
        "** ERROR: Unable to guess table format with the guesses listed above. **",
        "**                                                                    **",
        "** To figure out why the table did not read, use guess=False and      **",
        "** fast_reader=False, along with any appropriate arguments to read(). **",
        "** In particular specify the format and any known attributes like the **",
        "** delimiter.                                                         **",
        "************************************************************************",
    ]
    lines.extend(msg)
    raise core.InconsistentTableError("\n".join(lines)) from None
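For orientation, this `_guess` machinery is what runs behind a bare `ascii.read()` call with no `format`. A minimal sketch of triggering and bypassing it using only the public astropy API (the table contents here are invented for illustration):

from astropy.io import ascii

# No format given: read() cycles through the guess list until one succeeds.
dat = ascii.read("a|b\n1|2\n3|4")

# Supplying format and delimiter narrows the guess list; guess=False skips
# guessing entirely, so a failure produces a direct, more useful traceback.
dat = ascii.read("a|b\n1|2\n3|4", format="basic", delimiter="|", guess=False)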
def _get_guess_kwargs_list(read_kwargs):
    """Get the full list of reader keyword argument dicts.

    These are the basis for the format guessing process.
    The returned full list will then be:

    - Filtered to be consistent with user-supplied kwargs
    - Cleaned to have only unique entries
    - Used one by one to try reading the input table

    Note that the order of the guess list has been tuned over years of usage.
    Maintainers need to be very careful about any adjustments as the
    reasoning may not be immediately evident in all cases.

    This list can (and usually does) include duplicates.  This is a result
    of the order tuning, but these duplicates get removed later.

    Parameters
    ----------
    read_kwargs : dict
        User-supplied read keyword args

    Returns
    -------
    guess_kwargs_list : list
        List of read format keyword arg dicts
    """
    guess_kwargs_list = []

    # If the table is probably HTML based on some heuristics then start with the
    # HTML reader.
    if read_kwargs.pop("guess_html", None):
        guess_kwargs_list.append({"reader_cls": html.HTML})

    # Start with ECSV because an ECSV file will be read by Basic.  This format
    # has very specific header requirements and fails out quickly.
    guess_kwargs_list.append({"reader_cls": ecsv.Ecsv})

    # Now try readers that accept the user-supplied keyword arguments
    # (actually include all here - check for compatibility of arguments later).
    # FixedWidthTwoLine would also be read by Basic, so it needs to come first;
    # same for RST.
    for reader in (
        fixedwidth.FixedWidthTwoLine,
        rst.RST,
        fastbasic.FastBasic,
        basic.Basic,
        fastbasic.FastRdb,
        basic.Rdb,
        fastbasic.FastTab,
        basic.Tab,
        cds.Cds,
        mrt.Mrt,
        daophot.Daophot,
        sextractor.SExtractor,
        ipac.Ipac,
        latex.Latex,
        latex.AASTex,
    ):
        guess_kwargs_list.append({"reader_cls": reader})

    # Cycle through the basic-style readers using all combinations of delimiter
    # and quotechar.
    for reader_cls in (
        fastbasic.FastCommentedHeader,
        basic.CommentedHeader,
        fastbasic.FastBasic,
        basic.Basic,
        fastbasic.FastNoHeader,
        basic.NoHeader,
    ):
        for delimiter in ("|", ",", " ", r"\s"):
            for quotechar in ('"', "'"):
                guess_kwargs_list.append(
                    {
                        "reader_cls": reader_cls,
                        "delimiter": delimiter,
                        "quotechar": quotechar,
                    }
                )

    return guess_kwargs_list
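As a concrete illustration of the ECSV-before-Basic ordering: an ECSV file is also readable by the plain Basic reader (which would ignore the typed header), so trying Ecsv first preserves the declared datatypes. A small sketch using the public API; the table content is invented:

from astropy.io import ascii

ecsv_text = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64}
# - {name: b, datatype: float64}
a b
1 2.0
"""

# Guessing tries Ecsv before Basic, so the declared datatypes are honored.
dat = ascii.read(ecsv_text)
assert dat["a"].dtype.kind == "i" and dat["b"].dtype.kind == "f"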
def _read_in_chunks(table, **kwargs):
    """
    For fast_reader read the ``table`` in chunks and vstack to create
    a single table, OR return a generator of chunk tables.
    """
    fast_reader = kwargs["fast_reader"]
    chunk_size = fast_reader.pop("chunk_size")
    chunk_generator = fast_reader.pop("chunk_generator", False)

    tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
    if chunk_generator:
        return tbl_chunks

    tbl0 = next(tbl_chunks)
    masked = tbl0.masked

    # Numpy won't allow resizing the original so make a copy here.
    out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}

    str_kinds = ("S", "U")
    for tbl in tbl_chunks:
        masked |= tbl.masked
        for name, col in tbl.columns.items():
            # Concatenate current column data and new column data.

            # If one of the inputs is string-like and the other is not, then
            # convert the non-string to a string.  In a perfect world this would
            # be handled by numpy, but as of numpy 1.13 this results in a string
            # dtype that is too long (https://github.com/numpy/numpy/issues/10062).
            col1, col2 = out_cols[name], col.data
            if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
                col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
            elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
                col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)

            # Choose either masked or normal concatenation
            concatenate = np.ma.concatenate if masked else np.concatenate

            out_cols[name] = concatenate([col1, col2])

    # Make final table from numpy arrays, converting dict to list
    out_cols = [out_cols[name] for name in tbl0.colnames]
    out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta, copy=False)

    return out
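Chunked reading is reached through the public ``fast_reader`` keyword rather than by calling `_read_in_chunks` directly. A minimal sketch; the file name is a placeholder:

from astropy.io import ascii

# Read a large CSV in roughly 100 MB chunks, then vstack into one Table.
tbl = ascii.read(
    "big_table.csv",  # hypothetical file name
    format="csv",
    fast_reader={"chunk_size": 100_000_000},
)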
def _read_in_chunks_generator(table, chunk_size, **kwargs):
    """
    For fast_reader read the ``table`` in chunks and return a generator
    of tables for each chunk.
    """

    @contextlib.contextmanager
    def passthrough_fileobj(fileobj, encoding=None):
        """Stub for get_readable_fileobj, which does not seem to work in Py3
        for input file-like object, see #6460.
        """
        yield fileobj

    # Set up to coerce `table` input into a readable file object by selecting
    # an appropriate function.

    # Convert table-as-string to a File object.  Finding a newline implies
    # that the string is not a filename.
    if isinstance(table, str) and ("\n" in table or "\r" in table):
        table = StringIO(table)
        fileobj_context = passthrough_fileobj
    elif hasattr(table, "read") and hasattr(table, "seek"):
        fileobj_context = passthrough_fileobj
    else:
        # string filename or pathlib
        fileobj_context = get_readable_fileobj

    # Set up for iterating over chunks
    kwargs["fast_reader"]["return_header_chars"] = True
    header = ""  # Table header (up to start of data)
    prev_chunk_chars = ""  # Chars from previous chunk after last newline
    first_chunk = True  # True for the first chunk, False afterward

    with fileobj_context(table, encoding=kwargs.get("encoding")) as fh:
        while True:
            chunk = fh.read(chunk_size)
            # Got fewer chars than requested, must be end of file
            final_chunk = len(chunk) < chunk_size

            # If this is the last chunk and there is only whitespace then break
            if final_chunk and not re.search(r"\S", chunk):
                break

            # Step backwards from last character in chunk and find first newline
            for idx in range(len(chunk) - 1, -1, -1):
                if final_chunk or chunk[idx] == "\n":
                    break
            else:
                raise ValueError("no newline found in chunk (chunk_size too small?)")

            # Stick on the header to the chunk part up to (and including) the
            # last newline.  Make sure the small strings are concatenated first.
            complete_chunk = (header + prev_chunk_chars) + chunk[: idx + 1]
            prev_chunk_chars = chunk[idx + 1 :]

            # Now read the chunk as a complete table
            tbl = read(complete_chunk, guess=False, **kwargs)

            # For the first chunk pop the meta key which contains the header
            # characters (everything up to the start of data) then fix kwargs
            # so it doesn't return that in meta any more.
            if first_chunk:
                header = tbl.meta.pop("__ascii_fast_reader_header_chars__")
                first_chunk = False

            yield tbl

            if final_chunk:
                break
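Setting ``chunk_generator=True`` hands back this generator instead of the stacked table, which keeps memory bounded when each chunk can be processed and discarded. A sketch; the file name and `process` are hypothetical:

from astropy.io import ascii

chunks = ascii.read(
    "big_table.csv",  # hypothetical file name
    format="csv",
    fast_reader={"chunk_size": 100_000_000, "chunk_generator": True},
)
for chunk in chunks:
    process(chunk)  # hypothetical per-chunk processing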
def get_writer(writer_cls=None, fast_writer=True, **kwargs):
    """
    Initialize a table writer allowing for common customizations.

    Most of the default behavior for various parameters is determined by the
    Writer class.

    Parameters
    ----------
    writer_cls : ``writer_cls``
        Writer class.  Defaults to :class:`Basic`.
    delimiter : str
        Column delimiter string
    comment : str
        String defining a comment line in table
    quotechar : str
        One-character string to quote fields containing special characters
    formats : dict
        Dictionary of format specifiers or formatting functions
    strip_whitespace : bool
        Strip surrounding whitespace from column values.
    names : list
        List of names corresponding to each data column
    include_names : list
        List of names to include in output.
    exclude_names : list
        List of names to exclude from output (applied after ``include_names``)
    fast_writer : bool
        Whether to use the fast Cython writer.

    Returns
    -------
    writer : `~astropy.io.ascii.BaseReader` subclass
        ASCII format writer instance
    """
    if writer_cls is None:
        writer_cls = basic.Basic
    if "strip_whitespace" not in kwargs:
        kwargs["strip_whitespace"] = True
    writer = core._get_writer(writer_cls, fast_writer, **kwargs)

    # Handle the corner case of wanting to disable writing table comments for the
    # commented_header format.  This format *requires* a string for `write_comment`
    # because that is used for the header column row, so it is not possible to
    # set the input `comment` to None.  Without adding a new keyword or assuming
    # a default comment character, there is no other option but to tell user to
    # simply remove the meta['comments'].
    if isinstance(
        writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader)
    ) and not isinstance(kwargs.get("comment", ""), str):
        raise ValueError(
            "for the commented_header writer you must supply a string\n"
            "value for the `comment` keyword.  In order to disable writing\n"
            "table comments use `del t.meta['comments']` prior to writing."
        )

    return writer
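From the user side, the commented_header corner case handled above looks roughly like this (standard Table API; the table values are invented):

import sys
from astropy.table import Table

t = Table({"a": [1, 2]})
t.meta["comments"] = ["a comment line"]

# Passing comment=None would hit the ValueError above, since commented_header
# needs a comment string for the header row itself.  The supported way to
# drop the comment lines from the output is to remove the metadata:
del t.meta["comments"]
t.write(sys.stdout, format="ascii.commented_header")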
def get_read_trace():
    """
    Return a traceback of the attempted read formats for the last call to
    `~astropy.io.ascii.read` where guessing was enabled.  This is primarily for
    debugging.

    The return value is a list of dicts, where each dict includes the keyword
    args ``kwargs`` used in the read call and the returned ``status``.

    Returns
    -------
    trace : list of dict
        Ordered list of format guesses and status
    """
    return copy.deepcopy(_read_trace)
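A short usage sketch (public API; the input table is invented):

from astropy.io import ascii

ascii.read("a b\n1 2")  # format not specified, so guessing runs
for entry in ascii.get_read_trace():
    print(entry["status"])  # e.g. "Success (guessing)" or an exception message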
def test_roundtrip_mrt_table():
    """
    Test that the CDS writer can round-trip a table, i.e. read a table into a
    ``Table`` object and write it back out exactly as it is.

    Since CDS presently uses an MRT format template when writing, only the
    Byte-By-Byte and data sections of the table can be compared between the
    original and the newly written table.

    Further, the CDS reader cannot recognize column format from the header of
    a CDS/MRT table, so this test only works for a limited set of simple
    tables that have no whitespace in column values and no mix-in columns.
    Because of this the written table output cannot be matched directly
    against the original file and has to be checked against a list of lines.
    Masked columns are read properly, though, and are therefore tested during
    round-tripping.

    The differences between the ``cdsFunctional2.dat`` file and ``exp_output``
    are the following:

    * Metadata is different because the MRT template is used for writing.
    * Spacing between the ``Label`` and ``Explanations`` columns in the
      Byte-By-Byte.
    * Units are written as ``[cm.s-2]`` and not ``[cm/s2]``, since both are
      valid according to the CDS/MRT standard.
    """
    exp_output = [
        "================================================================================",
        "Byte-by-byte Description of file: table.dat",
        "--------------------------------------------------------------------------------",
        " Bytes Format Units Label Explanations",
        "--------------------------------------------------------------------------------",
        " 1- 7 A7 --- ID Star ID ",
        " 9-12 I4 K Teff [4337/4654] Effective temperature ",
        "14-17 F4.2 [cm.s-2] logg [0.77/1.28] Surface gravity ",
        "19-22 F4.2 km.s-1 vturb [1.23/1.82] Micro-turbulence velocity",
        "24-28 F5.2 [-] [Fe/H] [-2.11/-1.5] Metallicity ",
        "30-33 F4.2 [-] e_[Fe/H] ? rms uncertainty on [Fe/H] ",
        "--------------------------------------------------------------------------------",
        "Notes:",
        "--------------------------------------------------------------------------------",
        "S05-5 4337 0.77 1.80 -2.07 ",
        "S08-229 4625 1.23 1.23 -1.50 ",
        "S05-10 4342 0.91 1.82 -2.11 0.14",
        "S05-47 4654 1.28 1.74 -1.64 0.16",
    ]
    dat = get_pkg_data_filename(
        "data/cdsFunctional2.dat", package="astropy.io.ascii.tests"
    )
    t = Table.read(dat, format="ascii.mrt")

    out = StringIO()
    t.write(out, format="ascii.mrt")
    lines = out.getvalue().splitlines()
    i_bbb = lines.index("=" * 80)
    lines = lines[i_bbb:]  # Select Byte-By-Byte section and later lines.
    assert lines == exp_output
def test_write_byte_by_byte_for_masked_column():
    """
    This test differs from ``test_write_null_data_values`` above in that it
    tests the column value limits in the Byte-By-Byte description section for
    columns whose values are masked.  It also checks the description for
    columns with identical values.
    """
    exp_output = [
        "================================================================================",
        "Byte-by-byte Description of file: table.dat",
        "--------------------------------------------------------------------------------",
        " Bytes Format Units Label Explanations",
        "--------------------------------------------------------------------------------",
        " 1- 8 A8 --- names Description of names ",
        "10-14 E5.1 --- e [0.0/0.01]? Description of e ",
        "16-17 F2.0 --- d ? Description of d ",
        "19-25 E7.1 --- s [-9e+34/2.0] Description of s ",
        "27-29 I3 --- i [-30/67] Description of i ",
        "31-33 F3.1 --- sameF [5.0/5.0] Description of sameF",
        "35-36 I2 --- sameI [20] Description of sameI ",
        "--------------------------------------------------------------------------------",
        "Notes:",
        "--------------------------------------------------------------------------------",
        "HD81809 1e-07 2e+00 67 5.0 20",
        "HD103095 -9e+34 -30 5.0 20",
    ]
    t = ascii.read(test_dat)
    t.add_column([5.0, 5.0], name="sameF")
    t.add_column([20, 20], name="sameI")
    t["e"] = MaskedColumn(t["e"], mask=[False, True])
    t["d"] = MaskedColumn(t["d"], mask=[True, True])

    out = StringIO()
    t.write(out, format="ascii.mrt")
    lines = out.getvalue().splitlines()
    i_bbb = lines.index("=" * 80)
    lines = lines[i_bbb:]  # Select Byte-By-Byte section and later lines.
    assert lines == exp_output
def test_write_coord_cols():
    """
    There can only be one coordinate (``SkyCoord``) column in a single table,
    because splitting it into individual component columns requires iterating
    over the table columns, which would have to be done again if additional
    coordinate columns were present.
    """
    t = ascii.read(test_dat)
    t.add_column([5.0, 5.0], name="sameF")
    t.add_column([20, 20], name="sameI")

    # Coordinates of ASASSN-15lh
    coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
    # Coordinates of ASASSN-14li
    coordp = SkyCoord(192.06343503, 17.77402684, unit=u.deg)
    cols = [
        Column([coord, coordp]),  # Generic coordinate column
        coordp,  # Coordinate column with positive DEC
        coord.galactic,  # Galactic coordinates
        coord.geocentrictrueecliptic,  # Ecliptic coordinates
    ]

    # Loop through different types of coordinate columns.
    for col, coord_type in zip(cols, exp_coord_cols_output):
        exp_output = exp_coord_cols_output[coord_type]
        t["coord"] = col

        out = StringIO()
        t.write(out, format="ascii.mrt")
        lines = out.getvalue().splitlines()
        i_bbb = lines.index("=" * 80)
        lines = lines[i_bbb:]  # Select Byte-By-Byte section and later lines.

        # Check the written table.
        assert lines == exp_output

        # Check that the original table columns remain unmodified.
        assert t.colnames == ["names", "e", "d", "s", "i", "sameF", "sameI", "coord"]
def test_write_byte_by_byte_bytes_col_format():
    """
    Tests the alignment of Byte counts with respect to the hyphen in the Bytes
    column of the Byte-By-Byte.  The whitespace around the hyphen is governed
    by the number of digits in the total Byte count.  Single Byte columns
    should have a single Byte count without the hyphen.
    """
    exp_output = [
        "================================================================================",
        "Byte-by-byte Description of file: table.dat",
        "--------------------------------------------------------------------------------",
        " Bytes Format Units Label Explanations",
        "--------------------------------------------------------------------------------",
        " 1- 8 A8 --- names Description of names ",
        "10-21 E12.6 --- e [-3160000.0/0.01] Description of e",
        "23-30 F8.5 --- d [22.25/27.25] Description of d ",
        "32-38 E7.1 --- s [-9e+34/2.0] Description of s ",
        "40-42 I3 --- i [-30/67] Description of i ",
        "44-46 F3.1 --- sameF [5.0/5.0] Description of sameF ",
        "48-49 I2 --- sameI [20] Description of sameI ",
        " 51 I1 --- singleByteCol [2] Description of singleByteCol ",
        "53-54 I2 h RAh Right Ascension (hour) ",
        "56-57 I2 min RAm Right Ascension (minute) ",
        "59-71 F13.10 s RAs Right Ascension (second) ",
        " 73 A1 --- DE- Sign of Declination ",
        "74-75 I2 deg DEd Declination (degree) ",
        "77-78 I2 arcmin DEm Declination (arcmin) ",
        "80-91 F12.9 arcsec DEs Declination (arcsec) ",
        "--------------------------------------------------------------------------------",
    ]
    t = ascii.read(test_dat)
    t.add_column([5.0, 5.0], name="sameF")
    t.add_column([20, 20], name="sameI")
    t["coord"] = SkyCoord(330.564375, -61.65961111, unit=u.deg)
    t["singleByteCol"] = [2, 2]
    t["e"].format = ".5E"

    out = StringIO()
    t.write(out, format="ascii.mrt")
    lines = out.getvalue().splitlines()
    i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
    # Select only the Byte-By-Byte section.
    lines = lines[i_secs[0] : i_secs[-2]]
    lines.append("-" * 80)  # Append a separator line.
    assert lines == exp_output
def test_write_byte_by_byte_wrapping():
    """
    Test line wrapping in the description column of the
    Byte-By-Byte section of the ReadMe.
    """
    exp_output = """\
================================================================================
Byte-by-byte Description of file: table.dat
--------------------------------------------------------------------------------
 Bytes Format Units Label Explanations
--------------------------------------------------------------------------------
 1- 8 A8 --- thisIsALongColumnLabel This is a tediously long description.
                                    But they do sometimes have them. Better
                                    to put extra details in the notes. This
                                    is a tediously long description. But
                                    they do sometimes have them. Better to
                                    put extra details in the notes.
10-14 E5.1 --- e [-3160000.0/0.01] Description of e
16-23 F8.5 --- d [22.25/27.25] Description of d
--------------------------------------------------------------------------------
"""
    t = ascii.read(test_dat)
    t.remove_columns(["s", "i"])
    description = (
        "This is a tediously long description."
        " But they do sometimes have them."
        " Better to put extra details in the notes. "
    )
    t["names"].description = description * 2
    t["names"].name = "thisIsALongColumnLabel"

    out = StringIO()
    t.write(out, format="ascii.mrt")
    lines = out.getvalue().splitlines()
    i_secs = [i for i, s in enumerate(lines) if s.startswith(("------", "======="))]
    # Select only the Byte-By-Byte section.
    lines = lines[i_secs[0] : i_secs[-2]]
    lines.append("-" * 80)  # Append a separator line.
    assert lines == exp_output.splitlines()
def test_write_mixin_and_broken_cols():
    """
    Tests conversion to string values for ``mix-in`` columns other than
    ``SkyCoord`` and for columns with only partial ``SkyCoord`` values.
    """
    exp_output = [
        "================================================================================",
        "Byte-by-byte Description of file: table.dat",
        "--------------------------------------------------------------------------------",
        " Bytes Format Units Label Explanations",
        "--------------------------------------------------------------------------------",
        " 1- 7 A7 --- name Description of name ",
        " 9- 74 A66 --- Unknown Description of Unknown ",
        " 76-114 A39 --- cart Description of cart ",
        "116-138 A23 --- time Description of time ",
        "140-142 F3.1 m q [1.0/1.0] Description of q",
        "--------------------------------------------------------------------------------",
        "Notes:",
        "--------------------------------------------------------------------------------",
        "HD81809 <SkyCoord (ICRS): (ra, dec) in deg",
        " (330.564375, -61.65961111)> (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000 1.0",
        "random 12 (0.41342785, -0.23329341, -0.88014294) 2019-01-01 00:00:00.000 1.0",
    ]
    t = Table()
    t["name"] = ["HD81809"]
    coord = SkyCoord(330.564375, -61.65961111, unit=u.deg)
    t["coord"] = Column(coord)
    t.add_row(["random", 12])
    t["cart"] = coord.cartesian
    t["time"] = Time("2019-1-1")
    t["q"] = u.Quantity(1.0, u.m)

    out = StringIO()
    t.write(out, format="ascii.mrt")
    lines = out.getvalue().splitlines()
    i_bbb = lines.index("=" * 80)
    lines = lines[i_bbb:]  # Select Byte-By-Byte section and later lines.

    # Check the written table.
    assert lines == exp_output
def test_write_extra_skycoord_cols():
    """
    Tests output for cases when table contains multiple ``SkyCoord`` columns.
    """
    exp_output = [
        "================================================================================",
        "Byte-by-byte Description of file: table.dat",
        "--------------------------------------------------------------------------------",
        " Bytes Format Units Label Explanations",
        "--------------------------------------------------------------------------------",
        " 1- 7 A7 --- name Description of name ",
        " 9-10 I2 h RAh Right Ascension (hour) ",
        "12-13 I2 min RAm Right Ascension (minute)",
        "15-27 F13.10 s RAs Right Ascension (second)",
        " 29 A1 --- DE- Sign of Declination ",
        "30-31 I2 deg DEd Declination (degree) ",
        "33-34 I2 arcmin DEm Declination (arcmin) ",
        "36-47 F12.9 arcsec DEs Declination (arcsec) ",
        "49-62 A14 --- coord2 Description of coord2 ",
        "--------------------------------------------------------------------------------",
        "Notes:",
        "--------------------------------------------------------------------------------",
        "HD4760 0 49 39.9000000000 +06 24 07.999200000 12.4163 6.407 ",
        "HD81809 22 02 15.4500000000 -61 39 34.599996000 330.564 -61.66",
    ]
    t = Table()
    t["name"] = ["HD4760", "HD81809"]
    t["coord1"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)
    t["coord2"] = SkyCoord([12.41630, 330.564400], [6.407, -61.66], unit=u.deg)

    out = StringIO()
    with pytest.warns(
        UserWarning,
        match=r"column 2 is being skipped with designation of a "
        r"string valued column `coord2`",
    ):
        t.write(out, format="ascii.mrt")

    lines = out.getvalue().splitlines()
    i_bbb = lines.index("=" * 80)
    lines = lines[i_bbb:]  # Select Byte-By-Byte section and following lines.

    # Check the written table.
    assert lines[:-2] == exp_output[:-2]
    for a, b in zip(lines[-2:], exp_output[-2:]):
        assert a[:18] == b[:18]
        assert a[30:42] == b[30:42]
        assert_almost_equal(
            np.fromstring(a[2:], sep=" "), np.fromstring(b[2:], sep=" ")
        )
def test_write_skycoord_with_format():
    """
    Tests output with custom setting for ``SkyCoord`` (second) columns.
    """
    exp_output = [
        "================================================================================",
        "Byte-by-byte Description of file: table.dat",
        "--------------------------------------------------------------------------------",
        " Bytes Format Units Label Explanations",
        "--------------------------------------------------------------------------------",
        " 1- 7 A7 --- name Description of name ",
        " 9-10 I2 h RAh Right Ascension (hour) ",
        "12-13 I2 min RAm Right Ascension (minute)",
        "15-19 F5.2 s RAs Right Ascension (second)",
        " 21 A1 --- DE- Sign of Declination ",
        "22-23 I2 deg DEd Declination (degree) ",
        "25-26 I2 arcmin DEm Declination (arcmin) ",
        "28-31 F4.1 arcsec DEs Declination (arcsec) ",
        "--------------------------------------------------------------------------------",
        "Notes:",
        "--------------------------------------------------------------------------------",
        "HD4760 0 49 39.90 +06 24 08.0",
        "HD81809 22 02 15.45 -61 39 34.6",
    ]
    t = Table()
    t["name"] = ["HD4760", "HD81809"]
    t["coord"] = SkyCoord([12.41625, 330.564375], [6.402222, -61.65961111], unit=u.deg)

    out = StringIO()
    # This will raise a warning because `formats` is checked before the writer
    # creating the final list of columns is called.
    with pytest.warns(
        AstropyWarning,
        match=r"The key.s. {'[RD][AE]s', '[RD][AE]s'} specified in "
        r"the formats argument do not match a column name.",
    ):
        t.write(out, format="ascii.mrt", formats={"RAs": "05.2f", "DEs": "04.1f"})

    lines = out.getvalue().splitlines()
    i_bbb = lines.index("=" * 80)
    lines = lines[i_bbb:]  # Select Byte-By-Byte section and following lines.

    # Check the written table.
    assert lines == exp_output
def test_read_csv():
    """If properly registered, filename should be sufficient to specify format

    #3189
    """
    Table.read(get_pkg_data_filename("data/simple_csv.csv"))
def test_write_csv(tmp_path):
    """If properly registered, filename should be sufficient to specify format

    #3189
    """
    t = Table()
    t.add_column(Column(name="a", data=[1, 2, 3]))
    t.add_column(Column(name="b", data=["a", "b", "c"]))
    path = tmp_path / "data.csv"
    t.write(path)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.0e-15, atol=1.0e-300):
    """
    Test equality of all columns in a table, with stricter tolerances for
    float columns than the np.allclose default.
    """
    assert_equal(len(t1), len(t2))
    assert_equal(t1.colnames, t2.colnames)
    if check_meta:
        assert_equal(t1.meta, t2.meta)
    for name in t1.colnames:
        if len(t1) != 0:
            assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
        if not isinstance(t1[name], MaskedColumn):
            for i, el in enumerate(t1[name]):
                try:
                    if not isinstance(el, str) and np.isnan(el):
                        assert_true(
                            not isinstance(t2[name][i], str) and np.isnan(t2[name][i])
                        )
                    elif isinstance(el, str):
                        assert_equal(el, t2[name][i])
                    else:
                        assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
                except (TypeError, NotImplementedError):
                    pass  # ignore for now
def test_embedded_newlines(delimiter, quotechar, fast):
    """Test that embedded newlines are supported for io.ascii readers
    and writers, both fast and Python readers."""
    # Start with an assortment of values with different embedded newlines and whitespace
    dat = [
        ["\t a ", " b \n cd ", "\n"],
        [" 1\n ", '2 \n" \t 3\n4\n5', "1\n '2\n"],
        [" x,y \nz\t", "\t 12\n\t34\t ", "56\t\n"],
    ]
    dat = Table(dat, names=("a", "b", "c"))

    # Construct a table which is our expected result of writing the table and
    # reading it back. Certain stripping of whitespace is expected.
    exp = {}  # expected output from reading
    for col in dat.itercols():
        vals = []
        for val in col:
            # Readers and writers both strip whitespace from ends of values
            val = val.strip(" \t")
            if not fast:
                # Pure Python reader has a "feature" where it strips trailing
                # whitespace from each input line. This means a value like
                # " x \ny \t\n" gets read as "x\ny".
                bits = val.splitlines(keepends=True)
                bits_out = []
                for bit in bits:
                    bit = re.sub(r"[ \t]+(\n?)$", r"\1", bit.strip(" \t"))
                    bits_out.append(bit)
                val = "".join(bits_out)
            vals.append(val)
        exp[col.info.name] = vals
    exp = Table(exp)

    if delimiter == "csv":
        format = "csv"
        delimiter = ","
    else:
        format = "basic"

    # Write the table to `text`
    fh = io.StringIO()
    ascii.write(
        dat,
        fh,
        format=format,
        delimiter=delimiter,
        quotechar=quotechar,
        fast_writer=fast,
    )
    text = fh.getvalue()

    # Read it back and compare to the expected
    dat_out = ascii.read(
        text,
        format=format,
        guess=False,
        delimiter=delimiter,
        quotechar=quotechar,
        fast_reader=fast,
    )

    eq = dat_out.values_equal(exp)
    assert all(np.all(col) for col in eq.itercols())
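From the user side, the behavior this test locks down amounts to: write with a quoting format and embedded newlines round-trip. A minimal sketch with the public API (table values invented):

import io
from astropy.io import ascii
from astropy.table import Table

t = Table({"a": ["line1\nline2"], "b": [1]})
fh = io.StringIO()
ascii.write(t, fh, format="csv")  # the embedded newline is quoted on output
t2 = ascii.read(fh.getvalue(), format="csv")
assert t2["a"][0] == "line1\nline2"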
def test_simple_data(read_basic):
    """
    Make sure the fast reader works with basic input data.
    """
    table = read_basic("A B C\n1 2 3\n4 5 6")
    expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
    assert_table_equal(table, expected)
def test_read_types():
    """
    Make sure that the read() function takes filenames, strings, and lists
    of strings in addition to file-like objects.
    """
    t1 = ascii.read("a b c\n1 2 3\n4 5 6", format="fast_basic", guess=False)
    # TODO: also read from file
    t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format="fast_basic", guess=False)
    t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format="fast_basic", guess=False)
    assert_table_equal(t1, t2)
    assert_table_equal(t2, t3)
def test_supplied_names(read_basic):
    """
    If passed as a parameter, names should replace any
    column names found in the header.
    """
    table = read_basic("A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"))
    expected = Table([[1, 4], [2, 5], [3, 6]], names=("X", "Y", "Z"))
    assert_table_equal(table, expected)
def test_no_header(read_basic, read_no_header):
    """
    The header should not be read when header_start=None. Unless names is
    passed, the column names should be auto-generated.
    """
    # Cannot set header_start=None for basic format
    with pytest.raises(ValueError):
        read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0)

    t2 = read_no_header("A B C\n1 2 3\n4 5 6")
    expected = Table(
        [["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]],
        names=("col1", "col2", "col3"),
    )
    assert_table_equal(t2, expected)
def test_no_header_supplied_names(read_basic, read_no_header):
    """
    If header_start=None and names is passed as a parameter, header
    data should not be read and names should be used instead.
    """
    table = read_no_header("A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"))
    expected = Table(
        [["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]], names=("X", "Y", "Z")
    )
    assert_table_equal(table, expected)
def test_comment(read_basic):
    """
    Make sure that line comments are ignored by the C reader.
    """
    table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6")
    expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
    assert_table_equal(table, expected)
def test_empty_lines(read_basic):
    """
    Make sure that empty lines are ignored by the C reader.
    """
    table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n")
    expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
    assert_table_equal(table, expected)
def test_lstrip_whitespace(read_basic):
    """
    Test to make sure the reader ignores whitespace at the beginning of fields.
    """
    text = """
 1,  2, \t3
A,\t\t B,  C
 a, b,   c
\n"""
    table = read_basic(text, delimiter=",")
    expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3"))
    assert_table_equal(table, expected)
def test_rstrip_whitespace(read_basic):
    """
    Test to make sure the reader ignores whitespace at the end of fields.
    """
    text = " 1 ,2 \t,3 \nA\t,B ,C\t \t \n  \ta ,b , c \n"
    table = read_basic(text, delimiter=",")
    expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3"))
    assert_table_equal(table, expected)
def test_conversion(read_basic):
    """
    The reader should try to convert each column to ints. If this fails, the
    reader should try to convert to floats. Failing this, i.e. on parsing
    non-numeric input including isolated positive/negative signs, it should
    fall back to strings.
    """
    text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
    table = read_basic(text)
    assert_equal(table["A"].dtype.kind, "f")
    assert table["B"].dtype.kind in ("S", "U")
    assert_equal(table["C"].dtype.kind, "i")
    assert_equal(table["D"].dtype.kind, "f")
    assert table["E"].dtype.kind in ("S", "U")
    assert table["F"].dtype.kind in ("S", "U")
    assert table["G"].dtype.kind in ("S", "U")
    assert table["H"].dtype.kind in ("S", "U")
def test_delimiter(read_basic):
    """
    Make sure that different delimiters work as expected.
    """
    text = dedent(
        """
        COL1 COL2 COL3
        1 A -1
        2 B -2
        """
    )
    expected = Table([[1, 2], ["A", "B"], [-1, -2]], names=("COL1", "COL2", "COL3"))

    for sep in " ,\t#;":
        table = read_basic(
            text.replace(" ", sep),
            delimiter=sep,
        )
        assert_table_equal(table, expected)
def test_include_names(read_basic):
    """
    If include_names is not None, the parser should read only those columns
    in include_names.
    """
    table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=["A", "D"])
    expected = Table([[1, 5], [4, 8]], names=("A", "D"))
    assert_table_equal(table, expected)
def test_exclude_names(read_basic):
    """
    If exclude_names is not None, the parser should exclude the columns in
    exclude_names.
    """
    table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=["A", "D"])
    expected = Table([[2, 6], [3, 7]], names=("B", "C"))
    assert_table_equal(table, expected)
def test_include_exclude_names(read_basic):
    """
    Make sure that include_names is applied before exclude_names if both are
    specified.
    """
    text = dedent(
        """
        A B C D E F G H
        1 2 3 4 5 6 7 8
        9 10 11 12 13 14 15 16
        """
    )
    table = read_basic(
        text, include_names=["A", "B", "D", "F", "H"], exclude_names=["B", "F"]
    )
    expected = Table([[1, 9], [4, 12], [8, 16]], names=("A", "D", "H"))
    assert_table_equal(table, expected)
def test_doubled_quotes(read_csv):
    """
    Test #8283 (fix for #8281), parsing doubled-quotes "ab""cd" in a quoted
    field was incorrect.
    """
    tbl = "\n".join(  # noqa: FLY002
        [
            "a,b",
            '"d""","d""q"',
            '"""q",""""',
        ]
    )
    # fmt: off
    expected = Table([['d"', '"q'],
                      ['d"q', '"']],
                     names=('a', 'b'))
    # fmt: on
    dat = read_csv(tbl)
    assert_table_equal(dat, expected)

    # In addition to the local read_csv wrapper, check that default
    # parsing with guessing gives the right answer.
    for fast_reader in True, False:
        dat = ascii.read(tbl, fast_reader=fast_reader)
        assert_table_equal(dat, expected)
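The doubled-quote convention being tested is standard CSV escaping: "" inside a quoted field means a literal quote character. A one-line sketch with the public API:

from astropy.io import ascii

dat = ascii.read('a,b\n"x""y",2', format="csv")
assert dat["a"][0] == 'x"y'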
def test_doubled_quotes_segv():
    """
    Test the exact example from #8281 which resulted in SEGV prior to #8283
    (in contrast to the tests above that just gave the wrong answer).
    Attempts to produce a more minimal example were unsuccessful, so the
    whole thing is included.
    """
    tbl = dedent(
        """
    "ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
    "CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.unistra.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.unistra.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.unistra.fr/2MASS/H","https://alaskybis.unistra.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.unistra.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
    """
    )
    ascii.read(tbl, format="csv", fast_reader=True, guess=False)
def test_quoted_fields(read_basic):
    """
    The character quotechar (default '"') should denote the start of a field
    which can contain the field delimiter and newlines.
    """
    text = dedent(
        """
        "A B" C D
        1.5 2.1 -37.1
        a b " c
        d"
        """
    )
    table = read_basic(text)
    expected = Table(
        [["1.5", "a"], ["2.1", "b"], ["-37.1", "c\nd"]], names=("A B", "C", "D")
    )
    assert_table_equal(table, expected)
    table = read_basic(text.replace('"', "'"), quotechar="'")
    assert_table_equal(table, expected)
def test_invalid_parameters(key, val):
    """
    Make sure the C reader raises an error if passed parameters it can't
    handle.
    """
    with pytest.raises(ParameterError):
        FastBasic(**{key: val}).read("1 2 3\n4 5 6")
    with pytest.raises(ParameterError):
        ascii.read("1 2 3\n4 5 6", format="fast_basic", guess=False, **{key: val})
def test_too_many_cols1():
    """
    If a row contains too many columns, the C reader should raise an error.
    """
    text = dedent(
        """
        A B C
        1 2 3
        4 5 6
        7 8 9 10
        11 12 13
        """
    )
    with pytest.raises(InconsistentTableError) as e:
        FastBasic().read(text)
    assert (
        "Number of header columns (3) inconsistent with data columns in data line 2"
        in str(e.value)
    )
def test_not_enough_cols(read_csv):
    """
    If a row does not have enough columns, the FastCsv reader should add empty
    fields while the FastBasic reader should raise an error.
    """
    text = """
A,B,C
1,2,3
4,5
6,7,8
"""
    table = read_csv(text)
    assert table["B"][1] is not ma.masked
    assert table["C"][1] is ma.masked

    with pytest.raises(InconsistentTableError):
        table = FastBasic(delimiter=",").read(text)
def test_data_end(read_basic, read_rdb):
    """
    The parameter data_end should specify where data reading ends.
    """
    text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
    table = read_basic(text, data_end=3)
    expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C"))
    assert_table_equal(table, expected)

    # data_end supports negative indexing
    table = read_basic(text, data_end=-2)
    assert_table_equal(table, expected)

    text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
    # make sure data_end works with RDB
    table = read_rdb(text, data_end=-1)
    expected = Table([[1, 3], [2, 4], ["a", "b"]], names=("A", "B", "C"))
    assert_table_equal(table, expected)

    # positive index
    table = read_rdb(text, data_end=3)
    expected = Table([[1], [2], ["a"]], names=("A", "B", "C"))
    assert_table_equal(table, expected)

    # empty table if data_end is too small
    table = read_rdb(text, data_end=1)
    expected = Table([[], [], []], names=("A", "B", "C"))
    assert_table_equal(table, expected)
def test_inf_nan(read_basic):
    """
    Test that inf and nan-like values are correctly parsed on all platforms.

    Regression test for https://github.com/astropy/astropy/pull/3525
    """
    text = dedent(
        """\
        A
        nan
        +nan
        -nan
        inf
        infinity
        +inf
        +infinity
        -inf
        -infinity
        """
    )
    expected = Table(
        {
            "A": [
                np.nan,
                np.nan,
                np.nan,
                np.inf,
                np.inf,
                np.inf,
                np.inf,
                -np.inf,
                -np.inf,
            ]
        }
    )
    table = read_basic(text)
    assert table["A"].dtype.kind == "f"
    assert_table_equal(table, expected)
def test_fill_values(read_basic):
    """
    Make sure that the parameter fill_values works as intended. If fill_values
    is not specified, the default behavior should be to convert '' to 0.
    """
    text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
    table = read_basic(text, delimiter=",")
    # The empty value in row A should become a masked '0'
    assert isinstance(table["A"], MaskedColumn)
    assert table["A"][0] is ma.masked
    # '0' rather than 0 because there is a string in the column
    assert_equal(table["A"].data.data[0], "0")
    assert table["A"][1] is not ma.masked

    table = read_basic(text, delimiter=",", fill_values=("-999", "0"))
    assert isinstance(table["B"], MaskedColumn)
    assert table["A"][0] is not ma.masked  # empty value unaffected
    assert table["C"][2] is not ma.masked  # -9999 is not an exact match
    assert table["B"][1] is ma.masked
    # Numeric because the rest of the column contains numeric data
    assert_equal(table["B"].data.data[1], 0.0)
    assert table["B"][0] is not ma.masked

    table = read_basic(text, delimiter=",", fill_values=[])
    # None of the columns should be masked
    for name in "ABC":
        assert not isinstance(table[name], MaskedColumn)

    table = read_basic(
        text, delimiter=",", fill_values=[("", "0", "A"), ("nan", "999", "A", "C")]
    )
    assert np.isnan(table["B"][3])  # nan filling skips column B
    # should skip masking as well as replacing nan
    assert table["B"][3] is not ma.masked
    assert table["A"][0] is ma.masked
    assert table["A"][2] is ma.masked
    assert_equal(table["A"].data.data[0], "0")
    assert_equal(table["A"].data.data[2], "999")
    assert table["C"][0] is ma.masked
    assert_almost_equal(table["C"].data.data[0], 999.0)
    assert_almost_equal(table["C"][1], -3.4)
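A condensed sketch of the same ``fill_values`` mechanics with the public API (values invented): the matched sentinel is replaced and the affected entries are masked.

from astropy.io import ascii

text = "a,b\n-999,1\n2,-999"
dat = ascii.read(text, format="csv", fill_values=[("-999", "0")])
assert dat["a"].mask[0] and dat["b"].mask[1]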
def test_fill_include_exclude_names(read_csv):
    """
    fill_include_names and fill_exclude_names should filter missing/empty
    value handling in the same way that include_names and exclude_names
    filter output columns.
    """
    text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
    table = read_csv(text, fill_include_names=["A", "B"])
    assert table["A"][0] is ma.masked
    assert table["B"][1] is ma.masked
    assert table["C"][2] is not ma.masked  # C not in fill_include_names

    table = read_csv(text, fill_exclude_names=["A", "B"])
    assert table["C"][2] is ma.masked
    assert table["A"][0] is not ma.masked
    assert table["B"][1] is not ma.masked  # A and B excluded from fill handling

    table = read_csv(text, fill_include_names=["A", "B"], fill_exclude_names=["B"])
    assert table["A"][0] is ma.masked
    # fill_exclude_names applies after fill_include_names
    assert table["B"][1] is not ma.masked
    assert table["C"][2] is not ma.masked
def test_many_rows(read_basic):
    """
    Make sure memory reallocation works okay when the number of rows
    is large (so that each column string is longer than INITIAL_COL_SIZE).
    """
    text = "A B C\n"
    for i in range(500):  # create 500 rows
        text += " ".join([str(i) for i in range(3)])
        text += "\n"

    table = read_basic(text)
    expected = Table([[0] * 500, [1] * 500, [2] * 500], names=("A", "B", "C"))
    assert_table_equal(table, expected)
def test_many_columns(read_basic): """ Make sure memory reallocation works okay when the number of columns is large (so that each header string is longer than INITIAL_HEADER_SIZE). """ # create a string with 500 columns and two data rows text = " ".join([str(i) for i in range(500)]) text += "\n" + text + "\n" + text table = read_basic(text) expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)]) assert_table_equal(table, expected)
def test_fast_reader(): """ Make sure that ascii.read() works as expected by default and with fast_reader specified. """ text = "a b c\n1 2 3\n4 5 6" with pytest.raises(ParameterError): # C reader can't handle regex comment ascii.read(text, format="fast_basic", guess=False, comment="##") # Enable the fast converter ascii.read( text, format="basic", guess=False, fast_reader={"use_fast_converter": True} ) # Should raise an error if fast_reader has an invalid key with pytest.raises(FastOptionsError): ascii.read(text, format="fast_basic", guess=False, fast_reader={"foo": True}) # Use the slow reader instead ascii.read(text, format="basic", guess=False, comment="##", fast_reader=False) # Will try the slow reader afterwards by default ascii.read(text, format="basic", guess=False, comment="##")
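# Illustrative sketch (not part of the original suite): fast_reader accepts
# True/False/'force' or an options dict. This mirrors the dict form used in
# the test above, on invented data.
def _demo_fast_reader_options():
    from astropy.io import ascii

    text = "a b c\n1 2 3\n4 5 6"
    # Request the C reader with its fast float converter enabled; this trades
    # the last bit of precision for speed.
    t = ascii.read(
        text, format="basic", guess=False, fast_reader={"use_fast_converter": True}
    )
    assert len(t) == 2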
def test_read_tab(read_tab): """ The fast reader for tab-separated values should not strip whitespace, unlike the basic reader. """ text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t ' table = read_tab(text) assert_equal(table["1"][0], " a") # preserve line whitespace assert_equal(table["2"][0], " b ") # preserve field whitespace assert table["3"][0] is ma.masked # empty value should be masked assert_equal(table["2"][1], " d\n e") # preserve whitespace in quoted fields assert_equal(table["3"][1], " ")
def test_default_data_start(read_basic):
    """
    If data_start is not explicitly passed to read(), data processing
    should begin right after the header.
    """
    text = "ignore this line\na b c\n1 2 3\n4 5 6"
    table = read_basic(text, header_start=1)
    expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c"))
    assert_table_equal(table, expected)
def test_commented_header(read_commented_header): """ The FastCommentedHeader reader should mimic the behavior of the CommentedHeader by overriding the default header behavior of FastBasic. """ text = """ # A B C 1 2 3 4 5 6 """ t1 = read_commented_header(text) expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C")) assert_table_equal(t1, expected) text = "# first commented line\n # second commented line\n\n" + text t2 = read_commented_header(text, header_start=2, data_start=0) assert_table_equal(t2, expected) # negative indexing allowed t3 = read_commented_header(text, header_start=-1, data_start=0) assert_table_equal(t3, expected) text += "7 8 9" t4 = read_commented_header(text, header_start=2, data_start=2) expected = Table([[7], [8], [9]], names=("A", "B", "C")) assert_table_equal(t4, expected) with pytest.raises(ParameterError): # data_start cannot be negative read_commented_header( text, header_start=-1, data_start=-1, )
def test_rdb(read_rdb): """ Make sure the FastRdb reader works as expected. """ text = """ A\tB\tC 1n\tS\t4N 1\t 9\t4.3 """ table = read_rdb(text) expected = Table([[1], [" 9"], [4.3]], names=("A", "B", "C")) assert_table_equal(table, expected) assert_equal(table["A"].dtype.kind, "i") assert table["B"].dtype.kind in ("S", "U") assert_equal(table["C"].dtype.kind, "f") with pytest.raises(ValueError) as e: text = "A\tB\tC\nN\tS\tN\n4\tb\ta" # C column contains non-numeric data read_rdb( text, ) assert "Column C failed to convert" in str(e.value) with pytest.raises(ValueError) as e: text = "A\tB\tC\nN\tN\n1\t2\t3" # not enough types specified read_rdb( text, ) assert "mismatch between number of column names and column types" in str(e.value) with pytest.raises(ValueError) as e: text = "A\tB\tC\nN\tN\t5\n1\t2\t3" # invalid type for column C read_rdb( text, ) assert "type definitions do not all match [num](N|S)" in str(e.value)
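# Illustrative sketch (not part of the original suite): the RDB layout the
# test above relies on, i.e. a tab-separated table whose second line declares
# column types ('N' numeric, 'S' string, optionally prefixed by a width).
# Data invented.
def _demo_rdb_layout():
    from astropy.io import ascii

    rdb_text = "name\tvalue\nS\tN\nalpha\t1.5\nbeta\t2.5\n"
    t = ascii.read(rdb_text, format="rdb")
    assert t["value"].dtype.kind == "f"  # 'N' forces numeric conversion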
def test_data_start(read_basic): """ Make sure that data parsing begins at data_start (ignoring empty and commented lines but not taking quoted values into account). """ text = """ A B C 1 2 3 4 5 6 7 8 "9 1" # comment 10 11 12 """ table = read_basic(text, data_start=2) expected = Table( [[4, 7, 10], [5, 8, 11], ["6", "9\n1", "12"]], names=("A", "B", "C") ) assert_table_equal(table, expected) table = read_basic(text, data_start=3) # ignore empty line expected = Table([[7, 10], [8, 11], ["9\n1", "12"]], names=("A", "B", "C")) assert_table_equal(table, expected) with pytest.raises(InconsistentTableError) as e: # tries to begin in the middle of quoted field read_basic( text, data_start=4, ) assert "header columns (3) inconsistent with data columns in data line 0" in str( e.value ) table = read_basic(text, data_start=5) # ignore commented line expected = Table([[10], [11], [12]], names=("A", "B", "C")) assert_table_equal(table, expected) text = """ A B C 1 2 3 4 5 6 7 8 9 # comment 10 11 12 """
def test_quoted_empty_values(read_basic): """ Quoted empty values spanning multiple lines should be treated correctly. """ text = 'a b c\n1 2 " \n "' table = read_basic(text) assert table["c"][0] == "\n"
def test_csv_comment_default(read_csv): """ Unless the comment parameter is specified, the CSV reader should not treat any lines as comments. """ text = "a,b,c\n#1,2,3\n4,5,6" table = read_csv(text) expected = Table([["#1", "4"], [2, 5], [3, 6]], names=("a", "b", "c")) assert_table_equal(table, expected)
def test_whitespace_before_comment(read_tab): """ Readers that don't strip whitespace from data (Tab, RDB) should still treat lines with leading whitespace and then the comment char as comment lines. """ text = "a\tb\tc\n # comment line\n1\t2\t3" table = read_tab(text) expected = Table([[1], [2], [3]], names=("a", "b", "c")) assert_table_equal(table, expected)
def test_strip_line_trailing_whitespace(read_basic): """ Readers that strip whitespace from lines should ignore trailing whitespace after the last data value of each row. """ text = "a b c\n1 2 \n3 4 5" with pytest.raises(InconsistentTableError) as e: ascii.read(StringIO(text), format="fast_basic", guess=False) assert "header columns (3) inconsistent with data columns in data line 0" in str( e.value ) text = "a b c\n 1 2 3 \t \n 4 5 6 " table = read_basic(text) expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c")) assert_table_equal(table, expected)
def test_no_data(read_basic): """ As long as column names are supplied, the C reader should return an empty table in the absence of data. """ table = read_basic("a b c") expected = Table([[], [], []], names=("a", "b", "c")) assert_table_equal(table, expected) table = read_basic("a b c\n1 2 3", data_start=2) assert_table_equal(table, expected)
def test_line_endings(read_basic, read_commented_header, read_rdb): """ Make sure the fast reader accepts CR and CR+LF as newlines. """ text = "a b c\n1 2 3\n4 5 6\n7 8 9\n" expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=("a", "b", "c")) for newline in ("\r\n", "\r"): table = read_basic( text.replace("\n", newline), ) assert_table_equal(table, expected) # Make sure the splitlines() method of FileString # works with CR/CR+LF line endings text = "#" + text for newline in ("\r\n", "\r"): table = read_commented_header( text.replace("\n", newline), ) assert_table_equal(table, expected) expected = Table( [MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])], names=("a", "b", "c"), ) expected["a"][0] = np.ma.masked expected["c"][0] = np.ma.masked text = "a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n" for newline in ("\r\n", "\r"): table = read_rdb( text.replace("\n", newline), ) assert_table_equal(table, expected) assert np.all(table == expected)
def test_store_comments(read_basic): """ Make sure that the output Table produced by the fast reader stores any comment lines in its meta attribute. """ text = """ # header comment a b c # comment 2 # comment 3 1 2 3 4 5 6 """ table = read_basic(text, check_meta=True) assert_equal(table.meta["comments"], ["header comment", "comment 2", "comment 3"])
def test_empty_quotes(read_basic): """ Make sure the C reader doesn't segfault when the input data contains empty quotes. [#3407] """ table = read_basic('a b\n1 ""\n2 ""') expected = Table([[1, 2], [0, 0]], names=("a", "b")) assert_table_equal(table, expected)
def test_fast_tab_with_names(read_tab): """ Make sure the C reader doesn't segfault when the header for the first column is missing [#3545] """ content = """# \tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot -3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" head = [f"A{i}" for i in range(28)] read_tab(content, data_start=1, names=head)
def test_read_big_table(tmp_path):
    """Test reading of a huge file.

    This test generates a huge CSV file (~2.3Gb) before reading it (see
    https://github.com/astropy/astropy/pull/5319). The test is run only if
    the ``--run-hugemem`` cli option is given. Note that running the test
    requires quite a lot of memory (~18Gb when reading the file)!
    """
    NB_ROWS = 250000
    NB_COLS = 500
    filename = tmp_path / "big_table.csv"

    print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).")
    data = np.random.random(NB_ROWS)
    t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)])
    data = None

    print(f"Saving the table to {filename}")
    t.write(filename, format="ascii.csv", overwrite=True)
    t = None

    print(
        f"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)."
    )
    with open(filename) as f:
        assert sum(1 for line in f) == NB_ROWS + 1

    print("Reading the file with astropy.")
    t = Table.read(filename, format="ascii.csv", fast_reader=True)
    assert len(t) == NB_ROWS
def test_read_big_table2(tmp_path):
    """Test reading of a file with a huge column."""
    # (2**32 // 2) : max value for a 32-bit signed int
    # // 10 : we use a value for rows that have 10 chars (1e9)
    # + 5 : add a few lines so the length cannot be stored by an int
    NB_ROWS = 2**32 // 2 // 10 + 5
    filename = tmp_path / "big_table.csv"

    print(f"Creating a {NB_ROWS} rows table.")
    data = np.full(NB_ROWS, int(1e9), dtype=np.int32)
    t = Table(data=[data], names=["a"], copy=False)

    print(f"Saving the table to {filename}")
    t.write(filename, format="ascii.csv", overwrite=True)
    t = None

    print(
        f"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)."
    )
    with open(filename) as f:
        assert sum(1 for line in f) == NB_ROWS + 1

    print("Reading the file with astropy.")
    t = Table.read(filename, format="ascii.csv", fast_reader=True)
    assert len(t) == NB_ROWS
def test_data_out_of_range(fast_reader, guess): """ Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|) shall be returned as 0 and +-inf respectively by the C parser, just like the Python parser. Test fast converter only to nominal accuracy. """ # Python reader and strtod() are expected to return precise results rtol = 1.0e-30 # Update fast_reader dict; adapt relative precision for fast_converter if fast_reader: if fast_reader.get("use_fast_converter"): rtol = 1.0e-15 elif sys.maxsize < 2**32: # On 32bit the standard C parser (strtod) returns strings for these pytest.xfail("C parser cannot handle float64 on 32bit systems") if not fast_reader: ctx = nullcontext() else: ctx = pytest.warns() fields = ["10.1E+199", "3.14e+313", "2048e+306", "0.6E-325", "-2.e345"] values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf]) # NOTE: Warning behavior varies for the parameters being passed in. with ctx as w: t = ascii.read( StringIO(" ".join(fields)), format="no_header", guess=guess, fast_reader=fast_reader, ) if fast_reader: # Assert precision warnings for cols 2-5 assert len(w) == 4 for i in range(len(w)): assert f"OverflowError converting to FloatType in column col{i+2}" in str( w[i].message ) read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324) # Test some additional corner cases fields = [ ".0101E202", "0.000000314E+314", "1777E+305", "-1799E+305", "0.2e-323", "5200e-327", " 0.0000000000000000000001024E+330", ] values = np.array( [1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308] ) with ctx as w: t = ascii.read( StringIO(" ".join(fields)), format="no_header", guess=guess, fast_reader=fast_reader, ) if fast_reader: # Assert precision warnings for cols 4-6 if sys.platform == "win32" and not fast_reader.get("use_fast_converter"): assert len(w) == 2 else: assert len(w) == 3 for i in range(len(w)): assert f"OverflowError converting to FloatType in column col{i+4}" in str( w[i].message ) read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324) # Test corner cases again with non-standard exponent_style (auto-detection) if fast_reader and fast_reader.get("use_fast_converter"): fast_reader.update({"exponent_style": "A"}) else: pytest.skip("Fortran exponent style only available in fast converter") fields = [ ".0101D202", "0.000000314d+314", "1777+305", "-1799E+305", "0.2e-323", "2500-327", " 0.0000000000000000000001024Q+330", ] with ctx as w: t = ascii.read( StringIO(" ".join(fields)), format="no_header", guess=guess, fast_reader=fast_reader, ) if fast_reader: # CI Windows identifies as "win32" but has 64 bit compiler; # its `strtod` not emitting certain warnings. if sys.platform == "win32" and not fast_reader.get("use_fast_converter"): assert len(w) == 2 else: assert len(w) == 3 read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324)
def test_data_at_range_limit(fast_reader, guess): """ Test parsing of fixed-format float64 numbers near range limits (|~4.94e-324 to 1.7977e+308|) - within limit for full precision (|~2.5e-307| for strtod C parser, factor 10 better for fast_converter) exact numbers shall be returned, beyond that an Overflow warning raised. Input of exactly 0.0 must not raise an OverflowError. """ # Python reader and strtod() are expected to return precise results rtol = 1.0e-30 # CI "win32" 64 bit compiler with `strtod` not emitting certain warnings. if sys.platform == "win32": ctx = nullcontext() else: ctx = pytest.warns() # Update fast_reader dict; adapt relative precision for fast_converter if fast_reader: # `xstrtod` behaves the same on win32 if fast_reader.get("use_fast_converter"): rtol = 1.0e-15 ctx = pytest.warns() elif sys.maxsize < 2**32: # On 32bit the standard C parser (strtod) returns strings for these pytest.xfail("C parser cannot handle float64 on 32bit systems") # Test very long fixed-format strings (to strtod range limit w/o Overflow) for D in 99, 202, 305: t = ascii.read( StringIO(99 * "0" + "." + D * "0" + "1"), format="no_header", guess=guess, fast_reader=fast_reader, ) assert_almost_equal(t["col1"][0], 10.0 ** -(D + 1), rtol=rtol, atol=1.0e-324) for D in 99, 202, 308: t = ascii.read( StringIO("1" + D * "0" + ".0"), format="no_header", guess=guess, fast_reader=fast_reader, ) assert_almost_equal(t["col1"][0], 10.0**D, rtol=rtol, atol=1.0e-324) # 0.0 is always exact (no Overflow warning)! for s in "0.0", "0.0e+0", 399 * "0" + "." + 365 * "0": t = ascii.read( StringIO(s), format="no_header", guess=guess, fast_reader=fast_reader ) assert t["col1"][0] == 0.0 # Test OverflowError at precision limit with laxer rtol if not fast_reader: pytest.skip("Python/numpy reader does not raise on Overflow") with ctx as w: t = ascii.read( StringIO("0." + 314 * "0" + "1"), format="no_header", guess=guess, fast_reader=fast_reader, ) if not isinstance(ctx, nullcontext): assert len(w) == 1, f"Expected 1 warning, found {len(w)}" assert ( "OverflowError converting to FloatType in column col1, possibly " "resulting in degraded precision" in str(w[0].message) ) assert_almost_equal(t["col1"][0], 1.0e-315, rtol=1.0e-10, atol=1.0e-324)
def test_int_out_of_range(guess): """ Integer numbers outside int range shall be returned as string columns consistent with the standard (Python) parser (no 'upcasting' to float). """ imin = np.iinfo(np.int64).min + 1 imax = np.iinfo(np.int64).max - 1 huge = f"{imax+2:d}" text = f"P M S\n {imax:d} {imin:d} {huge:s}" expected = Table([[imax], [imin], [huge]], names=("P", "M", "S")) # NOTE: Warning behavior varies for the parameters being passed in. with pytest.warns() as w: table = ascii.read( text, format="basic", guess=guess, ) assert_table_equal(table, expected) # Check with leading zeroes to make sure strtol does not read them as octal text = f"P M S\n000{imax:d} -0{-imin:d} 00{huge:s}" expected = Table([[imax], [imin], ["00" + huge]], names=("P", "M", "S")) with pytest.warns() as w: table = ascii.read(text, format="basic", guess=guess) assert_table_equal(table, expected)
def test_int_out_of_order(guess): """ Mixed columns should be returned as float, but if the out-of-range integer shows up first, it will produce a string column - with both readers. """ imax = np.iinfo(np.int64).max - 1 text = f"A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7" expected = Table([[12.3, 10.0 * imax], [f"{imax:d}0", "45.6e7"]], names=("A", "B")) with pytest.warns( AstropyWarning, match=r"OverflowError converting to " r"IntType in column B, reverting to String", ): table = ascii.read(text, format="basic", guess=guess, fast_reader=True) assert_table_equal(table, expected) with pytest.warns( AstropyWarning, match=r"OverflowError converting to " r"IntType in column B, reverting to String", ): table = ascii.read(text, format="basic", guess=guess, fast_reader=False) assert_table_equal(table, expected)
def test_fortran_reader(guess):
    """
    Make sure that ascii.read() can read Fortran-style exponential notation
    using the fast_reader.
    """
    # Check for nominal np.float64 precision
    rtol = 1.0e-15
    atol = 0.0
    text = (
        "A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n"
        " 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309"
    )
    expc = Table(
        [[1.0001e101, 0.42], [2, 0.5], [2.0e-103, 6.0e3], [3, 1.7e307]],
        names=("A", "B", "C", "D"),
    )

    expstyles = {
        "e": 6 * "E",
        "D": ("D", "d", "d", "D", "d", "D"),
        "Q": 3 * ("q", "Q"),
        "Fortran": ("E", "0", "D", "Q", "d", "0"),
    }

    # C strtod (not-fast converter) can't handle Fortran exp
    with pytest.raises(FastOptionsError) as e:
        ascii.read(
            text.format(*(6 * "D")),
            format="basic",
            guess=guess,
            fast_reader={
                "use_fast_converter": False,
                "exponent_style": "D",
            },
        )
    assert "fast_reader: exponent_style requires use_fast_converter" in str(e.value)

    # Enable the fast converter and iterate over all style-exponent
    # combinations, with auto-detection
    for s, c in expstyles.items():
        table = ascii.read(
            text.format(*c),
            guess=guess,
            fast_reader={"exponent_style": s},
        )
        assert_table_equal(table, expc, rtol=rtol, atol=atol)

    # Additional corner-case checks including triple-exponents without
    # any character and mixed whitespace separators
    text = (
        "A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n "
        "0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330"
    )
    table = ascii.read(text, guess=guess, fast_reader={"exponent_style": "A"})
    assert_table_equal(table, expc, rtol=rtol, atol=atol)
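# Illustrative sketch (not part of the original suite): reading Fortran-style
# 'D' exponents with the fast converter, on invented data. Passing
# exponent_style implicitly enables use_fast_converter, as the test above
# verifies indirectly.
def _demo_fortran_exponents():
    import numpy as np

    from astropy.io import ascii

    text = "A B\n1.0D3 -2.5d-2"
    t = ascii.read(
        text, format="basic", guess=False, fast_reader={"exponent_style": "D"}
    )
    # Both 'D' and 'd' are accepted as exponent characters.
    assert np.isclose(t["A"][0], 1.0e3) and np.isclose(t["B"][0], -2.5e-2)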
def test_fortran_invalid_exp(guess): """ Test Fortran-style exponential notation in the fast_reader with invalid exponent-like patterns (no triple-digits) to make sure they are returned as strings instead, as with the standard C parser. """ formats = {"basic": " ", "tab": "\t", "csv": ","} header = ["S1", "F2", "S2", "F3", "S3", "F4", "F5", "S4", "I1", "F6", "F7"] # Tested entries and expected returns, first for auto-detect, # then for different specified exponents # fmt: off fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.', '2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314'] vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, '4.2-022', 1.45e308] vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314'] vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308] vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, '4.2-022', 1.45e308] # fmt: on # Iterate over supported format types and separators for f, s in formats.items(): t1 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), format=f, guess=guess, fast_reader={"exponent_style": "A"}, ) assert_table_equal(t1, Table([[col] for col in vals_a], names=header)) # Non-basic separators require guessing enabled to be detected if guess: formats["bar"] = "|" else: formats = {"basic": " "} for s in formats.values(): t2 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"exponent_style": "a"}, ) assert_table_equal(t2, Table([[col] for col in vals_a], names=header)) # Iterate for (default) expchar 'E' for s in formats.values(): t3 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"use_fast_converter": True}, ) assert_table_equal(t3, Table([[col] for col in vals_e], names=header)) # Iterate for expchar 'D' for s in formats.values(): t4 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"exponent_style": "D"}, ) assert_table_equal(t4, Table([[col] for col in vals_d], names=header)) # Iterate for regular converter (strtod) for s in formats.values(): t5 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"use_fast_converter": False}, ) read_values = [col[0] for col in t5.itercols()] if os.name == "nt": # Apparently C strtod() on (some?) MSVC recognizes 'd' exponents! assert read_values in (vals_v, vals_e) else: assert read_values == vals_e
def test_fortran_reader_notbasic(): """ Check if readers without a fast option raise a value error when a fast_reader is asked for (implies the default 'guess=True'). """ tabstr = dedent( """ a b 1 1.23D4 2 5.67D-8 """ )[1:-1] t1 = ascii.read(tabstr.split("\n"), fast_reader={"exponent_style": "D"}) assert t1["b"].dtype.kind == "f" tabrdb = dedent( """ a\tb # A simple RDB table N\tN 1\t 1.23D4 2\t 5.67-008 """ )[1:-1] t2 = ascii.read( tabrdb.split("\n"), format="rdb", fast_reader={"exponent_style": "fortran"} ) assert t2["b"].dtype.kind == "f" tabrst = dedent( """ = ======= a b = ======= 1 1.23E4 2 5.67E-8 = ======= """ )[1:-1] t3 = ascii.read(tabrst.split("\n"), format="rst") assert t3["b"].dtype.kind == "f" t4 = ascii.read(tabrst.split("\n"), guess=True) assert t4["b"].dtype.kind == "f" # In the special case of fast_converter=True (the default), # incompatibility is ignored t5 = ascii.read(tabrst.split("\n"), format="rst", fast_reader=True) assert t5["b"].dtype.kind == "f" with pytest.raises(ParameterError): ascii.read(tabrst.split("\n"), format="rst", guess=False, fast_reader="force") with pytest.raises(ParameterError): ascii.read( tabrst.split("\n"), format="rst", guess=False, fast_reader={"use_fast_converter": False}, ) tabrst = tabrst.replace("E", "D") with pytest.raises(ParameterError): ascii.read( tabrst.split("\n"), format="rst", guess=False, fast_reader={"exponent_style": "D"}, )
def test_dict_kwarg_integrity(fast_reader, guess): """ Check if dictionaries passed as kwargs (fast_reader in this test) are left intact by ascii.read() """ expstyle = fast_reader.get("exponent_style", "E") fields = ["10.1D+199", "3.14d+313", "2048d+306", "0.6D-325", "-2.d345"] ascii.read(StringIO(" ".join(fields)), guess=guess, fast_reader=fast_reader) assert fast_reader.get("exponent_style", None) == expstyle
Test for reading a "basic" format table that has no data but has comments. Tests the fix for #8267.
def test_read_empty_basic_table_with_comments(fast_reader): """ Test for reading a "basic" format table that has no data but has comments. Tests the fix for #8267. """ dat = """ # comment 1 # comment 2 col1 col2 """ t = ascii.read(dat, fast_reader=fast_reader) assert t.meta["comments"] == ["comment 1", "comment 2"] assert len(t) == 0 assert t.colnames == ["col1", "col2"]
def test_conversion_fast(fast_reader): """ The reader should try to convert each column to ints. If this fails, the reader should try to convert to floats. Failing this, i.e. on parsing non-numeric input including isolated positive/negative signs, it should fall back to strings. """ text = """ A B C D E F G H 1 a 3 4 5 6 7 8 2. 1 9 -.1e1 10.0 8.7 6 -5.3e4 4 2 -12 .4 +.e1 - + six """ table = ascii.read(text, fast_reader=fast_reader) assert_equal(table["A"].dtype.kind, "f") assert table["B"].dtype.kind in ("S", "U") assert_equal(table["C"].dtype.kind, "i") assert_equal(table["D"].dtype.kind, "f") assert table["E"].dtype.kind in ("S", "U") assert table["F"].dtype.kind in ("S", "U") assert table["G"].dtype.kind in ("S", "U") assert table["H"].dtype.kind in ("S", "U")
def test_newline_as_delimiter(delimiter, fast_reader): """ Check that newline characters are correctly handled as delimiters. Tests the fix for #9928. """ if delimiter == "\r": eol = "\n" else: eol = "\r" inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "] inp1 = ( f"a {delimiter:s} b {delimiter:s}c{eol:s} 1 {delimiter:s}'2'{delimiter:s} 3.0" ) inp2 = [f"a {delimiter} b{delimiter} c", f"1{delimiter} '2' {delimiter} 3.0"] t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader) t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader) t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader) assert t1.colnames == t2.colnames == ["a", "b", "c"] assert len(t1) == len(t2) == 1 assert t1["b"].dtype.kind in ("S", "U") assert t2["b"].dtype.kind in ("S", "U") assert_table_equal(t1, t0) assert_table_equal(t2, t0) inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format("|", eol) inp1 = ( f'a {delimiter:s} b {delimiter:s} c{eol:s} 1 {delimiter:s}"2"{delimiter:s} 3.0' ) t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader) t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader) if not fast_reader: pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter") assert_equal(t1["b"].dtype.kind, "i")
def test_single_line_string(delimiter, fast_reader): """ String input without a newline character is interpreted as filename, unless element of an iterable. Maybe not logical, but test that it is at least treated consistently. """ expected = Table([[1], [2], [3.00]], names=("col1", "col2", "col3")) text = f"1{delimiter:s}2{delimiter:s}3.0" if delimiter in ("\r", "\n"): t1 = ascii.read( text, format="no_header", delimiter=delimiter, fast_reader=fast_reader ) assert_table_equal(t1, expected) else: # Windows raises OSError, but not the other OSes. with pytest.raises((FileNotFoundError, OSError)): t1 = ascii.read( text, format="no_header", delimiter=delimiter, fast_reader=fast_reader ) t2 = ascii.read( [text], format="no_header", delimiter=delimiter, fast_reader=fast_reader ) assert_table_equal(t2, expected)
def test_write_simple(): """ Write a simple table with common types. This shows the compact version of serialization with one line per column. """ t = simple_table() out = StringIO() t.write(out, format="ascii.ecsv") assert out.getvalue().splitlines() == SIMPLE_LINES
def test_write_full():
    """
    Write a full-featured table with common types and explicitly check the output
    """
    t = T_DTYPES["bool", "int64", "float64", "str"]
    lines = [
        "# %ECSV 1.0",
        "# ---",
        "# datatype:",
        "# - name: bool",
        "#   unit: m / s",
        "#   datatype: bool",
        "#   description: descr_bool",
        "#   meta: !!omap",
        "#   - {meta bool: 1}",
        "#   - {a: 2}",
        "# - name: int64",
        "#   unit: m / s",
        "#   datatype: int64",
        "#   description: descr_int64",
        "#   meta: !!omap",
        "#   - {meta int64: 1}",
        "#   - {a: 2}",
        "# - name: float64",
        "#   unit: m / s",
        "#   datatype: float64",
        "#   description: descr_float64",
        "#   meta: !!omap",
        "#   - {meta float64: 1}",
        "#   - {a: 2}",
        "# - name: str",
        "#   unit: m / s",
        "#   datatype: string",
        "#   description: descr_str",
        "#   meta: !!omap",
        "#   - {meta str: 1}",
        "#   - {a: 2}",
        "# meta: !!omap",
        "# - comments: [comment1, comment2]",
        "# - {a: 3}",
        "# schema: astropy-2.0",
        "bool int64 float64 str",
        'False 0 0.0 "ab 0"',
        'True 1 1.0 "ab, 1"',
        "False 2 2.0 ab2",
    ]

    out = StringIO()
    t.write(out, format="ascii.ecsv")
    assert out.getvalue().splitlines() == lines
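# Illustrative sketch (not part of the original suite): the smallest ECSV
# header that round-trips through the reader, showing the structure asserted
# line-by-line above. The column names and data are invented.
def _demo_minimal_ecsv():
    from astropy.table import Table

    txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64}
# - {name: b, datatype: float64}
# schema: astropy-2.0
a b
1 2.0
"""
    t = Table.read(txt, format="ascii.ecsv")
    assert t["a"].dtype.kind == "i" and t["b"].dtype.kind == "f"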
def test_write_read_roundtrip(): """ Write a full-featured table with all types and see that it round-trips on readback. Use both space and comma delimiters. """ t = T_DTYPES for delimiter in DELIMITERS: out = StringIO() t.write(out, format="ascii.ecsv", delimiter=delimiter) t2s = [ Table.read(out.getvalue(), format="ascii.ecsv"), Table.read(out.getvalue(), format="ascii"), ascii.read(out.getvalue()), ascii.read(out.getvalue(), format="ecsv", guess=False), ascii.read(out.getvalue(), format="ecsv"), ] for t2 in t2s: assert t.meta == t2.meta for name in t.colnames: assert t[name].attrs_equal(t2[name]) assert np.all(t[name] == t2[name])
def test_bad_delimiter(): """ Passing a delimiter other than space or comma gives an exception """ out = StringIO() with pytest.raises(ValueError) as err: T_DTYPES.write(out, format="ascii.ecsv", delimiter="|") assert "only space and comma are allowed" in str(err.value)
def test_bad_header_start(): """ Bad header without initial # %ECSV x.x """ lines = copy.copy(SIMPLE_LINES) lines[0] = "# %ECV 0.9" with pytest.raises(ascii.InconsistentTableError): Table.read("\n".join(lines), format="ascii.ecsv", guess=False)
def test_bad_delimiter_input(): """ Illegal delimiter in input """ lines = copy.copy(SIMPLE_LINES) lines.insert(2, "# delimiter: |") with pytest.raises(ValueError) as err: Table.read("\n".join(lines), format="ascii.ecsv", guess=False) assert "only space and comma are allowed" in str(err.value)
def test_multidim_input(): """ Multi-dimensional column in input """ t = Table() t["a"] = np.arange(24).reshape(2, 3, 4) t["a"].info.description = "description" t["a"].info.meta = {1: 2} t["b"] = [1, 2] out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format="ascii.ecsv") assert np.all(t2["a"] == t["a"]) assert t2["a"].shape == t["a"].shape assert t2["a"].dtype == t["a"].dtype assert t2["a"].info.description == t["a"].info.description assert t2["a"].info.meta == t["a"].info.meta assert np.all(t2["b"] == t["b"])
def test_structured_input(): """ Structured column in input. """ t = Table() # Add unit, description and meta to make sure that round-trips as well. t["a"] = Column( [("B", (1.0, [2.0, 3.0])), ("A", (9.0, [8.0, 7.0]))], dtype=[("s", "U1"), ("v", [("p0", "f8"), ("p1", "2f8")])], description="description", format=">", # Most formats do not work with structured! unit="m", # Overall unit should round-trip. meta={1: 2}, ) t["b"] = Column( [[(1.0, 2.0), (9.0, 8.0)], [(3.0, 4.0), (7.0, 6.0)]], dtype="f8,f8", unit=u.Unit("m,s"), # Per part unit should round-trip too. ) out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format="ascii.ecsv") for col in t.colnames: assert np.all(t2[col] == t[col]) assert t2[col].shape == t[col].shape assert t2[col].dtype == t[col].dtype assert t2[col].unit == t[col].unit assert t2[col].format == t[col].format assert t2[col].info.description == t[col].info.description assert t2[col].info.meta == t[col].info.meta
def test_round_trip_empty_table(): """Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)""" t = Table(dtype=[bool, "i", "f"], names=["a", "b", "c"]) out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format="ascii.ecsv") assert t.dtype == t2.dtype assert len(t2) == 0
def test_csv_ecsv_colnames_mismatch(): """ Test that mismatch in column names from normal CSV header vs. ECSV YAML header raises the expected exception. """ lines = copy.copy(SIMPLE_LINES) header_index = lines.index("a b c") lines[header_index] = "a b d" with pytest.raises(ValueError) as err: ascii.read(lines, format="ecsv") assert "column names from ECSV header ['a', 'b', 'c']" in str(err.value)
def test_regression_5604(): """ See https://github.com/astropy/astropy/issues/5604 for more. """ t = Table() t.meta = {"foo": 5 * u.km, "foo2": u.s} t["bar"] = [7] * u.km out = StringIO() t.write(out, format="ascii.ecsv") assert "!astropy.units.Unit" in out.getvalue() assert "!astropy.units.Quantity" in out.getvalue()
def test_ecsv_mixins_ascii_read_class(): """Ensure that ascii.read(ecsv_file) returns the correct class (QTable if any Quantity subclasses, Table otherwise). """ # Make a table with every mixin type except Quantities t = QTable( { name: col for name, col in mixin_cols.items() if not isinstance(col.info, QuantityInfo) } ) out = StringIO() t.write(out, format="ascii.ecsv") t2 = ascii.read(out.getvalue(), format="ecsv") assert type(t2) is Table # Add a single quantity column t["lon"] = mixin_cols["lon"] out = StringIO() t.write(out, format="ascii.ecsv") t2 = ascii.read(out.getvalue(), format="ecsv") assert type(t2) is QTable
def test_ecsv_mixins_qtable_to_table(): """Test writing as QTable and reading as Table. Ensure correct classes come out. """ names = sorted(mixin_cols) t = QTable([mixin_cols[name] for name in names], names=names) out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format="ascii.ecsv") assert t.colnames == t2.colnames for name, col in t.columns.items(): col2 = t2[name] attrs = compare_attrs[name] compare_class = True if isinstance(col.info, QuantityInfo): # Downgrade Quantity to Column + unit assert type(col2) is Column # Class-specific attributes like `value` or `wrap_angle` are lost. attrs = ["unit"] compare_class = False # Compare data values here (assert_objects_equal doesn't know how in this case) assert np.allclose(col.value, col2, rtol=1e-10) assert_objects_equal(col, col2, attrs, compare_class)
def test_ecsv_mixins_as_one(table_cls):
    """Test write/read all cols at once and validate intermediate column names"""
    names = sorted(mixin_cols)
    all_serialized_names = []
    # ECSV stores times as value by default, so we just get the column back.
    # One exception is tm3, which is set to serialize via jd1 and jd2.
    for name in names:
        s_names = serialized_names[name]
        if not name.startswith("tm3"):
            s_names = [
                s_name.replace(".jd1", "")
                for s_name in s_names
                if not s_name.endswith("jd2")
            ]
        all_serialized_names.extend(s_names)

    t = table_cls([mixin_cols[name] for name in names], names=names)

    out = StringIO()
    t.write(out, format="ascii.ecsv")
    t2 = table_cls.read(out.getvalue(), format="ascii.ecsv")

    assert t.colnames == t2.colnames

    # Read as an ascii.basic table (skip all the ECSV junk)
    t3 = table_cls.read(out.getvalue(), format="ascii.basic")
    assert t3.colnames == all_serialized_names
def make_multidim(col, ndim): """Take a col with length=2 and make it N-d by repeating elements. For the special case of ndim==1 just return the original. The output has shape [3] * ndim. By using 3 we can be sure that repeating the two input elements gives an output that is sufficiently unique for the multidim tests. """ if ndim > 1: import itertools idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3**ndim))] col = col[idxs].reshape([3] * ndim) return col
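# Illustrative sketch (not part of the original suite): what make_multidim
# produces for ndim=2 — the two input elements tiled into a (3, 3) array.
def _demo_make_multidim():
    import numpy as np

    col = make_multidim(np.array([1.0, 2.0]), 2)
    assert col.shape == (3, 3)
    # Elements alternate 1, 2, 1, ... in C order across the reshaped array.
    assert col.flat[0] == 1.0 and col.flat[1] == 2.0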
def test_ecsv_mixins_per_column(table_cls, name_col, ndim): """Test write/read one col at a time and do detailed validation. This tests every input column type as 1-d, 2-d and 3-d. """ name, col = name_col c = make_multidim(np.array([1.0, 2.0]), ndim) col = make_multidim(col, ndim) t = table_cls([c, col, c], names=["c1", name, "c2"]) t[name].info.description = "description" t[name].info.meta = {"b": 2, "a": 1} out = StringIO() t.write(out, format="ascii.ecsv") t2 = table_cls.read(out.getvalue(), format="ascii.ecsv") assert t.colnames == t2.colnames for colname in t.colnames: assert len(t2[colname].shape) == ndim if colname in ("c1", "c2"): compare = ["data"] else: # Storing Longitude as Column loses wrap_angle. compare = [ attr for attr in compare_attrs[colname] if not (attr == "wrap_angle" and table_cls is Table) ] assert_objects_equal(t[colname], t2[colname], compare) # Special case to make sure Column type doesn't leak into Time class data if name.startswith("tm"): assert t2[name]._time.jd1.__class__ is np.ndarray assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmp_path):
    """Test (mostly) round-trip of MaskedColumn through ECSV using default
    serialization that uses an empty string "" to mark NULL values.  Note:

    >>> simple_table(masked=True)
    <Table masked=True length=3>
      a      b     c
    int64 float64 str1
    ----- ------- ----
       --     1.0    c
        2     2.0   --
        3      --    e

    """
    filename = tmp_path / "test.ecsv"

    t = simple_table(masked=True)  # int, float, and str cols with one masked element
    t.write(filename)

    t2 = Table.read(filename)
    assert t2.masked is False
    assert t2.colnames == t.colnames
    for name in t2.colnames:
        # From a formal perspective the round-trip columns are the "same"
        assert np.all(t2[name].mask == t[name].mask)
        assert np.all(t2[name] == t[name])

        # But peeking under the mask shows that the underlying data are changed
        # because by default ECSV uses "" to represent masked elements.
        t[name].mask = False
        t2[name].mask = False
        assert not np.all(t2[name] == t[name])
def test_round_trip_masked_table_serialize_mask(tmp_path): """ Same as prev but set the serialize_method to 'data_mask' so mask is written out """ filename = tmp_path / "test.ecsv" t = simple_table(masked=True) # int, float, and str cols with one masked element t["c"][0] = "" # This would come back as masked for default "" NULL marker # MaskedColumn with no masked elements. See table the MaskedColumnInfo class # _represent_as_dict() method for info about how we test a column with no masked elements. t["d"] = [1, 2, 3] t.write(filename, serialize_method="data_mask") t2 = Table.read(filename) assert t2.masked is False assert t2.colnames == t.colnames for name in t2.colnames: assert np.all(t2[name].mask == t[name].mask) assert np.all(t2[name] == t[name]) # Data under the mask round-trips also (unmask data to show this). t[name].mask = False t2[name].mask = False assert np.all(t2[name] == t[name])
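# Illustrative sketch (not part of the original suite): contrasting the
# default serialization ('' marks masked entries, data under the mask is
# lost) with serialize_method='data_mask', which writes the masked data plus
# a separate mask column. The table values are invented.
def _demo_serialize_method():
    from io import StringIO

    from astropy.table import MaskedColumn, Table

    t = Table({"a": MaskedColumn([1, 2], mask=[True, False])})
    out_default, out_mask = StringIO(), StringIO()
    t.write(out_default, format="ascii.ecsv")
    t.write(out_mask, format="ascii.ecsv", serialize_method="data_mask")

    t2 = Table.read(out_mask.getvalue(), format="ascii.ecsv")
    # With 'data_mask' the value under the mask (1) survives the round-trip.
    t2["a"].mask = False
    assert t2["a"][0] == 1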
def test_ecsv_round_trip_user_defined_unit(table_cls, tmp_path): """Ensure that we can read-back enabled user-defined units.""" # Test adapted from #8897, where it was noted that this works # but was not tested. filename = tmp_path / "test.ecsv" unit = u.def_unit("bandpass_sol_lum") t = table_cls() t["l"] = np.arange(5) * unit t.write(filename) # without the unit enabled, get UnrecognizedUnit if table_cls is QTable: ctx = pytest.warns(u.UnitsWarning, match=r"'bandpass_sol_lum' did not parse .*") else: ctx = nullcontext() # Note: The read might also generate ResourceWarning, in addition to UnitsWarning with ctx: t2 = table_cls.read(filename) assert isinstance(t2["l"].unit, u.UnrecognizedUnit) assert str(t2["l"].unit) == "bandpass_sol_lum" if table_cls is QTable: assert np.all(t2["l"].value == t["l"].value) else: assert np.all(t2["l"] == t["l"]) # But with it enabled, it works. with u.add_enabled_units(unit): t3 = table_cls.read(filename) assert t3["l"].unit is unit assert np.all(t3["l"] == t["l"]) # Just to be sure, also try writing with unit enabled. filename2 = tmp_path / "test2.ecsv" t3.write(filename2) t4 = table_cls.read(filename) assert t4["l"].unit is unit assert np.all(t4["l"] == t["l"])
def test_multidim_unknown_subtype(subtype): """Test an ECSV file with a string type but unknown subtype""" txt = f"""\ # %ECSV 1.0 # --- # datatype: # - name: a # datatype: string # subtype: {subtype} # schema: astropy-2.0 a [1,2] [3,4]""" with pytest.warns( InvalidEcsvDatatypeWarning, match=rf"unexpected subtype '{subtype}' set for column 'a'", ): t = ascii.read(txt, format="ecsv") assert t["a"].dtype.kind == "U" assert t["a"][0] == "[1,2]"
def test_multidim_bad_shape(): """Test a malformed ECSV file""" txt = """\ # %ECSV 1.0 # --- # datatype: # - name: a # datatype: string # subtype: int64[3] # schema: astropy-2.0 a [1,2] [3,4]""" with pytest.raises( ValueError, match="column 'a' failed to convert: shape mismatch" ): Table.read(txt, format="ascii.ecsv")
def test_read_not_json_serializable(): """Test a malformed ECSV file""" txt = """\ # %ECSV 1.0 # --- # datatype: # - {name: a, datatype: string, subtype: json} # schema: astropy-2.0 a fail [3,4]""" match = "column 'a' failed to convert: column value is not valid JSON" with pytest.raises(ValueError, match=match): Table.read(txt, format="ascii.ecsv")
def test_read_bad_datatype(): """Test a malformed ECSV file""" txt = """\ # %ECSV 1.0 # --- # datatype: # - {name: a, datatype: object} # schema: astropy-2.0 a fail [3,4]""" with pytest.warns( InvalidEcsvDatatypeWarning, match="unexpected datatype 'object' of column 'a' is not in allowed", ): t = Table.read(txt, format="ascii.ecsv") assert t["a"][0] == "fail" assert type(t["a"][1]) is str assert type(t["a"].dtype) == np.dtype("O")
def test_read_complex(): """Test an ECSV v1.0 file with a complex column""" txt = """\ # %ECSV 1.0 # --- # datatype: # - {name: a, datatype: complex} # schema: astropy-2.0 a 1+1j 2+2j""" with pytest.warns( InvalidEcsvDatatypeWarning, match="unexpected datatype 'complex' of column 'a' is not in allowed", ): t = Table.read(txt, format="ascii.ecsv") assert t["a"].dtype.type is np.complex128
def test_read_str(): """Test an ECSV file with a 'str' instead of 'string' datatype""" txt = """\ # %ECSV 1.0 # --- # datatype: # - {name: a, datatype: str} # schema: astropy-2.0 a sometext S""" # also testing single character text with pytest.warns( InvalidEcsvDatatypeWarning, match="unexpected datatype 'str' of column 'a' is not in allowed", ): t = Table.read(txt, format="ascii.ecsv") assert isinstance(t["a"][1], str) assert isinstance(t["a"][0], np.str_)
def test_read_bad_datatype_for_object_subtype(): """Test a malformed ECSV file""" txt = """\ # %ECSV 1.0 # --- # datatype: # - {name: a, datatype: int64, subtype: json} # schema: astropy-2.0 a fail [3,4]""" match = "column 'a' failed to convert: datatype of column 'a' must be \"string\"" with pytest.raises(ValueError, match=match): Table.read(txt, format="ascii.ecsv")
def test_full_repr_roundtrip(): """Test round-trip of float values to full precision even with format specified""" t = Table() t["a"] = np.array([np.pi, 1 / 7], dtype=np.float64) t["a"].info.format = ".2f" out = StringIO() t.write(out, format="ascii.ecsv") t2 = Table.read(out.getvalue(), format="ascii.ecsv") assert np.all(t["a"] == t2["a"]) assert t2["a"].info.format == ".2f"