Dataset columns:
- identifier: string (length 1 to 155)
- parameters: string (length 2 to 6.09k)
- docstring: string (length 11 to 63.4k)
- docstring_summary: string (length 0 to 63.4k)
- function: string (length 29 to 99.8k)
- function_tokens: sequence
- start_point: sequence
- end_point: sequence
- language: string (1 class)
- docstring_language: string (length 2 to 7)
- docstring_language_predictions: string (length 18 to 23)
- is_langid_reliable: string (2 classes)

Each row below gives: identifier | parameters | docstring | docstring_summary | function.
create_max_ndvi_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs) |
Method for calculating the pixel value for the max ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the pixel value for the max ndvi value. | def create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):
"""
Method for calculating the pixel value for the max ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
dataset_in = dataset_in.copy(deep=True)
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
# band_list is needed by restore_or_convert_dtypes() below even when `dtype` is given.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = None
if dtype is None:
# Save dtypes because masking with Dataset.where() converts to float64.
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != -9999) & clean_mask)
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
time_slices = range(len(dataset_in.time))
for timeslice in time_slices:
dataset_slice = dataset_in.isel(time=timeslice).drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice, ::]] = -1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
utilities.clear_attrs(dataset_out)
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = \
dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values]
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out
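A usage sketch for create_max_ndvi_mosaic (not part of the dataset row). It assumes a Data Cube query result `landsat_dataset` with `red` and `nir` data variables over (time, latitude, longitude), and a boolean `clean_mask` of the same shape, for example derived from a QA band.

# Hedged usage sketch; `landsat_dataset` and `clean_mask` are assumed inputs, not defined above.
import numpy as np
composite = create_max_ndvi_mosaic(landsat_dataset, clean_mask=clean_mask, no_data=-9999, dtype=np.int16)
# The result drops the time dimension and keeps, per pixel, the values from the acquisition with the highest NDVI.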
create_min_ndvi_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs) |
Method for calculating the pixel value for the min ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the pixel value for the min ndvi value. | def create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):
"""
Method for calculating the pixel value for the min ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
dataset_in = dataset_in.copy(deep=True)
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
dataset_in_dtypes = None
if dtype is None:
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != -9999) & clean_mask)
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
time_slices = range(len(dataset_in.time))
for timeslice in time_slices:
dataset_slice = dataset_in.isel(time=timeslice).drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice, ::]] = 1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
utilities.clear_attrs(dataset_out)
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values <
dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values <
dataset_out.ndvi.values]
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, None, dataset_in_dtypes, dataset_out, no_data)
return dataset_out
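The min-NDVI variant above differs from the max-NDVI one only in the sentinel written into masked pixels (+1e9 instead of -1e9) and in the comparison direction. A small self-contained illustration of the per-slice update it performs (arrays are invented for the example):

# Illustrative core of the per-slice selection; not Data Cube data.
import numpy as np
best_ndvi = np.array([0.2, 0.6])
best_red = np.array([100, 200])
new_ndvi = np.array([0.4, 0.1])
new_red = np.array([150, 250])
better = new_ndvi < best_ndvi        # '<' selects the minimum-NDVI acquisition; the max variant uses '>'
best_red[better] = new_red[better]   # every data variable, including 'ndvi' itself, is updated this way
best_ndvi[better] = new_ndvi[better]
# best_red is now [100, 250] and best_ndvi is [0.2, 0.1]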
unpack_bits | (land_cover_endcoding, data_array, cover_type) |
Description:
Unpack bits for end of ls7 and ls8 functions
-----
Input:
land_cover_encoding(dict hash table) land cover encoding provided by ls7 or ls8
data_array( xarray DataArray)
cover_type(String) type of cover
Output:
unpacked DataArray
|
Description:
Unpack bits for end of ls7 and ls8 functions
-----
Input:
land_cover_encoding(dict hash table) land cover encoding provided by ls7 or ls8
data_array( xarray DataArray)
cover_type(String) type of cover
Output:
unpacked DataArray
| def unpack_bits(land_cover_endcoding, data_array, cover_type):
"""
Description:
Unpack bits for end of ls7 and ls8 functions
-----
Input:
land_cover_encoding(dict hash table) land cover encoding provided by ls7 or ls8
data_array( xarray DataArray)
cover_type(String) type of cover
Output:
unpacked DataArray
"""
boolean_mask = np.isin(data_array.values, land_cover_endcoding[cover_type])
return xr.DataArray(boolean_mask.astype(bool),
coords = data_array.coords,
dims = data_array.dims,
name = cover_type + "_mask",
attrs = data_array.attrs)
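A small, self-contained illustration of unpack_bits (the encoding dictionary and pixel values below are invented for the example, not real Landsat QA codes):

# Hypothetical illustration; the encoding values are made up.
import numpy as np
import xarray as xr
qa = xr.DataArray(np.array([[1, 2720], [2800, 2]]), dims=("y", "x"))
encoding = {"clear": [2720], "cloud": [2800]}
clear_mask = unpack_bits(encoding, qa, "clear")
# clear_mask.values -> [[False, True], [False, False]]; clear_mask.name == "clear_mask"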
ls8_oli_unpack_qa | (data_array, cover_type) |
Returns a boolean `xarray.DataArray` denoting which points in `data_array`
are of the selected `cover_type` (True indicates presence and
False indicates absence).
For more information, see this: https://landsat.usgs.gov/collectionqualityband
The most relevant section for this function is titled
"Landsat 8 OLI/ OLI-TIRS Level-1 Possible Attributes,
Pixel Values, and Pixel Value Interpretations".
Parameters
----------
data_array: xarray.DataArray
A DataArray of the QA band.
cover_type: string
A string in the set [fill, terrain_occ, clear, rad_sat_1_2,
rad_sat_3_4, rad_sat_5_pls, cloud, low_conf_cl,
med_conf_cl, high_conf_cl, high_cl_shdw,
high_snow_ice, low_conf_cir, high_conf_cir].
'fill' removes "no_data" values, which indicates an absence of data. This value is -9999 for Landsat platforms.
Generally, don't use 'fill'.
'terrain_occ' allows only occluded terrain.
'clear' allows only clear terrain. 'water' allows only water. 'shadow' allows only cloud shadows.
'rad_sat_1_2' denotes radiometric saturation in 1 or 2 bands.
'rad_sat_3_4' denotes radiometric saturation in 3 or 4 bands.
'rad_sat_5_pls' denotes radiometric saturation in 5 or more bands.
'cloud' allows only clouds, but note that it often only selects cloud boundaries.
'low_conf_cl', 'med_conf_cl', and 'high_conf_cl' denote low, medium, and high confidence in cloud coverage.
- 'low_conf_cl' is useful on its own for only removing clouds, however, 'clear' is usually better suited for this.
- 'med_conf_cl' is useful in combination with 'low_conf_cl' to allow slightly heavier cloud coverage.
- Note that 'med_conf_cl' and 'cloud' are very similar.
- 'high_conf_cl' is useful in combination with both 'low_conf_cl' and 'med_conf_cl'.
'high_cl_shdw' denotes high confidence in cloud shadow.
'high_snow_ice' denotes high confidence in snow or ice.
'low_conf_cir' and 'high_conf_cir' denote low and high confidence in cirrus clouds.
Returns
-------
mask: xarray.DataArray
The boolean `xarray.DataArray` denoting which points in `data_array`
are of the selected `cover_type` (True indicates presence and
False indicates absence). This will have the same dimensions and coordinates as `data_array`.
|
Returns a boolean `xarray.DataArray` denoting which points in `data_array`
are of the selected `cover_type` (True indicates presence and
False indicates absence). | def ls8_oli_unpack_qa(data_array, cover_type):
"""
Returns a boolean `xarray.DataArray` denoting which points in `data_array`
are of the selected `cover_type` (True indicates presence and
False indicates absence).
For more information, see this: https://landsat.usgs.gov/collectionqualityband
The most relevant section for this function is titled
"Landsat 8 OLI/ OLI-TIRS Level-1 Possible Attributes,
Pixel Values, and Pixel Value Interpretations".
Parameters
----------
data_array: xarray.DataArray
A DataArray of the QA band.
cover_type: string
A string in the set [fill, terrain_occ, clear, rad_sat_1_2,
rad_sat_3_4, rad_sat_5_pls, cloud, low_conf_cl,
med_conf_cl, high_conf_cl, high_cl_shdw,
high_snow_ice, low_conf_cir, high_conf_cir].
'fill' removes "no_data" values, which indicates an absence of data. This value is -9999 for Landsat platforms.
Generally, don't use 'fill'.
'terrain_occ' allows only occluded terrain.
'clear' allows only clear terrain. 'water' allows only water. 'shadow' allows only cloud shadows.
'rad_sat_1_2' denotes radiometric saturation in 1 or 2 bands.
'rad_sat_3_4' denotes radiometric saturation in 3 or 4 bands.
'rad_sat_5_pls' denotes radiometric saturation in 5 or more bands.
'cloud' allows only clouds, but note that it often only selects cloud boundaries.
'low_conf_cl', 'med_conf_cl', and 'high_conf_cl' denote low, medium, and high confidence in cloud coverage.
- 'low_conf_cl' is useful on its own for only removing clouds, however, 'clear' is usually better suited for this.
- 'med_conf_cl' is useful in combination with 'low_conf_cl' to allow slightly heavier cloud coverage.
- Note that 'med_conf_cl' and 'cloud' are very similar.
- 'high_conf_cl' is useful in combination with both 'low_conf_cl' and 'med_conf_cl'.
'high_cl_shdw' denotes high confidence in cloud shadow.
'high_snow_ice' denotes high confidence in snow or ice.
'low_conf_cir' and 'high_conf_cir' denote low and high confidence in cirrus clouds.
Returns
-------
mask: xarray.DataArray
The boolean `xarray.DataArray` denoting which points in `data_array`
are of the selected `cover_type` (True indicates presence and
False indicates absence). This will have the same dimensions and coordinates as `data_array`.
"""
land_cover_encoding = dict(fill =[1],
terrain_occ =[2, 2722],
clear =[2720, 2724, 2728, 2732],
rad_sat_1_2 =[2724, 2756, 2804, 2980, 3012, 3748, 3780, 6820, 6852, 6900, 7076, 7108, 7844, 7876],
rad_sat_3_4 =[2728, 2760, 2808, 2984, 3016, 3752, 3784, 6824, 6856, 6904, 7080, 7112, 7848, 7880],
rad_sat_5_pls=[2732, 2764, 2812, 2988, 3020, 3756, 3788, 6828, 6860, 6908, 7084, 7116, 7852, 7884],
cloud =[2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908],
low_conf_cl =[2752, 2722, 2724, 2728, 2732, 2976, 2980, 2984, 2988, 3744, 3748, 3752, 3756, 6816, 6820, 6824, 6828, 7072, 7076, 7080, 7084, 7840, 7844, 7848, 7852],
med_conf_cl =[2752, 2756, 2760, 2764, 3008, 3012, 3016, 3020, 3776, 3780, 3784, 3788, 6848, 6852, 6856, 6860, 7104, 7108, 7112, 7116, 7872, 7876, 7880, 7884],
high_conf_cl =[2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908],
high_cl_shdw=[2976, 2980, 2984, 2988, 3008, 3012, 3016, 3020, 7072, 7076, 7080, 7084, 7104, 7108, 7112, 7116],
high_snow_ice=[3744, 3748, 3752, 3756, 3776, 3780, 3784, 3788, 7840, 7844, 7848, 7852, 7872, 7876, 7880, 7884],
low_conf_cir =[2720, 2722, 2724, 2728, 2732, 2752, 2756, 2760, 2764, 2800, 2804, 2808, 2812, 2976, 2980, 2984, 2988, 3008, 3012, 3016, 3020, 3744, 3748, 3752, 3756, 3780, 3784, 3788],
high_conf_cir=[6816, 6820, 6824, 6828, 6848, 6852, 6856, 6860, 6896, 6900, 6904, 6908, 7072, 7076, 7080, 7084, 7104, 7108, 7112, 7116, 7840, 7844, 7848, 7852, 7872, 7876, 7880, 7884]
)
return unpack_bits(land_cover_encoding, data_array, cover_type)
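Note that the docstring mentions 'water' and 'shadow', but only the keys defined in the encoding dictionary above can actually be requested. These QA masks are typically turned into the clean_mask consumed by the compositing functions earlier in this dataset; a hedged sketch, assuming `dataset` is an xarray.Dataset with a Landsat 8 `pixel_qa` variable plus the `red` and `nir` bands:

# Assumed inputs: `dataset` with a 'pixel_qa' QA band and the reflectance bands used by the mosaic functions.
clear = ls8_oli_unpack_qa(dataset.pixel_qa, "clear")
clean_mask = clear.values  # boolean ndarray over (time, latitude, longitude)
composite = create_max_ndvi_mosaic(dataset, clean_mask=clean_mask)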
create_hdmedians_multiple_band_mosaic | (dataset_in,
clean_mask=None,
no_data=-9999,
dtype=None,
intermediate_product=None,
operation="median",
**kwargs) |
Calculates the geomedian or geomedoid using a multi-band processing method.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude (in that order)
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
operation: str in ['median', 'medoid']
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Calculates the geomedian or geomedoid using a multi-band processing method. | def create_hdmedians_multiple_band_mosaic(dataset_in,
clean_mask=None,
no_data=-9999,
dtype=None,
intermediate_product=None,
operation="median",
**kwargs):
"""
Calculates the geomedian or geomedoid using a multi-band processing method.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude (in that order)
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
operation: str in ['median', 'medoid']
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
assert operation in ['median', 'medoid'], "Only median and medoid operations are supported."
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = None
if dtype is None:
# Save dtypes because masking with Dataset.where() converts to float64.
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != no_data) & clean_mask)
arrays = [dataset_in[band] for band in band_list]
stacked_data = np.stack(arrays)
bands_shape, time_slices_shape, lat_shape, lon_shape = stacked_data.shape[0], \
stacked_data.shape[1], stacked_data.shape[2], \
stacked_data.shape[3]
# Reshape to remove lat/lon
reshaped_stack = stacked_data.reshape(bands_shape, time_slices_shape,
lat_shape * lon_shape)
# Build zeroes array across time slices.
hdmedians_result = np.zeros((bands_shape, lat_shape * lon_shape))
# For each pixel (lat/lon combination), find the geomedian or geomedoid across time.
for x in range(reshaped_stack.shape[2]):
try:
hdmedians_result[:, x] = hd.nangeomedian(
reshaped_stack[:, :, x], axis=1) if operation == "median" else hd.nanmedoid(
reshaped_stack[:, :, x], axis=1)
except ValueError as e:
# If all bands have nan values across time, the geomedians are nans.
hdmedians_result[:, x] = np.full((bands_shape), np.nan)
output_dict = {
value: (('latitude', 'longitude'), hdmedians_result[index, :].reshape(lat_shape, lon_shape))
for index, value in enumerate(band_list)
}
dataset_out = xr.Dataset(output_dict,
coords={'latitude': dataset_in['latitude'],
'longitude': dataset_in['longitude']},
attrs=dataset_in.attrs)
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out
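A usage sketch, assuming `hd` is the hdmedians package imported by the module and reusing the assumed `landsat_dataset` and `clean_mask` from the earlier examples:

# Hedged usage sketch; inputs are assumed as before.
geomedian = create_hdmedians_multiple_band_mosaic(landsat_dataset, clean_mask=clean_mask)
geomedoid = create_hdmedians_multiple_band_mosaic(landsat_dataset, clean_mask=clean_mask, operation="medoid")
# Both return an xarray.Dataset indexed only by latitude/longitude, with one value per band per pixel.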
restore_or_convert_dtypes | (dtype_for_all=None, band_list=None, dataset_in_dtypes=None, dataset_out=None, no_data=-9999) |
Converts datatypes of data variables in a copy of an xarray Dataset.
Parameters
----------
dtype_for_all: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
band_list: list-like
!! UNUSED, DEPRECATED !!
dataset_in_dtypes: dict
A dictionary mapping band names to datatypes.
One of `dtype_for_all` or `dataset_in_dtypes` must be `None`.
no_data: int or float
The no data value.
Returns
-------
dataset_out: xarray.Dataset
The output Dataset.
|
Converts datatypes of data variables in a copy of an xarray Dataset. | def restore_or_convert_dtypes(dtype_for_all=None, band_list=None, dataset_in_dtypes=None, dataset_out=None, no_data=-9999):
"""
Converts datatypes of data variables in a copy of an xarray Dataset.
Parameters
----------
dtype_for_all: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
band_list: list-like
!! UNUSED, DEPRECATED !!
dataset_in_dtypes: dict
A dictionary mapping band names to datatypes.
One of `dtype_for_all` or `dataset_in_dtypes` must be `None`.
no_data: int or float
The no data value.
Returns
-------
dataset_out: xarray.Dataset
The output Dataset.
"""
assert dtype_for_all is None or dataset_in_dtypes is None, \
"One of `dtype_for_all` or `dataset_in_dtypes` must be `None`."
if dtype_for_all is not None:
# Integer types can't represent nan.
if np.issubdtype(dtype_for_all, np.integer): # This also works for Python int type.
utilities.nan_to_num(dataset_out, no_data)
convert_to_dtype(dataset_out, dtype_for_all)
else: # Restore dtypes to state before masking.
for band in dataset_in_dtypes:
band_dtype = dataset_in_dtypes[band]
if np.issubdtype(band_dtype, np.integer):
utilities.nan_to_num(dataset_out[band], no_data)
dataset_out[band] = dataset_out[band].astype(band_dtype)
return dataset_out
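A minimal sketch of the two modes (force a single dtype, or restore the per-band dtypes saved before masking). It assumes `masked` is the float64 Dataset produced by Dataset.where() in the mosaic functions, and that utilities.nan_to_num and convert_to_dtype behave as their names suggest:

# Mode 1: force every band to int16; NaNs are first replaced with the no-data value.
import numpy as np
out_int16 = restore_or_convert_dtypes(dtype_for_all=np.int16, dataset_out=masked, no_data=-9999)
# Mode 2: restore the dtypes recorded before masking (dtype_for_all must then be None).
saved_dtypes = {"red": np.dtype("int16"), "nir": np.dtype("int16")}
out_restored = restore_or_convert_dtypes(dataset_in_dtypes=saved_dtypes, dataset_out=masked, no_data=-9999)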
EvictionModel.run | (self) | def run(self):
settings = self.config.experiment_settings(self.name())
invocations = settings["invocations"]
sleep = settings["sleep"]
repetitions = settings["repetitions"]
invocation_idx = settings["function_copy_idx"]
port = settings["client-port"]
from requests import get
ip = get("http://checkip.amazonaws.com/").text.rstrip()
"""
"""
function_names = self.functions_names[invocation_idx :: self.function_copies_per_time]
functions = self.functions[invocation_idx :: self.function_copies_per_time]
results = {}
# Disable logging - otherwise we have an RLock that can't be pickled
for func in functions:
# func.disable_logging()
for tr in func.triggers_all():
del tr._logging_handlers
# self.disable_logging()
# del self.logging
for t in self.times:
results[t] = []
fname = f"results_{invocations}_{repetitions}_{sleep}.json"
"""
Allocate one process for each invocation => process N invocations in parallel.
Each process uses M threads to execute in parallel invocations with a different time sleep
between executions.
The result: repeated N invocations for M different times.
"""
threads = len(self.times)
with multiprocessing.Pool(processes=(invocations + threads)) as pool:
for i in range(0, repetitions):
"""
Attempt to kill all existing containers.
"""
# for func in functions:
# self._deployment_client.enforce_cold_start(func)
# time.sleep(5)
for _, t in enumerate(self.times):
results[t].append([])
local_results = []
servers_results = []
"""
Start M server instances. Each one handles one set of invocations.
"""
for j in range(0, threads):
servers_results.append(
pool.apply_async(EvictionModel.accept_replies, args=(port + j, invocations))
)
"""
Start N parallel invocations
"""
for j in range(0, invocations):
payload = {"ip-address": ip, "port": port}
print(payload)
local_results.append(
pool.apply_async(
EvictionModel.process_function,
args=(i, j, invocations, functions, self.times, payload),
)
)
time.sleep(10)
import sys
sys.stdout.flush()
"""
Rethrow exceptions if appear
"""
for result in servers_results:
ret = result.get()
for result in local_results:
ret = result.get()
for i, val in enumerate(ret):
results[self.times[i]][-1].append(val)
"""
Make sure that parallel invocations are truly parallel,
i.e. no execution happens after another one finished.
"""
# verify_results(results)
with open(os.path.join(self._out_dir, fname), "w") as out_f:
# print(results)
print(f"Write results to {os.path.join(self._out_dir, fname)}")
out_f.write(serialize(results))
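The orchestration above reduces to a fan-out/collect pattern over multiprocessing.Pool: M listener workers plus N invoker workers are submitted with apply_async, and .get() later re-raises any worker exception. A stripped-down, hypothetical sketch of that pattern (listen and invoke are placeholders, not SeBS APIs):

# Generic sketch of the pattern used by run(); the worker functions are placeholders.
import multiprocessing

def listen(port, expected):  # stands in for EvictionModel.accept_replies
    return (port, expected)

def invoke(idx):  # stands in for EvictionModel.process_function
    return idx

def run_round(invocations, ports):
    with multiprocessing.Pool(processes=invocations + len(ports)) as pool:
        listeners = [pool.apply_async(listen, args=(p, invocations)) for p in ports]
        invokers = [pool.apply_async(invoke, args=(j,)) for j in range(invocations)]
        # .get() re-raises any exception from a worker, mirroring the "rethrow exceptions" step above.
        return [r.get() for r in listeners], [r.get() for r in invokers]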
CommandTests.test_wait_for_db_ready | (self) | Test waiting for db when db is available | Test waiting for db when db is available | def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
CommandTests.test_wait_for_db | (self, ts) | Test waiting for db | Test waiting for db | def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
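Both tests patch django.db.utils.ConnectionHandler.__getitem__ (and the second also patches time.sleep, which arrives as the `ts` argument). The management command they exercise is not included in this dataset; the following is only a sketch of the kind of wait_for_db command that would satisfy them, under that assumption:

# Hypothetical wait_for_db management command; the module path and exact messages are assumptions.
import time
from django.core.management.base import BaseCommand
from django.db import connections
from django.db.utils import OperationalError

class Command(BaseCommand):
    """Pause execution until the database is available."""

    def handle(self, *args, **options):
        self.stdout.write("Waiting for database...")
        db_conn = None
        while not db_conn:
            try:
                db_conn = connections["default"]  # goes through ConnectionHandler.__getitem__, which the tests patch
            except OperationalError:
                self.stdout.write("Database unavailable, waiting 1 second...")
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS("Database available!"))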
ConfiguredAssetSqlDataConnector.add_data_asset | (
self,
name: str,
config: dict,
) |
Add data_asset to DataConnector using data_asset name as key, and data_asset configuration as value.
|
Add data_asset to DataConnector using data_asset name as key, and data_asset configuration as value.
| def add_data_asset(
self,
name: str,
config: dict,
):
"""
Add data_asset to DataConnector using data_asset name as key, and data_asset configuration as value.
"""
self._assets[name] = config
ConfiguredAssetSqlDataConnector.get_available_data_asset_names | (self) |
Return the list of asset names known by this DataConnector.
Returns:
A list of available names
|
Return the list of asset names known by this DataConnector. | def get_available_data_asset_names(self) -> List[str]:
"""
Return the list of asset names known by this DataConnector.
Returns:
A list of available names
"""
return list(self.assets.keys())
ConfiguredAssetSqlDataConnector.get_unmatched_data_references | (self) |
Returns the list of data_references unmatched by configuration by looping through items in _data_references_cache
and returning data_reference that do not have an associated data_asset.
Returns:
list of data_references that are not matched by configuration.
|
Returns the list of data_references unmatched by configuration by looping through items in _data_references_cache
and returning data_reference that do not have an associated data_asset. | def get_unmatched_data_references(self) -> List[str]:
"""
Returns the list of data_references unmatched by configuration by looping through items in _data_references_cache
and returning data_reference that do not have an associated data_asset.
Returns:
list of data_references that are not matched by configuration.
"""
return []
ConfiguredAssetSqlDataConnector.build_batch_spec | (
self, batch_definition: BatchDefinition
) |
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
|
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function. | def build_batch_spec(
self, batch_definition: BatchDefinition
) -> SqlAlchemyDatasourceBatchSpec:
"""
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
"""
data_asset_name: str = batch_definition.data_asset_name
if (
data_asset_name in self.assets
and self.assets[data_asset_name].get("batch_spec_passthrough")
and isinstance(
self.assets[data_asset_name].get("batch_spec_passthrough"), dict
)
):
# batch_spec_passthrough from data_asset
batch_spec_passthrough = deepcopy(
self.assets[data_asset_name]["batch_spec_passthrough"]
)
batch_definition_batch_spec_passthrough = (
deepcopy(batch_definition.batch_spec_passthrough) or {}
)
# batch_spec_passthrough from Batch Definition supersedes batch_spec_passthrough from data_asset
batch_spec_passthrough.update(batch_definition_batch_spec_passthrough)
batch_definition.batch_spec_passthrough = batch_spec_passthrough
batch_spec: BatchSpec = super().build_batch_spec(
batch_definition=batch_definition
)
return SqlAlchemyDatasourceBatchSpec(batch_spec)
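The merge order matters here: keys from the Batch Definition's batch_spec_passthrough override the same keys from the data_asset config. A small illustration of that precedence (keys and values are invented):

# Illustration of the dict precedence implemented above.
asset_level = {"create_temp_table": True, "schema_name": "public"}
batch_definition_level = {"create_temp_table": False}
merged = dict(asset_level)
merged.update(batch_definition_level)
# merged == {"create_temp_table": False, "schema_name": "public"}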
ConfiguredAssetSqlDataConnector._generate_batch_spec_parameters_from_batch_definition | (
self, batch_definition: BatchDefinition
) |
Build BatchSpec parameters from batch_definition with the following components:
1. data_asset_name from batch_definition
2. batch_identifiers from batch_definition
3. data_asset from data_connector
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
dict built from batch_definition
|
Build BatchSpec parameters from batch_definition with the following components:
1. data_asset_name from batch_definition
2. batch_identifiers from batch_definition
3. data_asset from data_connector | def _generate_batch_spec_parameters_from_batch_definition(
self, batch_definition: BatchDefinition
) -> dict:
"""
Build BatchSpec parameters from batch_definition with the following components:
1. data_asset_name from batch_definition
2. batch_identifiers from batch_definition
3. data_asset from data_connector
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
dict built from batch_definition
"""
data_asset_name: str = batch_definition.data_asset_name
return {
"data_asset_name": data_asset_name,
"table_name": data_asset_name,
"batch_identifiers": batch_definition.batch_identifiers,
**self.assets[data_asset_name],
}
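A hypothetical input/output pair for this method (the asset configuration keys below, such as splitter_method, are illustrative rather than required fields):

# Hedged example of the parameter dict the method builds.
asset_config = {"splitter_method": "_split_on_column_value", "splitter_kwargs": {"column_name": "pickup_date"}}
batch_identifiers = {"pickup_date": "2019-01-01"}
params = {"data_asset_name": "taxi_trips", "table_name": "taxi_trips", "batch_identifiers": batch_identifiers, **asset_config}
# params mirrors what would be returned for a batch definition naming the "taxi_trips" asset.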
ConfiguredAssetSqlDataConnector._split_on_whole_table | (
self,
table_name: str,
) |
'Split' by returning the whole table
Note: the table_name parameter is required to keep the signature of this method consistent with other methods.
|
'Split' by returning the whole table | def _split_on_whole_table(
self,
table_name: str,
):
"""
'Split' by returning the whole table
Note: the table_name parameter is required to keep the signature of this method consistent with other methods.
"""
return sa.select([sa.true()])
ConfiguredAssetSqlDataConnector._split_on_column_value | (
self,
table_name: str,
column_name: str,
) | Split using the values in the named column | Split using the values in the named column | def _split_on_column_value(
self,
table_name: str,
column_name: str,
):
"""Split using the values in the named column"""
# query = f"SELECT DISTINCT(\"{self.column_name}\") FROM {self.table_name}"
return sa.select([sa.func.distinct(sa.column(column_name))]).select_from(
sa.text(table_name)
)
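These splitter helpers only build SQLAlchemy select objects; compiling one shows the SQL it stands for, and the same idea applies to the datetime, integer, multi-column and hashed variants below. A sketch, assuming the SQLAlchemy 1.x style used in this module (table and column names are invented):

# Compile the _split_on_column_value select to inspect the SQL it represents.
import sqlalchemy as sa
query = sa.select([sa.func.distinct(sa.column("passenger_count"))]).select_from(sa.text("yellow_tripdata"))
print(query.compile(compile_kwargs={"literal_binds": True}))
# Prints roughly: SELECT distinct(passenger_count) AS distinct_1 FROM yellow_tripdata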
ConfiguredAssetSqlDataConnector._split_on_converted_datetime | (
self,
table_name: str,
column_name: str,
date_format_string: str = "%Y-%m-%d",
) | Convert the values in the named column to the given date_format, and split on that | Convert the values in the named column to the given date_format, and split on that | def _split_on_converted_datetime(
self,
table_name: str,
column_name: str,
date_format_string: str = "%Y-%m-%d",
):
"""Convert the values in the named column to the given date_format, and split on that"""
# query = f"SELECT DISTINCT( strftime(\"{date_format_string}\", \"{self.column_name}\")) as my_var FROM {self.table_name}"
return sa.select(
[
sa.func.distinct(
sa.func.strftime(
date_format_string,
sa.column(column_name),
)
)
]
).select_from(sa.text(table_name))
ConfiguredAssetSqlDataConnector._split_on_divided_integer | (
self, table_name: str, column_name: str, divisor: int
) | Divide the values in the named column by `divisor`, and split on that | Divide the values in the named column by `divisor`, and split on that | def _split_on_divided_integer(
self, table_name: str, column_name: str, divisor: int
):
"""Divide the values in the named column by `divisor`, and split on that"""
# query = f"SELECT DISTINCT(\"{self.column_name}\" / {divisor}) AS my_var FROM {self.table_name}"
return sa.select(
[sa.func.distinct(sa.cast(sa.column(column_name) / divisor, sa.Integer))]
).select_from(sa.text(table_name))
ConfiguredAssetSqlDataConnector._split_on_mod_integer | (self, table_name: str, column_name: str, mod: int) | Take the values in the named column modulo `mod`, and split on that | Take the values in the named column modulo `mod`, and split on that | def _split_on_mod_integer(self, table_name: str, column_name: str, mod: int):
"""Take the values in the named column modulo `mod`, and split on that"""
# query = f"SELECT DISTINCT(\"{self.column_name}\" % {mod}) AS my_var FROM {self.table_name}"
return sa.select(
[sa.func.distinct(sa.cast(sa.column(column_name) % mod, sa.Integer))]
).select_from(sa.text(table_name))
ConfiguredAssetSqlDataConnector._split_on_multi_column_values | (
self,
table_name: str,
column_names: List[str],
) | Split on the joint values in the named columns | Split on the joint values in the named columns | def _split_on_multi_column_values(
self,
table_name: str,
column_names: List[str],
):
"""Split on the joint values in the named columns"""
# query = f"SELECT DISTINCT(\"{self.column_name}\") FROM {self.table_name}"
return (
sa.select([sa.column(column_name) for column_name in column_names])
.distinct()
.select_from(sa.text(table_name))
)
ConfiguredAssetSqlDataConnector._split_on_hashed_column | (
self,
table_name: str,
column_name: str,
hash_digits: int,
) | Note: this method is experimental. It does not work with all SQL dialects. | Note: this method is experimental. It does not work with all SQL dialects. | def _split_on_hashed_column(
self,
table_name: str,
column_name: str,
hash_digits: int,
):
"""Note: this method is experimental. It does not work with all SQL dialects."""
# query = f"SELECT MD5(\"{self.column_name}\") = {matching_hash}) AS hashed_var FROM {self.table_name}"
return sa.select([sa.func.md5(sa.column(column_name))]).select_from(
sa.text(table_name)
) | [
"def",
"_split_on_hashed_column",
"(",
"self",
",",
"table_name",
":",
"str",
",",
"column_name",
":",
"str",
",",
"hash_digits",
":",
"int",
",",
")",
":",
"# query = f\"SELECT MD5(\\\"{self.column_name}\\\") = {matching_hash}) AS hashed_var FROM {self.table_name}\"",
"return",
"sa",
".",
"select",
"(",
"[",
"sa",
".",
"func",
".",
"md5",
"(",
"sa",
".",
"column",
"(",
"column_name",
")",
")",
"]",
")",
".",
"select_from",
"(",
"sa",
".",
"text",
"(",
"table_name",
")",
")"
] | [
326,
4
] | [
337,
9
] | python | en | ['en', 'en', 'en'] | True |
download | (name, test_size=None, data_path=DATA_PATH) | Load the CSV with the given name from S3.
If the CSV has never been loaded before, it will be downloaded
from the [d3-ai-orion bucket](https://d3-ai-orion.s3.amazonaws.com) or
the S3 bucket specified following the `s3://{bucket}/path/to/the.csv` format,
and then cached inside the `data` folder, within the `orion` package
directory, and then returned.
Otherwise, if it has been downloaded and cached before, it will be directly
loaded from the `orion/data` folder without contacting S3.
If a `test_size` value is given, the data will be split in two parts
without altering its order, making the second one proportionally as
big as the given value.
Args:
name (str): Name of the CSV to load.
test_size (float): Value between 0 and 1 indicating the proportional
size of the test split. If 0 or None (default), the data is not split.
Returns:
If no test_size is given, a single pandas.DataFrame is returned containing all
the data. If test_size is given, a tuple containing one pandas.DataFrame for
the train split and another one for the test split is returned.
| Load the CSV with the given name from S3. | def download(name, test_size=None, data_path=DATA_PATH):
"""Load the CSV with the given name from S3.
If the CSV has never been loaded before, it will be downloaded
from the [d3-ai-orion bucket](https://d3-ai-orion.s3.amazonaws.com) or
the S3 bucket specified following the `s3://{bucket}/path/to/the.csv` format,
and then cached inside the `data` folder, within the `orion` package
directory, and then returned.
Otherwise, if it has been downloaded and cached before, it will be directly
loaded from the `orion/data` folder without contacting S3.
If a `test_size` value is given, the data will be split in two parts
without altering its order, making the second one proportionally as
big as the given value.
Args:
name (str): Name of the CSV to load.
test_size (float): Value between 0 and 1 indicating the proportional
size of the test split. If 0 or None (default), the data is not split.
Returns:
If no test_size is given, a single pandas.DataFrame is returned containing all
the data. If test_size is given, a tuple containing one pandas.DataFrame for
the train split and another one for the test split is returned.
"""
url = None
if name.startswith('s3://'):
parts = name[5:].split('/', 1)
bucket = parts[0]
path = parts[1]
url = S3_URL.format(bucket, path)
filename = os.path.join(data_path, path.split('/')[-1])
else:
filename = os.path.join(data_path, name + '.csv')
if os.path.exists(filename):
data = pd.read_csv(filename)
else:
url = url or S3_URL.format(BUCKET, '{}.csv'.format(name))
LOGGER.info('Downloading CSV %s from %s', name, url)
os.makedirs(data_path, exist_ok=True)
data = pd.read_csv(url)
data.to_csv(filename, index=False)
return data | [
"def",
"download",
"(",
"name",
",",
"test_size",
"=",
"None",
",",
"data_path",
"=",
"DATA_PATH",
")",
":",
"url",
"=",
"None",
"if",
"name",
".",
"startswith",
"(",
"'s3://'",
")",
":",
"parts",
"=",
"name",
"[",
"5",
":",
"]",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"bucket",
"=",
"parts",
"[",
"0",
"]",
"path",
"=",
"parts",
"[",
"1",
"]",
"url",
"=",
"S3_URL",
".",
"format",
"(",
"bucket",
",",
"path",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"path",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
")",
"else",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"name",
"+",
"'.csv'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"filename",
")",
"else",
":",
"url",
"=",
"url",
"or",
"S3_URL",
".",
"format",
"(",
"BUCKET",
",",
"'{}.csv'",
".",
"format",
"(",
"name",
")",
")",
"LOGGER",
".",
"info",
"(",
"'Downloading CSV %s from %s'",
",",
"name",
",",
"url",
")",
"os",
".",
"makedirs",
"(",
"data_path",
",",
"exist_ok",
"=",
"True",
")",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"url",
")",
"data",
".",
"to_csv",
"(",
"filename",
",",
"index",
"=",
"False",
")",
"return",
"data"
] | [
42,
0
] | [
90,
15
] | python | en | ['en', 'en', 'en'] | True |
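A quick usage sketch for the loader above; the signal name, bucket, and local path are illustrative assumptions, and note that although `test_size` is accepted, the body shown here returns the full frame without splitting.

# Assuming the download() helper above is importable in the current scope.
frame = download("S-1", data_path="/tmp/orion-data")  # fetched from S3 on the first call, cached afterwards
print(frame.shape)

# A fully qualified S3 URL is also accepted and cached under the same data_path.
frame = download("s3://my-bucket/anomaly/series.csv", data_path="/tmp/orion-data")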
SubdirReaderBatchKwargsGenerator._build_batch_kwargs | (self, batch_parameters) |
Args:
batch_parameters:
Returns:
batch_kwargs
| def _build_batch_kwargs(self, batch_parameters):
"""
Args:
batch_parameters:
Returns:
batch_kwargs
"""
try:
data_asset_name = batch_parameters.pop("data_asset_name")
except KeyError:
raise BatchKwargsError(
"Unable to build BatchKwargs: no name provided in batch_parameters.",
batch_kwargs=batch_parameters,
)
if "partition_id" in batch_parameters:
partition_id = batch_parameters.pop("partition_id")
# Find the path
path = None
for extension in self.known_extensions:
if os.path.isfile(
os.path.join(
self.base_directory, data_asset_name, partition_id + extension
)
):
path = os.path.join(
self.base_directory, data_asset_name, partition_id + extension
)
if path is None:
logger.warning(
"Unable to find path with the provided partition; searching for asset-name partitions."
)
# Fall through to this case in the event that there is not a subdir available, or if partition_id was
# not provided
if os.path.isfile(os.path.join(self.base_directory, data_asset_name)):
path = os.path.join(self.base_directory, data_asset_name)
for extension in self.known_extensions:
if os.path.isfile(
os.path.join(self.base_directory, data_asset_name + extension)
):
path = os.path.join(
self.base_directory, data_asset_name + extension
)
if path is None:
raise BatchKwargsError(
"Unable to build batch kwargs from for asset '%s'"
% data_asset_name,
batch_parameters,
)
return self._build_batch_kwargs_from_path(path, **batch_parameters)
else:
return self.yield_batch_kwargs(
data_asset_name=data_asset_name, **batch_parameters
) | [
"def",
"_build_batch_kwargs",
"(",
"self",
",",
"batch_parameters",
")",
":",
"try",
":",
"data_asset_name",
"=",
"batch_parameters",
".",
"pop",
"(",
"\"data_asset_name\"",
")",
"except",
"KeyError",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to build BatchKwargs: no name provided in batch_parameters.\"",
",",
"batch_kwargs",
"=",
"batch_parameters",
",",
")",
"if",
"\"partition_id\"",
"in",
"batch_parameters",
":",
"partition_id",
"=",
"batch_parameters",
".",
"pop",
"(",
"\"partition_id\"",
")",
"# Find the path",
"path",
"=",
"None",
"for",
"extension",
"in",
"self",
".",
"known_extensions",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_directory",
",",
"data_asset_name",
",",
"partition_id",
"+",
"extension",
")",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_directory",
",",
"data_asset_name",
",",
"partition_id",
"+",
"extension",
")",
"if",
"path",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Unable to find path with the provided partition; searching for asset-name partitions.\"",
")",
"# Fall through to this case in the event that there is not a subdir available, or if partition_id was",
"# not provided",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_directory",
",",
"data_asset_name",
")",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_directory",
",",
"data_asset_name",
")",
"for",
"extension",
"in",
"self",
".",
"known_extensions",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_directory",
",",
"data_asset_name",
"+",
"extension",
")",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_directory",
",",
"data_asset_name",
"+",
"extension",
")",
"if",
"path",
"is",
"None",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to build batch kwargs from for asset '%s'\"",
"%",
"data_asset_name",
",",
"batch_parameters",
",",
")",
"return",
"self",
".",
"_build_batch_kwargs_from_path",
"(",
"path",
",",
"*",
"*",
"batch_parameters",
")",
"else",
":",
"return",
"self",
".",
"yield_batch_kwargs",
"(",
"data_asset_name",
"=",
"data_asset_name",
",",
"*",
"*",
"batch_parameters",
")"
] | [
123,
4
] | [
183,
13
] | python | en | ['en', 'error', 'th'] | False |
|
IRTLSContext.from_legacy | (cls, ir: 'IR', name: str, rkey: str, location: str,
cert: 'IRAmbassadorTLS', termination: bool,
validation_ca: Optional['IRAmbassadorTLS']) |
Create an IRTLSContext from a legacy TLS-module style definition.
'cert' is the TLS certificate that we'll offer to our peer -- for a termination
context, this is our server cert, and for an origination context, it's our client
cert.
For termination contexts, 'validation_ca' may also be provided. It's the TLS
certificate that we'll use to validate the certificates our clients offer. Note
that no private key is needed or supported.
:param ir: IR in play
:param name: name for the newly-created context
:param rkey: rkey for the newly-created context
:param location: location for the newly-created context
:param cert: information about the cert to present to the peer
:param termination: is this a termination context?
:param validation_ca: information about how we'll validate the peer's cert
:return: newly-created IRTLSContext
|
Create an IRTLSContext from a legacy TLS-module style definition. | def from_legacy(cls, ir: 'IR', name: str, rkey: str, location: str,
cert: 'IRAmbassadorTLS', termination: bool,
validation_ca: Optional['IRAmbassadorTLS']) -> 'IRTLSContext':
"""
Create an IRTLSContext from a legacy TLS-module style definition.
'cert' is the TLS certificate that we'll offer to our peer -- for a termination
context, this is our server cert, and for an origination context, it's our client
cert.
For termination contexts, 'validation_ca' may also be provided. It's the TLS
certificate that we'll use to validate the certificates our clients offer. Note
that no private key is needed or supported.
:param ir: IR in play
:param name: name for the newly-created context
:param rkey: rkey for the newly-created context
:param location: location for the newly-created context
:param cert: information about the cert to present to the peer
:param termination: is this a termination context?
:param validation_ca: information about how we'll validate the peer's cert
:return: newly-created IRTLSContext
"""
new_args = {}
for key in [ 'secret', 'cert_chain_file', 'private_key_file',
'alpn_protocols', 'redirect_cleartext_from' ]:
value = cert.get(key, None)
if value:
new_args[key] = value
if (('secret' not in new_args) and
('cert_chain_file' not in new_args) and
('private_key_file' not in new_args)):
# Assume they want the 'ambassador-certs' secret.
new_args['secret'] = 'ambassador-certs'
if termination:
new_args['hosts'] = [ '*' ]
if validation_ca and validation_ca.get('enabled', True):
for key in [ 'secret', 'cacert_chain_file', 'cert_required' ]:
value = validation_ca.get(key, None)
if value:
if key == 'secret':
new_args['ca_secret'] = value
else:
new_args[key] = value
if (('ca_secret' not in new_args) and
('cacert_chain_file' not in new_args)):
# Assume they want the 'ambassador-cacert' secret.
new_args['secret'] = 'ambassador-cacert'
ctx = IRTLSContext(ir, ir.aconf,
rkey=rkey,
name=name,
location=location,
kind="synthesized-TLS-context",
_legacy=True,
**new_args)
return ctx | [
"def",
"from_legacy",
"(",
"cls",
",",
"ir",
":",
"'IR'",
",",
"name",
":",
"str",
",",
"rkey",
":",
"str",
",",
"location",
":",
"str",
",",
"cert",
":",
"'IRAmbassadorTLS'",
",",
"termination",
":",
"bool",
",",
"validation_ca",
":",
"Optional",
"[",
"'IRAmbassadorTLS'",
"]",
")",
"->",
"'IRTLSContext'",
":",
"new_args",
"=",
"{",
"}",
"for",
"key",
"in",
"[",
"'secret'",
",",
"'cert_chain_file'",
",",
"'private_key_file'",
",",
"'alpn_protocols'",
",",
"'redirect_cleartext_from'",
"]",
":",
"value",
"=",
"cert",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"value",
":",
"new_args",
"[",
"key",
"]",
"=",
"value",
"if",
"(",
"(",
"'secret'",
"not",
"in",
"new_args",
")",
"and",
"(",
"'cert_chain_file'",
"not",
"in",
"new_args",
")",
"and",
"(",
"'private_key_file'",
"not",
"in",
"new_args",
")",
")",
":",
"# Assume they want the 'ambassador-certs' secret.",
"new_args",
"[",
"'secret'",
"]",
"=",
"'ambassador-certs'",
"if",
"termination",
":",
"new_args",
"[",
"'hosts'",
"]",
"=",
"[",
"'*'",
"]",
"if",
"validation_ca",
"and",
"validation_ca",
".",
"get",
"(",
"'enabled'",
",",
"True",
")",
":",
"for",
"key",
"in",
"[",
"'secret'",
",",
"'cacert_chain_file'",
",",
"'cert_required'",
"]",
":",
"value",
"=",
"validation_ca",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"value",
":",
"if",
"key",
"==",
"'secret'",
":",
"new_args",
"[",
"'ca_secret'",
"]",
"=",
"value",
"else",
":",
"new_args",
"[",
"key",
"]",
"=",
"value",
"if",
"(",
"(",
"'ca_secret'",
"not",
"in",
"new_args",
")",
"and",
"(",
"'cacert_chain_file'",
"not",
"in",
"new_args",
")",
")",
":",
"# Assume they want the 'ambassador-cacert' secret.",
"new_args",
"[",
"'secret'",
"]",
"=",
"'ambassador-cacert'",
"ctx",
"=",
"IRTLSContext",
"(",
"ir",
",",
"ir",
".",
"aconf",
",",
"rkey",
"=",
"rkey",
",",
"name",
"=",
"name",
",",
"location",
"=",
"location",
",",
"kind",
"=",
"\"synthesized-TLS-context\"",
",",
"_legacy",
"=",
"True",
",",
"*",
"*",
"new_args",
")",
"return",
"ctx"
] | [
332,
4
] | [
396,
18
] | python | en | ['en', 'error', 'th'] | False |
Rule.__init__ | (
self,
name: str,
domain_builder: Optional[DomainBuilder] = None,
parameter_builders: Optional[List[ParameterBuilder]] = None,
expectation_configuration_builders: Optional[
List[ExpectationConfigurationBuilder]
] = None,
variables: Optional[ParameterContainer] = None,
) |
Sets Profiler rule name, domain builders, parameter builders, configuration builders,
and other necessary instance data (variables)
:param name: A string representing the name of the ProfilerRule
:param domain_builder: A Domain Builder object used to build rule data domain
:param parameter_builders: A Parameter Builder list used to configure necessary rule evaluation parameters for
every configuration
:param expectation_configuration_builders: A list of Expectation Configuration Builders
:param variables: Any instance data required to verify a rule
|
Sets Profiler rule name, domain builders, parameters builders, configuration builders,
and other necessary instance data (variables)
:param name: A string representing the name of the ProfilerRule
:param domain_builder: A Domain Builder object used to build rule data domain
:param parameter_builders: A Parameter Builder list used to configure necessary rule evaluation parameters for
every configuration
:param expectation_configuration_builders: A list of Expectation Configuration Builders
:param variables: Any instance data required to verify a rule
| def __init__(
self,
name: str,
domain_builder: Optional[DomainBuilder] = None,
parameter_builders: Optional[List[ParameterBuilder]] = None,
expectation_configuration_builders: Optional[
List[ExpectationConfigurationBuilder]
] = None,
variables: Optional[ParameterContainer] = None,
):
"""
Sets Profiler rule name, domain builders, parameters builders, configuration builders,
and other necessary instance data (variables)
:param name: A string representing the name of the ProfilerRule
:param domain_builder: A Domain Builder object used to build rule data domain
:param parameter_builders: A Parameter Builder list used to configure necessary rule evaluation parameters for
every configuration
:param expectation_configuration_builders: A list of Expectation Configuration Builders
:param variables: Any instance data required to verify a rule
"""
self._name = name
self._domain_builder = domain_builder
self._parameter_builders = parameter_builders
self._expectation_configuration_builders = expectation_configuration_builders
self._variables = variables
self._parameters = {} | [
"def",
"__init__",
"(",
"self",
",",
"name",
":",
"str",
",",
"domain_builder",
":",
"Optional",
"[",
"DomainBuilder",
"]",
"=",
"None",
",",
"parameter_builders",
":",
"Optional",
"[",
"List",
"[",
"ParameterBuilder",
"]",
"]",
"=",
"None",
",",
"expectation_configuration_builders",
":",
"Optional",
"[",
"List",
"[",
"ExpectationConfigurationBuilder",
"]",
"]",
"=",
"None",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
")",
":",
"self",
".",
"_name",
"=",
"name",
"self",
".",
"_domain_builder",
"=",
"domain_builder",
"self",
".",
"_parameter_builders",
"=",
"parameter_builders",
"self",
".",
"_expectation_configuration_builders",
"=",
"expectation_configuration_builders",
"self",
".",
"_variables",
"=",
"variables",
"self",
".",
"_parameters",
"=",
"{",
"}"
] | [
15,
4
] | [
41,
29
] | python | en | ['en', 'error', 'th'] | False |
Rule.generate | (
self,
) |
Builds a list of Expectation Configurations, returning a single Expectation Configuration entry for every
ConfigurationBuilder available based on the instantiation.
:return: List of Corresponding Expectation Configurations representing every configured rule
|
Builds a list of Expectation Configurations, returning a single Expectation Configuration entry for every
ConfigurationBuilder available based on the instantiation. | def generate(
self,
) -> List[ExpectationConfiguration]:
"""
Builds a list of Expectation Configurations, returning a single Expectation Configuration entry for every
ConfigurationBuilder available based on the instantiation.
:return: List of Corresponding Expectation Configurations representing every configured rule
"""
expectation_configurations: List[ExpectationConfiguration] = []
domains: List[Domain] = self._domain_builder.get_domains(
variables=self.variables
)
domain: Domain
for domain in domains:
parameter_container: ParameterContainer = ParameterContainer(
parameter_nodes=None
)
self._parameters[domain.id] = parameter_container
parameter_builder: ParameterBuilder
for parameter_builder in self._parameter_builders:
parameter_builder.build_parameters(
parameter_container=parameter_container,
domain=domain,
variables=self.variables,
parameters=self.parameters,
)
expectation_configuration_builder: ExpectationConfigurationBuilder
for (
expectation_configuration_builder
) in self._expectation_configuration_builders:
expectation_configurations.append(
expectation_configuration_builder.build_expectation_configuration(
domain=domain,
variables=self.variables,
parameters=self.parameters,
)
)
return expectation_configurations | [
"def",
"generate",
"(",
"self",
",",
")",
"->",
"List",
"[",
"ExpectationConfiguration",
"]",
":",
"expectation_configurations",
":",
"List",
"[",
"ExpectationConfiguration",
"]",
"=",
"[",
"]",
"domains",
":",
"List",
"[",
"Domain",
"]",
"=",
"self",
".",
"_domain_builder",
".",
"get_domains",
"(",
"variables",
"=",
"self",
".",
"variables",
")",
"domain",
":",
"Domain",
"for",
"domain",
"in",
"domains",
":",
"parameter_container",
":",
"ParameterContainer",
"=",
"ParameterContainer",
"(",
"parameter_nodes",
"=",
"None",
")",
"self",
".",
"_parameters",
"[",
"domain",
".",
"id",
"]",
"=",
"parameter_container",
"parameter_builder",
":",
"ParameterBuilder",
"for",
"parameter_builder",
"in",
"self",
".",
"_parameter_builders",
":",
"parameter_builder",
".",
"build_parameters",
"(",
"parameter_container",
"=",
"parameter_container",
",",
"domain",
"=",
"domain",
",",
"variables",
"=",
"self",
".",
"variables",
",",
"parameters",
"=",
"self",
".",
"parameters",
",",
")",
"expectation_configuration_builder",
":",
"ExpectationConfigurationBuilder",
"for",
"(",
"expectation_configuration_builder",
")",
"in",
"self",
".",
"_expectation_configuration_builders",
":",
"expectation_configurations",
".",
"append",
"(",
"expectation_configuration_builder",
".",
"build_expectation_configuration",
"(",
"domain",
"=",
"domain",
",",
"variables",
"=",
"self",
".",
"variables",
",",
"parameters",
"=",
"self",
".",
"parameters",
",",
")",
")",
"return",
"expectation_configurations"
] | [
43,
4
] | [
85,
41
] | python | en | ['en', 'error', 'th'] | False |
venv | (request) |
Prepares a virtual environment for nose.
:rtype : virtual_environments.VirtualEnvDescription
|
Prepares a virtual environment for nose.
:rtype : virtual_environments.VirtualEnvDescription
| def venv(request):
"""
Prepares a virtual environment for nose.
:rtype : virtual_environments.VirtualEnvDescription
"""
return virtual_environments.prepare_virtualenv([request.param]) | [
"def",
"venv",
"(",
"request",
")",
":",
"return",
"virtual_environments",
".",
"prepare_virtualenv",
"(",
"[",
"request",
".",
"param",
"]",
")"
] | [
13,
0
] | [
18,
67
] | python | en | ['en', 'error', 'th'] | False |
RunVar.get | (self, default=_NO_DEFAULT) | Gets the value of this :class:`RunVar` for the current run call. | Gets the value of this :class:`RunVar` for the current run call. | def get(self, default=_NO_DEFAULT):
"""Gets the value of this :class:`RunVar` for the current run call."""
try:
return _run.GLOBAL_RUN_CONTEXT.runner._locals[self]
except AttributeError:
raise RuntimeError("Cannot be used outside of a run context") from None
except KeyError:
# contextvars consistency
if default is not self._NO_DEFAULT:
return default
if self._default is not self._NO_DEFAULT:
return self._default
raise LookupError(self) from None | [
"def",
"get",
"(",
"self",
",",
"default",
"=",
"_NO_DEFAULT",
")",
":",
"try",
":",
"return",
"_run",
".",
"GLOBAL_RUN_CONTEXT",
".",
"runner",
".",
"_locals",
"[",
"self",
"]",
"except",
"AttributeError",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot be used outside of a run context\"",
")",
"from",
"None",
"except",
"KeyError",
":",
"# contextvars consistency",
"if",
"default",
"is",
"not",
"self",
".",
"_NO_DEFAULT",
":",
"return",
"default",
"if",
"self",
".",
"_default",
"is",
"not",
"self",
".",
"_NO_DEFAULT",
":",
"return",
"self",
".",
"_default",
"raise",
"LookupError",
"(",
"self",
")",
"from",
"None"
] | [
37,
4
] | [
51,
45
] | python | en | ['en', 'en', 'en'] | True |
RunVar.set | (self, value) | Sets the value of this :class:`RunVar` for this current run
call.
| Sets the value of this :class:`RunVar` for this current run
call. | def set(self, value):
"""Sets the value of this :class:`RunVar` for this current run
call.
"""
try:
old_value = self.get()
except LookupError:
token = _RunVarToken.empty(self)
else:
token = _RunVarToken(self, old_value)
# This can't fail, because if we weren't in Trio context then the
# get() above would have failed.
_run.GLOBAL_RUN_CONTEXT.runner._locals[self] = value
return token | [
"def",
"set",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"old_value",
"=",
"self",
".",
"get",
"(",
")",
"except",
"LookupError",
":",
"token",
"=",
"_RunVarToken",
".",
"empty",
"(",
"self",
")",
"else",
":",
"token",
"=",
"_RunVarToken",
"(",
"self",
",",
"old_value",
")",
"# This can't fail, because if we weren't in Trio context then the",
"# get() above would have failed.",
"_run",
".",
"GLOBAL_RUN_CONTEXT",
".",
"runner",
".",
"_locals",
"[",
"self",
"]",
"=",
"value",
"return",
"token"
] | [
53,
4
] | [
68,
20
] | python | en | ['en', 'en', 'en'] | True |
RunVar.reset | (self, token) | Resets the value of this :class:`RunVar` to what it was
previously specified by the token.
| Resets the value of this :class:`RunVar` to what it was
previously specified by the token. | def reset(self, token):
"""Resets the value of this :class:`RunVar` to what it was
previously specified by the token.
"""
if token is None:
raise TypeError("token must not be none")
if token.redeemed:
raise ValueError("token has already been used")
if token._var is not self:
raise ValueError("token is not for us")
previous = token.previous_value
try:
if previous is _RunVarToken._no_value:
_run.GLOBAL_RUN_CONTEXT.runner._locals.pop(self)
else:
_run.GLOBAL_RUN_CONTEXT.runner._locals[self] = previous
except AttributeError:
raise RuntimeError("Cannot be used outside of a run context")
token.redeemed = True | [
"def",
"reset",
"(",
"self",
",",
"token",
")",
":",
"if",
"token",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"token must not be none\"",
")",
"if",
"token",
".",
"redeemed",
":",
"raise",
"ValueError",
"(",
"\"token has already been used\"",
")",
"if",
"token",
".",
"_var",
"is",
"not",
"self",
":",
"raise",
"ValueError",
"(",
"\"token is not for us\"",
")",
"previous",
"=",
"token",
".",
"previous_value",
"try",
":",
"if",
"previous",
"is",
"_RunVarToken",
".",
"_no_value",
":",
"_run",
".",
"GLOBAL_RUN_CONTEXT",
".",
"runner",
".",
"_locals",
".",
"pop",
"(",
"self",
")",
"else",
":",
"_run",
".",
"GLOBAL_RUN_CONTEXT",
".",
"runner",
".",
"_locals",
"[",
"self",
"]",
"=",
"previous",
"except",
"AttributeError",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot be used outside of a run context\"",
")",
"token",
".",
"redeemed",
"=",
"True"
] | [
70,
4
] | [
93,
29
] | python | en | ['en', 'en', 'en'] | True |
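The three methods above form a token-based save/restore pattern scoped to a single call to trio.run. A small sketch, assuming Trio exposes the class as trio.lowlevel.RunVar (older releases used trio.hazmat):

import trio
from trio.lowlevel import RunVar

request_id = RunVar("request_id", default="unset")

async def main():
    print(request_id.get())           # falls back to the default: "unset"
    token = request_id.set("abc123")
    print(request_id.get())           # "abc123"
    request_id.reset(token)           # restore the previous state
    print(request_id.get())           # "unset" again

trio.run(main)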
parse_result_format | (result_format) | This is a simple helper utility that can be used to parse a string result_format into the dict format used
internally by great_expectations. It is not necessary but allows shorthand for result_format in cases where
there is no need to specify a custom partial_unexpected_count. | This is a simple helper utility that can be used to parse a string result_format into the dict format used
internally by great_expectations. It is not necessary but allows shorthand for result_format in cases where
there is no need to specify a custom partial_unexpected_count. | def parse_result_format(result_format):
"""This is a simple helper utility that can be used to parse a string result_format into the dict format used
internally by great_expectations. It is not necessary but allows shorthand for result_format in cases where
there is no need to specify a custom partial_unexpected_count."""
if isinstance(result_format, str):
result_format = {"result_format": result_format, "partial_unexpected_count": 20}
else:
if "partial_unexpected_count" not in result_format:
result_format["partial_unexpected_count"] = 20
return result_format | [
"def",
"parse_result_format",
"(",
"result_format",
")",
":",
"if",
"isinstance",
"(",
"result_format",
",",
"str",
")",
":",
"result_format",
"=",
"{",
"\"result_format\"",
":",
"result_format",
",",
"\"partial_unexpected_count\"",
":",
"20",
"}",
"else",
":",
"if",
"\"partial_unexpected_count\"",
"not",
"in",
"result_format",
":",
"result_format",
"[",
"\"partial_unexpected_count\"",
"]",
"=",
"20",
"return",
"result_format"
] | [
18,
0
] | [
28,
24
] | python | en | ['en', 'en', 'en'] | True |
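A short sketch of the shorthand expansion, assuming the helper above is in scope:

# String shorthand expands to a dict with the default partial_unexpected_count of 20.
print(parse_result_format("SUMMARY"))
# {'result_format': 'SUMMARY', 'partial_unexpected_count': 20}

# Dict input is passed through, only filling in the missing default.
print(parse_result_format({"result_format": "COMPLETE"}))
# {'result_format': 'COMPLETE', 'partial_unexpected_count': 20}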
recursively_convert_to_json_serializable | (test_obj) |
Helper function to convert a dict object to one that is serializable
Args:
test_obj: an object to attempt to convert a corresponding json-serializable object
Returns:
(dict) A converted test_object
Warning:
test_obj may also be converted in place.
|
Helper function to convert a dict object to one that is serializable | def recursively_convert_to_json_serializable(test_obj):
"""
Helper function to convert a dict object to one that is serializable
Args:
test_obj: an object to attempt to convert a corresponding json-serializable object
Returns:
(dict) A converted test_object
Warning:
test_obj may also be converted in place.
"""
# If it's one of our types, we pass
if isinstance(
test_obj,
(
ExpectationConfiguration,
ExpectationSuite,
ExpectationValidationResult,
ExpectationSuiteValidationResult,
),
):
return test_obj
# Validate that all arguments are of approved types, coerce if it's easy, else exception
# print(type(test_obj), test_obj)
# Note: Not 100% sure I've resolved this correctly...
try:
if not isinstance(test_obj, list) and np.isnan(test_obj):
# np.isnan is functionally vectorized, but we only want to apply this to single objects
# Hence, why we test for `not isinstance(list))`
return None
except (TypeError, ValueError):
pass
if isinstance(test_obj, (str, int, float, bool)):
# No problem to encode json
return test_obj
elif isinstance(test_obj, dict):
new_dict = {}
for key in test_obj:
# A pandas index can be numeric, and a dict key can be numeric, but a json key must be a string
new_dict[str(key)] = recursively_convert_to_json_serializable(test_obj[key])
return new_dict
elif isinstance(test_obj, (list, tuple, set)):
new_list = []
for val in test_obj:
new_list.append(recursively_convert_to_json_serializable(val))
return new_list
elif isinstance(test_obj, (np.ndarray, pd.Index)):
# test_obj[key] = test_obj[key].tolist()
# If we have an array or index, convert it first to a list--causing coercion to float--and then round
# to the number of digits for which the string representation will equal the float representation
return [recursively_convert_to_json_serializable(x) for x in test_obj.tolist()]
# Note: This clause has to come after checking for np.ndarray or we get:
# `ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()`
elif test_obj is None:
# No problem to encode json
return test_obj
elif isinstance(test_obj, (datetime.datetime, datetime.date)):
return str(test_obj)
# Use built in base type from numpy, https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
# https://github.com/numpy/numpy/pull/9505
elif np.issubdtype(type(test_obj), np.bool_):
return bool(test_obj)
elif np.issubdtype(type(test_obj), np.integer) or np.issubdtype(
type(test_obj), np.uint
):
return int(test_obj)
elif np.issubdtype(type(test_obj), np.floating):
# Note: Use np.floating to avoid FutureWarning from numpy
return float(round(test_obj, sys.float_info.dig))
elif isinstance(test_obj, pd.Series):
# Converting a series is tricky since the index may not be a string, but all json
# keys must be strings. So, we use a very ugly serialization strategy
index_name = test_obj.index.name or "index"
value_name = test_obj.name or "value"
return [
{
index_name: recursively_convert_to_json_serializable(idx),
value_name: recursively_convert_to_json_serializable(val),
}
for idx, val in test_obj.iteritems()
]
elif isinstance(test_obj, pd.DataFrame):
return recursively_convert_to_json_serializable(
test_obj.to_dict(orient="records")
)
# elif np.issubdtype(type(test_obj), np.complexfloating):
# Note: Use np.complexfloating to avoid Future Warning from numpy
# Complex numbers consist of two floating point numbers
# return complex(
# float(round(test_obj.real, sys.float_info.dig)),
# float(round(test_obj.imag, sys.float_info.dig)))
elif isinstance(test_obj, decimal.Decimal):
return float(test_obj)
else:
raise TypeError(
"%s is of type %s which cannot be serialized."
% (str(test_obj), type(test_obj).__name__)
) | [
"def",
"recursively_convert_to_json_serializable",
"(",
"test_obj",
")",
":",
"# If it's one of our types, we pass",
"if",
"isinstance",
"(",
"test_obj",
",",
"(",
"ExpectationConfiguration",
",",
"ExpectationSuite",
",",
"ExpectationValidationResult",
",",
"ExpectationSuiteValidationResult",
",",
")",
",",
")",
":",
"return",
"test_obj",
"# Validate that all aruguments are of approved types, coerce if it's easy, else exception",
"# print(type(test_obj), test_obj)",
"# Note: Not 100% sure I've resolved this correctly...",
"try",
":",
"if",
"not",
"isinstance",
"(",
"test_obj",
",",
"list",
")",
"and",
"np",
".",
"isnan",
"(",
"test_obj",
")",
":",
"# np.isnan is functionally vectorized, but we only want to apply this to single objects",
"# Hence, why we test for `not isinstance(list))`",
"return",
"None",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"if",
"isinstance",
"(",
"test_obj",
",",
"(",
"str",
",",
"int",
",",
"float",
",",
"bool",
")",
")",
":",
"# No problem to encode json",
"return",
"test_obj",
"elif",
"isinstance",
"(",
"test_obj",
",",
"dict",
")",
":",
"new_dict",
"=",
"{",
"}",
"for",
"key",
"in",
"test_obj",
":",
"# A pandas index can be numeric, and a dict key can be numeric, but a json key must be a string",
"new_dict",
"[",
"str",
"(",
"key",
")",
"]",
"=",
"recursively_convert_to_json_serializable",
"(",
"test_obj",
"[",
"key",
"]",
")",
"return",
"new_dict",
"elif",
"isinstance",
"(",
"test_obj",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"new_list",
"=",
"[",
"]",
"for",
"val",
"in",
"test_obj",
":",
"new_list",
".",
"append",
"(",
"recursively_convert_to_json_serializable",
"(",
"val",
")",
")",
"return",
"new_list",
"elif",
"isinstance",
"(",
"test_obj",
",",
"(",
"np",
".",
"ndarray",
",",
"pd",
".",
"Index",
")",
")",
":",
"# test_obj[key] = test_obj[key].tolist()",
"# If we have an array or index, convert it first to a list--causing coercion to float--and then round",
"# to the number of digits for which the string representation will equal the float representation",
"return",
"[",
"recursively_convert_to_json_serializable",
"(",
"x",
")",
"for",
"x",
"in",
"test_obj",
".",
"tolist",
"(",
")",
"]",
"# Note: This clause has to come after checking for np.ndarray or we get:",
"# `ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()`",
"elif",
"test_obj",
"is",
"None",
":",
"# No problem to encode json",
"return",
"test_obj",
"elif",
"isinstance",
"(",
"test_obj",
",",
"(",
"datetime",
".",
"datetime",
",",
"datetime",
".",
"date",
")",
")",
":",
"return",
"str",
"(",
"test_obj",
")",
"# Use built in base type from numpy, https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html",
"# https://github.com/numpy/numpy/pull/9505",
"elif",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"test_obj",
")",
",",
"np",
".",
"bool_",
")",
":",
"return",
"bool",
"(",
"test_obj",
")",
"elif",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"test_obj",
")",
",",
"np",
".",
"integer",
")",
"or",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"test_obj",
")",
",",
"np",
".",
"uint",
")",
":",
"return",
"int",
"(",
"test_obj",
")",
"elif",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"test_obj",
")",
",",
"np",
".",
"floating",
")",
":",
"# Note: Use np.floating to avoid FutureWarning from numpy",
"return",
"float",
"(",
"round",
"(",
"test_obj",
",",
"sys",
".",
"float_info",
".",
"dig",
")",
")",
"elif",
"isinstance",
"(",
"test_obj",
",",
"pd",
".",
"Series",
")",
":",
"# Converting a series is tricky since the index may not be a string, but all json",
"# keys must be strings. So, we use a very ugly serialization strategy",
"index_name",
"=",
"test_obj",
".",
"index",
".",
"name",
"or",
"\"index\"",
"value_name",
"=",
"test_obj",
".",
"name",
"or",
"\"value\"",
"return",
"[",
"{",
"index_name",
":",
"recursively_convert_to_json_serializable",
"(",
"idx",
")",
",",
"value_name",
":",
"recursively_convert_to_json_serializable",
"(",
"val",
")",
",",
"}",
"for",
"idx",
",",
"val",
"in",
"test_obj",
".",
"iteritems",
"(",
")",
"]",
"elif",
"isinstance",
"(",
"test_obj",
",",
"pd",
".",
"DataFrame",
")",
":",
"return",
"recursively_convert_to_json_serializable",
"(",
"test_obj",
".",
"to_dict",
"(",
"orient",
"=",
"\"records\"",
")",
")",
"# elif np.issubdtype(type(test_obj), np.complexfloating):",
"# Note: Use np.complexfloating to avoid Future Warning from numpy",
"# Complex numbers consist of two floating point numbers",
"# return complex(",
"# float(round(test_obj.real, sys.float_info.dig)),",
"# float(round(test_obj.imag, sys.float_info.dig)))",
"elif",
"isinstance",
"(",
"test_obj",
",",
"decimal",
".",
"Decimal",
")",
":",
"return",
"float",
"(",
"test_obj",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"%s is of type %s which cannot be serialized.\"",
"%",
"(",
"str",
"(",
"test_obj",
")",
",",
"type",
"(",
"test_obj",
")",
".",
"__name__",
")",
")"
] | [
85,
0
] | [
202,
9
] | python | en | ['en', 'error', 'th'] | False |
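A small sketch of the conversions performed above, assuming the function is in scope; the payload values are illustrative.

import json
import numpy as np
import pandas as pd

payload = {
    "count": np.int64(3),                  # numpy integer -> int
    "values": np.array([1, 2, 3]),         # ndarray -> list
    "frame": pd.DataFrame({"a": [1, 2]}),  # DataFrame -> list of records
    "missing": float("nan"),               # NaN -> None
}
clean = recursively_convert_to_json_serializable(payload)
print(json.dumps(clean))  # now serializes without a TypeError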
TeamcityFormatter._report_suite_started | (self, suite, suite_name) |
:param suite: behave suite
:param suite_name: suite name that must be reported, be sure to use it instead of suite.name
|
:param suite: behave suite
:param suite_name: suite name that must be reported, be sure to use it instead of suite.name | def _report_suite_started(self, suite, suite_name):
"""
:param suite: behave suite
:param suite_name: suite name that must be reported, be sure to use it instead of suite.name
"""
self._messages.testSuiteStarted(suite_name) | [
"def",
"_report_suite_started",
"(",
"self",
",",
"suite",
",",
"suite_name",
")",
":",
"self",
".",
"_messages",
".",
"testSuiteStarted",
"(",
"suite_name",
")"
] | [
78,
4
] | [
84,
51
] | python | en | ['en', 'error', 'th'] | False |
TeamcityFormatter._report_test_started | (self, test, test_name) |
Suite name is always stripped, be sure to strip() it too
:param test: behave test
:param test_name: test name that must be reported, be sure to use it instead of test.name
|
Suite name is always stripped, be sure to strip() it too
:param test: behave test
:param test_name: test name that must be reported, be sure to use it instead of test.name
| def _report_test_started(self, test, test_name):
"""
Suite name is always stripped, be sure to strip() it too
:param test: behave test
:param test_name: test name that must be reported, be sure to use it instead of test.name
"""
self._messages.testStarted(test_name) | [
"def",
"_report_test_started",
"(",
"self",
",",
"test",
",",
"test_name",
")",
":",
"self",
".",
"_messages",
".",
"testStarted",
"(",
"test_name",
")"
] | [
86,
4
] | [
92,
45
] | python | en | ['en', 'error', 'th'] | False |
Template.load | (self) | Load this Template as an MLPipeline.
Returns:
MLPipeline
| Load this Template as an MLPipeline. | def load(self):
"""Load this Template as an MLPipeline.
Returns:
MLPipeline
"""
return MLPipeline(self.json) | [
"def",
"load",
"(",
"self",
")",
":",
"return",
"MLPipeline",
"(",
"self",
".",
"json",
")"
] | [
91,
4
] | [
97,
36
] | python | en | ['en', 'en', 'en'] | True |
Pipeline.load | (self) | Load this Pipeline as an MLPipeline.
Returns:
MLPipeline
| Load this Pipeline as an MLPipeline. | def load(self):
"""Load this Pipeline as an MLPipeline.
Returns:
MLPipeline
"""
return MLPipeline(self.json) | [
"def",
"load",
"(",
"self",
")",
":",
"return",
"MLPipeline",
"(",
"self",
".",
"json",
")"
] | [
118,
4
] | [
124,
36
] | python | en | ['en', 'en', 'en'] | True |
Datarun.start | (self) | Mark this Datarun as started on DB.
The ``start_time`` will be set to ``datetime.utcnow()``,
the ``status`` will be set to RUNNING and the software
versions will be captured.
| Mark this Datarun as started on DB. | def start(self):
"""Mark this Datarun as started on DB.
The ``start_time`` will be set to ``datetime.utcnow()``,
the ``status`` will be set to RUNNING and the software
versions will be captured.
"""
self.start_time = datetime.utcnow()
self.status = self.STATUS_RUNNING
self.software_versions = self._software_versions
self.save() | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"start_time",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"status",
"=",
"self",
".",
"STATUS_RUNNING",
"self",
".",
"software_versions",
"=",
"self",
".",
"_software_versions",
"self",
".",
"save",
"(",
")"
] | [
176,
4
] | [
186,
19
] | python | en | ['en', 'en', 'en'] | True |
Datarun.end | (self, status) | Mark this Datarun as ended on DB.
The ``end_time`` will be set to ``datetime.utcnow()``, the ``status``
will be set to the given value, and the ``num_events`` field will be
populated with the sum of the events detected by the children Signalruns.
| Mark this Datarun as ended on DB. | def end(self, status):
"""Mark this Datarun as ended on DB.
The ``end_time`` will be set to ``datetime.utcnow()``, the ``status``
will be set to the given value, and the ``num_events`` field will be
populated with the sum of the events detected by the children Signalruns.
"""
self.end_time = datetime.utcnow()
self.status = status
self.num_events = Event.find(signalrun__in=self.signalruns).count()
self.save() | [
"def",
"end",
"(",
"self",
",",
"status",
")",
":",
"self",
".",
"end_time",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"status",
"=",
"status",
"self",
".",
"num_events",
"=",
"Event",
".",
"find",
"(",
"signalrun__in",
"=",
"self",
".",
"signalruns",
")",
".",
"count",
"(",
")",
"self",
".",
"save",
"(",
")"
] | [
188,
4
] | [
198,
19
] | python | en | ['en', 'en', 'en'] | True |
Signalrun.start | (self) | Mark this Signalrun as started on DB.
The ``start_time`` will be set to ``datetime.utcnow()``,
the ``status`` will be set to RUNNING.
| Mark this Signalrun as started on DB. | def start(self):
"""Mark this Signalrun as started on DB.
The ``start_time`` will be set to ``datetime.utcnow()``,
the ``status`` will be set to RUNNING.
"""
self.start_time = datetime.utcnow()
self.status = self.STATUS_RUNNING
self.save() | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"start_time",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"status",
"=",
"self",
".",
"STATUS_RUNNING",
"self",
".",
"save",
"(",
")"
] | [
223,
4
] | [
231,
19
] | python | en | ['en', 'en', 'en'] | True |
Signalrun.end | (self, status, events) | Mark this Signalrun as ended on DB.
The ``end_time`` will be set to ``datetime.utcnow()``, the ``status``
will be set to the given value, and the given events will be inserted
into the Database.
| Mark this Signalrun as ended on DB. | def end(self, status, events):
"""Mark this Signalrun as ended on DB.
The ``end_time`` will be set to ``datetime.utcnow()``, the ``status``
will be set to the given value, and the given events will be inserted
into the Database.
"""
try:
if events is None:
events = []
for start_time, stop_time, severity in events:
Event.insert(
signalrun=self,
signal=self.signal,
start_time=start_time,
stop_time=stop_time,
severity=severity,
source=Event.SOURCE_ORION,
)
except Exception:
LOGGER.exception('Error storing signalrun %s events', self.id)
status = self.STATUS_ERRORED
self.end_time = datetime.utcnow()
self.status = status
self.num_events = len(events)
self.save() | [
"def",
"end",
"(",
"self",
",",
"status",
",",
"events",
")",
":",
"try",
":",
"if",
"events",
"is",
"None",
":",
"events",
"=",
"[",
"]",
"for",
"start_time",
",",
"stop_time",
",",
"severity",
"in",
"events",
":",
"Event",
".",
"insert",
"(",
"signalrun",
"=",
"self",
",",
"signal",
"=",
"self",
".",
"signal",
",",
"start_time",
"=",
"start_time",
",",
"stop_time",
"=",
"stop_time",
",",
"severity",
"=",
"severity",
",",
"source",
"=",
"Event",
".",
"SOURCE_ORION",
",",
")",
"except",
"Exception",
":",
"LOGGER",
".",
"exception",
"(",
"'Error storing signalrun %s events'",
",",
"self",
".",
"id",
")",
"status",
"=",
"self",
".",
"STATUS_ERRORED",
"self",
".",
"end_time",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"status",
"=",
"status",
"self",
".",
"num_events",
"=",
"len",
"(",
"events",
")",
"self",
".",
"save",
"(",
")"
] | [
233,
4
] | [
259,
19
] | python | en | ['en', 'en', 'en'] | True |
docs | (ctx) | Data Docs operations | Data Docs operations | def docs(ctx):
"""Data Docs operations"""
directory: str = toolkit.parse_cli_config_file_location(
config_file_location=ctx.obj.config_file_location
).get("directory")
context: DataContext = toolkit.load_data_context_with_error_handling(
directory=directory,
from_cli_upgrade_command=False,
)
# TODO consider moving this all the way up into the CLIState constructor
ctx.obj.data_context = context
usage_stats_prefix = f"cli.docs.{ctx.invoked_subcommand}"
toolkit.send_usage_message(
data_context=context,
event=f"{usage_stats_prefix}.begin",
success=True,
)
ctx.obj.usage_event_end = f"{usage_stats_prefix}.end" | [
"def",
"docs",
"(",
"ctx",
")",
":",
"directory",
":",
"str",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"context",
":",
"DataContext",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
"=",
"directory",
",",
"from_cli_upgrade_command",
"=",
"False",
",",
")",
"# TODO consider moving this all the way up in to the CLIState constructor",
"ctx",
".",
"obj",
".",
"data_context",
"=",
"context",
"usage_stats_prefix",
"=",
"f\"cli.docs.{ctx.invoked_subcommand}\"",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"f\"{usage_stats_prefix}.begin\"",
",",
"success",
"=",
"True",
",",
")",
"ctx",
".",
"obj",
".",
"usage_event_end",
"=",
"f\"{usage_stats_prefix}.end\""
] | [
11,
0
] | [
29,
57
] | python | en | ['en', 'bg', 'en'] | True |
docs_build | (ctx, site_name=None, no_view=False) | Build Data Docs for a project. | Build Data Docs for a project. | def docs_build(ctx, site_name=None, no_view=False):
"""Build Data Docs for a project."""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
if site_name is not None and site_name not in context.get_site_names():
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
message=f"<red>The specified site name `{site_name}` does not exist in this project.</red>",
)
if site_name is None:
sites_to_build = context.get_site_names()
else:
sites_to_build = [site_name]
build_docs(
context,
usage_stats_event=usage_event_end,
site_names=sites_to_build,
view=not no_view,
assume_yes=ctx.obj.assume_yes,
)
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
) | [
"def",
"docs_build",
"(",
"ctx",
",",
"site_name",
"=",
"None",
",",
"no_view",
"=",
"False",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"if",
"site_name",
"is",
"not",
"None",
"and",
"site_name",
"not",
"in",
"context",
".",
"get_site_names",
"(",
")",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>The specified site name `{site_name}` does not exist in this project.</red>\"",
",",
")",
"if",
"site_name",
"is",
"None",
":",
"sites_to_build",
"=",
"context",
".",
"get_site_names",
"(",
")",
"else",
":",
"sites_to_build",
"=",
"[",
"site_name",
"]",
"build_docs",
"(",
"context",
",",
"usage_stats_event",
"=",
"usage_event_end",
",",
"site_names",
"=",
"sites_to_build",
",",
"view",
"=",
"not",
"no_view",
",",
"assume_yes",
"=",
"ctx",
".",
"obj",
".",
"assume_yes",
",",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")"
] | [
47,
0
] | [
72,
5
] | python | en | ['en', 'en', 'en'] | True |
docs_list | (ctx) | List known Data Docs sites. | List known Data Docs sites. | def docs_list(ctx):
"""List known Data Docs sites."""
context = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
docs_sites_url_dicts = context.get_docs_sites_urls()
try:
if len(docs_sites_url_dicts) == 0:
cli_message("No Data Docs sites found")
else:
docs_sites_strings = [
" - <cyan>{}</cyan>: {}".format(
docs_site_dict["site_name"],
docs_site_dict.get("site_url")
or f"site configured but does not exist. Run the following command to build site: great_expectations "
f'docs build --site-name {docs_site_dict["site_name"]}',
)
for docs_site_dict in docs_sites_url_dicts
]
list_intro_string = _build_intro_string(docs_sites_strings)
cli_message_list(docs_sites_strings, list_intro_string)
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
)
except Exception as e:
toolkit.exit_with_failure_message_and_stats(
context=context,
usage_event=usage_event_end,
message=f"<red>{e}</red>",
)
return | [
"def",
"docs_list",
"(",
"ctx",
")",
":",
"context",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"docs_sites_url_dicts",
"=",
"context",
".",
"get_docs_sites_urls",
"(",
")",
"try",
":",
"if",
"len",
"(",
"docs_sites_url_dicts",
")",
"==",
"0",
":",
"cli_message",
"(",
"\"No Data Docs sites found\"",
")",
"else",
":",
"docs_sites_strings",
"=",
"[",
"\" - <cyan>{}</cyan>: {}\"",
".",
"format",
"(",
"docs_site_dict",
"[",
"\"site_name\"",
"]",
",",
"docs_site_dict",
".",
"get",
"(",
"\"site_url\"",
")",
"or",
"f\"site configured but does not exist. Run the following command to build site: great_expectations \"",
"f'docs build --site-name {docs_site_dict[\"site_name\"]}'",
",",
")",
"for",
"docs_site_dict",
"in",
"docs_sites_url_dicts",
"]",
"list_intro_string",
"=",
"_build_intro_string",
"(",
"docs_sites_strings",
")",
"cli_message_list",
"(",
"docs_sites_strings",
",",
"list_intro_string",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>{e}</red>\"",
",",
")",
"return"
] | [
77,
0
] | [
109,
14
] | python | en | ['en', 'fr', 'en'] | True |
docs_clean | (ctx, site_name=None, all_sites=False) |
Remove all files from a Data Docs site.
This is a useful first step if you wish to completely re-build a site from scratch.
|
Remove all files from a Data Docs site. | def docs_clean(ctx, site_name=None, all_sites=False):
"""
Remove all files from a Data Docs site.
This is a useful first step if you wish to completely re-build a site from scratch.
"""
context = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
if (site_name is None and all_sites is False) or (site_name and all_sites):
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
message="<red>Please specify either --all to clean all sites or a specific site using --site-name</red>",
)
try:
# if site_name is None, context.clean_data_docs(site_name=site_name)
# will clean all sites.
context.clean_data_docs(site_name=site_name)
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
)
cli_message("<green>{}</green>".format("Cleaned data docs"))
except DataContextError as de:
toolkit.exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event_end,
message=f"<red>{de}</red>",
) | [
"def",
"docs_clean",
"(",
"ctx",
",",
"site_name",
"=",
"None",
",",
"all_sites",
"=",
"False",
")",
":",
"context",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"if",
"(",
"site_name",
"is",
"None",
"and",
"all_sites",
"is",
"False",
")",
"or",
"(",
"site_name",
"and",
"all_sites",
")",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"\"<red>Please specify either --all to clean all sites or a specific site using --site-name</red>\"",
",",
")",
"try",
":",
"# if site_name is None, context.clean_data_docs(site_name=site_name)",
"# will clean all sites.",
"context",
".",
"clean_data_docs",
"(",
"site_name",
"=",
"site_name",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"cli_message",
"(",
"\"<green>{}</green>\"",
".",
"format",
"(",
"\"Cleaned data docs\"",
")",
")",
"except",
"DataContextError",
"as",
"de",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>{de}</red>\"",
",",
")"
] | [
126,
0
] | [
154,
9
] | python | en | ['en', 'error', 'th'] | False |
visual_baseline_folder_setup | () | Create the visual baseline folder if it does not already exist. | Create the visual baseline folder if it does not already exist. | def visual_baseline_folder_setup():
""" Create the visual baseline folder if it does not already exist. """
if not os.path.exists(visual_baseline_path):
try:
os.makedirs(visual_baseline_path)
except Exception:
pass | [
"def",
"visual_baseline_folder_setup",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"visual_baseline_path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"visual_baseline_path",
")",
"except",
"Exception",
":",
"pass"
] | [
12,
0
] | [
18,
16
] | python | en | ['it', 'ja', 'en'] | False |
get_filetype_from_buffer | (buf, max_lines=5) |
Scan the buffer for modelines and return filetype if one is found.
|
Scan the buffer for modelines and return filetype if one is found.
| def get_filetype_from_buffer(buf, max_lines=5):
"""
Scan the buffer for modelines and return filetype if one is found.
"""
lines = buf.splitlines()
for l in lines[-1:-max_lines-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
for l in lines[max_lines:-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
return None | [
"def",
"get_filetype_from_buffer",
"(",
"buf",
",",
"max_lines",
"=",
"5",
")",
":",
"lines",
"=",
"buf",
".",
"splitlines",
"(",
")",
"for",
"l",
"in",
"lines",
"[",
"-",
"1",
":",
"-",
"max_lines",
"-",
"1",
":",
"-",
"1",
"]",
":",
"ret",
"=",
"get_filetype_from_line",
"(",
"l",
")",
"if",
"ret",
":",
"return",
"ret",
"for",
"l",
"in",
"lines",
"[",
"max_lines",
":",
"-",
"1",
":",
"-",
"1",
"]",
":",
"ret",
"=",
"get_filetype_from_line",
"(",
"l",
")",
"if",
"ret",
":",
"return",
"ret",
"return",
"None"
] | [
28,
0
] | [
42,
15
] | python | en | ['en', 'error', 'th'] | False |
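A sketch of the modeline scan above; get_filetype_from_line is defined elsewhere in the same module (not shown here), and this example assumes it recognises Vim-style `ft=` hints.

# The last few lines of the buffer are checked first, so a trailing modeline wins.
source = "\n".join([
    "print('hello')",
    "# vim: ft=python",
])
print(get_filetype_from_buffer(source))  # "python" if the modeline is recognised, else None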
cli | (verbose) |
Welcome to the great_expectations CLI!
Most commands follow this format: great_expectations <NOUN> <VERB>
The nouns are: datasource, docs, project, suite, validation-operator
Most nouns accept the following verbs: new, list, edit
In particular, the CLI supports the following special commands:
- great_expectations init : create a new great_expectations project
- great_expectations datasource profile : profile a datasource
- great_expectations docs build : compile documentation from expectations |
Welcome to the great_expectations CLI! | def cli(verbose):
"""
Welcome to the great_expectations CLI!
Most commands follow this format: great_expectations <NOUN> <VERB>
The nouns are: datasource, docs, project, suite, validation-operator
Most nouns accept the following verbs: new, list, edit
In particular, the CLI supports the following special commands:
- great_expectations init : create a new great_expectations project
- great_expectations datasource profile : profile a datasource
- great_expectations docs build : compile documentation from expectations"""
logger = _set_up_logger()
if verbose:
# Note we are explicitly not using a logger in all CLI output to have
# more control over console UI.
logger.setLevel(logging.DEBUG) | [
"def",
"cli",
"(",
"verbose",
")",
":",
"logger",
"=",
"_set_up_logger",
"(",
")",
"if",
"verbose",
":",
"# Note we are explicitly not using a logger in all CLI output to have",
"# more control over console UI.",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")"
] | [
35,
0
] | [
56,
38
] | python | en | ['en', 'error', 'th'] | False |
Skeleton.remove_joints | (self, joints_to_remove) |
Remove the joints specified in 'joints_to_remove'.
|
Remove the joints specified in 'joints_to_remove'.
| def remove_joints(self, joints_to_remove):
"""
Remove the joints specified in 'joints_to_remove'.
"""
valid_joints = []
for joint in range(len(self._parents)):
if joint not in joints_to_remove:
valid_joints.append(joint)
for i in range(len(self._parents)):
while self._parents[i] in joints_to_remove:
self._parents[i] = self._parents[self._parents[i]]
index_offsets = np.zeros(len(self._parents), dtype=int)
new_parents = []
for i, parent in enumerate(self._parents):
if i not in joints_to_remove:
new_parents.append(parent - index_offsets[parent])
else:
index_offsets[i:] += 1
self._parents = np.array(new_parents)
if self._joints_left is not None:
new_joints_left = []
for joint in self._joints_left:
if joint in valid_joints:
new_joints_left.append(joint - index_offsets[joint])
self._joints_left = new_joints_left
if self._joints_right is not None:
new_joints_right = []
for joint in self._joints_right:
if joint in valid_joints:
new_joints_right.append(joint - index_offsets[joint])
self._joints_right = new_joints_right
self._compute_metadata()
return valid_joints | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
29,
4
] | [
66,
27
] | python | en | ['en', 'error', 'th'] | False |
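A minimal standalone sketch of the re-parenting and index-compaction idea at the heart of `remove_joints`; it does not use the Skeleton class itself, whose constructor is not shown here, and the toy `parents` array is invented for illustration.

import numpy as np

# Toy illustration: parent pointers are first rerouted past removed joints,
# then indices are compacted so surviving joints are numbered 0..n-1.
parents = np.array([-1, 0, 1, 2, 1, 4])   # small chain with one branch
to_remove = {2}

# Step 1: reroute any parent that points at a removed joint.
for i in range(len(parents)):
    while parents[i] in to_remove:
        parents[i] = parents[parents[i]]

# Step 2: shift indices down to account for the removed joints.
index_offsets = np.zeros(len(parents), dtype=int)
new_parents = []
for i, parent in enumerate(parents):
    if i in to_remove:
        index_offsets[i:] += 1
    else:
        new_parents.append(int(parent - index_offsets[parent]) if parent >= 0 else -1)

print(new_parents)   # [-1, 0, 1, 1, 3]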
test_password_masker_mask_db_url | (monkeypatch, tmp_path) |
What does this test and why?
The PasswordMasker.mask_db_url() should mask passwords consistently in database urls. The output of mask_db_url should be the same whether user_urlparse is set to True or False.
This test uses database url examples from
https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls
|
What does this test and why?
The PasswordMasker.mask_db_url() should mask passwords consistently in database urls. The output of mask_db_url should be the same whether user_urlparse is set to True or False.
This test uses database url examples from
https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls
| def test_password_masker_mask_db_url(monkeypatch, tmp_path):
"""
What does this test and why?
The PasswordMasker.mask_db_url() should mask passwords consistently in database urls. The output of mask_db_url should be the same whether user_urlparse is set to True or False.
This test uses database url examples from
https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls
"""
# PostgreSQL
# default
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql://scott:***@{db_hostname}:65432/mydatabase"
)
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql://scott:***@{db_hostname}:65432/mydatabase"
)
# missing port number, using urlparse
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}/mydatabase", use_urlparse=True
)
== f"postgresql://scott:***@{db_hostname}/mydatabase"
)
# psycopg2
assert (
PasswordMasker.mask_db_url(
f"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql+psycopg2://scott:***@{db_hostname}:65432/mydatabase"
)
assert (
PasswordMasker.mask_db_url(
f"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql+psycopg2://scott:***@{db_hostname}:65432/mydatabase"
)
# pg8000 (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql+pg8000://scott:***@{db_hostname}:65432/mydatabase"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql+pg8000://scott:***@{db_hostname}:65432/mydatabase"
)
# MySQL
# default (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(f"mysql://scott:tiger@{db_hostname}:65432/foo")
== f"mysql://scott:***@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql://scott:***@{db_hostname}:65432/foo"
)
# mysqlclient (a maintained fork of MySQL-Python) (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo"
)
== f"mysql+mysqldb://scott:***@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql+mysqldb://scott:***@{db_hostname}:65432/foo"
)
# PyMySQL
assert (
PasswordMasker.mask_db_url(
f"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo"
)
== f"mysql+pymysql://scott:***@{db_hostname}:65432/foo"
)
assert (
PasswordMasker.mask_db_url(
f"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql+pymysql://scott:***@{db_hostname}:65432/foo"
)
# Oracle (if installed in test environment)
url_host = os.getenv("GE_TEST_LOCALHOST_URL", "127.0.0.1")
try:
assert (
PasswordMasker.mask_db_url(f"oracle://scott:tiger@{url_host}:1521/sidname")
== f"oracle://scott:***@{url_host}:1521/sidname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"oracle://scott:tiger@{url_host}:1521/sidname", use_urlparse=True
)
== f"oracle://scott:***@{url_host}:1521/sidname"
)
try:
assert (
PasswordMasker.mask_db_url("oracle+cx_oracle://scott:tiger@tnsname")
== "oracle+cx_oracle://scott:***@tnsname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
"oracle+cx_oracle://scott:tiger@tnsname", use_urlparse=True
)
== "oracle+cx_oracle://scott:***@tnsname"
)
# Microsoft SQL Server
# pyodbc
assert (
PasswordMasker.mask_db_url("mssql+pyodbc://scott:tiger@mydsn")
== "mssql+pyodbc://scott:***@mydsn"
)
assert (
PasswordMasker.mask_db_url(
"mssql+pyodbc://scott:tiger@mydsn", use_urlparse=True
)
== "mssql+pyodbc://scott:***@mydsn"
)
# pymssql (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname"
)
== f"mssql+pymssql://scott:***@{db_hostname}:12345/dbname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname", use_urlparse=True
)
== f"mssql+pymssql://scott:***@{db_hostname}:12345/dbname"
)
# SQLite
# relative path
temp_dir = tmp_path / "sqllite_tests"
temp_dir.mkdir()
monkeypatch.chdir(temp_dir)
assert (
PasswordMasker.mask_db_url(f"sqlite:///something/foo.db")
== f"sqlite:///something/foo.db"
)
assert (
PasswordMasker.mask_db_url(f"sqlite:///something/foo.db", use_urlparse=True)
== f"sqlite:///something/foo.db"
)
# absolute path
# Unix/Mac - 4 initial slashes in total
assert (
PasswordMasker.mask_db_url("sqlite:////absolute/path/to/foo.db")
== "sqlite:////absolute/path/to/foo.db"
)
assert (
PasswordMasker.mask_db_url(
"sqlite:////absolute/path/to/foo.db", use_urlparse=True
)
== "sqlite:////absolute/path/to/foo.db"
)
# Windows
assert (
PasswordMasker.mask_db_url("sqlite:///C:\\path\\to\\foo.db")
== "sqlite:///C:\\path\\to\\foo.db"
)
assert (
PasswordMasker.mask_db_url("sqlite:///C:\\path\\to\\foo.db", use_urlparse=True)
== "sqlite:///C:\\path\\to\\foo.db"
)
# Windows alternative using raw string
assert (
PasswordMasker.mask_db_url(r"sqlite:///C:\path\to\foo.db")
== r"sqlite:///C:\path\to\foo.db"
)
assert (
PasswordMasker.mask_db_url(r"sqlite:///C:\path\to\foo.db", use_urlparse=True)
== r"sqlite:///C:\path\to\foo.db"
)
# in-memory
assert PasswordMasker.mask_db_url("sqlite://") == "sqlite://"
assert PasswordMasker.mask_db_url("sqlite://", use_urlparse=True) == "sqlite://" | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
50,
0
] | [
272,
84
] | python | en | ['en', 'error', 'th'] | False |
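A minimal sketch of the masking behaviour these tests exercise, built on `urllib.parse`; `mask_db_url_sketch` is a hypothetical helper, and the real `PasswordMasker.mask_db_url` may be implemented differently.

from urllib.parse import urlparse

# Replace the password component of a database URL with a mask, if present.
def mask_db_url_sketch(url: str, mask: str = "***") -> str:
    password = urlparse(url).password
    if not password:
        return url
    return url.replace(f":{password}@", f":{mask}@", 1)

print(mask_db_url_sketch("postgresql://scott:tiger@localhost:65432/mydatabase"))
# postgresql://scott:***@localhost:65432/mydatabase
print(mask_db_url_sketch("sqlite:///something/foo.db"))
# sqlite:///something/foo.db (no credentials, unchanged)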
test_parse_substitution_variable | () |
What does this test and why?
Ensure parse_substitution_variable works as expected.
Returns:
|
What does this test and why?
Ensure parse_substitution_variable works as expected.
Returns: | def test_parse_substitution_variable():
"""
What does this test and why?
Ensure parse_substitution_variable works as expected.
Returns:
"""
assert parse_substitution_variable("${SOME_VAR}") == "SOME_VAR"
assert parse_substitution_variable("$SOME_VAR") == "SOME_VAR"
assert parse_substitution_variable("SOME_STRING") is None
assert parse_substitution_variable("SOME_$TRING") is None
assert parse_substitution_variable("${some_var}") == "some_var"
assert parse_substitution_variable("$some_var") == "some_var"
assert parse_substitution_variable("some_string") is None
assert parse_substitution_variable("some_$tring") is None
assert parse_substitution_variable("${SOME_$TRING}") is None
assert parse_substitution_variable("$SOME_$TRING") == "SOME_" | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
275,
0
] | [
291,
65
] | python | en | ['en', 'error', 'th'] | False |
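One regex-based sketch that satisfies every assertion above; `parse_substitution_variable_sketch` is a hypothetical stand-in, and the actual Great Expectations helper may be implemented differently.

import re
from typing import Optional

# First alternative: a fully braced "${VAR}"; second: a leading "$VAR" prefix.
_SUBSTITUTION_RE = re.compile(r"^\$\{([_a-zA-Z][_a-zA-Z0-9]*)\}$|^\$([_a-zA-Z][_a-zA-Z0-9]*)")

def parse_substitution_variable_sketch(value: str) -> Optional[str]:
    match = _SUBSTITUTION_RE.match(value)
    if match is None:
        return None
    return match.group(1) or match.group(2)

assert parse_substitution_variable_sketch("${SOME_VAR}") == "SOME_VAR"
assert parse_substitution_variable_sketch("SOME_$TRING") is None
assert parse_substitution_variable_sketch("$SOME_$TRING") == "SOME_"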
BaseDRLearner.__init__ | (
self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=0.05,
control_name=0,
) | Initialize a DR-learner.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize a DR-learner. | def __init__(
self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=0.05,
control_name=0,
):
"""Initialize a DR-learner.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
assert (learner is not None) or (
(control_outcome_learner is not None)
and (treatment_outcome_learner is not None)
and (treatment_effect_learner is not None)
)
if control_outcome_learner is None:
self.model_mu_c = deepcopy(learner)
else:
self.model_mu_c = control_outcome_learner
if treatment_outcome_learner is None:
self.model_mu_t = deepcopy(learner)
else:
self.model_mu_t = treatment_outcome_learner
if treatment_effect_learner is None:
self.model_tau = deepcopy(learner)
else:
self.model_tau = treatment_effect_learner
self.ate_alpha = ate_alpha
self.control_name = control_name
self.propensity = None | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
30,
4
] | [
74,
30
] | python | en | ['en', 'en', 'it'] | True |
BaseDRLearner.fit | (self, X, treatment, y, p=None, seed=None) | Fit the inference model.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
seed (int): random seed for cross-fitting
| Fit the inference model. | def fit(self, X, treatment, y, p=None, seed=None):
"""Fit the inference model.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
seed (int): random seed for cross-fitting
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
check_treatment_vector(treatment, self.control_name)
self.t_groups = np.unique(treatment[treatment != self.control_name])
self.t_groups.sort()
self._classes = {group: i for i, group in enumerate(self.t_groups)}
# The estimator splits the data into 3 partitions for cross-fit on the propensity score estimation,
# the outcome regression, and the treatment regression on the doubly robust estimates. The use of
# the partitions is rotated so we do not lose on the sample size.
cv = KFold(n_splits=3, shuffle=True, random_state=seed)
split_indices = [index for _, index in cv.split(y)]
self.models_mu_c = [
deepcopy(self.model_mu_c),
deepcopy(self.model_mu_c),
deepcopy(self.model_mu_c),
]
self.models_mu_t = {
group: [
deepcopy(self.model_mu_t),
deepcopy(self.model_mu_t),
deepcopy(self.model_mu_t),
]
for group in self.t_groups
}
self.models_tau = {
group: [
deepcopy(self.model_tau),
deepcopy(self.model_tau),
deepcopy(self.model_tau),
]
for group in self.t_groups
}
if p is None:
self.propensity = {group: np.zeros(y.shape[0]) for group in self.t_groups}
for ifold in range(3):
treatment_idx = split_indices[ifold]
outcome_idx = split_indices[(ifold + 1) % 3]
tau_idx = split_indices[(ifold + 2) % 3]
treatment_treat, treatment_out, treatment_tau = (
treatment[treatment_idx],
treatment[outcome_idx],
treatment[tau_idx],
)
y_out, y_tau = y[outcome_idx], y[tau_idx]
X_treat, X_out, X_tau = X[treatment_idx], X[outcome_idx], X[tau_idx]
if p is None:
logger.info("Generating propensity score")
cur_p = dict()
for group in self.t_groups:
mask = (treatment_treat == group) | (
treatment_treat == self.control_name
)
treatment_filt = treatment_treat[mask]
X_filt = X_treat[mask]
w_filt = (treatment_filt == group).astype(int)
w = (treatment_tau == group).astype(int)
cur_p[group], _ = compute_propensity_score(
X=X_filt, treatment=w_filt, X_pred=X_tau, treatment_pred=w
)
self.propensity[group][tau_idx] = cur_p[group]
else:
cur_p = dict()
if isinstance(p, (np.ndarray, pd.Series)):
cur_p = {self.t_groups[0]: convert_pd_to_np(p[tau_idx])}
else:
cur_p = {g: prop[tau_idx] for g, prop in p.items()}
check_p_conditions(cur_p, self.t_groups)
logger.info("Generate outcome regressions")
self.models_mu_c[ifold].fit(
X_out[treatment_out == self.control_name],
y_out[treatment_out == self.control_name],
)
for group in self.t_groups:
self.models_mu_t[group][ifold].fit(
X_out[treatment_out == group], y_out[treatment_out == group]
)
logger.info("Fit pseudo outcomes from the DR formula")
for group in self.t_groups:
mask = (treatment_tau == group) | (treatment_tau == self.control_name)
treatment_filt = treatment_tau[mask]
X_filt = X_tau[mask]
y_filt = y_tau[mask]
w_filt = (treatment_filt == group).astype(int)
p_filt = cur_p[group][mask]
mu_t = self.models_mu_t[group][ifold].predict(X_filt)
mu_c = self.models_mu_c[ifold].predict(X_filt)
dr = (
(w_filt - p_filt)
/ p_filt
/ (1 - p_filt)
* (y_filt - mu_t * w_filt - mu_c * (1 - w_filt))
+ mu_t
- mu_c
)
self.models_tau[group][ifold].fit(X_filt, dr) | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
88,
4
] | [
202,
61
] | python | en | ['en', 'en', 'en'] | True |
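For reference, the pseudo-outcome `dr` constructed in the cross-fitting loop above is the usual doubly robust (AIPW) score:

\[
\psi \;=\; \frac{W - \hat{p}(X)}{\hat{p}(X)\,\bigl(1-\hat{p}(X)\bigr)}
\Bigl(Y - \hat{\mu}_t(X)\,W - \hat{\mu}_c(X)\,(1-W)\Bigr)
\;+\; \hat{\mu}_t(X) - \hat{\mu}_c(X)
\]

where, for each treatment group, \(W\) is the treatment indicator, \(\hat{p}\) the cross-fitted propensity score, and \(\hat{\mu}_t, \hat{\mu}_c\) the treatment and control outcome regressions; `model_tau` is then trained to predict \(\psi\) from \(X\).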
BaseDRLearner.predict | (self, X, treatment=None, y=None, p=None, return_components=False, verbose=True) | Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
| Predict treatment effects. | def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True):
"""Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
te = np.zeros((X.shape[0], self.t_groups.shape[0]))
yhat_cs = {}
yhat_ts = {}
for i, group in enumerate(self.t_groups):
models_tau = self.models_tau[group]
_te = np.r_[[model.predict(X) for model in models_tau]].mean(axis=0)
te[:, i] = np.ravel(_te)
yhat_cs[group] = np.r_[[model.predict(X) for model in self.models_mu_c]].mean(axis=0)
yhat_ts[group] = np.r_[[model.predict(X) for model in self.models_mu_t[group]]].mean(axis=0)
if (y is not None) and (treatment is not None) and verbose:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
X_filt = X[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
yhat = np.zeros_like(y_filt, dtype=float)
yhat[w == 0] = yhat_cs[group][mask][w == 0]
yhat[w == 1] = yhat_ts[group][mask][w == 1]
logger.info("Error metrics for group {}".format(group))
regression_metrics(y_filt, yhat, w)
if not return_components:
return te
else:
return te, yhat_cs, yhat_ts | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
204,
4
] | [
245,
39
] | python | en | ['fr', 'en', 'en'] | True |
BaseDRLearner.fit_predict | (
self,
X,
treatment,
y,
p=None,
return_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
return_components=False,
verbose=True,
seed=None,
) | Fit the treatment effect and outcome models of the R learner and predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
return_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
return_components (bool, optional): whether to return outcomes for treatment and control separately
verbose (bool): whether to output progress logs
seed (int): random seed for cross-fitting
Returns:
(numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment]
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
| Fit the treatment effect and outcome models of the R learner and predict treatment effects. | def fit_predict(
self,
X,
treatment,
y,
p=None,
return_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
return_components=False,
verbose=True,
seed=None,
):
"""Fit the treatment effect and outcome models of the R learner and predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
return_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
return_components (bool, optional): whether to return outcomes for treatment and control separately
verbose (bool): whether to output progress logs
seed (int): random seed for cross-fitting
Returns:
(numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment]
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
self.fit(X, treatment, y, p, seed)
if p is None:
p = self.propensity
check_p_conditions(p, self.t_groups)
if isinstance(p, (np.ndarray, pd.Series)):
treatment_name = self.t_groups[0]
p = {treatment_name: convert_pd_to_np(p)}
elif isinstance(p, dict):
p = {
treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()
}
te = self.predict(
X, treatment=treatment, y=y, return_components=return_components
)
if not return_ci:
return te
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_mu_c_global = deepcopy(self.models_mu_c)
models_mu_t_global = deepcopy(self.models_mu_t)
models_tau_global = deepcopy(self.models_tau)
te_bootstraps = np.zeros(
shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)
)
logger.info("Bootstrap Confidence Intervals")
for i in tqdm(range(n_bootstraps)):
te_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
te_bootstraps[:, :, i] = te_b
te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2)
te_upper = np.percentile(
te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2
)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_mu_c = deepcopy(models_mu_c_global)
self.models_mu_t = deepcopy(models_mu_t_global)
self.models_tau = deepcopy(models_tau_global)
return (te, te_lower, te_upper) | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
247,
4
] | [
328,
43
] | python | en | ['en', 'en', 'en'] | True |
BaseDRLearner.estimate_ate | (
self,
X,
treatment,
y,
p=None,
bootstrap_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
seed=None,
) | Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
bootstrap_ci (bool): whether to run bootstrap for confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
seed (int): random seed for cross-fitting
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
| Estimate the Average Treatment Effect (ATE). | def estimate_ate(
self,
X,
treatment,
y,
p=None,
bootstrap_ci=False,
n_bootstraps=1000,
bootstrap_size=10000,
seed=None,
):
"""Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
bootstrap_ci (bool): whether to run bootstrap for confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
seed (int): random seed for cross-fitting
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
"""
te, yhat_cs, yhat_ts = self.fit_predict(X, treatment, y, p, return_components=True, seed=seed)
X, treatment, y = convert_pd_to_np(X, treatment, y)
if p is None:
p = self.propensity
else:
check_p_conditions(p, self.t_groups)
if isinstance(p, (np.ndarray, pd.Series)):
treatment_name = self.t_groups[0]
p = {treatment_name: convert_pd_to_np(p)}
elif isinstance(p, dict):
p = {
treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()
}
ate = np.zeros(self.t_groups.shape[0])
ate_lb = np.zeros(self.t_groups.shape[0])
ate_ub = np.zeros(self.t_groups.shape[0])
for i, group in enumerate(self.t_groups):
_ate = te[:, i].mean()
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
w = (treatment_filt == group).astype(int)
prob_treatment = float(sum(w)) / w.shape[0]
yhat_c = yhat_cs[group][mask]
yhat_t = yhat_ts[group][mask]
y_filt = y[mask]
# SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.
# "Recent Developments in the Econometrics of Program Evaluation." Journal of Economic Literature
se = np.sqrt((
(y_filt[w == 0] - yhat_c[w == 0]).var()
/ (1 - prob_treatment) +
(y_filt[w == 1] - yhat_t[w == 1]).var()
/ prob_treatment +
(yhat_t - yhat_c).var()
) / y_filt.shape[0])
_ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
_ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)
ate[i] = _ate
ate_lb[i] = _ate_lb
ate_ub[i] = _ate_ub
if not bootstrap_ci:
return ate, ate_lb, ate_ub
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_mu_c_global = deepcopy(self.models_mu_c)
models_mu_t_global = deepcopy(self.models_mu_t)
models_tau_global = deepcopy(self.models_tau)
logger.info("Bootstrap Confidence Intervals for ATE")
ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))
for n in tqdm(range(n_bootstraps)):
cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size, seed=seed)
ate_bootstraps[:, n] = cate_b.mean()
ate_lower = np.percentile(
ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1
)
ate_upper = np.percentile(
ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1
)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_mu_c = deepcopy(models_mu_c_global)
self.models_mu_t = deepcopy(models_mu_t_global)
self.models_tau = deepcopy(models_tau_global)
return ate, ate_lower, ate_upper | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
330,
4
] | [
434,
44
] | python | en | ['en', 'it', 'en'] | True |
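The standard error assembled above (the lower-bound formula (7) from Imbens and Wooldridge, 2009) can be written as:

\[
\widehat{SE}^2 \;=\; \frac{1}{n}\left(
\frac{\widehat{\operatorname{Var}}\bigl(Y-\hat{\mu}_c(X)\mid W=0\bigr)}{1-\hat{e}}
\;+\; \frac{\widehat{\operatorname{Var}}\bigl(Y-\hat{\mu}_t(X)\mid W=1\bigr)}{\hat{e}}
\;+\; \widehat{\operatorname{Var}}\bigl(\hat{\mu}_t(X)-\hat{\mu}_c(X)\bigr)
\right)
\]

where \(\hat{e}\) is the empirical treated fraction and \(n\) the number of samples in the combined treatment-plus-control subset; the interval is then \(\hat{\tau} \pm z_{1-\alpha/2}\,\widehat{SE}\).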
BaseDRRegressor.__init__ | (self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=.05,
control_name=0) | Initialize a DR-learner regressor.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
control_effect_learner (optional): a model to estimate treatment effects in the control group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize a DR-learner regressor. | def __init__(self,
learner=None,
control_outcome_learner=None,
treatment_outcome_learner=None,
treatment_effect_learner=None,
ate_alpha=.05,
control_name=0):
"""Initialize a DR-learner regressor.
Args:
learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment
groups
control_outcome_learner (optional): a model to estimate outcomes in the control group
treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group
control_effect_learner (optional): a model to estimate treatment effects in the control group
treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
super().__init__(
learner=learner,
control_outcome_learner=control_outcome_learner,
treatment_outcome_learner=treatment_outcome_learner,
treatment_effect_learner=treatment_effect_learner,
ate_alpha=ate_alpha,
control_name=control_name) | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
442,
4
] | [
467,
38
] | python | en | ['en', 'en', 'nl'] | True |
XGBDRRegressor.__init__ | (self, ate_alpha=.05, control_name=0, *args, **kwargs) | Initialize a DR-learner with two XGBoost models. | Initialize a DR-learner with two XGBoost models. | def __init__(self, ate_alpha=.05, control_name=0, *args, **kwargs):
"""Initialize a DR-learner with two XGBoost models."""
super().__init__(learner=XGBRegressor(*args, **kwargs),
ate_alpha=ate_alpha,
control_name=control_name) | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
470,
4
] | [
474,
51
] | python | en | ['en', 'en', 'en'] | True |
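A usage sketch for the DR-learner classes above on synthetic data; the import path is assumed from the causalml package layout and may need adjusting.

import numpy as np

# Assumed import path; the classes are defined in causalml's meta-learner module.
from causalml.inference.meta import XGBDRRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(2000, 5))
treatment = rng.binomial(1, 0.5, size=2000)
y = X[:, 0] + 1.0 * treatment + rng.normal(scale=1.0, size=2000)  # true ATE = 1.0

learner = XGBDRRegressor()
ate, ate_lb, ate_ub = learner.estimate_ate(X, treatment, y)
print(ate, ate_lb, ate_ub)                     # point estimate with confidence bounds

cate = learner.fit_predict(X, treatment, y)    # per-sample CATE, shape (n_samples, n_treatments)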
gaussian_fit | (x, y, x_smooth=None, n_pts=n_pts_smooth) |
Fits a Gaussian to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to. Must have range [0,1].
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
|
Fits a Gaussian to some data - x and y. Returns predicted interpolation values. | def gaussian_fit(x, y, x_smooth=None, n_pts=n_pts_smooth):
"""
Fits a Gaussian to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to. Must have range [0,1].
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x), n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
mean, sigma = np.nanmean(y), np.nanstd(y)
popt, pcov = curve_fit(gauss, np_scale(x), y, p0=[1, mean, sigma],
maxfev=np.iinfo(np.int32).max)
y_smooth = gauss(np_scale(x_smooth), *popt)
return x_smooth, y_smooth | [
"… function_tokens omitted (token-level duplicate of the function text above) …"
] | [
13,
0
] | [
40,
29
] | python | en | ['en', 'error', 'th'] | False |
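A minimal usage sketch for gaussian_fit above, assuming numpy is available and the function (together with the module's gauss and np_scale helpers) is importable; note the docstring requires x to span [0, 1].

    import numpy as np

    x = np.linspace(0, 1, 50)                           # x must lie in [0, 1]
    y = np.exp(-((x - 0.5) ** 2) / 0.02) + np.random.normal(scale=0.05, size=x.size)

    x_s, y_s = gaussian_fit(x, y, n_pts=200)            # 200 interpolated points of the fitted curve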
gaussian_filter_fit | (x, y, x_smooth=None, n_pts=n_pts_smooth, sigma=None) |
Fits a Gaussian filter to some data - x and y. Returns predicted interpolation values.
Currently, smoothing is achieved by fitting a cubic spline to the gaussian filter fit
of `x` and `y`.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like, optional
    The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int, optional
The number of evenly spaced points spanning the range of `x` to interpolate for.
sigma: numeric, optional
The standard deviation of the Gaussian kernel. A larger value yields a smoother curve,
    but also reduces the closeness of the fit. By default, it is `4 * np.std(y)`.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
|
Fits a Gaussian filter to some data - x and y. Returns predicted interpolation values.
Currently, smoothing is achieved by fitting a cubic spline to the gaussian filter fit
of `x` and `y`. | def gaussian_filter_fit(x, y, x_smooth=None, n_pts=n_pts_smooth, sigma=None):
"""
Fits a Gaussian filter to some data - x and y. Returns predicted interpolation values.
Currently, smoothing is achieved by fitting a cubic spline to the gaussian filter fit
of `x` and `y`.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like, optional
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int, optional
The number of evenly spaced points spanning the range of `x` to interpolate for.
sigma: numeric, optional
The standard deviation of the Gaussian kernel. A larger value yields a smoother curve,
but also reduced the closeness of the fit. By default, it is `4 * np.std(y)`.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x)-1, n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
sigma = sigma if sigma is not None else 4 * np.std(y)
gauss_filter_y = gaussian_filter1d(y, sigma)
cs = CubicSpline(x, gauss_filter_y)
y_smooth = cs(x_smooth)
return x_smooth, y_smooth | [
"def",
"gaussian_filter_fit",
"(",
"x",
",",
"y",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"n_pts_smooth",
",",
"sigma",
"=",
"None",
")",
":",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth_inds",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"x",
")",
"-",
"1",
",",
"n_pts",
")",
"x_smooth",
"=",
"np",
".",
"interp",
"(",
"x_smooth_inds",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"x",
")",
")",
",",
"x",
")",
"sigma",
"=",
"sigma",
"if",
"sigma",
"is",
"not",
"None",
"else",
"4",
"*",
"np",
".",
"std",
"(",
"y",
")",
"gauss_filter_y",
"=",
"gaussian_filter1d",
"(",
"y",
",",
"sigma",
")",
"cs",
"=",
"CubicSpline",
"(",
"x",
",",
"gauss_filter_y",
")",
"y_smooth",
"=",
"cs",
"(",
"x_smooth",
")",
"return",
"x_smooth",
",",
"y_smooth"
] | [
43,
0
] | [
75,
29
] | python | en | ['en', 'error', 'th'] | False |
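A minimal usage sketch for gaussian_filter_fit above, assuming numpy and the function's scipy dependencies (gaussian_filter1d, CubicSpline) are available.

    import numpy as np

    x = np.linspace(0, 10, 60)          # x should be increasing so the cubic spline is well defined
    y = np.sin(x) + np.random.normal(scale=0.2, size=x.size)

    # A larger sigma smooths more aggressively; the default is 4 * np.std(y).
    x_s, y_s = gaussian_filter_fit(x, y, n_pts=300, sigma=2)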
poly_fit | (x, y, degree, x_smooth=None, n_pts=n_pts_smooth) |
Fits a polynomial of any positive integer degree to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
    The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
degree: int
The degree of the polynomial to fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
|
Fits a polynomial of any positive integer degree to some data - x and y. Returns predicted interpolation values. | def poly_fit(x, y, degree, x_smooth=None, n_pts=n_pts_smooth):
"""
Fits a polynomial of any positive integer degree to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
degree: int
The degree of the polynomial to fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x), n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
y_smooth = np.array([np.array([coef * (x_val ** current_degree) for
coef, current_degree in zip(np.polyfit(x, y, degree),
range(degree, -1, -1))]).sum() for x_val in x_smooth])
return x_smooth, y_smooth | [
"def",
"poly_fit",
"(",
"x",
",",
"y",
",",
"degree",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"n_pts_smooth",
")",
":",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth_inds",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"x",
")",
",",
"n_pts",
")",
"x_smooth",
"=",
"np",
".",
"interp",
"(",
"x_smooth_inds",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"x",
")",
")",
",",
"x",
")",
"y_smooth",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"array",
"(",
"[",
"coef",
"*",
"(",
"x_val",
"**",
"current_degree",
")",
"for",
"coef",
",",
"current_degree",
"in",
"zip",
"(",
"np",
".",
"polyfit",
"(",
"x",
",",
"y",
",",
"degree",
")",
",",
"range",
"(",
"degree",
",",
"-",
"1",
",",
"-",
"1",
")",
")",
"]",
")",
".",
"sum",
"(",
")",
"for",
"x_val",
"in",
"x_smooth",
"]",
")",
"return",
"x_smooth",
",",
"y_smooth"
] | [
78,
0
] | [
106,
29
] | python | en | ['en', 'error', 'th'] | False |
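A minimal usage sketch for poly_fit above, assuming numpy is available and the function is importable from its module.

    import numpy as np

    x = np.linspace(-2, 2, 40)
    y = 1.5 * x**3 - x + np.random.normal(scale=0.3, size=x.size)

    x_s, y_s = poly_fit(x, y, degree=3, n_pts=200)      # cubic least-squares fit evaluated at 200 points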
fourier_fit | (x, y, n_predict=0, x_smooth=None, n_pts=n_pts_smooth,
n_harm=default_fourier_n_harm) |
Creates a Fourier fit of a NumPy array. Also supports extrapolation.
Credit goes to https://gist.github.com/tartakynov/83f3cd8f44208a1856ce.
Parameters
----------
x, y: numpy.ndarray
1D NumPy arrays of the x and y values to fit to.
Must not contain NaNs.
n_predict: int
The number of points to extrapolate.
The points will be spaced evenly by the mean spacing of values in `x`.
x_smooth: list-like, optional
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int, optional
The number of evenly spaced points spanning the range of `x` to interpolate for.
n_harm: int
The number of harmonics to use. A higher value yields a closer fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
|
Creates a Fourier fit of a NumPy array. Also supports extrapolation.
Credit goes to https://gist.github.com/tartakynov/83f3cd8f44208a1856ce. | def fourier_fit(x, y, n_predict=0, x_smooth=None, n_pts=n_pts_smooth,
n_harm=default_fourier_n_harm):
"""
Creates a Fourier fit of a NumPy array. Also supports extrapolation.
Credit goes to https://gist.github.com/tartakynov/83f3cd8f44208a1856ce.
Parameters
----------
x, y: numpy.ndarray
1D NumPy arrays of the x and y values to fit to.
Must not contain NaNs.
n_predict: int
The number of points to extrapolate.
The points will be spaced evenly by the mean spacing of values in `x`.
x_smooth: list-like, optional
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int, optional
The number of evenly spaced points spanning the range of `x` to interpolate for.
n_harm: int
The number of harmonics to use. A higher value yields a closer fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
"""
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x), n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
n_predict_smooth = int((len(x_smooth) / len(x)) * n_predict)
# These points are evenly spaced for the fourier fit implementation we use.
# More points are selected than are in `x_smooth` so we can interpolate accurately.
fourier_mult_pts = 2
x_smooth_fourier = np.linspace(x_smooth.min(), x_smooth.max(),
fourier_mult_pts * len(x_smooth))
y_smooth_fourier = np.interp(x_smooth_fourier, x, y)
n_predict_smooth_fourier = int((len(x_smooth_fourier) / len(x)) * n_predict)
# Perform the Fourier fit and extrapolation.
n = y_smooth_fourier.size
t = np.arange(0, n)
p = np.polyfit(t, y_smooth_fourier, 1) # find linear trend in arr
x_notrend = y_smooth_fourier - p[0] * t # detrended arr
x_freqdom = fft.fft(x_notrend) # detrended arr in frequency domain
f = fft.fftfreq(n) # frequencies
# sort indexes by frequency, lower -> higher
indexes = list(range(n))
indexes.sort(key=lambda i: np.absolute(x_freqdom[i]))
indexes.reverse()
t = np.arange(0, n + n_predict_smooth_fourier)
restored_sig = np.zeros(t.size)
for i in indexes[:1 + n_harm * 2]:
ampli = np.absolute(x_freqdom[i]) / n # amplitude
phase = np.angle(x_freqdom[i]) # phase
restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)
y_smooth_fourier = restored_sig + p[0] * t
# Find the points in `x_smooth_fourier` that are near to points in `x_smooth`
# and then interpolate the y values to match the new x values.
x_smooth = x_smooth_fourier[np.searchsorted(x_smooth_fourier, x_smooth)]
# Ensure `x_smooth` includes the extrapolations.
mean_x_smooth_space = np.diff(x_smooth).mean()
x_predict_smooth = np.linspace(x_smooth[-1] + mean_x_smooth_space,
x_smooth[-1] + mean_x_smooth_space * n_predict_smooth,
n_predict_smooth)
x_smooth = np.concatenate((x_smooth, x_predict_smooth))
# Ensure `x_smooth_fourier` includes the extrapolations.
mean_x_smooth_fourier_space = np.diff(x_smooth).mean()
x_predict_smooth_fourier = \
np.linspace(
x_smooth_fourier[-1] + mean_x_smooth_fourier_space,
x_smooth_fourier[-1] + mean_x_smooth_fourier_space * n_predict_smooth_fourier,
n_predict_smooth_fourier)
x_smooth_fourier = np.concatenate((x_smooth_fourier, x_predict_smooth_fourier))
y_smooth = np.interp(x_smooth, x_smooth_fourier, y_smooth_fourier)
return x_smooth, y_smooth | [
"def",
"fourier_fit",
"(",
"x",
",",
"y",
",",
"n_predict",
"=",
"0",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"n_pts_smooth",
",",
"n_harm",
"=",
"default_fourier_n_harm",
")",
":",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth_inds",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"x",
")",
",",
"n_pts",
")",
"x_smooth",
"=",
"np",
".",
"interp",
"(",
"x_smooth_inds",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"x",
")",
")",
",",
"x",
")",
"n_predict_smooth",
"=",
"int",
"(",
"(",
"len",
"(",
"x_smooth",
")",
"/",
"len",
"(",
"x",
")",
")",
"*",
"n_predict",
")",
"# These points are evenly spaced for the fourier fit implementation we use.",
"# More points are selected than are in `x_smooth` so we can interpolate accurately.",
"fourier_mult_pts",
"=",
"2",
"x_smooth_fourier",
"=",
"np",
".",
"linspace",
"(",
"x_smooth",
".",
"min",
"(",
")",
",",
"x_smooth",
".",
"max",
"(",
")",
",",
"fourier_mult_pts",
"*",
"len",
"(",
"x_smooth",
")",
")",
"y_smooth_fourier",
"=",
"np",
".",
"interp",
"(",
"x_smooth_fourier",
",",
"x",
",",
"y",
")",
"n_predict_smooth_fourier",
"=",
"int",
"(",
"(",
"len",
"(",
"x_smooth_fourier",
")",
"/",
"len",
"(",
"x",
")",
")",
"*",
"n_predict",
")",
"# Perform the Fourier fit and extrapolation.",
"n",
"=",
"y_smooth_fourier",
".",
"size",
"t",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"n",
")",
"p",
"=",
"np",
".",
"polyfit",
"(",
"t",
",",
"y_smooth_fourier",
",",
"1",
")",
"# find linear trend in arr",
"x_notrend",
"=",
"y_smooth_fourier",
"-",
"p",
"[",
"0",
"]",
"*",
"t",
"# detrended arr",
"x_freqdom",
"=",
"fft",
".",
"fft",
"(",
"x_notrend",
")",
"# detrended arr in frequency domain",
"f",
"=",
"fft",
".",
"fftfreq",
"(",
"n",
")",
"# frequencies",
"# sort indexes by frequency, lower -> higher",
"indexes",
"=",
"list",
"(",
"range",
"(",
"n",
")",
")",
"indexes",
".",
"sort",
"(",
"key",
"=",
"lambda",
"i",
":",
"np",
".",
"absolute",
"(",
"x_freqdom",
"[",
"i",
"]",
")",
")",
"indexes",
".",
"reverse",
"(",
")",
"t",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"n",
"+",
"n_predict_smooth_fourier",
")",
"restored_sig",
"=",
"np",
".",
"zeros",
"(",
"t",
".",
"size",
")",
"for",
"i",
"in",
"indexes",
"[",
":",
"1",
"+",
"n_harm",
"*",
"2",
"]",
":",
"ampli",
"=",
"np",
".",
"absolute",
"(",
"x_freqdom",
"[",
"i",
"]",
")",
"/",
"n",
"# amplitude",
"phase",
"=",
"np",
".",
"angle",
"(",
"x_freqdom",
"[",
"i",
"]",
")",
"# phase",
"restored_sig",
"+=",
"ampli",
"*",
"np",
".",
"cos",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"f",
"[",
"i",
"]",
"*",
"t",
"+",
"phase",
")",
"y_smooth_fourier",
"=",
"restored_sig",
"+",
"p",
"[",
"0",
"]",
"*",
"t",
"# Find the points in `x_smooth_fourier` that are near to points in `x_smooth`",
"# and then interpolate the y values to match the new x values.",
"x_smooth",
"=",
"x_smooth_fourier",
"[",
"np",
".",
"searchsorted",
"(",
"x_smooth_fourier",
",",
"x_smooth",
")",
"]",
"# Ensure `x_smooth` includes the extrapolations.",
"mean_x_smooth_space",
"=",
"np",
".",
"diff",
"(",
"x_smooth",
")",
".",
"mean",
"(",
")",
"x_predict_smooth",
"=",
"np",
".",
"linspace",
"(",
"x_smooth",
"[",
"-",
"1",
"]",
"+",
"mean_x_smooth_space",
",",
"x_smooth",
"[",
"-",
"1",
"]",
"+",
"mean_x_smooth_space",
"*",
"n_predict_smooth",
",",
"n_predict_smooth",
")",
"x_smooth",
"=",
"np",
".",
"concatenate",
"(",
"(",
"x_smooth",
",",
"x_predict_smooth",
")",
")",
"# Ensure `x_smooth_fourier` includes the extrapolations.",
"mean_x_smooth_fourier_space",
"=",
"np",
".",
"diff",
"(",
"x_smooth",
")",
".",
"mean",
"(",
")",
"x_predict_smooth_fourier",
"=",
"np",
".",
"linspace",
"(",
"x_smooth_fourier",
"[",
"-",
"1",
"]",
"+",
"mean_x_smooth_fourier_space",
",",
"x_smooth_fourier",
"[",
"-",
"1",
"]",
"+",
"mean_x_smooth_fourier_space",
"*",
"n_predict_smooth_fourier",
",",
"n_predict_smooth_fourier",
")",
"x_smooth_fourier",
"=",
"np",
".",
"concatenate",
"(",
"(",
"x_smooth_fourier",
",",
"x_predict_smooth_fourier",
")",
")",
"y_smooth",
"=",
"np",
".",
"interp",
"(",
"x_smooth",
",",
"x_smooth_fourier",
",",
"y_smooth_fourier",
")",
"return",
"x_smooth",
",",
"y_smooth"
] | [
109,
0
] | [
184,
29
] | python | en | ['en', 'error', 'th'] | False |
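A minimal usage sketch for fourier_fit above, assuming numpy is available and the samples in x are (roughly) evenly spaced, which the implementation relies on for the linear detrending and harmonic reconstruction.

    import numpy as np

    x = np.arange(100, dtype=float)                     # evenly spaced samples
    y = np.sin(2 * np.pi * x / 25) + np.random.normal(scale=0.1, size=x.size)

    # Fit with 8 harmonics and extrapolate 20 extra points past the end of x.
    x_s, y_s = fourier_fit(x, y, n_predict=20, n_pts=300, n_harm=8)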
RslLexer.analyse_text | (text) |
Check for the most common text in the beginning of a RSL file.
|
Check for the most common text in the beginning of a RSL file.
| def analyse_text(text):
"""
Check for the most common text in the beginning of a RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0 | [
"def",
"analyse_text",
"(",
"text",
")",
":",
"if",
"re",
".",
"search",
"(",
"r'scheme\\s*.*?=\\s*class\\s*type'",
",",
"text",
",",
"re",
".",
"I",
")",
"is",
"not",
"None",
":",
"return",
"1.0"
] | [
407,
4
] | [
412,
22
] | python | en | ['en', 'error', 'th'] | False |
RunawayRegexTest.assert_single_token | (self, s, token) | Show that a given string generates only one token. | Show that a given string generates only one token. | def assert_single_token(self, s, token):
"""Show that a given string generates only one token."""
tokens = list(self.lexer.get_tokens_unprocessed(s))
self.assertEqual(len(tokens), 1, tokens)
self.assertEqual(s, tokens[0][2])
self.assertEqual(token, tokens[0][1]) | [
"def",
"assert_single_token",
"(",
"self",
",",
"s",
",",
"token",
")",
":",
"tokens",
"=",
"list",
"(",
"self",
".",
"lexer",
".",
"get_tokens_unprocessed",
"(",
"s",
")",
")",
"self",
".",
"assertEqual",
"(",
"len",
"(",
"tokens",
")",
",",
"1",
",",
"tokens",
")",
"self",
".",
"assertEqual",
"(",
"s",
",",
"tokens",
"[",
"0",
"]",
"[",
"2",
"]",
")",
"self",
".",
"assertEqual",
"(",
"token",
",",
"tokens",
"[",
"0",
"]",
"[",
"1",
"]",
")"
] | [
26,
4
] | [
31,
45
] | python | en | ['en', 'en', 'en'] | True |
RunawayRegexTest.assert_tokens | (self, strings, expected_tokens) | Show that a given string generates the expected tokens. | Show that a given string generates the expected tokens. | def assert_tokens(self, strings, expected_tokens):
"""Show that a given string generates the expected tokens."""
tokens = list(self.lexer.get_tokens_unprocessed(''.join(strings)))
self.assertEqual(len(tokens), len(expected_tokens), tokens)
for index, s in enumerate(strings):
self.assertEqual(s, tokens[index][2])
self.assertEqual(expected_tokens[index], tokens[index][1]) | [
"def",
"assert_tokens",
"(",
"self",
",",
"strings",
",",
"expected_tokens",
")",
":",
"tokens",
"=",
"list",
"(",
"self",
".",
"lexer",
".",
"get_tokens_unprocessed",
"(",
"''",
".",
"join",
"(",
"strings",
")",
")",
")",
"self",
".",
"assertEqual",
"(",
"len",
"(",
"tokens",
")",
",",
"len",
"(",
"expected_tokens",
")",
",",
"tokens",
")",
"for",
"index",
",",
"s",
"in",
"enumerate",
"(",
"strings",
")",
":",
"self",
".",
"assertEqual",
"(",
"s",
",",
"tokens",
"[",
"index",
"]",
"[",
"2",
"]",
")",
"self",
".",
"assertEqual",
"(",
"expected_tokens",
"[",
"index",
"]",
",",
"tokens",
"[",
"index",
"]",
"[",
"1",
"]",
")"
] | [
33,
4
] | [
39,
70
] | python | en | ['en', 'en', 'en'] | True |
RunawayRegexTest.assert_fast_tokenization | (self, s) | Show that a given string is tokenized quickly. | Show that a given string is tokenized quickly. | def assert_fast_tokenization(self, s):
"""Show that a given string is tokenized quickly."""
start = time.time()
tokens = list(self.lexer.get_tokens_unprocessed(s))
end = time.time()
# Isn't 10 seconds kind of a long time? Yes, but we don't want false
# positives when the tests are starved for CPU time.
if end-start > 10:
self.fail('tokenization took too long')
return tokens | [
"def",
"assert_fast_tokenization",
"(",
"self",
",",
"s",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"tokens",
"=",
"list",
"(",
"self",
".",
"lexer",
".",
"get_tokens_unprocessed",
"(",
"s",
")",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"# Isn't 10 seconds kind of a long time? Yes, but we don't want false",
"# positives when the tests are starved for CPU time.",
"if",
"end",
"-",
"start",
">",
"10",
":",
"self",
".",
"fail",
"(",
"'tokenization took too long'",
")",
"return",
"tokens"
] | [
41,
4
] | [
50,
21
] | python | en | ['en', 'en', 'en'] | True |
CALLING_CONVENTION_TYPES.extract | (text, default=UNKNOWN) | extracts calling convention from the text. If the calling convention
could not be found, the "default" is used | extracts calling convention from the text. If the calling convention
could not be found, the "default" is used | def extract(text, default=UNKNOWN):
"""extracts calling convention from the text. If the calling convention
        could not be found, the "default" is used"""
if not text:
return default
found = CALLING_CONVENTION_TYPES.pattern.match(text)
if found:
return found.group('cc')
else:
return default | [
"def",
"extract",
"(",
"text",
",",
"default",
"=",
"UNKNOWN",
")",
":",
"if",
"not",
"text",
":",
"return",
"default",
"found",
"=",
"CALLING_CONVENTION_TYPES",
".",
"pattern",
".",
"match",
"(",
"text",
")",
"if",
"found",
":",
"return",
"found",
".",
"group",
"(",
"'cc'",
")",
"else",
":",
"return",
"default"
] | [
37,
4
] | [
46,
26
] | python | en | ['en', 'en', 'en'] | True |
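A hedged usage sketch for extract above; the class-level pattern regex and the UNKNOWN constant are defined elsewhere in the class, so the input string below is only an assumed illustration of declaration text carrying a calling convention.

    # Returns the matched 'cc' group when the pattern matches, otherwise the default.
    cc = CALLING_CONVENTION_TYPES.extract("__cdecl do_something(int)")
    fallback = CALLING_CONVENTION_TYPES.extract("", default=CALLING_CONVENTION_TYPES.UNKNOWN)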
S3LoggingBucket.get_key | (self, _name) | Create a new Key instance with the given name. | Create a new Key instance with the given name. | def get_key(self, _name):
""" Create a new Key instance with the given name. """
return Key(bucket=self.bucket, name=_name) | [
"def",
"get_key",
"(",
"self",
",",
"_name",
")",
":",
"return",
"Key",
"(",
"bucket",
"=",
"self",
".",
"bucket",
",",
"name",
"=",
"_name",
")"
] | [
27,
4
] | [
29,
50
] | python | en | ['en', 'en', 'en'] | True |
S3LoggingBucket.get_bucket | (self) | Return the bucket being used. | Return the bucket being used. | def get_bucket(self):
""" Return the bucket being used. """
return self.bucket | [
"def",
"get_bucket",
"(",
"self",
")",
":",
"return",
"self",
".",
"bucket"
] | [
31,
4
] | [
33,
26
] | python | en | ['en', 'en', 'en'] | True |
S3LoggingBucket.upload_file | (self, file_name, file_path) | Upload a given file from the file_path to the bucket
with the new name/path file_name. | Upload a given file from the file_path to the bucket
with the new name/path file_name. | def upload_file(self, file_name, file_path):
""" Upload a given file from the file_path to the bucket
with the new name/path file_name. """
upload_key = Key(bucket=self.bucket, name=file_name)
content_type = "text/plain"
if file_name.endswith(".html"):
content_type = "text/html"
elif file_name.endswith(".jpg"):
content_type = "image/jpeg"
elif file_name.endswith(".png"):
content_type = "image/png"
upload_key.set_contents_from_filename(
file_path,
headers={"Content-Type": content_type})
upload_key.url = \
upload_key.generate_url(expires_in=3600).split("?")[0]
try:
upload_key.make_public()
except Exception:
pass | [
"def",
"upload_file",
"(",
"self",
",",
"file_name",
",",
"file_path",
")",
":",
"upload_key",
"=",
"Key",
"(",
"bucket",
"=",
"self",
".",
"bucket",
",",
"name",
"=",
"file_name",
")",
"content_type",
"=",
"\"text/plain\"",
"if",
"file_name",
".",
"endswith",
"(",
"\".html\"",
")",
":",
"content_type",
"=",
"\"text/html\"",
"elif",
"file_name",
".",
"endswith",
"(",
"\".jpg\"",
")",
":",
"content_type",
"=",
"\"image/jpeg\"",
"elif",
"file_name",
".",
"endswith",
"(",
"\".png\"",
")",
":",
"content_type",
"=",
"\"image/png\"",
"upload_key",
".",
"set_contents_from_filename",
"(",
"file_path",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"content_type",
"}",
")",
"upload_key",
".",
"url",
"=",
"upload_key",
".",
"generate_url",
"(",
"expires_in",
"=",
"3600",
")",
".",
"split",
"(",
"\"?\"",
")",
"[",
"0",
"]",
"try",
":",
"upload_key",
".",
"make_public",
"(",
")",
"except",
"Exception",
":",
"pass"
] | [
35,
4
] | [
54,
16
] | python | en | ['en', 'en', 'en'] | True |
S3LoggingBucket.upload_index_file | (self, test_address, timestamp) | Create an index.html file with links to all the log files
that were just uploaded. | Create an index.html file with links to all the log files
that were just uploaded. | def upload_index_file(self, test_address, timestamp):
""" Create an index.html file with links to all the log files
that were just uploaded. """
global already_uploaded_files
already_uploaded_files = list(set(already_uploaded_files))
already_uploaded_files.sort()
file_name = "%s/%s/index.html" % (test_address, timestamp)
index = self.get_key(file_name)
index_str = []
for completed_file in already_uploaded_files:
index_str.append("<a href='" + self.bucket_url + ""
"%s'>%s</a>" % (completed_file, completed_file))
index.set_contents_from_string(
"<br>".join(index_str),
headers={"Content-Type": "text/html"})
index.make_public()
return "%s%s" % (self.bucket_url, file_name) | [
"def",
"upload_index_file",
"(",
"self",
",",
"test_address",
",",
"timestamp",
")",
":",
"global",
"already_uploaded_files",
"already_uploaded_files",
"=",
"list",
"(",
"set",
"(",
"already_uploaded_files",
")",
")",
"already_uploaded_files",
".",
"sort",
"(",
")",
"file_name",
"=",
"\"%s/%s/index.html\"",
"%",
"(",
"test_address",
",",
"timestamp",
")",
"index",
"=",
"self",
".",
"get_key",
"(",
"file_name",
")",
"index_str",
"=",
"[",
"]",
"for",
"completed_file",
"in",
"already_uploaded_files",
":",
"index_str",
".",
"append",
"(",
"\"<a href='\"",
"+",
"self",
".",
"bucket_url",
"+",
"\"\"",
"\"%s'>%s</a>\"",
"%",
"(",
"completed_file",
",",
"completed_file",
")",
")",
"index",
".",
"set_contents_from_string",
"(",
"\"<br>\"",
".",
"join",
"(",
"index_str",
")",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"text/html\"",
"}",
")",
"index",
".",
"make_public",
"(",
")",
"return",
"\"%s%s\"",
"%",
"(",
"self",
".",
"bucket_url",
",",
"file_name",
")"
] | [
56,
4
] | [
72,
52
] | python | en | ['en', 'en', 'en'] | True |
S3LoggingBucket.save_uploaded_file_names | (self, files) | Keep a record of all file names that've been uploaded. Upload log
files related to each test after its execution. Once done, use
already_uploaded_files to create an index file. | Keep a record of all file names that've been uploaded. Upload log
files related to each test after its execution. Once done, use
already_uploaded_files to create an index file. | def save_uploaded_file_names(self, files):
""" Keep a record of all file names that've been uploaded. Upload log
files related to each test after its execution. Once done, use
already_uploaded_files to create an index file. """
global already_uploaded_files
already_uploaded_files.extend(files) | [
"def",
"save_uploaded_file_names",
"(",
"self",
",",
"files",
")",
":",
"global",
"already_uploaded_files",
"already_uploaded_files",
".",
"extend",
"(",
"files",
")"
] | [
74,
4
] | [
79,
44
] | python | en | ['en', 'en', 'en'] | True |
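A hedged usage sketch tying the S3LoggingBucket methods above together; the class constructor and bucket configuration are not shown in these rows, so the instantiation below is assumed.

    logging_bucket = S3LoggingBucket()                  # constructor/arguments assumed
    remote_name = "test_suite/2024_01_01/screenshot.png"
    logging_bucket.upload_file(remote_name, "/tmp/screenshot.png")
    logging_bucket.save_uploaded_file_names([remote_name])
    index_url = logging_bucket.upload_index_file("test_suite", "2024_01_01")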
System.get_function | (self, code_package: Benchmark, func_name: Optional[str] = None) |
There's no function with that name?
a) yes -> create new function. Implementation might check if a function
with that name already exists in the cloud and update its code.
b) no -> retrieve function from the cache. Function code in cloud will
be updated if the local version is different.
|
There's no function with that name?
a) yes -> create new function. Implementation might check if a function
with that name already exists in the cloud and update its code.
b) no -> retrieve function from the cache. Function code in cloud will
be updated if the local version is different.
| def get_function(self, code_package: Benchmark, func_name: Optional[str] = None) -> Function:
if code_package.language_version not in self.system_config.supported_language_versions(
self.name(), code_package.language_name
):
raise Exception(
"Unsupported {language} version {version} in {system}!".format(
language=code_package.language_name,
version=code_package.language_version,
system=self.name(),
)
)
if not func_name:
func_name = self.default_function_name(code_package)
rebuilt, _ = code_package.build(self.package_code)
"""
There's no function with that name?
a) yes -> create new function. Implementation might check if a function
with that name already exists in the cloud and update its code.
b) no -> retrieve function from the cache. Function code in cloud will
be updated if the local version is different.
"""
functions = code_package.functions
if not functions or func_name not in functions:
msg = (
"function name not provided."
if not func_name
else "function {} not found in cache.".format(func_name)
)
self.logging.info("Creating new function! Reason: " + msg)
function = self.create_function(code_package, func_name)
self.cache_client.add_function(
deployment_name=self.name(),
language_name=code_package.language_name,
code_package=code_package,
function=function,
)
code_package.query_cache()
return function
else:
# retrieve function
cached_function = functions[func_name]
code_location = code_package.code_location
function = self.function_type().deserialize(cached_function)
self.cached_function(function)
self.logging.info(
"Using cached function {fname} in {loc}".format(fname=func_name, loc=code_location)
)
# is the function up-to-date?
if function.code_package_hash != code_package.hash or rebuilt:
self.logging.info(
f"Cached function {func_name} with hash "
f"{function.code_package_hash} is not up to date with "
f"current build {code_package.hash} in "
f"{code_location}, updating cloud version!"
)
self.update_function(function, code_package)
function.code_package_hash = code_package.hash
function.updated_code = True
self.cache_client.add_function(
deployment_name=self.name(),
language_name=code_package.language_name,
code_package=code_package,
function=function,
)
code_package.query_cache()
return function | [
"def",
"get_function",
"(",
"self",
",",
"code_package",
":",
"Benchmark",
",",
"func_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Function",
":",
"if",
"code_package",
".",
"language_version",
"not",
"in",
"self",
".",
"system_config",
".",
"supported_language_versions",
"(",
"self",
".",
"name",
"(",
")",
",",
"code_package",
".",
"language_name",
")",
":",
"raise",
"Exception",
"(",
"\"Unsupported {language} version {version} in {system}!\"",
".",
"format",
"(",
"language",
"=",
"code_package",
".",
"language_name",
",",
"version",
"=",
"code_package",
".",
"language_version",
",",
"system",
"=",
"self",
".",
"name",
"(",
")",
",",
")",
")",
"if",
"not",
"func_name",
":",
"func_name",
"=",
"self",
".",
"default_function_name",
"(",
"code_package",
")",
"rebuilt",
",",
"_",
"=",
"code_package",
".",
"build",
"(",
"self",
".",
"package_code",
")",
"functions",
"=",
"code_package",
".",
"functions",
"if",
"not",
"functions",
"or",
"func_name",
"not",
"in",
"functions",
":",
"msg",
"=",
"(",
"\"function name not provided.\"",
"if",
"not",
"func_name",
"else",
"\"function {} not found in cache.\"",
".",
"format",
"(",
"func_name",
")",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"Creating new function! Reason: \"",
"+",
"msg",
")",
"function",
"=",
"self",
".",
"create_function",
"(",
"code_package",
",",
"func_name",
")",
"self",
".",
"cache_client",
".",
"add_function",
"(",
"deployment_name",
"=",
"self",
".",
"name",
"(",
")",
",",
"language_name",
"=",
"code_package",
".",
"language_name",
",",
"code_package",
"=",
"code_package",
",",
"function",
"=",
"function",
",",
")",
"code_package",
".",
"query_cache",
"(",
")",
"return",
"function",
"else",
":",
"# retrieve function",
"cached_function",
"=",
"functions",
"[",
"func_name",
"]",
"code_location",
"=",
"code_package",
".",
"code_location",
"function",
"=",
"self",
".",
"function_type",
"(",
")",
".",
"deserialize",
"(",
"cached_function",
")",
"self",
".",
"cached_function",
"(",
"function",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"Using cached function {fname} in {loc}\"",
".",
"format",
"(",
"fname",
"=",
"func_name",
",",
"loc",
"=",
"code_location",
")",
")",
"# is the function up-to-date?",
"if",
"function",
".",
"code_package_hash",
"!=",
"code_package",
".",
"hash",
"or",
"rebuilt",
":",
"self",
".",
"logging",
".",
"info",
"(",
"f\"Cached function {func_name} with hash \"",
"f\"{function.code_package_hash} is not up to date with \"",
"f\"current build {code_package.hash} in \"",
"f\"{code_location}, updating cloud version!\"",
")",
"self",
".",
"update_function",
"(",
"function",
",",
"code_package",
")",
"function",
".",
"code_package_hash",
"=",
"code_package",
".",
"hash",
"function",
".",
"updated_code",
"=",
"True",
"self",
".",
"cache_client",
".",
"add_function",
"(",
"deployment_name",
"=",
"self",
".",
"name",
"(",
")",
",",
"language_name",
"=",
"code_package",
".",
"language_name",
",",
"code_package",
"=",
"code_package",
",",
"function",
"=",
"function",
",",
")",
"code_package",
".",
"query_cache",
"(",
")",
"return",
"function"
] | [
121,
4
] | [
189,
27
] | python | en | ['en', 'error', 'th'] | False |
eval_data_from_json | (filename: str, max_docs: Union[int, bool] = None, preprocessor: PreProcessor = None) |
Read Documents + Labels from a SQuAD-style file.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation.
:param filename: Path to file in SQuAD format
:param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents.
:return: (List of Documents, List of Labels)
|
Read Documents + Labels from a SQuAD-style file.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation. | def eval_data_from_json(filename: str, max_docs: Union[int, bool] = None, preprocessor: PreProcessor = None) -> Tuple[List[Document], List[Label]]:
"""
Read Documents + Labels from a SQuAD-style file.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation.
:param filename: Path to file in SQuAD format
:param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents.
:return: (List of Documents, List of Labels)
"""
docs: List[Document] = []
labels = []
problematic_ids = []
with open(filename, "r", encoding='utf-8') as file:
data = json.load(file)
if "title" not in data["data"][0]:
logger.warning(f"No title information found for documents in QA file: {filename}")
for document in data["data"]:
if max_docs:
if len(docs) > max_docs:
break
# Extracting paragraphs and their labels from a SQuAD document dict
cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict(document, preprocessor)
docs.extend(cur_docs)
labels.extend(cur_labels)
problematic_ids.extend(cur_problematic_ids)
if len(problematic_ids) > 0:
logger.warning(f"Could not convert an answer for {len(problematic_ids)} questions.\n"
f"There were conversion errors for question ids: {problematic_ids}")
return docs, labels | [
"def",
"eval_data_from_json",
"(",
"filename",
":",
"str",
",",
"max_docs",
":",
"Union",
"[",
"int",
",",
"bool",
"]",
"=",
"None",
",",
"preprocessor",
":",
"PreProcessor",
"=",
"None",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Document",
"]",
",",
"List",
"[",
"Label",
"]",
"]",
":",
"docs",
":",
"List",
"[",
"Document",
"]",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"problematic_ids",
"=",
"[",
"]",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"file",
":",
"data",
"=",
"json",
".",
"load",
"(",
"file",
")",
"if",
"\"title\"",
"not",
"in",
"data",
"[",
"\"data\"",
"]",
"[",
"0",
"]",
":",
"logger",
".",
"warning",
"(",
"f\"No title information found for documents in QA file: {filename}\"",
")",
"for",
"document",
"in",
"data",
"[",
"\"data\"",
"]",
":",
"if",
"max_docs",
":",
"if",
"len",
"(",
"docs",
")",
">",
"max_docs",
":",
"break",
"# Extracting paragraphs and their labels from a SQuAD document dict",
"cur_docs",
",",
"cur_labels",
",",
"cur_problematic_ids",
"=",
"_extract_docs_and_labels_from_dict",
"(",
"document",
",",
"preprocessor",
")",
"docs",
".",
"extend",
"(",
"cur_docs",
")",
"labels",
".",
"extend",
"(",
"cur_labels",
")",
"problematic_ids",
".",
"extend",
"(",
"cur_problematic_ids",
")",
"if",
"len",
"(",
"problematic_ids",
")",
">",
"0",
":",
"logger",
".",
"warning",
"(",
"f\"Could not convert an answer for {len(problematic_ids)} questions.\\n\"",
"f\"There were conversion errors for question ids: {problematic_ids}\"",
")",
"return",
"docs",
",",
"labels"
] | [
24,
0
] | [
55,
23
] | python | en | ['en', 'error', 'th'] | False |
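A minimal usage sketch for eval_data_from_json above; document_store stands in for any Haystack DocumentStore instance and the file path is a placeholder.

    docs, labels = eval_data_from_json("data/squad_dev.json", max_docs=100)
    document_store.write_documents(docs)                # index the evaluation documents
    document_store.write_labels(labels)                 # index the matching labels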
eval_data_from_jsonl | (filename: str, batch_size: Optional[int] = None,
max_docs: Union[int, bool] = None, preprocessor: PreProcessor = None) |
Read Documents + Labels from a SQuAD-style file in jsonl format, i.e. one document per line.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation.
This is a generator which will yield one tuple per iteration containing a list
of batch_size documents and a list with the documents' labels.
If batch_size is set to None, this method will yield all documents and labels.
:param filename: Path to file in SQuAD format
:param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents.
:return: (List of Documents, List of Labels)
|
Read Documents + Labels from a SQuAD-style file in jsonl format, i.e. one document per line.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation. | def eval_data_from_jsonl(filename: str, batch_size: Optional[int] = None,
max_docs: Union[int, bool] = None, preprocessor: PreProcessor = None) -> Generator[Tuple[List[Document], List[Label]], None, None]:
"""
Read Documents + Labels from a SQuAD-style file in jsonl format, i.e. one document per line.
Document and Labels can then be indexed to the DocumentStore and be used for evaluation.
This is a generator which will yield one tuple per iteration containing a list
of batch_size documents and a list with the documents' labels.
If batch_size is set to None, this method will yield all documents and labels.
:param filename: Path to file in SQuAD format
:param max_docs: This sets the number of documents that will be loaded. By default, this is set to None, thus reading in all available eval documents.
:return: (List of Documents, List of Labels)
"""
docs: List[Document] = []
labels = []
problematic_ids = []
with open(filename, "r", encoding='utf-8') as file:
for document in file:
if max_docs:
if len(docs) > max_docs:
break
# Extracting paragraphs and their labels from a SQuAD document dict
document_dict = json.loads(document)
cur_docs, cur_labels, cur_problematic_ids = _extract_docs_and_labels_from_dict(document_dict, preprocessor)
docs.extend(cur_docs)
labels.extend(cur_labels)
problematic_ids.extend(cur_problematic_ids)
if batch_size is not None:
if len(docs) >= batch_size:
if len(problematic_ids) > 0:
logger.warning(f"Could not convert an answer for {len(problematic_ids)} questions.\n"
f"There were conversion errors for question ids: {problematic_ids}")
yield docs, labels
docs = []
labels = []
problematic_ids = []
yield docs, labels | [
"def",
"eval_data_from_jsonl",
"(",
"filename",
":",
"str",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"max_docs",
":",
"Union",
"[",
"int",
",",
"bool",
"]",
"=",
"None",
",",
"preprocessor",
":",
"PreProcessor",
"=",
"None",
")",
"->",
"Generator",
"[",
"Tuple",
"[",
"List",
"[",
"Document",
"]",
",",
"List",
"[",
"Label",
"]",
"]",
",",
"None",
",",
"None",
"]",
":",
"docs",
":",
"List",
"[",
"Document",
"]",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"problematic_ids",
"=",
"[",
"]",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"file",
":",
"for",
"document",
"in",
"file",
":",
"if",
"max_docs",
":",
"if",
"len",
"(",
"docs",
")",
">",
"max_docs",
":",
"break",
"# Extracting paragraphs and their labels from a SQuAD document dict",
"document_dict",
"=",
"json",
".",
"loads",
"(",
"document",
")",
"cur_docs",
",",
"cur_labels",
",",
"cur_problematic_ids",
"=",
"_extract_docs_and_labels_from_dict",
"(",
"document_dict",
",",
"preprocessor",
")",
"docs",
".",
"extend",
"(",
"cur_docs",
")",
"labels",
".",
"extend",
"(",
"cur_labels",
")",
"problematic_ids",
".",
"extend",
"(",
"cur_problematic_ids",
")",
"if",
"batch_size",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"docs",
")",
">=",
"batch_size",
":",
"if",
"len",
"(",
"problematic_ids",
")",
">",
"0",
":",
"logger",
".",
"warning",
"(",
"f\"Could not convert an answer for {len(problematic_ids)} questions.\\n\"",
"f\"There were conversion errors for question ids: {problematic_ids}\"",
")",
"yield",
"docs",
",",
"labels",
"docs",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"problematic_ids",
"=",
"[",
"]",
"yield",
"docs",
",",
"labels"
] | [
58,
0
] | [
99,
22
] | python | en | ['en', 'error', 'th'] | False |
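A minimal usage sketch for eval_data_from_jsonl above, streaming documents and labels in batches to keep memory bounded; document_store again stands in for a DocumentStore instance and the path is a placeholder.

    for docs_batch, labels_batch in eval_data_from_jsonl("data/squad_dev.jsonl",
                                                         batch_size=1000):
        document_store.write_documents(docs_batch)
        document_store.write_labels(labels_batch)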
convert_files_to_dicts | (dir_path: str, clean_func: Optional[Callable] = None, split_paragraphs: bool = False) |
Convert all files (.txt, .pdf, .docx) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text into paragraphs.
:return: list of document dicts, one per file or (if split_paragraphs is True) per paragraph
|
Convert all files (.txt, .pdf, .docx) in the sub-directories of the given path to Python dicts that can be written to a
Document Store. | def convert_files_to_dicts(dir_path: str, clean_func: Optional[Callable] = None, split_paragraphs: bool = False) -> \
List[dict]:
"""
    Convert all files (.txt, .pdf, .docx) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
    :param split_paragraphs: split text into paragraphs.
    :return: list of document dicts, one per file or (if split_paragraphs is True) per paragraph
"""
file_paths = [p for p in Path(dir_path).glob("**/*")]
allowed_suffixes = [".pdf", ".txt", ".docx"]
suffix2converter: Dict[str, BaseConverter] = {}
suffix2paths: Dict[str, List[Path]] = {}
for path in file_paths:
file_suffix = path.suffix.lower()
if file_suffix in allowed_suffixes:
if file_suffix not in suffix2paths:
suffix2paths[file_suffix] = []
suffix2paths[file_suffix].append(path)
elif not path.is_dir():
logger.warning('Skipped file {0} as type {1} is not supported here. '
'See haystack.file_converter for support of more file types'.format(path, file_suffix))
# No need to initialize converter if file type not present
for file_suffix in suffix2paths.keys():
if file_suffix == ".pdf":
suffix2converter[file_suffix] = PDFToTextConverter()
if file_suffix == ".txt":
suffix2converter[file_suffix] = TextConverter()
if file_suffix == ".docx":
suffix2converter[file_suffix] = DocxToTextConverter()
documents = []
for suffix, paths in suffix2paths.items():
for path in paths:
logger.info('Converting {}'.format(path))
document = suffix2converter[suffix].convert(file_path=path, meta=None)
text = document["text"]
if clean_func:
text = clean_func(text)
if split_paragraphs:
for para in text.split("\n\n"):
if not para.strip(): # skip empty paragraphs
continue
documents.append({"text": para, "meta": {"name": path.name}})
else:
documents.append({"text": text, "meta": {"name": path.name}})
return documents | [
"def",
"convert_files_to_dicts",
"(",
"dir_path",
":",
"str",
",",
"clean_func",
":",
"Optional",
"[",
"Callable",
"]",
"=",
"None",
",",
"split_paragraphs",
":",
"bool",
"=",
"False",
")",
"->",
"List",
"[",
"dict",
"]",
":",
"file_paths",
"=",
"[",
"p",
"for",
"p",
"in",
"Path",
"(",
"dir_path",
")",
".",
"glob",
"(",
"\"**/*\"",
")",
"]",
"allowed_suffixes",
"=",
"[",
"\".pdf\"",
",",
"\".txt\"",
",",
"\".docx\"",
"]",
"suffix2converter",
":",
"Dict",
"[",
"str",
",",
"BaseConverter",
"]",
"=",
"{",
"}",
"suffix2paths",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"Path",
"]",
"]",
"=",
"{",
"}",
"for",
"path",
"in",
"file_paths",
":",
"file_suffix",
"=",
"path",
".",
"suffix",
".",
"lower",
"(",
")",
"if",
"file_suffix",
"in",
"allowed_suffixes",
":",
"if",
"file_suffix",
"not",
"in",
"suffix2paths",
":",
"suffix2paths",
"[",
"file_suffix",
"]",
"=",
"[",
"]",
"suffix2paths",
"[",
"file_suffix",
"]",
".",
"append",
"(",
"path",
")",
"elif",
"not",
"path",
".",
"is_dir",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"'Skipped file {0} as type {1} is not supported here. '",
"'See haystack.file_converter for support of more file types'",
".",
"format",
"(",
"path",
",",
"file_suffix",
")",
")",
"# No need to initialize converter if file type not present",
"for",
"file_suffix",
"in",
"suffix2paths",
".",
"keys",
"(",
")",
":",
"if",
"file_suffix",
"==",
"\".pdf\"",
":",
"suffix2converter",
"[",
"file_suffix",
"]",
"=",
"PDFToTextConverter",
"(",
")",
"if",
"file_suffix",
"==",
"\".txt\"",
":",
"suffix2converter",
"[",
"file_suffix",
"]",
"=",
"TextConverter",
"(",
")",
"if",
"file_suffix",
"==",
"\".docx\"",
":",
"suffix2converter",
"[",
"file_suffix",
"]",
"=",
"DocxToTextConverter",
"(",
")",
"documents",
"=",
"[",
"]",
"for",
"suffix",
",",
"paths",
"in",
"suffix2paths",
".",
"items",
"(",
")",
":",
"for",
"path",
"in",
"paths",
":",
"logger",
".",
"info",
"(",
"'Converting {}'",
".",
"format",
"(",
"path",
")",
")",
"document",
"=",
"suffix2converter",
"[",
"suffix",
"]",
".",
"convert",
"(",
"file_path",
"=",
"path",
",",
"meta",
"=",
"None",
")",
"text",
"=",
"document",
"[",
"\"text\"",
"]",
"if",
"clean_func",
":",
"text",
"=",
"clean_func",
"(",
"text",
")",
"if",
"split_paragraphs",
":",
"for",
"para",
"in",
"text",
".",
"split",
"(",
"\"\\n\\n\"",
")",
":",
"if",
"not",
"para",
".",
"strip",
"(",
")",
":",
"# skip empty paragraphs",
"continue",
"documents",
".",
"append",
"(",
"{",
"\"text\"",
":",
"para",
",",
"\"meta\"",
":",
"{",
"\"name\"",
":",
"path",
".",
"name",
"}",
"}",
")",
"else",
":",
"documents",
".",
"append",
"(",
"{",
"\"text\"",
":",
"text",
",",
"\"meta\"",
":",
"{",
"\"name\"",
":",
"path",
".",
"name",
"}",
"}",
")",
"return",
"documents"
] | [
198,
0
] | [
253,
20
] | python | en | ['en', 'error', 'th'] | False |
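A minimal usage sketch for convert_files_to_dicts above; the directory is a placeholder, the cleaning function is a trivial example, and document_store stands in for a DocumentStore instance.

    dicts = convert_files_to_dicts(
        dir_path="docs",
        clean_func=lambda text: text.replace("\r\n", "\n").strip(),
        split_paragraphs=True,
    )
    document_store.write_documents(dicts)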
tika_convert_files_to_dicts | (
dir_path: str,
clean_func: Optional[Callable] = None,
split_paragraphs: bool = False,
merge_short: bool = True,
merge_lowercase: bool = True
) |
Convert all files (.txt, .pdf) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param merge_lowercase: allow merging a paragraph that starts with a lowercase letter into the previous paragraph
:param merge_short: allow merging of short paragraphs
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text into paragraphs.
:return: list of document dicts, one per file or (if split_paragraphs is True) per merged paragraph
|
Convert all files (.txt, .pdf) in the sub-directories of the given path to Python dicts that can be written to a
Document Store. | def tika_convert_files_to_dicts(
dir_path: str,
clean_func: Optional[Callable] = None,
split_paragraphs: bool = False,
merge_short: bool = True,
merge_lowercase: bool = True
) -> List[dict]:
"""
    Convert all files (.txt, .pdf) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
    :param merge_lowercase: allow merging a paragraph that starts with a lowercase letter into the previous paragraph
:param merge_short: allow merging of short paragraphs
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
    :param split_paragraphs: split text into paragraphs.
    :return: list of document dicts, one per file or (if split_paragraphs is True) per merged paragraph
"""
converter = TikaConverter()
paths = [p for p in Path(dir_path).glob("**/*")]
allowed_suffixes = [".pdf", ".txt"]
file_paths: List[Path] = []
for path in paths:
file_suffix = path.suffix.lower()
if file_suffix in allowed_suffixes:
file_paths.append(path)
elif not path.is_dir():
logger.warning('Skipped file {0} as type {1} is not supported here. '
'See haystack.file_converter for support of more file types'.format(path, file_suffix))
documents = []
for path in file_paths:
logger.info('Converting {}'.format(path))
document = converter.convert(path)
meta = document["meta"] or {}
meta["name"] = path.name
text = document["text"]
pages = text.split("\f")
if split_paragraphs:
if pages:
paras = pages[0].split("\n\n")
# pop the last paragraph from the first page
last_para = paras.pop(-1) if paras else ''
for page in pages[1:]:
page_paras = page.split("\n\n")
# merge the last paragraph in previous page to the first paragraph in this page
if page_paras:
page_paras[0] = last_para + ' ' + page_paras[0]
last_para = page_paras.pop(-1)
paras += page_paras
if last_para:
paras.append(last_para)
if paras:
last_para = ''
for para in paras:
para = para.strip()
if not para:
continue
# merge paragraphs to improve qa
# merge this paragraph if less than 10 characters or 2 words
# or this paragraph starts with a lower case and last paragraph does not end with a punctuation
if merge_short and len(para) < 10 or len(re.findall(r'\s+', para)) < 2 \
or merge_lowercase and para and para[0].islower() and last_para \
and last_para[-1] not in r'.?!"\'\]\)':
last_para += ' ' + para
else:
if last_para:
documents.append({"text": last_para, "meta": meta})
last_para = para
# don't forget the last one
if last_para:
documents.append({"text": last_para, "meta": meta})
else:
if clean_func:
text = clean_func(text)
documents.append({"text": text, "meta": meta})
return documents | [
"def",
"tika_convert_files_to_dicts",
"(",
"dir_path",
":",
"str",
",",
"clean_func",
":",
"Optional",
"[",
"Callable",
"]",
"=",
"None",
",",
"split_paragraphs",
":",
"bool",
"=",
"False",
",",
"merge_short",
":",
"bool",
"=",
"True",
",",
"merge_lowercase",
":",
"bool",
"=",
"True",
")",
"->",
"List",
"[",
"dict",
"]",
":",
"converter",
"=",
"TikaConverter",
"(",
")",
"paths",
"=",
"[",
"p",
"for",
"p",
"in",
"Path",
"(",
"dir_path",
")",
".",
"glob",
"(",
"\"**/*\"",
")",
"]",
"allowed_suffixes",
"=",
"[",
"\".pdf\"",
",",
"\".txt\"",
"]",
"file_paths",
":",
"List",
"[",
"Path",
"]",
"=",
"[",
"]",
"for",
"path",
"in",
"paths",
":",
"file_suffix",
"=",
"path",
".",
"suffix",
".",
"lower",
"(",
")",
"if",
"file_suffix",
"in",
"allowed_suffixes",
":",
"file_paths",
".",
"append",
"(",
"path",
")",
"elif",
"not",
"path",
".",
"is_dir",
"(",
")",
":",
"logger",
".",
"warning",
"(",
"'Skipped file {0} as type {1} is not supported here. '",
"'See haystack.file_converter for support of more file types'",
".",
"format",
"(",
"path",
",",
"file_suffix",
")",
")",
"documents",
"=",
"[",
"]",
"for",
"path",
"in",
"file_paths",
":",
"logger",
".",
"info",
"(",
"'Converting {}'",
".",
"format",
"(",
"path",
")",
")",
"document",
"=",
"converter",
".",
"convert",
"(",
"path",
")",
"meta",
"=",
"document",
"[",
"\"meta\"",
"]",
"or",
"{",
"}",
"meta",
"[",
"\"name\"",
"]",
"=",
"path",
".",
"name",
"text",
"=",
"document",
"[",
"\"text\"",
"]",
"pages",
"=",
"text",
".",
"split",
"(",
"\"\\f\"",
")",
"if",
"split_paragraphs",
":",
"if",
"pages",
":",
"paras",
"=",
"pages",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\n\\n\"",
")",
"# pop the last paragraph from the first page",
"last_para",
"=",
"paras",
".",
"pop",
"(",
"-",
"1",
")",
"if",
"paras",
"else",
"''",
"for",
"page",
"in",
"pages",
"[",
"1",
":",
"]",
":",
"page_paras",
"=",
"page",
".",
"split",
"(",
"\"\\n\\n\"",
")",
"# merge the last paragraph in previous page to the first paragraph in this page",
"if",
"page_paras",
":",
"page_paras",
"[",
"0",
"]",
"=",
"last_para",
"+",
"' '",
"+",
"page_paras",
"[",
"0",
"]",
"last_para",
"=",
"page_paras",
".",
"pop",
"(",
"-",
"1",
")",
"paras",
"+=",
"page_paras",
"if",
"last_para",
":",
"paras",
".",
"append",
"(",
"last_para",
")",
"if",
"paras",
":",
"last_para",
"=",
"''",
"for",
"para",
"in",
"paras",
":",
"para",
"=",
"para",
".",
"strip",
"(",
")",
"if",
"not",
"para",
":",
"continue",
"# merge paragraphs to improve qa",
"# merge this paragraph if less than 10 characters or 2 words",
"# or this paragraph starts with a lower case and last paragraph does not end with a punctuation",
"if",
"merge_short",
"and",
"len",
"(",
"para",
")",
"<",
"10",
"or",
"len",
"(",
"re",
".",
"findall",
"(",
"r'\\s+'",
",",
"para",
")",
")",
"<",
"2",
"or",
"merge_lowercase",
"and",
"para",
"and",
"para",
"[",
"0",
"]",
".",
"islower",
"(",
")",
"and",
"last_para",
"and",
"last_para",
"[",
"-",
"1",
"]",
"not",
"in",
"r'.?!\"\\'\\]\\)'",
":",
"last_para",
"+=",
"' '",
"+",
"para",
"else",
":",
"if",
"last_para",
":",
"documents",
".",
"append",
"(",
"{",
"\"text\"",
":",
"last_para",
",",
"\"meta\"",
":",
"meta",
"}",
")",
"last_para",
"=",
"para",
"# don't forget the last one",
"if",
"last_para",
":",
"documents",
".",
"append",
"(",
"{",
"\"text\"",
":",
"last_para",
",",
"\"meta\"",
":",
"meta",
"}",
")",
"else",
":",
"if",
"clean_func",
":",
"text",
"=",
"clean_func",
"(",
"text",
")",
"documents",
".",
"append",
"(",
"{",
"\"text\"",
":",
"text",
",",
"\"meta\"",
":",
"meta",
"}",
")",
"return",
"documents"
] | [
256,
0
] | [
336,
20
] | python | en | ['en', 'error', 'th'] | False |
fetch_archive_from_http | (url: str, output_dir: str, proxies: Optional[dict] = None) |
Fetch an archive (zip, tar.gz or gz) from a URL via HTTP and extract its content to an output directory.
:param url: http address
:type url: str
:param output_dir: local path
:type output_dir: str
:param proxies: proxies details as required by requests library
:type proxies: dict
:return: True if the archive was fetched and extracted, False if the output directory already contained data
|
Fetch an archive (zip, tar.gz or gz) from a URL via HTTP and extract its content to an output directory. | def fetch_archive_from_http(url: str, output_dir: str, proxies: Optional[dict] = None):
"""
    Fetch an archive (zip, tar.gz or gz) from a URL via HTTP and extract its content to an output directory.
:param url: http address
:type url: str
:param output_dir: local path
:type output_dir: str
:param proxies: proxies details as required by requests library
:type proxies: dict
    :return: True if the archive was fetched and extracted, False if the output directory already contained data
"""
# verify & prepare local directory
path = Path(output_dir)
if not path.exists():
path.mkdir(parents=True)
is_not_empty = len(list(Path(path).rglob("*"))) > 0
if is_not_empty:
logger.info(
f"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data."
)
return False
else:
logger.info(f"Fetching from {url} to `{output_dir}`")
# download & extract
with tempfile.NamedTemporaryFile() as temp_file:
http_get(url, temp_file, proxies=proxies)
temp_file.flush()
temp_file.seek(0) # making tempfile accessible
# extract
if url[-4:] == ".zip":
zip_archive = zipfile.ZipFile(temp_file.name)
zip_archive.extractall(output_dir)
elif url[-7:] == ".tar.gz":
tar_archive = tarfile.open(temp_file.name)
tar_archive.extractall(output_dir)
elif url[-3:] == ".gz":
filename = url.split("/")[-1].replace(".gz", "")
output_filename = Path(output_dir) / filename
with gzip.open(temp_file.name) as f, open(output_filename, "wb") as output:
for line in f:
output.write(line)
else:
logger.warning('Skipped url {0} as file type is not supported here. '
'See haystack documentation for support of more file types'.format(url))
# temp_file gets deleted here
return True | [
"def",
"fetch_archive_from_http",
"(",
"url",
":",
"str",
",",
"output_dir",
":",
"str",
",",
"proxies",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
")",
":",
"# verify & prepare local directory",
"path",
"=",
"Path",
"(",
"output_dir",
")",
"if",
"not",
"path",
".",
"exists",
"(",
")",
":",
"path",
".",
"mkdir",
"(",
"parents",
"=",
"True",
")",
"is_not_empty",
"=",
"len",
"(",
"list",
"(",
"Path",
"(",
"path",
")",
".",
"rglob",
"(",
"\"*\"",
")",
")",
")",
">",
"0",
"if",
"is_not_empty",
":",
"logger",
".",
"info",
"(",
"f\"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data.\"",
")",
"return",
"False",
"else",
":",
"logger",
".",
"info",
"(",
"f\"Fetching from {url} to `{output_dir}`\"",
")",
"# download & extract",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"as",
"temp_file",
":",
"http_get",
"(",
"url",
",",
"temp_file",
",",
"proxies",
"=",
"proxies",
")",
"temp_file",
".",
"flush",
"(",
")",
"temp_file",
".",
"seek",
"(",
"0",
")",
"# making tempfile accessible",
"# extract",
"if",
"url",
"[",
"-",
"4",
":",
"]",
"==",
"\".zip\"",
":",
"zip_archive",
"=",
"zipfile",
".",
"ZipFile",
"(",
"temp_file",
".",
"name",
")",
"zip_archive",
".",
"extractall",
"(",
"output_dir",
")",
"elif",
"url",
"[",
"-",
"7",
":",
"]",
"==",
"\".tar.gz\"",
":",
"tar_archive",
"=",
"tarfile",
".",
"open",
"(",
"temp_file",
".",
"name",
")",
"tar_archive",
".",
"extractall",
"(",
"output_dir",
")",
"elif",
"url",
"[",
"-",
"3",
":",
"]",
"==",
"\".gz\"",
":",
"filename",
"=",
"url",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
".",
"replace",
"(",
"\".gz\"",
",",
"\"\"",
")",
"output_filename",
"=",
"Path",
"(",
"output_dir",
")",
"/",
"filename",
"with",
"gzip",
".",
"open",
"(",
"temp_file",
".",
"name",
")",
"as",
"f",
",",
"open",
"(",
"output_filename",
",",
"\"wb\"",
")",
"as",
"output",
":",
"for",
"line",
"in",
"f",
":",
"output",
".",
"write",
"(",
"line",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Skipped url {0} as file type is not supported here. '",
"'See haystack documentation for support of more file types'",
".",
"format",
"(",
"url",
")",
")",
"# temp_file gets deleted here",
"return",
"True"
] | [
339,
0
] | [
387,
19
] | python | en | ['en', 'error', 'th'] | False |
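A minimal usage sketch for fetch_archive_from_http above; the URL is a placeholder, and the function skips the download when the output directory already holds data.

    fetched = fetch_archive_from_http(
        url="https://example.com/datasets/squad_small.tar.gz",
        output_dir="data/squad_small",
    )
    if not fetched:
        print("data/squad_small already contains data; nothing was downloaded")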
squad_json_to_jsonl | (squad_file: str, output_file: str) |
Converts a SQuAD-json-file into jsonl format with one document per line.
:param squad_file: SQuAD-file in json format.
:type squad_file: str
:param output_file: Name of output file (SQuAD in jsonl format)
:type output_file: str
|
Converts a SQuAD-json-file into jsonl format with one document per line. | def squad_json_to_jsonl(squad_file: str, output_file: str):
"""
Converts a SQuAD-json-file into jsonl format with one document per line.
:param squad_file: SQuAD-file in json format.
:type squad_file: str
:param output_file: Name of output file (SQuAD in jsonl format)
:type output_file: str
"""
with open(squad_file, encoding='utf-8') as json_file, open(output_file, "w", encoding='utf-8') as jsonl_file:
squad_json = json.load(json_file)
for doc in squad_json["data"]:
json.dump(doc, jsonl_file)
jsonl_file.write("\n") | [
"def",
"squad_json_to_jsonl",
"(",
"squad_file",
":",
"str",
",",
"output_file",
":",
"str",
")",
":",
"with",
"open",
"(",
"squad_file",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"json_file",
",",
"open",
"(",
"output_file",
",",
"\"w\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"jsonl_file",
":",
"squad_json",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"for",
"doc",
"in",
"squad_json",
"[",
"\"data\"",
"]",
":",
"json",
".",
"dump",
"(",
"doc",
",",
"jsonl_file",
")",
"jsonl_file",
".",
"write",
"(",
"\"\\n\"",
")"
] | [390, 0] | [404, 34] | python | en | ['en', 'error', 'th'] | False |
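A hedged usage sketch for the converter above; the file names are placeholders and the function is assumed to be in scope.

# Convert a SQuAD-format file into JSONL, one document per line.
squad_json_to_jsonl(squad_file="train-v2.0.json", output_file="train-v2.0.jsonl")

# Each output line is an independent JSON document and can be streamed lazily.
import json
with open("train-v2.0.jsonl", encoding="utf-8") as f:
    first_doc = json.loads(next(f))
print(first_doc.get("title"))  # SQuAD documents normally carry a 'title' field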
RtfFormatter.__init__ | (self, **options) | r"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
| r"""
Additional options accepted: | def __init__(self, **options):
r"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
self.fontsize = get_int_opt(options, 'fontsize', 0) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"Formatter",
".",
"__init__",
"(",
"self",
",",
"*",
"*",
"options",
")",
"self",
".",
"fontface",
"=",
"options",
".",
"get",
"(",
"'fontface'",
")",
"or",
"''",
"self",
".",
"fontsize",
"=",
"get_int_opt",
"(",
"options",
",",
"'fontsize'",
",",
"0",
")"
] | [50, 4] | [64, 59] | python | cy | ['en', 'cy', 'hi'] | False |
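Only the constructor is recorded here, so a short, hedged example of driving the formatter through Pygments' highlight() may help; the option values are arbitrary.

# Render Python source to RTF markup using the options handled by the
# __init__ above. fontsize follows RTF's half-point convention (48 -> 24pt).
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import RtfFormatter

source = "def greet(name):\n    return f'hello {name}'\n"
rtf = highlight(source, PythonLexer(), RtfFormatter(fontface="Courier New", fontsize=48))
print(rtf[:80])  # the result is a plain string of RTF markup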
critical_suite_with_citations | () |
This hand made fixture has a wide range of expectations, and has a mix of
metadata including a BasicSuiteBuilderProfiler entry, and citations.
|
This hand made fixture has a wide range of expectations, and has a mix of
metadata including a BasicSuiteBuilderProfiler entry, and citations.
| def critical_suite_with_citations():
"""
This hand made fixture has a wide range of expectations, and has a mix of
metadata including a BasicSuiteBuilderProfiler entry, and citations.
"""
schema = ExpectationSuiteSchema()
critical_suite = {
"expectation_suite_name": "critical",
"meta": {
"great_expectations_version": "0.9.1+9.gf17eff1f.dirty",
"columns": {
"npi": {"description": ""},
"nppes_provider_last_org_name": {"description": ""},
"nppes_provider_first_name": {"description": ""},
"nppes_provider_mi": {"description": ""},
"nppes_credentials": {"description": ""},
"nppes_provider_gender": {"description": ""},
"nppes_entity_code": {"description": ""},
"nppes_provider_street1": {"description": ""},
"nppes_provider_street2": {"description": ""},
"nppes_provider_city": {"description": ""},
},
"citations": [
{
"citation_date": "2020-02-28T17:34:31.307271",
"batch_kwargs": {
"path": "/home/foo/data/10k.csv",
"datasource": "files_datasource",
},
"batch_markers": {
"ge_load_time": "20200229T013430.655026Z",
"pandas_data_fingerprint": "f6037d92eb4c01f976513bc0aec2420d",
},
"batch_parameters": None,
"comment": "BasicSuiteBuilderProfiler added a citation based on the current batch.",
}
],
"notes": {
"format": "markdown",
"content": [
"#### This is an _example_ suite\n\n- This suite was made by quickly glancing at 1000 rows of your data.\n- This is **not a production suite**. It is meant to show examples of expectations.\n- Because this suite was auto-generated using a very basic profiler that does not know your data like you do, many of the expectations may not be meaningful.\n"
],
},
"BasicSuiteBuilderProfiler": {
"created_by": "BasicSuiteBuilderProfiler",
"created_at": 1582838223.843476,
"batch_kwargs": {
"path": "/Users/foo/data/10k.csv",
"datasource": "files_datasource",
},
},
},
"expectations": [
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "npi"},
"meta": {
"question": True,
"Notes": "There are empty strings that should probably be nulls",
"BasicSuiteBuilderProfiler": {"confidence": "very low"},
},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "provider_type"},
},
],
"data_asset_type": "Dataset",
}
return schema.loads(json.dumps(critical_suite)) | [
"def",
"critical_suite_with_citations",
"(",
")",
":",
"schema",
"=",
"ExpectationSuiteSchema",
"(",
")",
"critical_suite",
"=",
"{",
"\"expectation_suite_name\"",
":",
"\"critical\"",
",",
"\"meta\"",
":",
"{",
"\"great_expectations_version\"",
":",
"\"0.9.1+9.gf17eff1f.dirty\"",
",",
"\"columns\"",
":",
"{",
"\"npi\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_provider_last_org_name\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_provider_first_name\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_provider_mi\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_credentials\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_provider_gender\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_entity_code\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_provider_street1\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_provider_street2\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"\"nppes_provider_city\"",
":",
"{",
"\"description\"",
":",
"\"\"",
"}",
",",
"}",
",",
"\"citations\"",
":",
"[",
"{",
"\"citation_date\"",
":",
"\"2020-02-28T17:34:31.307271\"",
",",
"\"batch_kwargs\"",
":",
"{",
"\"path\"",
":",
"\"/home/foo/data/10k.csv\"",
",",
"\"datasource\"",
":",
"\"files_datasource\"",
",",
"}",
",",
"\"batch_markers\"",
":",
"{",
"\"ge_load_time\"",
":",
"\"20200229T013430.655026Z\"",
",",
"\"pandas_data_fingerprint\"",
":",
"\"f6037d92eb4c01f976513bc0aec2420d\"",
",",
"}",
",",
"\"batch_parameters\"",
":",
"None",
",",
"\"comment\"",
":",
"\"BasicSuiteBuilderProfiler added a citation based on the current batch.\"",
",",
"}",
"]",
",",
"\"notes\"",
":",
"{",
"\"format\"",
":",
"\"markdown\"",
",",
"\"content\"",
":",
"[",
"\"#### This is an _example_ suite\\n\\n- This suite was made by quickly glancing at 1000 rows of your data.\\n- This is **not a production suite**. It is meant to show examples of expectations.\\n- Because this suite was auto-generated using a very basic profiler that does not know your data like you do, many of the expectations may not be meaningful.\\n\"",
"]",
",",
"}",
",",
"\"BasicSuiteBuilderProfiler\"",
":",
"{",
"\"created_by\"",
":",
"\"BasicSuiteBuilderProfiler\"",
",",
"\"created_at\"",
":",
"1582838223.843476",
",",
"\"batch_kwargs\"",
":",
"{",
"\"path\"",
":",
"\"/Users/foo/data/10k.csv\"",
",",
"\"datasource\"",
":",
"\"files_datasource\"",
",",
"}",
",",
"}",
",",
"}",
",",
"\"expectations\"",
":",
"[",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"npi\"",
"}",
",",
"\"meta\"",
":",
"{",
"\"question\"",
":",
"True",
",",
"\"Notes\"",
":",
"\"There are empty strings that should probably be nulls\"",
",",
"\"BasicSuiteBuilderProfiler\"",
":",
"{",
"\"confidence\"",
":",
"\"very low\"",
"}",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"provider_type\"",
"}",
",",
"}",
",",
"]",
",",
"\"data_asset_type\"",
":",
"\"Dataset\"",
",",
"}",
"return",
"schema",
".",
"loads",
"(",
"json",
".",
"dumps",
"(",
"critical_suite",
")",
")"
] | [22, 0] | [91, 51] | python | en | ['en', 'error', 'th'] | False |
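The factory above returns an ExpectationSuite and, in the original test suite, is normally registered as a pytest fixture. A hedged sketch of how a test might consume it (the fixture registration itself is an assumption):

# Assumes critical_suite_with_citations is registered as a pytest fixture
# (e.g. via @pytest.fixture in conftest.py). Assertions follow the JSON blob above.
def test_critical_suite_shape(critical_suite_with_citations):
    suite = critical_suite_with_citations
    assert suite.expectation_suite_name == "critical"
    assert len(suite.expectations) == 2
    assert len(suite.meta["citations"]) == 1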
suite_with_multiple_citations | () |
A handmade suite with multiple citations each with different batch_kwargs.
The most recent citation does not have batch_kwargs
|
A handmade suite with multiple citations each with different batch_kwargs. | def suite_with_multiple_citations():
"""
A handmade suite with multiple citations each with different batch_kwargs.
The most recent citation does not have batch_kwargs
"""
schema = ExpectationSuiteSchema()
critical_suite = {
"expectation_suite_name": "critical",
"meta": {
"great_expectations_version": "0.9.1+9.gf17eff1f.dirty",
"citations": [
{
"citation_date": "2001-01-01T00:00:01.000001",
"batch_kwargs": {
"path": "3.csv",
"datasource": "3",
},
},
{
"citation_date": "2000-01-01T00:00:01.000001",
"batch_kwargs": {
"path": "2.csv",
"datasource": "2",
},
},
# This citation is the most recent and has no batch_kwargs
{
"citation_date": "2020-01-01T00:00:01.000001",
},
{
"citation_date": "1999-01-01T00:00:01.000001",
"batch_kwargs": {
"path": "1.csv",
"datasource": "1",
},
},
],
},
"expectations": [
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "npi"},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "provider_type"},
},
],
"data_asset_type": "Dataset",
}
return schema.loads(json.dumps(critical_suite)) | [
"def",
"suite_with_multiple_citations",
"(",
")",
":",
"schema",
"=",
"ExpectationSuiteSchema",
"(",
")",
"critical_suite",
"=",
"{",
"\"expectation_suite_name\"",
":",
"\"critical\"",
",",
"\"meta\"",
":",
"{",
"\"great_expectations_version\"",
":",
"\"0.9.1+9.gf17eff1f.dirty\"",
",",
"\"citations\"",
":",
"[",
"{",
"\"citation_date\"",
":",
"\"2001-01-01T00:00:01.000001\"",
",",
"\"batch_kwargs\"",
":",
"{",
"\"path\"",
":",
"\"3.csv\"",
",",
"\"datasource\"",
":",
"\"3\"",
",",
"}",
",",
"}",
",",
"{",
"\"citation_date\"",
":",
"\"2000-01-01T00:00:01.000001\"",
",",
"\"batch_kwargs\"",
":",
"{",
"\"path\"",
":",
"\"2.csv\"",
",",
"\"datasource\"",
":",
"\"2\"",
",",
"}",
",",
"}",
",",
"# This citation is the most recent and has no batch_kwargs",
"{",
"\"citation_date\"",
":",
"\"2020-01-01T00:00:01.000001\"",
",",
"}",
",",
"{",
"\"citation_date\"",
":",
"\"1999-01-01T00:00:01.000001\"",
",",
"\"batch_kwargs\"",
":",
"{",
"\"path\"",
":",
"\"1.csv\"",
",",
"\"datasource\"",
":",
"\"1\"",
",",
"}",
",",
"}",
",",
"]",
",",
"}",
",",
"\"expectations\"",
":",
"[",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"npi\"",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"provider_type\"",
"}",
",",
"}",
",",
"]",
",",
"\"data_asset_type\"",
":",
"\"Dataset\"",
",",
"}",
"return",
"schema",
".",
"loads",
"(",
"json",
".",
"dumps",
"(",
"critical_suite",
")",
")"
] | [95, 0] | [146, 51] | python | en | ['en', 'error', 'th'] | False |
warning_suite | () |
This hand made fixture has a wide range of expectations, and has a mix of
metadata including BasicSuiteBuilderProfiler entries.
|
This hand made fixture has a wide range of expectations, and has a mix of
metadata including BasicSuiteBuilderProfiler entries.
| def warning_suite():
"""
This hand made fixture has a wide range of expectations, and has a mix of
metadata including BasicSuiteBuilderProfiler entries.
"""
schema = ExpectationSuiteSchema()
warning_suite = {
"expectation_suite_name": "warning",
"meta": {
"great_expectations_version": "0.8.4.post0",
"citations": [
{
"citation_date": "2020-02-28T17:34:31.307271",
"batch_kwargs": {
"path": "/home/foo/data/10k.csv",
"datasource": "files_datasource",
},
"batch_markers": {
"ge_load_time": "20200229T013430.655026Z",
"pandas_data_fingerprint": "f6037d92eb4c01f976513bc0aec2420d",
},
"batch_parameters": None,
"comment": "BasicSuiteBuilderProfiler added a citation based on the current batch.",
}
],
},
"expectations": [
{
"expectation_type": "expect_table_row_count_to_be_between",
"kwargs": {"min_value": 800000, "max_value": 1200000},
},
{
"expectation_type": "expect_table_column_count_to_equal",
"kwargs": {"value": 71},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "npi"},
"meta": {"BasicSuiteBuilderProfiler": {"confidence": "very low"}},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "provider_type"},
"meta": {"BasicSuiteBuilderProfiler": {"confidence": "very low"}},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "nppes_provider_last_org_name"},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {
"column": "nppes_provider_gender",
"value_set": ["M", "F", ""],
},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "nppes_entity_code"},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "nppes_entity_code", "value_set": ["I", "O"]},
},
{
"expectation_type": "expect_column_kl_divergence_to_be_less_than",
"kwargs": {
"column": "nppes_entity_code",
"partition_object": {
"values": ["I", "O"],
"weights": [0.9431769750233306, 0.056823024976669335],
},
"threshold": 0.1,
},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {
"column": "nppes_provider_state",
"value_set": [
"AL",
"AK",
"AZ",
"AR",
"CA",
"CO",
"CT",
"DE",
"FL",
"GA",
"HI",
"ID",
"IL",
"IN",
"IA",
"KS",
"KY",
"LA",
"ME",
"MD",
"MA",
"MI",
"MN",
"MS",
"MO",
"MT",
"NE",
"NV",
"NH",
"NJ",
"NM",
"NY",
"NC",
"ND",
"OH",
"OK",
"OR",
"PA",
"RI",
"SC",
"SD",
"TN",
"TX",
"UT",
"VT",
"VA",
"WA",
"WV",
"WI",
"WY",
"DC",
"PR",
"AE",
"VI",
],
"mostly": 0.999,
},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "medicare_participation_indicator"},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {
"column": "medicare_participation_indicator",
"value_set": ["Y", "N"],
},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "number_of_hcpcs"},
},
{
"expectation_type": "expect_column_values_to_be_between",
"kwargs": {
"column": "number_of_hcpcs",
"min_value": 0,
"max_value": 500,
"mostly": 0.999,
},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "total_unique_benes"},
},
{
"expectation_type": "expect_column_values_to_be_between",
"kwargs": {
"column": "total_unique_benes",
"min_value": 0,
"max_value": 2000,
"mostly": 0.95,
},
},
{
"expectation_type": "expect_column_values_to_be_null",
"kwargs": {"column": "med_suppress_indicator", "mostly": 0.85},
},
{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "med_suppress_indicator", "value_set": ["#", "*"]},
},
{
"expectation_type": "expect_column_values_to_be_between",
"kwargs": {
"column": "beneficiary_average_age",
"min_value": 40,
"max_value": 90,
"mostly": 0.995,
},
},
{
"expectation_type": "expect_column_kl_divergence_to_be_less_than",
"kwargs": {
"column": "beneficiary_average_age",
"partition_object": {
"bins": [8, 16.5, 25, 33.5, 42, 50.5, 59, 67.5, 76, 84.5, 93],
"weights": [
0.00025259576594384474,
0.00013318685840675451,
0.0009653750909344757,
0.0012363414580378728,
0.01081660996274442,
0.030813927854975127,
0.13495227317818748,
0.6919590041664524,
0.1244213260634741,
0.004449359600843578,
],
},
"threshold": 0.9,
},
},
{
"expectation_type": "expect_column_values_to_be_between",
"kwargs": {
"column": "total_submitted_chrg_amt",
"min_value": 2000,
"max_value": 5000000,
"mostly": 0.98,
},
},
{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {"column": "nppes_provider_first_name", "mostly": 0.9},
},
{
"expectation_type": "expect_column_values_to_match_regex",
"kwargs": {
"column": "nppes_provider_zip",
"regex": "^\\d*$",
"mostly": 0.999,
},
},
],
"data_asset_type": "Dataset",
}
return schema.loads(json.dumps(warning_suite)) | [
"def",
"warning_suite",
"(",
")",
":",
"schema",
"=",
"ExpectationSuiteSchema",
"(",
")",
"warning_suite",
"=",
"{",
"\"expectation_suite_name\"",
":",
"\"warning\"",
",",
"\"meta\"",
":",
"{",
"\"great_expectations_version\"",
":",
"\"0.8.4.post0\"",
",",
"\"citations\"",
":",
"[",
"{",
"\"citation_date\"",
":",
"\"2020-02-28T17:34:31.307271\"",
",",
"\"batch_kwargs\"",
":",
"{",
"\"path\"",
":",
"\"/home/foo/data/10k.csv\"",
",",
"\"datasource\"",
":",
"\"files_datasource\"",
",",
"}",
",",
"\"batch_markers\"",
":",
"{",
"\"ge_load_time\"",
":",
"\"20200229T013430.655026Z\"",
",",
"\"pandas_data_fingerprint\"",
":",
"\"f6037d92eb4c01f976513bc0aec2420d\"",
",",
"}",
",",
"\"batch_parameters\"",
":",
"None",
",",
"\"comment\"",
":",
"\"BasicSuiteBuilderProfiler added a citation based on the current batch.\"",
",",
"}",
"]",
",",
"}",
",",
"\"expectations\"",
":",
"[",
"{",
"\"expectation_type\"",
":",
"\"expect_table_row_count_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"min_value\"",
":",
"800000",
",",
"\"max_value\"",
":",
"1200000",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_table_column_count_to_equal\"",
",",
"\"kwargs\"",
":",
"{",
"\"value\"",
":",
"71",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"npi\"",
"}",
",",
"\"meta\"",
":",
"{",
"\"BasicSuiteBuilderProfiler\"",
":",
"{",
"\"confidence\"",
":",
"\"very low\"",
"}",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"provider_type\"",
"}",
",",
"\"meta\"",
":",
"{",
"\"BasicSuiteBuilderProfiler\"",
":",
"{",
"\"confidence\"",
":",
"\"very low\"",
"}",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_provider_last_org_name\"",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_in_set\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_provider_gender\"",
",",
"\"value_set\"",
":",
"[",
"\"M\"",
",",
"\"F\"",
",",
"\"\"",
"]",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_entity_code\"",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_in_set\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_entity_code\"",
",",
"\"value_set\"",
":",
"[",
"\"I\"",
",",
"\"O\"",
"]",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_kl_divergence_to_be_less_than\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_entity_code\"",
",",
"\"partition_object\"",
":",
"{",
"\"values\"",
":",
"[",
"\"I\"",
",",
"\"O\"",
"]",
",",
"\"weights\"",
":",
"[",
"0.9431769750233306",
",",
"0.056823024976669335",
"]",
",",
"}",
",",
"\"threshold\"",
":",
"0.1",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_in_set\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_provider_state\"",
",",
"\"value_set\"",
":",
"[",
"\"AL\"",
",",
"\"AK\"",
",",
"\"AZ\"",
",",
"\"AR\"",
",",
"\"CA\"",
",",
"\"CO\"",
",",
"\"CT\"",
",",
"\"DE\"",
",",
"\"FL\"",
",",
"\"GA\"",
",",
"\"HI\"",
",",
"\"ID\"",
",",
"\"IL\"",
",",
"\"IN\"",
",",
"\"IA\"",
",",
"\"KS\"",
",",
"\"KY\"",
",",
"\"LA\"",
",",
"\"ME\"",
",",
"\"MD\"",
",",
"\"MA\"",
",",
"\"MI\"",
",",
"\"MN\"",
",",
"\"MS\"",
",",
"\"MO\"",
",",
"\"MT\"",
",",
"\"NE\"",
",",
"\"NV\"",
",",
"\"NH\"",
",",
"\"NJ\"",
",",
"\"NM\"",
",",
"\"NY\"",
",",
"\"NC\"",
",",
"\"ND\"",
",",
"\"OH\"",
",",
"\"OK\"",
",",
"\"OR\"",
",",
"\"PA\"",
",",
"\"RI\"",
",",
"\"SC\"",
",",
"\"SD\"",
",",
"\"TN\"",
",",
"\"TX\"",
",",
"\"UT\"",
",",
"\"VT\"",
",",
"\"VA\"",
",",
"\"WA\"",
",",
"\"WV\"",
",",
"\"WI\"",
",",
"\"WY\"",
",",
"\"DC\"",
",",
"\"PR\"",
",",
"\"AE\"",
",",
"\"VI\"",
",",
"]",
",",
"\"mostly\"",
":",
"0.999",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"medicare_participation_indicator\"",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_in_set\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"medicare_participation_indicator\"",
",",
"\"value_set\"",
":",
"[",
"\"Y\"",
",",
"\"N\"",
"]",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"number_of_hcpcs\"",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"number_of_hcpcs\"",
",",
"\"min_value\"",
":",
"0",
",",
"\"max_value\"",
":",
"500",
",",
"\"mostly\"",
":",
"0.999",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"total_unique_benes\"",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"total_unique_benes\"",
",",
"\"min_value\"",
":",
"0",
",",
"\"max_value\"",
":",
"2000",
",",
"\"mostly\"",
":",
"0.95",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"med_suppress_indicator\"",
",",
"\"mostly\"",
":",
"0.85",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_in_set\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"med_suppress_indicator\"",
",",
"\"value_set\"",
":",
"[",
"\"#\"",
",",
"\"*\"",
"]",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"beneficiary_average_age\"",
",",
"\"min_value\"",
":",
"40",
",",
"\"max_value\"",
":",
"90",
",",
"\"mostly\"",
":",
"0.995",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_kl_divergence_to_be_less_than\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"beneficiary_average_age\"",
",",
"\"partition_object\"",
":",
"{",
"\"bins\"",
":",
"[",
"8",
",",
"16.5",
",",
"25",
",",
"33.5",
",",
"42",
",",
"50.5",
",",
"59",
",",
"67.5",
",",
"76",
",",
"84.5",
",",
"93",
"]",
",",
"\"weights\"",
":",
"[",
"0.00025259576594384474",
",",
"0.00013318685840675451",
",",
"0.0009653750909344757",
",",
"0.0012363414580378728",
",",
"0.01081660996274442",
",",
"0.030813927854975127",
",",
"0.13495227317818748",
",",
"0.6919590041664524",
",",
"0.1244213260634741",
",",
"0.004449359600843578",
",",
"]",
",",
"}",
",",
"\"threshold\"",
":",
"0.9",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"total_submitted_chrg_amt\"",
",",
"\"min_value\"",
":",
"2000",
",",
"\"max_value\"",
":",
"5000000",
",",
"\"mostly\"",
":",
"0.98",
",",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_provider_first_name\"",
",",
"\"mostly\"",
":",
"0.9",
"}",
",",
"}",
",",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_match_regex\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"nppes_provider_zip\"",
",",
"\"regex\"",
":",
"\"^\\\\d*$\"",
",",
"\"mostly\"",
":",
"0.999",
",",
"}",
",",
"}",
",",
"]",
",",
"\"data_asset_type\"",
":",
"\"Dataset\"",
",",
"}",
"return",
"schema",
".",
"loads",
"(",
"json",
".",
"dumps",
"(",
"warning_suite",
")",
")"
] | [150, 0] | [388, 50] | python | en | ['en', 'error', 'th'] | False |
test_notebook_execution_with_pandas_backend | (titanic_data_context_no_data_docs) |
To set this test up we:
- create a suite
- add a few expectations (both table and column level)
- verify that no validations have happened
- create the suite edit notebook by hijacking the private cli method
We then:
- execute that notebook (Note this will raise various errors like
CellExecutionError if any cell in the notebook fails)
- create a new context from disk
- verify that a validation has been run with our expectation suite
|
To set this test up we: | def test_notebook_execution_with_pandas_backend(titanic_data_context_no_data_docs):
"""
To set this test up we:
- create a suite
- add a few expectations (both table and column level)
- verify that no validations have happened
- create the suite edit notebook by hijacking the private cli method
We then:
- execute that notebook (Note this will raise various errors like
CellExecutionError if any cell in the notebook fails)
- create a new context from disk
- verify that a validation has been run with our expectation suite
"""
# Since we'll run the notebook, we use a context with no data docs to avoid
# the renderer's default behavior of building and opening docs, which is not
# part of this test.
context = titanic_data_context_no_data_docs
root_dir = context.root_directory
uncommitted_dir = os.path.join(root_dir, "uncommitted")
suite_name = "warning"
context.create_expectation_suite(suite_name)
csv_path = os.path.join(root_dir, "..", "data", "Titanic.csv")
batch_kwargs = {"datasource": "mydatasource", "path": csv_path}
batch = context.get_batch(batch_kwargs, suite_name)
batch.expect_table_column_count_to_equal(1)
batch.expect_table_row_count_to_equal(1313)
batch.expect_column_values_to_be_in_set("Sex", ["female", "male"])
batch.save_expectation_suite(discard_failed_expectations=False)
# Sanity check test setup
suite = context.get_expectation_suite(suite_name)
original_suite = suite
assert len(suite.expectations) == 3
assert context.list_expectation_suite_names() == [suite_name]
assert context.list_datasources() == [
{
"module_name": "great_expectations.datasource",
"class_name": "PandasDatasource",
"data_asset_type": {
"module_name": "great_expectations.dataset",
"class_name": "PandasDataset",
},
"batch_kwargs_generators": {
"mygenerator": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data",
}
},
"name": "mydatasource",
}
]
assert context.get_validation_result("warning") == {}
# Create notebook
json_batch_kwargs = json.dumps(batch_kwargs)
_suite_edit(
suite_name,
"mydatasource",
directory=root_dir,
jupyter=False,
batch_kwargs=json_batch_kwargs,
usage_event="test_notebook_execution",
)
edit_notebook_path = os.path.join(uncommitted_dir, "edit_warning.ipynb")
assert os.path.isfile(edit_notebook_path)
with open(edit_notebook_path) as f:
nb = nbformat.read(f, as_version=4)
# Run notebook
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": uncommitted_dir}})
# Assertions about output
context = DataContext(root_dir)
obs_validation_result = context.get_validation_result("warning")
assert obs_validation_result.statistics == {
"evaluated_expectations": 3,
"successful_expectations": 2,
"unsuccessful_expectations": 1,
"success_percent": 66.66666666666666,
}
suite = context.get_expectation_suite(suite_name)
assert suite == original_suite | [
"def",
"test_notebook_execution_with_pandas_backend",
"(",
"titanic_data_context_no_data_docs",
")",
":",
"# Since we'll run the notebook, we use a context with no data docs to avoid",
"# the renderer's default behavior of building and opening docs, which is not",
"# part of this test.",
"context",
"=",
"titanic_data_context_no_data_docs",
"root_dir",
"=",
"context",
".",
"root_directory",
"uncommitted_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
")",
"suite_name",
"=",
"\"warning\"",
"context",
".",
"create_expectation_suite",
"(",
"suite_name",
")",
"csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"..\"",
",",
"\"data\"",
",",
"\"Titanic.csv\"",
")",
"batch_kwargs",
"=",
"{",
"\"datasource\"",
":",
"\"mydatasource\"",
",",
"\"path\"",
":",
"csv_path",
"}",
"batch",
"=",
"context",
".",
"get_batch",
"(",
"batch_kwargs",
",",
"suite_name",
")",
"batch",
".",
"expect_table_column_count_to_equal",
"(",
"1",
")",
"batch",
".",
"expect_table_row_count_to_equal",
"(",
"1313",
")",
"batch",
".",
"expect_column_values_to_be_in_set",
"(",
"\"Sex\"",
",",
"[",
"\"female\"",
",",
"\"male\"",
"]",
")",
"batch",
".",
"save_expectation_suite",
"(",
"discard_failed_expectations",
"=",
"False",
")",
"# Sanity check test setup",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"suite_name",
")",
"original_suite",
"=",
"suite",
"assert",
"len",
"(",
"suite",
".",
"expectations",
")",
"==",
"3",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"suite_name",
"]",
"assert",
"context",
".",
"list_datasources",
"(",
")",
"==",
"[",
"{",
"\"module_name\"",
":",
"\"great_expectations.datasource\"",
",",
"\"class_name\"",
":",
"\"PandasDatasource\"",
",",
"\"data_asset_type\"",
":",
"{",
"\"module_name\"",
":",
"\"great_expectations.dataset\"",
",",
"\"class_name\"",
":",
"\"PandasDataset\"",
",",
"}",
",",
"\"batch_kwargs_generators\"",
":",
"{",
"\"mygenerator\"",
":",
"{",
"\"class_name\"",
":",
"\"SubdirReaderBatchKwargsGenerator\"",
",",
"\"base_directory\"",
":",
"\"../data\"",
",",
"}",
"}",
",",
"\"name\"",
":",
"\"mydatasource\"",
",",
"}",
"]",
"assert",
"context",
".",
"get_validation_result",
"(",
"\"warning\"",
")",
"==",
"{",
"}",
"# Create notebook",
"json_batch_kwargs",
"=",
"json",
".",
"dumps",
"(",
"batch_kwargs",
")",
"_suite_edit",
"(",
"suite_name",
",",
"\"mydatasource\"",
",",
"directory",
"=",
"root_dir",
",",
"jupyter",
"=",
"False",
",",
"batch_kwargs",
"=",
"json_batch_kwargs",
",",
"usage_event",
"=",
"\"test_notebook_execution\"",
",",
")",
"edit_notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"uncommitted_dir",
",",
"\"edit_warning.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"edit_notebook_path",
")",
"with",
"open",
"(",
"edit_notebook_path",
")",
"as",
"f",
":",
"nb",
"=",
"nbformat",
".",
"read",
"(",
"f",
",",
"as_version",
"=",
"4",
")",
"# Run notebook",
"ep",
"=",
"ExecutePreprocessor",
"(",
"timeout",
"=",
"600",
",",
"kernel_name",
"=",
"\"python3\"",
")",
"ep",
".",
"preprocess",
"(",
"nb",
",",
"{",
"\"metadata\"",
":",
"{",
"\"path\"",
":",
"uncommitted_dir",
"}",
"}",
")",
"# Assertions about output",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"obs_validation_result",
"=",
"context",
".",
"get_validation_result",
"(",
"\"warning\"",
")",
"assert",
"obs_validation_result",
".",
"statistics",
"==",
"{",
"\"evaluated_expectations\"",
":",
"3",
",",
"\"successful_expectations\"",
":",
"2",
",",
"\"unsuccessful_expectations\"",
":",
"1",
",",
"\"success_percent\"",
":",
"66.66666666666666",
",",
"}",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"suite_name",
")",
"assert",
"suite",
"==",
"original_suite"
] | [1319, 0] | [1406, 34] | python | en | ['en', 'error', 'th'] | False |
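The notebook-execution pattern this test relies on can be reduced to a few lines; the notebook path below is a placeholder.

# Load a notebook with nbformat and run every cell with nbconvert's ExecutePreprocessor.
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

with open("uncommitted/edit_warning.ipynb") as f:  # placeholder path
    nb = nbformat.read(f, as_version=4)

ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
# Raises CellExecutionError if any cell fails; 'path' sets the working directory.
ep.preprocess(nb, {"metadata": {"path": "uncommitted"}})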
test_notebook_execution_with_custom_notebooks_wrong_module | (
suite_with_multiple_citations, data_context_with_bad_notebooks
) |
Test that the error message is clear in the case of a "bad" custom module
|
Test that the error message is clear in the case of a "bad" custom module
| def test_notebook_execution_with_custom_notebooks_wrong_module(
suite_with_multiple_citations, data_context_with_bad_notebooks
):
"""
Test that the error message is clear in the case of a "bad" custom module
"""
with pytest.raises(
SuiteEditNotebookCustomTemplateModuleNotFoundError, match=r"invalid\.module"
):
SuiteEditNotebookRenderer.from_data_context(
data_context_with_bad_notebooks
).render(suite_with_multiple_citations) | [
"def",
"test_notebook_execution_with_custom_notebooks_wrong_module",
"(",
"suite_with_multiple_citations",
",",
"data_context_with_bad_notebooks",
")",
":",
"with",
"pytest",
".",
"raises",
"(",
"SuiteEditNotebookCustomTemplateModuleNotFoundError",
",",
"match",
"=",
"r\"invalid\\.module\"",
")",
":",
"SuiteEditNotebookRenderer",
".",
"from_data_context",
"(",
"data_context_with_bad_notebooks",
")",
".",
"render",
"(",
"suite_with_multiple_citations",
")"
] | [1409, 0] | [1420, 47] | python | en | ['en', 'error', 'th'] | False |
test_notebook_execution_with_custom_notebooks | (
suite_with_multiple_citations, data_context_custom_notebooks
) |
Test that the different parts of the notebooks can be modified
|
Test that the different parts of the notebooks can be modified
| def test_notebook_execution_with_custom_notebooks(
suite_with_multiple_citations, data_context_custom_notebooks
):
"""
Test that the different parts of the notebooks can be modified
"""
obs = SuiteEditNotebookRenderer.from_data_context(
data_context_custom_notebooks
).render(suite_with_multiple_citations)
assert isinstance(obs, dict)
expected = {
"nbformat": 4,
"nbformat_minor": 4,
"metadata": {},
"cells": [
{
"cell_type": "markdown",
"source": "# Custom header for MyCompany",
"metadata": {},
},
{
"cell_type": "code",
"metadata": {},
"execution_count": None,
"source": 'import datetime\nimport great_expectations as ge\nimport great_expectations.jupyter_ux\nfrom great_expectations.checkpoint import LegacyCheckpoint\nfrom great_expectations.data_context.types.resource_identifiers import (\n ValidationResultIdentifier,\n)\n\ncontext = ge.data_context.DataContext()\n\n# Feel free to change the name of your suite here. Renaming this will not\n# remove the other one.\nexpectation_suite_name = "critical"\nsuite = context.get_expectation_suite(expectation_suite_name)\nsuite.expectations = []\n\nbatch_kwargs = {"path": "../../3.csv", "datasource": "3"}\nbatch = context.get_batch(batch_kwargs, suite)\nbatch.head()',
"outputs": [],
},
{
"cell_type": "markdown",
"source": "## Create & Edit Expectations\n\nAdd expectations by calling specific expectation methods on the `batch` object. They all begin with `.expect_` which makes autocompleting easy using tab.\n\nYou can see all the available expectations in the **[expectation glossary](https://docs.greatexpectations.io/en/latest/reference/glossary_of_expectations.html?utm_source=notebook&utm_medium=create_expectations)**.",
"metadata": {},
},
{
"cell_type": "markdown",
"source": "### Table Expectation(s)",
"metadata": {},
},
{
"cell_type": "markdown",
"source": "No table level expectations are in this suite. Feel free to add some here. They all begin with `batch.expect_table_...`.",
"metadata": {},
},
{
"cell_type": "markdown",
"source": "### Column Expectation(s)\nwrite your column expectations here",
"metadata": {},
},
{"cell_type": "markdown", "source": "#### `npi`", "metadata": {}},
{
"cell_type": "code",
"metadata": {},
"execution_count": None,
"source": 'batch.expect_column_values_to_not_be_null(column="npi")',
"outputs": [],
},
{"cell_type": "markdown", "source": "#### `provider_type`", "metadata": {}},
{
"cell_type": "code",
"metadata": {},
"execution_count": None,
"source": 'batch.expect_column_values_to_not_be_null(column="provider_type")',
"outputs": [],
},
{
"cell_type": "markdown",
"source": "## Save & Review Your Expectations\n\nLet's save the expectation suite as a JSON file in the `great_expectations/expectations` directory of your project.\nIf you decide not to save some expectations that you created, use [remove_expectation method](https://docs.greatexpectations.io/en/latest/autoapi/great_expectations/data_asset/index.html?highlight=remove_expectation&utm_source=notebook&utm_medium=edit_expectations#great_expectations.data_asset.DataAsset.remove_expectation).\n\nLet's now rebuild your Data Docs, which helps you communicate about your data with both machines and humans.",
"metadata": {},
},
{
"cell_type": "code",
"metadata": {},
"execution_count": None,
"source": 'batch.save_expectation_suite(discard_failed_expectations=False)\nrun_id = {\n "run_name": "some_string_that_uniquely_identifies_this_run", # insert your own run_name here\n "run_time": datetime.datetime.now(datetime.timezone.utc),\n}\nresults = context.run_validation_operator(\n "local", assets_to_validate=[batch], run_id=run_id\n)\nvalidation_result_identifier = results.list_validation_result_identifiers()[0]\ncontext.build_data_docs(site_names=["site_local"])\ncontext.open_data_docs(validation_result_identifier, site_name="site_local")',
"outputs": [],
},
],
}
del expected["nbformat_minor"]
del obs["nbformat_minor"]
for obs_cell, expected_cell in zip(obs["cells"], expected["cells"]):
obs_cell.pop("id", None)
assert obs_cell == expected_cell
assert obs == expected | [
"def",
"test_notebook_execution_with_custom_notebooks",
"(",
"suite_with_multiple_citations",
",",
"data_context_custom_notebooks",
")",
":",
"obs",
"=",
"SuiteEditNotebookRenderer",
".",
"from_data_context",
"(",
"data_context_custom_notebooks",
")",
".",
"render",
"(",
"suite_with_multiple_citations",
")",
"assert",
"isinstance",
"(",
"obs",
",",
"dict",
")",
"expected",
"=",
"{",
"\"nbformat\"",
":",
"4",
",",
"\"nbformat_minor\"",
":",
"4",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"\"cells\"",
":",
"[",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"# Custom header for MyCompany\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"code\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"\"execution_count\"",
":",
"None",
",",
"\"source\"",
":",
"'import datetime\\nimport great_expectations as ge\\nimport great_expectations.jupyter_ux\\nfrom great_expectations.checkpoint import LegacyCheckpoint\\nfrom great_expectations.data_context.types.resource_identifiers import (\\n ValidationResultIdentifier,\\n)\\n\\ncontext = ge.data_context.DataContext()\\n\\n# Feel free to change the name of your suite here. Renaming this will not\\n# remove the other one.\\nexpectation_suite_name = \"critical\"\\nsuite = context.get_expectation_suite(expectation_suite_name)\\nsuite.expectations = []\\n\\nbatch_kwargs = {\"path\": \"../../3.csv\", \"datasource\": \"3\"}\\nbatch = context.get_batch(batch_kwargs, suite)\\nbatch.head()'",
",",
"\"outputs\"",
":",
"[",
"]",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"## Create & Edit Expectations\\n\\nAdd expectations by calling specific expectation methods on the `batch` object. They all begin with `.expect_` which makes autocompleting easy using tab.\\n\\nYou can see all the available expectations in the **[expectation glossary](https://docs.greatexpectations.io/en/latest/reference/glossary_of_expectations.html?utm_source=notebook&utm_medium=create_expectations)**.\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"### Table Expectation(s)\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"No table level expectations are in this suite. Feel free to add some here. They all begin with `batch.expect_table_...`.\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"### Column Expectation(s)\\nwrite your column expectations here\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"#### `npi`\"",
",",
"\"metadata\"",
":",
"{",
"}",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"code\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"\"execution_count\"",
":",
"None",
",",
"\"source\"",
":",
"'batch.expect_column_values_to_not_be_null(column=\"npi\")'",
",",
"\"outputs\"",
":",
"[",
"]",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"#### `provider_type`\"",
",",
"\"metadata\"",
":",
"{",
"}",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"code\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"\"execution_count\"",
":",
"None",
",",
"\"source\"",
":",
"'batch.expect_column_values_to_not_be_null(column=\"provider_type\")'",
",",
"\"outputs\"",
":",
"[",
"]",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"markdown\"",
",",
"\"source\"",
":",
"\"## Save & Review Your Expectations\\n\\nLet's save the expectation suite as a JSON file in the `great_expectations/expectations` directory of your project.\\nIf you decide not to save some expectations that you created, use [remove_expectation method](https://docs.greatexpectations.io/en/latest/autoapi/great_expectations/data_asset/index.html?highlight=remove_expectation&utm_source=notebook&utm_medium=edit_expectations#great_expectations.data_asset.DataAsset.remove_expectation).\\n\\nLet's now rebuild your Data Docs, which helps you communicate about your data with both machines and humans.\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"}",
",",
"{",
"\"cell_type\"",
":",
"\"code\"",
",",
"\"metadata\"",
":",
"{",
"}",
",",
"\"execution_count\"",
":",
"None",
",",
"\"source\"",
":",
"'batch.save_expectation_suite(discard_failed_expectations=False)\\nrun_id = {\\n \"run_name\": \"some_string_that_uniquely_identifies_this_run\", # insert your own run_name here\\n \"run_time\": datetime.datetime.now(datetime.timezone.utc),\\n}\\nresults = context.run_validation_operator(\\n \"local\", assets_to_validate=[batch], run_id=run_id\\n)\\nvalidation_result_identifier = results.list_validation_result_identifiers()[0]\\ncontext.build_data_docs(site_names=[\"site_local\"])\\ncontext.open_data_docs(validation_result_identifier, site_name=\"site_local\")'",
",",
"\"outputs\"",
":",
"[",
"]",
",",
"}",
",",
"]",
",",
"}",
"del",
"expected",
"[",
"\"nbformat_minor\"",
"]",
"del",
"obs",
"[",
"\"nbformat_minor\"",
"]",
"for",
"obs_cell",
",",
"expected_cell",
"in",
"zip",
"(",
"obs",
"[",
"\"cells\"",
"]",
",",
"expected",
"[",
"\"cells\"",
"]",
")",
":",
"obs_cell",
".",
"pop",
"(",
"\"id\"",
",",
"None",
")",
"assert",
"obs_cell",
"==",
"expected_cell",
"assert",
"obs",
"==",
"expected"
] | [1423, 0] | [1505, 26] | python | en | ['en', 'error', 'th'] | False |
plot | (dfs, anomalies=[]) | Line plot for time series.
This function plots time series and highlights anomalous regions.
The first anomaly in anomalies is considered the ground truth.
Args:
dfs (list or `pd.DataFrame`): List of time series in `pd.DataFrame`.
Or a single dataframe. All dataframes must have the same shape.
anomalies (list): List of anomalies in tuple format.
| Line plot for time series.
This function plots time series and highlights anomalous regions.
The first anomaly in anomalies is considered the ground truth.
Args:
dfs (list or `pd.DataFrame`): List of time series in `pd.DataFrame`.
Or a single dataframe. All dataframes must have the same shape.
anomalies (list): List of anomalies in tuple format.
| def plot(dfs, anomalies=[]):
""" Line plot for time series.
This function plots time series and highlights anomalous regions.
The first anomaly in anomalies is considered the ground truth.
Args:
dfs (list or `pd.DataFrame`): List of time series in `pd.DataFrame`.
Or a single dataframe. All dataframes must have the same shape.
anomalies (list): List of anomalies in tuple format.
"""
if isinstance(dfs, pd.DataFrame):
dfs = [dfs]
if not isinstance(anomalies, list):
anomalies = [anomalies]
df = dfs[0]
time = convert_date(df['timestamp'])
months = mdates.MonthLocator() # every month
days = mdates.DayLocator() # every day
month_fmt = mdates.DateFormatter('%b')
fig = plt.figure(figsize=(30, 6))
ax = fig.add_subplot(111)
for df in dfs:
plt.plot(time, df['value'])
colors = ['red'] + ['green'] * (len(anomalies) - 1)
for i, anomaly in enumerate(anomalies):
if not isinstance(anomaly, list):
anomaly = list(anomaly[['start', 'end']].itertuples(index=False))
for _, anom in enumerate(anomaly):
t1 = convert_date_single(anom[0])
t2 = convert_date_single(anom[1])
plt.axvspan(t1, t2, color=colors[i], alpha=0.2)
plt.title('NYC Taxi Demand', size=34)
plt.ylabel('# passengers', size=30)
plt.xlabel('Time', size=30)
plt.xticks(size=26)
plt.yticks(size=26)
plt.xlim([time[0], time[-1]])
# format xticks
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(month_fmt)
ax.xaxis.set_minor_locator(days)
# format yticks
ylabels = ['{:,.0f}'.format(x) + 'K' for x in ax.get_yticks()/1000]
ax.set_yticklabels(ylabels)
plt.show() | [
"def",
"plot",
"(",
"dfs",
",",
"anomalies",
"=",
"[",
"]",
")",
":",
"if",
"isinstance",
"(",
"dfs",
",",
"pd",
".",
"DataFrame",
")",
":",
"dfs",
"=",
"[",
"dfs",
"]",
"if",
"not",
"isinstance",
"(",
"anomalies",
",",
"list",
")",
":",
"anomalies",
"=",
"[",
"anomalies",
"]",
"df",
"=",
"dfs",
"[",
"0",
"]",
"time",
"=",
"convert_date",
"(",
"df",
"[",
"'timestamp'",
"]",
")",
"months",
"=",
"mdates",
".",
"MonthLocator",
"(",
")",
"# every month",
"days",
"=",
"mdates",
".",
"DayLocator",
"(",
")",
"# every day",
"month_fmt",
"=",
"mdates",
".",
"DateFormatter",
"(",
"'%b'",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"30",
",",
"6",
")",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"for",
"df",
"in",
"dfs",
":",
"plt",
".",
"plot",
"(",
"time",
",",
"df",
"[",
"'value'",
"]",
")",
"colors",
"=",
"[",
"'red'",
"]",
"+",
"[",
"'green'",
"]",
"*",
"(",
"len",
"(",
"anomalies",
")",
"-",
"1",
")",
"for",
"i",
",",
"anomaly",
"in",
"enumerate",
"(",
"anomalies",
")",
":",
"if",
"not",
"isinstance",
"(",
"anomaly",
",",
"list",
")",
":",
"anomaly",
"=",
"list",
"(",
"anomaly",
"[",
"[",
"'start'",
",",
"'end'",
"]",
"]",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
")",
"for",
"_",
",",
"anom",
"in",
"enumerate",
"(",
"anomaly",
")",
":",
"t1",
"=",
"convert_date_single",
"(",
"anom",
"[",
"0",
"]",
")",
"t2",
"=",
"convert_date_single",
"(",
"anom",
"[",
"1",
"]",
")",
"plt",
".",
"axvspan",
"(",
"t1",
",",
"t2",
",",
"color",
"=",
"colors",
"[",
"i",
"]",
",",
"alpha",
"=",
"0.2",
")",
"plt",
".",
"title",
"(",
"'NYC Taxi Demand'",
",",
"size",
"=",
"34",
")",
"plt",
".",
"ylabel",
"(",
"'# passengers'",
",",
"size",
"=",
"30",
")",
"plt",
".",
"xlabel",
"(",
"'Time'",
",",
"size",
"=",
"30",
")",
"plt",
".",
"xticks",
"(",
"size",
"=",
"26",
")",
"plt",
".",
"yticks",
"(",
"size",
"=",
"26",
")",
"plt",
".",
"xlim",
"(",
"[",
"time",
"[",
"0",
"]",
",",
"time",
"[",
"-",
"1",
"]",
"]",
")",
"# format xticks",
"ax",
".",
"xaxis",
".",
"set_major_locator",
"(",
"months",
")",
"ax",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"month_fmt",
")",
"ax",
".",
"xaxis",
".",
"set_minor_locator",
"(",
"days",
")",
"# format yticks",
"ylabels",
"=",
"[",
"'{:,.0f}'",
".",
"format",
"(",
"x",
")",
"+",
"'K'",
"for",
"x",
"in",
"ax",
".",
"get_yticks",
"(",
")",
"/",
"1000",
"]",
"ax",
".",
"set_yticklabels",
"(",
"ylabels",
")",
"plt",
".",
"show",
"(",
")"
] | [90, 0] | [146, 14] | python | en | ['en', 'en', 'en'] | True |
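A hedged sketch of calling the plotting helper above on synthetic data. The column names follow the docstring; the epoch-second timestamps and the list-of-(start, end) anomaly format are assumptions inferred from how the function indexes its inputs and from the convert_date helpers it calls (not shown in this record).

import numpy as np
import pandas as pd

# Hourly series over ~30 days with 'timestamp' (epoch seconds) and 'value' columns.
start = 1_388_534_400                         # 2014-01-01 00:00:00 UTC, placeholder
stamps = start + 3600 * np.arange(24 * 30)
values = np.random.default_rng(0).normal(15_000, 2_000, stamps.size)
df = pd.DataFrame({"timestamp": stamps, "value": values})

# One known anomalous window, expressed as a list of (start, end) epoch pairs.
known = [[(start + 3600 * 100, start + 3600 * 110)]]
plot(df, anomalies=known)                     # assumes plot() and its helpers are in scope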
StatsmodelsOLS.__init__ | (self, cov_type='HC1', alpha=.05) | Initialize a statsmodels' OLS wrapper class object.
Args:
cov_type (str, optional): covariance estimator type.
alpha (float, optional): the confidence level alpha.
| Initialize a statsmodels' OLS wrapper class object.
Args:
cov_type (str, optional): covariance estimator type.
alpha (float, optional): the confidence level alpha.
| def __init__(self, cov_type='HC1', alpha=.05):
"""Initialize a statsmodels' OLS wrapper class object.
Args:
cov_type (str, optional): covariance estimator type.
alpha (float, optional): the confidence level alpha.
"""
self.cov_type = cov_type
self.alpha = alpha | [
"def",
"__init__",
"(",
"self",
",",
"cov_type",
"=",
"'HC1'",
",",
"alpha",
"=",
".05",
")",
":",
"self",
".",
"cov_type",
"=",
"cov_type",
"self",
".",
"alpha",
"=",
"alpha"
] | [20, 4] | [27, 26] | python | en | ['ca', 'en', 'en'] | True |
StatsmodelsOLS.fit | (self, X, y) | Fit OLS.
Args:
X (np.matrix): a feature matrix
y (np.array): a label vector
| Fit OLS.
Args:
X (np.matrix): a feature matrix
y (np.array): a label vector
| def fit(self, X, y):
"""Fit OLS.
Args:
X (np.matrix): a feature matrix
y (np.array): a label vector
"""
# Append ones. The first column is for the treatment indicator.
X = sm.add_constant(X, prepend=False, has_constant='add')
self.model = sm.OLS(y, X).fit(cov_type=self.cov_type)
self.coefficients = self.model.params
self.conf_ints = self.model.conf_int(alpha=self.alpha) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"# Append ones. The first column is for the treatment indicator.",
"X",
"=",
"sm",
".",
"add_constant",
"(",
"X",
",",
"prepend",
"=",
"False",
",",
"has_constant",
"=",
"'add'",
")",
"self",
".",
"model",
"=",
"sm",
".",
"OLS",
"(",
"y",
",",
"X",
")",
".",
"fit",
"(",
"cov_type",
"=",
"self",
".",
"cov_type",
")",
"self",
".",
"coefficients",
"=",
"self",
".",
"model",
".",
"params",
"self",
".",
"conf_ints",
"=",
"self",
".",
"model",
".",
"conf_int",
"(",
"alpha",
"=",
"self",
".",
"alpha",
")"
] | [29, 4] | [39, 62] | python | en | ['en', 'id', 'en'] | False |
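A hedged sketch of the wrapper above on synthetic data. Per the comment in fit(), the first column of X is the treatment indicator, so its coefficient approximates the average treatment effect; the class itself is assumed to be in scope.

import numpy as np

rng = np.random.default_rng(42)
n = 1_000
w = rng.binomial(1, 0.5, n)                       # treatment indicator, first column of X
covars = rng.normal(size=(n, 3))
y = 2.0 * w + covars @ np.array([0.5, -0.3, 0.1]) + rng.normal(size=n)

ols = StatsmodelsOLS(cov_type="HC1", alpha=0.05)
ols.fit(np.hstack([w.reshape(-1, 1), covars]), y)

print(ols.coefficients[0])  # close to 2.0, the treatment coefficient
print(ols.conf_ints[0])     # its 95% confidence interval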
BaseSLearner.__init__ | (self, learner=None, ate_alpha=0.05, control_name=0) | Initialize an S-learner.
Args:
learner (optional): a model to estimate the treatment effect
control_name (str or int, optional): name of control group
| Initialize an S-learner.
Args:
learner (optional): a model to estimate the treatment effect
control_name (str or int, optional): name of control group
| def __init__(self, learner=None, ate_alpha=0.05, control_name=0):
"""Initialize an S-learner.
Args:
learner (optional): a model to estimate the treatment effect
control_name (str or int, optional): name of control group
"""
if learner is not None:
self.model = learner
else:
self.model = DummyRegressor()
self.ate_alpha = ate_alpha
self.control_name = control_name | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"ate_alpha",
"=",
"0.05",
",",
"control_name",
"=",
"0",
")",
":",
"if",
"learner",
"is",
"not",
"None",
":",
"self",
".",
"model",
"=",
"learner",
"else",
":",
"self",
".",
"model",
"=",
"DummyRegressor",
"(",
")",
"self",
".",
"ate_alpha",
"=",
"ate_alpha",
"self",
".",
"control_name",
"=",
"control_name"
] | [53, 4] | [64, 40] | python | en | ['en', 'en', 'nl'] | True |
BaseSLearner.fit | (self, X, treatment, y, p=None) | Fit the inference model
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
| Fit the inference model
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
| def fit(self, X, treatment, y, p=None):
"""Fit the inference model
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
check_treatment_vector(treatment, self.control_name)
self.t_groups = np.unique(treatment[treatment != self.control_name])
self.t_groups.sort()
self._classes = {group: i for i, group in enumerate(self.t_groups)}
self.models = {group: deepcopy(self.model) for group in self.t_groups}
for group in self.t_groups:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
X_filt = X[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
X_new = np.hstack((w.reshape((-1, 1)), X_filt))
self.models[group].fit(X_new, y_filt) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"check_treatment_vector",
"(",
"treatment",
",",
"self",
".",
"control_name",
")",
"self",
".",
"t_groups",
"=",
"np",
".",
"unique",
"(",
"treatment",
"[",
"treatment",
"!=",
"self",
".",
"control_name",
"]",
")",
"self",
".",
"t_groups",
".",
"sort",
"(",
")",
"self",
".",
"_classes",
"=",
"{",
"group",
":",
"i",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
"}",
"self",
".",
"models",
"=",
"{",
"group",
":",
"deepcopy",
"(",
"self",
".",
"model",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"X_filt",
"=",
"X",
"[",
"mask",
"]",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"X_new",
"=",
"np",
".",
"hstack",
"(",
"(",
"w",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
",",
"X_filt",
")",
")",
"self",
".",
"models",
"[",
"group",
"]",
".",
"fit",
"(",
"X_new",
",",
"y_filt",
")"
] | [70, 4] | [92, 49] | python | en | ['en', 'en', 'en'] | True |
BaseSLearner.predict | (self, X, treatment=None, y=None, p=None, return_components=False, verbose=True) | Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
| Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
| def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True):
"""Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
yhat_cs = {}
yhat_ts = {}
for group in self.t_groups:
model = self.models[group]
# set the treatment column to zero (the control group)
X_new = np.hstack((np.zeros((X.shape[0], 1)), X))
yhat_cs[group] = model.predict(X_new)
# set the treatment column to one (the treatment group)
X_new[:, 0] = 1
yhat_ts[group] = model.predict(X_new)
if (y is not None) and (treatment is not None) and verbose:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
w = (treatment_filt == group).astype(int)
y_filt = y[mask]
yhat = np.zeros_like(y_filt, dtype=float)
yhat[w == 0] = yhat_cs[group][mask][w == 0]
yhat[w == 1] = yhat_ts[group][mask][w == 1]
logger.info('Error metrics for group {}'.format(group))
regression_metrics(y_filt, yhat, w)
te = np.zeros((X.shape[0], self.t_groups.shape[0]))
for i, group in enumerate(self.t_groups):
te[:, i] = yhat_ts[group] - yhat_cs[group]
if not return_components:
return te
else:
return te, yhat_cs, yhat_ts | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"treatment",
"=",
"None",
",",
"y",
"=",
"None",
",",
"p",
"=",
"None",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"yhat_cs",
"=",
"{",
"}",
"yhat_ts",
"=",
"{",
"}",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"model",
"=",
"self",
".",
"models",
"[",
"group",
"]",
"# set the treatment column to zero (the control group)",
"X_new",
"=",
"np",
".",
"hstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
",",
"X",
")",
")",
"yhat_cs",
"[",
"group",
"]",
"=",
"model",
".",
"predict",
"(",
"X_new",
")",
"# set the treatment column to one (the treatment group)",
"X_new",
"[",
":",
",",
"0",
"]",
"=",
"1",
"yhat_ts",
"[",
"group",
"]",
"=",
"model",
".",
"predict",
"(",
"X_new",
")",
"if",
"(",
"y",
"is",
"not",
"None",
")",
"and",
"(",
"treatment",
"is",
"not",
"None",
")",
"and",
"verbose",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"yhat",
"=",
"np",
".",
"zeros_like",
"(",
"y_filt",
",",
"dtype",
"=",
"float",
")",
"yhat",
"[",
"w",
"==",
"0",
"]",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"0",
"]",
"yhat",
"[",
"w",
"==",
"1",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"1",
"]",
"logger",
".",
"info",
"(",
"'Error metrics for group {}'",
".",
"format",
"(",
"group",
")",
")",
"regression_metrics",
"(",
"y_filt",
",",
"yhat",
",",
"w",
")",
"te",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"te",
"[",
":",
",",
"i",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"-",
"yhat_cs",
"[",
"group",
"]",
"if",
"not",
"return_components",
":",
"return",
"te",
"else",
":",
"return",
"te",
",",
"yhat_cs",
",",
"yhat_ts"
] | [
94,
4
] | [
140,
39
] | python | en | ['fr', 'en', 'en'] | True |
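A minimal, self-contained sketch of the S-learner prediction trick used in `BaseSLearner.predict` above: fit one outcome model on the stacked `[treatment, X]` matrix, then score it twice with the treatment indicator forced to 0 and to 1, and take the difference as the per-sample treatment effect. The synthetic data and the choice of `GradientBoostingRegressor` as base learner are illustrative assumptions, not part of the original code.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.default_rng(0)
n = 2000
X = rng.normal(size=(n, 5))
w = rng.binomial(1, 0.5, size=n)                 # binary treatment indicator
tau = 1.0 + 0.5 * X[:, 0]                        # true heterogeneous effect
y = X[:, 1] + tau * w + rng.normal(scale=0.5, size=n)

# Single outcome model on the stacked [treatment, features] matrix.
model = GradientBoostingRegressor().fit(np.hstack([w.reshape(-1, 1), X]), y)

# Score with the treatment column set to zero (control) and to one (treated).
X_ctrl = np.hstack([np.zeros((n, 1)), X])
X_trt = np.hstack([np.ones((n, 1)), X])
cate = model.predict(X_trt) - model.predict(X_ctrl)
print(cate.mean())   # should land near the true average effect of ~1.0
```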
BaseSLearner.fit_predict | (self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000,
return_components=False, verbose=True) | Fit the inference model of the S learner and predict treatment effects.
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
return_ci (bool, optional): whether to return confidence intervals
n_bootstraps (int, optional): number of bootstrap iterations
bootstrap_size (int, optional): number of samples per bootstrap
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment].
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
| Fit the inference model of the S learner and predict treatment effects.
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
return_ci (bool, optional): whether to return confidence intervals
n_bootstraps (int, optional): number of bootstrap iterations
bootstrap_size (int, optional): number of samples per bootstrap
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment].
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
| def fit_predict(self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000,
return_components=False, verbose=True):
"""Fit the inference model of the S learner and predict treatment effects.
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
return_ci (bool, optional): whether to return confidence intervals
n_bootstraps (int, optional): number of bootstrap iterations
bootstrap_size (int, optional): number of samples per bootstrap
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment].
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
"""
self.fit(X, treatment, y)
te = self.predict(X, treatment, y, return_components=return_components)
if not return_ci:
return te
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_global = deepcopy(self.models)
te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps))
logger.info('Bootstrap Confidence Intervals')
for i in tqdm(range(n_bootstraps)):
te_b = self.bootstrap(X, treatment, y, size=bootstrap_size)
te_bootstraps[:, :, i] = te_b
te_lower = np.percentile(te_bootstraps, (self.ate_alpha/2)*100, axis=2)
te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models = deepcopy(models_global)
return (te, te_lower, te_upper) | [
"def",
"fit_predict",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"return_ci",
"=",
"False",
",",
"n_bootstraps",
"=",
"1000",
",",
"bootstrap_size",
"=",
"10000",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"self",
".",
"fit",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"te",
"=",
"self",
".",
"predict",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"return_components",
"=",
"return_components",
")",
"if",
"not",
"return_ci",
":",
"return",
"te",
"else",
":",
"t_groups_global",
"=",
"self",
".",
"t_groups",
"_classes_global",
"=",
"self",
".",
"_classes",
"models_global",
"=",
"deepcopy",
"(",
"self",
".",
"models",
")",
"te_bootstraps",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
",",
"n_bootstraps",
")",
")",
"logger",
".",
"info",
"(",
"'Bootstrap Confidence Intervals'",
")",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"(",
"n_bootstraps",
")",
")",
":",
"te_b",
"=",
"self",
".",
"bootstrap",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"size",
"=",
"bootstrap_size",
")",
"te_bootstraps",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"te_b",
"te_lower",
"=",
"np",
".",
"percentile",
"(",
"te_bootstraps",
",",
"(",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"2",
")",
"te_upper",
"=",
"np",
".",
"percentile",
"(",
"te_bootstraps",
",",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"2",
")",
"# set member variables back to global (currently last bootstrapped outcome)",
"self",
".",
"t_groups",
"=",
"t_groups_global",
"self",
".",
"_classes",
"=",
"_classes_global",
"self",
".",
"models",
"=",
"deepcopy",
"(",
"models_global",
")",
"return",
"(",
"te",
",",
"te_lower",
",",
"te_upper",
")"
] | [
142,
4
] | [
183,
43
] | python | en | ['en', 'en', 'en'] | True |
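Hypothetical usage of `fit_predict` with bootstrap confidence intervals, following the docstring above. The import path (`causalml.inference.meta`) and the base learner are assumptions; the call signature and the shape of the returned arrays come from the record itself.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from causalml.inference.meta import BaseSRegressor   # assumed import path

rng = np.random.default_rng(0)
n = 2000
X = rng.normal(size=(n, 5))
w = rng.binomial(1, 0.5, size=n)
y = X[:, 1] + (1.0 + 0.5 * X[:, 0]) * w + rng.normal(scale=0.5, size=n)

learner = BaseSRegressor(learner=GradientBoostingRegressor(), control_name=0)
cate, cate_lb, cate_ub = learner.fit_predict(
    X, treatment=w, y=y,
    return_ci=True, n_bootstraps=100, bootstrap_size=500,
)
# Each array has shape [n_samples, n_treatment_groups].
print(cate.shape, cate_lb.shape, cate_ub.shape)
```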
BaseSLearner.estimate_ate | (self, X, treatment, y, p=None, return_ci=False, bootstrap_ci=False,
n_bootstraps=1000, bootstrap_size=10000) | Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
return_ci (bool, optional): whether to return confidence intervals
bootstrap_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
| Estimate the Average Treatment Effect (ATE). | def estimate_ate(self, X, treatment, y, p=None, return_ci=False, bootstrap_ci=False,
n_bootstraps=1000, bootstrap_size=10000):
"""Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
return_ci (bool, optional): whether to return confidence intervals
bootstrap_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
te, yhat_cs, yhat_ts = self.fit_predict(X, treatment, y, return_components=True)
ate = np.zeros(self.t_groups.shape[0])
ate_lb = np.zeros(self.t_groups.shape[0])
ate_ub = np.zeros(self.t_groups.shape[0])
for i, group in enumerate(self.t_groups):
_ate = te[:, i].mean()
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
prob_treatment = float(sum(w)) / w.shape[0]
yhat_c = yhat_cs[group][mask]
yhat_t = yhat_ts[group][mask]
se = np.sqrt((
(y_filt[w == 0] - yhat_c[w == 0]).var()
/ (1 - prob_treatment) +
(y_filt[w == 1] - yhat_t[w == 1]).var()
/ prob_treatment +
(yhat_t - yhat_c).var()
) / y_filt.shape[0])
_ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
_ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)
ate[i] = _ate
ate_lb[i] = _ate_lb
ate_ub[i] = _ate_ub
if not return_ci:
return ate
elif return_ci and not bootstrap_ci:
return ate, ate_lb, ate_ub
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_global = deepcopy(self.models)
logger.info('Bootstrap Confidence Intervals for ATE')
ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))
for n in tqdm(range(n_bootstraps)):
ate_b = self.bootstrap(X, treatment, y, size=bootstrap_size)
ate_bootstraps[:, n] = ate_b.mean()
ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
ate_upper = np.percentile(ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models = deepcopy(models_global)
return ate, ate_lower, ate_upper | [
"def",
"estimate_ate",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"return_ci",
"=",
"False",
",",
"bootstrap_ci",
"=",
"False",
",",
"n_bootstraps",
"=",
"1000",
",",
"bootstrap_size",
"=",
"10000",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"te",
",",
"yhat_cs",
",",
"yhat_ts",
"=",
"self",
".",
"fit_predict",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"return_components",
"=",
"True",
")",
"ate",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_lb",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_ub",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"_ate",
"=",
"te",
"[",
":",
",",
"i",
"]",
".",
"mean",
"(",
")",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"prob_treatment",
"=",
"float",
"(",
"sum",
"(",
"w",
")",
")",
"/",
"w",
".",
"shape",
"[",
"0",
"]",
"yhat_c",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"yhat_t",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"se",
"=",
"np",
".",
"sqrt",
"(",
"(",
"(",
"y_filt",
"[",
"w",
"==",
"0",
"]",
"-",
"yhat_c",
"[",
"w",
"==",
"0",
"]",
")",
".",
"var",
"(",
")",
"/",
"(",
"1",
"-",
"prob_treatment",
")",
"+",
"(",
"y_filt",
"[",
"w",
"==",
"1",
"]",
"-",
"yhat_t",
"[",
"w",
"==",
"1",
"]",
")",
".",
"var",
"(",
")",
"/",
"prob_treatment",
"+",
"(",
"yhat_t",
"-",
"yhat_c",
")",
".",
"var",
"(",
")",
")",
"/",
"y_filt",
".",
"shape",
"[",
"0",
"]",
")",
"_ate_lb",
"=",
"_ate",
"-",
"se",
"*",
"norm",
".",
"ppf",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"_ate_ub",
"=",
"_ate",
"+",
"se",
"*",
"norm",
".",
"ppf",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"ate",
"[",
"i",
"]",
"=",
"_ate",
"ate_lb",
"[",
"i",
"]",
"=",
"_ate_lb",
"ate_ub",
"[",
"i",
"]",
"=",
"_ate_ub",
"if",
"not",
"return_ci",
":",
"return",
"ate",
"elif",
"return_ci",
"and",
"not",
"bootstrap_ci",
":",
"return",
"ate",
",",
"ate_lb",
",",
"ate_ub",
"else",
":",
"t_groups_global",
"=",
"self",
".",
"t_groups",
"_classes_global",
"=",
"self",
".",
"_classes",
"models_global",
"=",
"deepcopy",
"(",
"self",
".",
"models",
")",
"logger",
".",
"info",
"(",
"'Bootstrap Confidence Intervals for ATE'",
")",
"ate_bootstraps",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
",",
"n_bootstraps",
")",
")",
"for",
"n",
"in",
"tqdm",
"(",
"range",
"(",
"n_bootstraps",
")",
")",
":",
"ate_b",
"=",
"self",
".",
"bootstrap",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"size",
"=",
"bootstrap_size",
")",
"ate_bootstraps",
"[",
":",
",",
"n",
"]",
"=",
"ate_b",
".",
"mean",
"(",
")",
"ate_lower",
"=",
"np",
".",
"percentile",
"(",
"ate_bootstraps",
",",
"(",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"1",
")",
"ate_upper",
"=",
"np",
".",
"percentile",
"(",
"ate_bootstraps",
",",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"1",
")",
"# set member variables back to global (currently last bootstrapped outcome)",
"self",
".",
"t_groups",
"=",
"t_groups_global",
"self",
".",
"_classes",
"=",
"_classes_global",
"self",
".",
"models",
"=",
"deepcopy",
"(",
"models_global",
")",
"return",
"ate",
",",
"ate_lower",
",",
"ate_upper"
] | [
185,
4
] | [
258,
44
] | python | en | ['en', 'it', 'en'] | True |
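The analytic confidence interval in `estimate_ate` combines three variance terms: control residual variance scaled by 1/(1-p), treated residual variance scaled by 1/p, and the variance of the predicted effect itself, all divided by the group sample size. A small numpy sketch of that formula, with placeholder predictions standing in for real model outputs:

```python
import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(1)
n = 1000
w = rng.binomial(1, 0.4, size=n)                 # treatment indicator in one group
y = rng.normal(size=n) + 1.2 * w
yhat_c = rng.normal(size=n)                      # stand-in control-outcome predictions
yhat_t = yhat_c + 1.2 + rng.normal(scale=0.1, size=n)   # stand-in treated predictions

prob_treatment = w.mean()
se = np.sqrt((
    (y[w == 0] - yhat_c[w == 0]).var() / (1 - prob_treatment)
    + (y[w == 1] - yhat_t[w == 1]).var() / prob_treatment
    + (yhat_t - yhat_c).var()
) / n)

ate = (yhat_t - yhat_c).mean()
alpha = 0.05
ci = (ate - se * norm.ppf(1 - alpha / 2), ate + se * norm.ppf(1 - alpha / 2))
print(ate, ci)
```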
BaseSRegressor.__init__ | (self, learner=None, ate_alpha=0.05, control_name=0) | Initialize an S-learner regressor.
Args:
learner (optional): a model to estimate the treatment effect
control_name (str or int, optional): name of control group
| Initialize an S-learner regressor.
Args:
learner (optional): a model to estimate the treatment effect
control_name (str or int, optional): name of control group
| def __init__(self, learner=None, ate_alpha=0.05, control_name=0):
"""Initialize an S-learner regressor.
Args:
learner (optional): a model to estimate the treatment effect
control_name (str or int, optional): name of control group
"""
super().__init__(
learner=learner,
ate_alpha=ate_alpha,
control_name=control_name) | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"ate_alpha",
"=",
"0.05",
",",
"control_name",
"=",
"0",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"learner",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
")"
] | [
266,
4
] | [
275,
38
] | python | en | ['en', 'fy', 'nl'] | False |
BaseSClassifier.__init__ | (self, learner=None, ate_alpha=0.05, control_name=0) | Initialize an S-learner classifier.
Args:
learner (optional): a model to estimate the treatment effect.
Should have a predict_proba() method.
control_name (str or int, optional): name of control group
| Initialize an S-learner classifier.
Args:
learner (optional): a model to estimate the treatment effect.
Should have a predict_proba() method.
control_name (str or int, optional): name of control group
| def __init__(self, learner=None, ate_alpha=0.05, control_name=0):
"""Initialize an S-learner classifier.
Args:
learner (optional): a model to estimate the treatment effect.
Should have a predict_proba() method.
control_name (str or int, optional): name of control group
"""
super().__init__(
learner=learner,
ate_alpha=ate_alpha,
control_name=control_name) | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"ate_alpha",
"=",
"0.05",
",",
"control_name",
"=",
"0",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"learner",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
")"
] | [
283,
4
] | [
293,
38
] | python | en | ['en', 'fy', 'nl'] | False |
BaseSClassifier.predict | (self, X, treatment=None, y=None, p=None, return_components=False, verbose=True) | Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
| Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
| def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True):
"""Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
yhat_cs = {}
yhat_ts = {}
for group in self.t_groups:
model = self.models[group]
# set the treatment column to zero (the control group)
X_new = np.hstack((np.zeros((X.shape[0], 1)), X))
yhat_cs[group] = model.predict_proba(X_new)[:, 1]
# set the treatment column to one (the treatment group)
X_new[:, 0] = 1
yhat_ts[group] = model.predict_proba(X_new)[:, 1]
if y is not None and (treatment is not None) and verbose:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
w = (treatment_filt == group).astype(int)
y_filt = y[mask]
yhat = np.zeros_like(y_filt, dtype=float)
yhat[w == 0] = yhat_cs[group][mask][w == 0]
yhat[w == 1] = yhat_ts[group][mask][w == 1]
logger.info('Error metrics for group {}'.format(group))
classification_metrics(y_filt, yhat, w)
te = np.zeros((X.shape[0], self.t_groups.shape[0]))
for i, group in enumerate(self.t_groups):
te[:, i] = yhat_ts[group] - yhat_cs[group]
if not return_components:
return te
else:
return te, yhat_cs, yhat_ts | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"treatment",
"=",
"None",
",",
"y",
"=",
"None",
",",
"p",
"=",
"None",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"yhat_cs",
"=",
"{",
"}",
"yhat_ts",
"=",
"{",
"}",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"model",
"=",
"self",
".",
"models",
"[",
"group",
"]",
"# set the treatment column to zero (the control group)",
"X_new",
"=",
"np",
".",
"hstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
")",
",",
"X",
")",
")",
"yhat_cs",
"[",
"group",
"]",
"=",
"model",
".",
"predict_proba",
"(",
"X_new",
")",
"[",
":",
",",
"1",
"]",
"# set the treatment column to one (the treatment group)",
"X_new",
"[",
":",
",",
"0",
"]",
"=",
"1",
"yhat_ts",
"[",
"group",
"]",
"=",
"model",
".",
"predict_proba",
"(",
"X_new",
")",
"[",
":",
",",
"1",
"]",
"if",
"y",
"is",
"not",
"None",
"and",
"(",
"treatment",
"is",
"not",
"None",
")",
"and",
"verbose",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"yhat",
"=",
"np",
".",
"zeros_like",
"(",
"y_filt",
",",
"dtype",
"=",
"float",
")",
"yhat",
"[",
"w",
"==",
"0",
"]",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"0",
"]",
"yhat",
"[",
"w",
"==",
"1",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"1",
"]",
"logger",
".",
"info",
"(",
"'Error metrics for group {}'",
".",
"format",
"(",
"group",
")",
")",
"classification_metrics",
"(",
"y_filt",
",",
"yhat",
",",
"w",
")",
"te",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"te",
"[",
":",
",",
"i",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"-",
"yhat_cs",
"[",
"group",
"]",
"if",
"not",
"return_components",
":",
"return",
"te",
"else",
":",
"return",
"te",
",",
"yhat_cs",
",",
"yhat_ts"
] | [
295,
4
] | [
341,
39
] | python | en | ['fr', 'en', 'en'] | True |
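The classifier variant above differs from the regressor only in reading P(y=1) from `predict_proba(...)[:, 1]` instead of a regression prediction. A minimal sketch of the same 0/1-column trick on probabilities, with an assumed sklearn base learner and synthetic binary outcomes:

```python
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

rng = np.random.default_rng(2)
n = 2000
X = rng.normal(size=(n, 5))
w = rng.binomial(1, 0.5, size=n)
p1 = 1 / (1 + np.exp(-(0.5 * X[:, 0] + 0.8 * w)))   # treatment shifts the logit
y = rng.binomial(1, p1)

clf = GradientBoostingClassifier().fit(np.hstack([w.reshape(-1, 1), X]), y)
p_ctrl = clf.predict_proba(np.hstack([np.zeros((n, 1)), X]))[:, 1]
p_trt = clf.predict_proba(np.hstack([np.ones((n, 1)), X]))[:, 1]
uplift = p_trt - p_ctrl                              # per-sample effect on P(y=1)
print(uplift.mean())
```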
LRSRegressor.__init__ | (self, ate_alpha=.05, control_name=0) | Initialize an S-learner with a linear regression model.
Args:
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize an S-learner with a linear regression model.
Args:
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| def __init__(self, ate_alpha=.05, control_name=0):
"""Initialize an S-learner with a linear regression model.
Args:
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
super().__init__(StatsmodelsOLS(alpha=ate_alpha), ate_alpha, control_name) | [
"def",
"__init__",
"(",
"self",
",",
"ate_alpha",
"=",
".05",
",",
"control_name",
"=",
"0",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"StatsmodelsOLS",
"(",
"alpha",
"=",
"ate_alpha",
")",
",",
"ate_alpha",
",",
"control_name",
")"
] | [
345,
4
] | [
351,
82
] | python | en | ['en', 'en', 'en'] | True |
LRSRegressor.estimate_ate | (self, X, treatment, y, p=None) | Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
| Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
| def estimate_ate(self, X, treatment, y, p=None):
"""Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix, np.array, or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
self.fit(X, treatment, y)
ate = np.zeros(self.t_groups.shape[0])
ate_lb = np.zeros(self.t_groups.shape[0])
ate_ub = np.zeros(self.t_groups.shape[0])
for i, group in enumerate(self.t_groups):
ate[i] = self.models[group].coefficients[0]
ate_lb[i] = self.models[group].conf_ints[0, 0]
ate_ub[i] = self.models[group].conf_ints[0, 1]
return ate, ate_lb, ate_ub | [
"def",
"estimate_ate",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"self",
".",
"fit",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"ate",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_lb",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_ub",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"ate",
"[",
"i",
"]",
"=",
"self",
".",
"models",
"[",
"group",
"]",
".",
"coefficients",
"[",
"0",
"]",
"ate_lb",
"[",
"i",
"]",
"=",
"self",
".",
"models",
"[",
"group",
"]",
".",
"conf_ints",
"[",
"0",
",",
"0",
"]",
"ate_ub",
"[",
"i",
"]",
"=",
"self",
".",
"models",
"[",
"group",
"]",
".",
"conf_ints",
"[",
"0",
",",
"1",
"]",
"return",
"ate",
",",
"ate_lb",
",",
"ate_ub"
] | [
353,
4
] | [
374,
34
] | python | en | ['en', 'it', 'en'] | True |
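Hypothetical usage of the OLS-based S-learner defined above: the ATE is the fitted coefficient on the treatment column, reported with its OLS confidence interval. The import path is an assumption; the call follows the `estimate_ate` docstring.

```python
import numpy as np
from causalml.inference.meta import LRSRegressor   # assumed import path

rng = np.random.default_rng(3)
n = 5000
X = rng.normal(size=(n, 5))
w = rng.binomial(1, 0.5, size=n)
y = X[:, 0] + 1.5 * w + rng.normal(size=n)          # true ATE = 1.5

lr_learner = LRSRegressor(ate_alpha=0.05, control_name=0)
ate, ate_lb, ate_ub = lr_learner.estimate_ate(X, treatment=w, y=y)
print(ate, ate_lb, ate_ub)    # one entry per non-control treatment group
```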
S3Logging.configure | (self, options, conf) | Get the options. | Get the options. | def configure(self, options, conf):
""" Get the options. """
super(S3Logging, self).configure(options, conf)
self.options = options | [
"def",
"configure",
"(",
"self",
",",
"options",
",",
"conf",
")",
":",
"super",
"(",
"S3Logging",
",",
"self",
")",
".",
"configure",
"(",
"options",
",",
"conf",
")",
"self",
".",
"options",
"=",
"options"
] | [
17,
4
] | [
20,
30
] | python | en | ['en', 'en', 'en'] | True |
S3Logging.afterTest | (self, test) | After each testcase, upload logs to the S3 bucket. | After each testcase, upload logs to the S3 bucket. | def afterTest(self, test):
""" After each testcase, upload logs to the S3 bucket. """
s3_bucket = S3LoggingBucket()
guid = str(uuid.uuid4().hex)
path = "%s/%s" % (self.options.log_path,
test.test.id())
uploaded_files = []
for logfile in os.listdir(path):
logfile_name = "%s/%s/%s" % (guid,
test.test.id(),
logfile.split(path)[-1])
s3_bucket.upload_file(logfile_name,
"%s/%s" % (path, logfile))
uploaded_files.append(logfile_name)
s3_bucket.save_uploaded_file_names(uploaded_files)
index_file = s3_bucket.upload_index_file(test.id(), guid)
print("\n\n*** Log files uploaded: ***\n%s\n" % index_file)
logging.error("\n\n*** Log files uploaded: ***\n%s\n" % index_file)
# If the database plugin is running, attach a link
# to the logs index database row
if hasattr(test.test, "testcase_guid"):
from seleniumbase.core.testcase_manager \
import TestcaseDataPayload, TestcaseManager
self.testcase_manager = TestcaseManager(self.options.database_env)
data_payload = TestcaseDataPayload()
data_payload.guid = test.test.testcase_guid
data_payload.log_url = index_file
self.testcase_manager.update_testcase_log_url(data_payload) | [
"def",
"afterTest",
"(",
"self",
",",
"test",
")",
":",
"s3_bucket",
"=",
"S3LoggingBucket",
"(",
")",
"guid",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"path",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"options",
".",
"log_path",
",",
"test",
".",
"test",
".",
"id",
"(",
")",
")",
"uploaded_files",
"=",
"[",
"]",
"for",
"logfile",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"logfile_name",
"=",
"\"%s/%s/%s\"",
"%",
"(",
"guid",
",",
"test",
".",
"test",
".",
"id",
"(",
")",
",",
"logfile",
".",
"split",
"(",
"path",
")",
"[",
"-",
"1",
"]",
")",
"s3_bucket",
".",
"upload_file",
"(",
"logfile_name",
",",
"\"%s/%s\"",
"%",
"(",
"path",
",",
"logfile",
")",
")",
"uploaded_files",
".",
"append",
"(",
"logfile_name",
")",
"s3_bucket",
".",
"save_uploaded_file_names",
"(",
"uploaded_files",
")",
"index_file",
"=",
"s3_bucket",
".",
"upload_index_file",
"(",
"test",
".",
"id",
"(",
")",
",",
"guid",
")",
"print",
"(",
"\"\\n\\n*** Log files uploaded: ***\\n%s\\n\"",
"%",
"index_file",
")",
"logging",
".",
"error",
"(",
"\"\\n\\n*** Log files uploaded: ***\\n%s\\n\"",
"%",
"index_file",
")",
"# If the database plugin is running, attach a link",
"# to the logs index database row",
"if",
"hasattr",
"(",
"test",
".",
"test",
",",
"\"testcase_guid\"",
")",
":",
"from",
"seleniumbase",
".",
"core",
".",
"testcase_manager",
"import",
"TestcaseDataPayload",
",",
"TestcaseManager",
"self",
".",
"testcase_manager",
"=",
"TestcaseManager",
"(",
"self",
".",
"options",
".",
"database_env",
")",
"data_payload",
"=",
"TestcaseDataPayload",
"(",
")",
"data_payload",
".",
"guid",
"=",
"test",
".",
"test",
".",
"testcase_guid",
"data_payload",
".",
"log_url",
"=",
"index_file",
"self",
".",
"testcase_manager",
".",
"update_testcase_log_url",
"(",
"data_payload",
")"
] | [
22,
4
] | [
50,
71
] | python | en | ['en', 'en', 'en'] | True |
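A small illustration of how `afterTest` names the uploaded S3 keys: a random run GUID, then the test id, then the local log file name. The test id and file name below are made-up placeholders, not values from the original plugin.

```python
import uuid

guid = uuid.uuid4().hex
test_id = "test_module.TestClass.test_method"   # hypothetical test id
local_logfile = "basic_test_info.txt"           # hypothetical log file name
s3_key = "%s/%s/%s" % (guid, test_id, local_logfile)
print(s3_key)   # e.g. '3f2a.../test_module.TestClass.test_method/basic_test_info.txt'
```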