Dataset schema (one record per function; ranges give the minimum .. maximum length or value):

    body_hash                 stringlengths    64 .. 64
    body                      stringlengths    23 .. 109k
    docstring                 stringlengths     1 .. 57k
    path                      stringlengths     4 .. 198
    name                      stringlengths     1 .. 115
    repository_name           stringlengths     7 .. 111
    repository_stars          float64           0 .. 191k
    lang                      stringclasses     1 value
    body_without_docstring    stringlengths    14 .. 108k
    unified                   stringlengths    45 .. 133k
dd19af383bc878257dfd229b01492ba7bc18ffd776a0429adf74d3a43f95a44f
def get_geo_data(self, geo_dset=None, band=None):
    """
    Returns data of selected datasets from the GEODATA group

    Parameters
    ----------
    geo_dset : string
        Name(s) of datasets in the GEODATA group, comma separated
        Default is 'satellite_latitude,satellite_longitude'
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product
        Default is 'None' which returns the first available band

    Returns
    -------
    out : dict of numpy
        Compound array with data of selected datasets from the GEODATA group
    """
    if self.__msm_path is None:
        return None

    if geo_dset is None:
        geo_dset = self.geo_dset

    if band is None:
        band = self.bands[0]

    msm_path = self.__msm_path.replace('%', band)
    grp = self.fid[str(PurePosixPath(msm_path, 'GEODATA'))]

    res = {}
    for name in geo_dset.split(','):
        res[name] = grp[name][0, ...]

    return res
src/pys5p/l1b_io.py
get_geo_data
rmvanhees/pys5p
10
python
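A hypothetical usage sketch for the get_geo_data row above; the reader class L1BioRAD, the select() call and the product filename are assumptions about this module, not taken from the row itself:

from pys5p.l1b_io import L1BioRAD

with L1BioRAD('S5P_OFFL_L1B_RA_BD7_example.nc') as l1b:
    l1b.select()                          # assumed: selects the measurement group first
    geo = l1b.get_geo_data(geo_dset='satellite_latitude,satellite_longitude')
    print(geo['satellite_latitude'].shape)   # one dict entry per requested dataset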
a16ead5006f90fa6b8f4d80a9dc4145a2c24aeb846990652cdc64fc4706c7c9a
def get_msm_attr(self, msm_dset, attr_name, band=None):
    """
    Returns value of attribute "attr_name" of measurement dataset "msm_dset"

    Parameters
    ----------
    attr_name : string
        Name of the attribute
    msm_dset : string
        Name of measurement dataset
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product
        Default is 'None' which returns the first available band

    Returns
    -------
    out : scalar or numpy array
        Value of attribute "attr_name"
    """
    if self.__msm_path is None:
        return None

    if band is None:
        band = self.bands[0]

    msm_path = self.__msm_path.replace('%', band)
    ds_path = str(PurePosixPath(msm_path, 'OBSERVATIONS', msm_dset))
    if attr_name in self.fid[ds_path].attrs:
        attr = self.fid[ds_path].attrs[attr_name]
        if isinstance(attr, bytes):
            return attr.decode('ascii')
        return attr

    return None
src/pys5p/l1b_io.py
get_msm_attr
rmvanhees/pys5p
10
python
9fcb1e9e3ba452f2a532bc53cd87e6518ce826367bc1a97f7aecb9a52797adb2
def get_msm_data(self, msm_dset, band=None, fill_as_nan=False,
                 msm_to_row=None):
    """
    Reads data from dataset "msm_dset"

    Parameters
    ----------
    msm_dset : string
        Name of measurement dataset.
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is 'None' which returns both bands (Calibration,
        Irradiance) or one band (Radiance)
    fill_as_nan : boolean
        Set data values equal to the (KNMI) FillValue to NaN
    msm_to_row : None or 'padding'
        Combine two bands using padding, if necessary

    Returns
    -------
    out : values read from or written to dataset "msm_dset"
    """
    fillvalue = float.fromhex('0x1.ep+122')

    if self.__msm_path is None:
        return None

    if band is None:
        band = self.bands
    elif not isinstance(band, str):
        raise TypeError('band must be a string')
    elif band not in self.bands:
        raise ValueError('band not found in product')

    if len(band) == 2 and msm_to_row is None:
        msm_to_row = 'padding'

    data = ()
    for ii in band:
        msm_path = self.__msm_path.replace('%', ii)
        ds_path = str(PurePosixPath(msm_path, 'OBSERVATIONS', msm_dset))
        dset = self.fid[ds_path]

        if fill_as_nan and dset.attrs['_FillValue'] == fillvalue:
            buff = np.squeeze(dset)
            buff[buff == fillvalue] = np.nan
            data += (buff,)
        else:
            data += (np.squeeze(dset),)

    if len(band) == 1:
        return data[0]

    if msm_to_row == 'padding':
        data = pad_rows(data[0], data[1])

    return np.concatenate(data, axis=data[0].ndim - 1)
src/pys5p/l1b_io.py
get_msm_data
rmvanhees/pys5p
10
python
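The band handling in get_msm_data above concatenates paired bands along the last (column) axis. A sketch of that contract, with the class name, select() argument and dataset name made up for illustration:

with L1BioCAL('S5P_OFFL_L1B_CA_example.nc') as l1b:
    l1b.select('BACKGROUND_MODE_0005')         # assumed measurement-group name
    both = l1b.get_msm_data('signal', fill_as_nan=True)   # e.g. bands '7' and '8'
    one = l1b.get_msm_data('signal', band='7')
    # with two equally wide bands the column count simply doubles
    assert both.shape[-1] == 2 * one.shape[-1]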
b6cf5e5f201ec6cae552e060dfd262d3119f99c84312334edae861c78d461ae8
def set_msm_data(self, msm_dset, new_data):
    """
    Replace data of dataset "msm_dset" with new_data

    Parameters
    ----------
    msm_dset : string
        Name of measurement dataset.
    new_data : array-like
        Data to be written with same dimensions as dataset "msm_dset"
    """
    if self.__msm_path is None:
        return

    if not self.__rw:
        raise PermissionError('read/write access required')

    col = 0
    for ii in self.bands:
        msm_path = self.__msm_path.replace('%', ii)
        ds_path = str(PurePosixPath(msm_path, 'OBSERVATIONS', msm_dset))
        dset = self.fid[ds_path]
        dims = dset.shape
        dset[0, ...] = new_data[..., col:col + dims[-1]]
        col += dims[-1]
        self.__patched_msm.append(ds_path)
src/pys5p/l1b_io.py
set_msm_data
rmvanhees/pys5p
10
python
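set_msm_data is the write-side inverse of get_msm_data: it slices new_data column-wise, one band at a time. A roundtrip sketch; the readwrite flag and all names here are assumptions:

with L1BioCAL('S5P_OFFL_L1B_CA_example.nc', readwrite=True) as l1b:
    l1b.select('BACKGROUND_MODE_0005')
    signal = l1b.get_msm_data('signal')
    l1b.set_msm_data('signal', 0.98 * signal)   # write back a 2% correction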
aec7354a16891bf263f0cb4515235eb7ff6bd10114fad2bc9fb21cd3ee3d8af9
def __init__(self, l1b_product):
    """
    Initialize access to a Tropomi offline L1b product
    """
    if not Path(l1b_product).is_file():
        raise FileNotFoundError(f'{l1b_product} does not exist')

    self.filename = l1b_product
    self.fid = h5py.File(l1b_product, 'r')
src/pys5p/l1b_io.py
__init__
rmvanhees/pys5p
10
python
3fe98a939b862bbfeda66c816f8a32ab391f5a19a2a86e4b74d710e34c75127e
def __enter__(self):
    """
    method called to initiate the context manager
    """
    return self
src/pys5p/l1b_io.py
__enter__
rmvanhees/pys5p
10
python
a845abdbeae5f39d06b5d1da3e7bb86971111311cbc47b27bbe00cbf9d7b283f
def __exit__(self, exc_type, exc_value, traceback):
    """
    method called when exiting the context manager
    """
    self.close()
    return False
src/pys5p/l1b_io.py
__exit__
rmvanhees/pys5p
10
python
8642c4962ef77e7d7f9b956ecc65fbe0f6d62ec92cad1b21fbd46ed822e87335
def close(self):
    """
    close access to product
    """
    if self.fid is None:
        return

    self.fid.close()
    self.fid = None
src/pys5p/l1b_io.py
close
rmvanhees/pys5p
10
python
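The four rows above (__init__, __enter__, __exit__, close) together form the standard context-manager pattern for HDF5 access. A minimal generic sketch of the same pattern, not pys5p code:

import h5py

class H5Reader:
    def __init__(self, path):
        self.fid = h5py.File(path, 'r')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False          # do not suppress exceptions raised in the with-block

    def close(self):
        if self.fid is not None:
            self.fid.close()
            self.fid = None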
c47a75ae91a189db08c3e4526b673ae8b09f5412a39402abc4fa6921d82ed12f
def get_attr(self, attr_name):
    """
    Obtain value of an HDF5 file attribute

    Parameters
    ----------
    attr_name : string
        Name of the attribute
    """
    if attr_name not in self.fid.attrs:
        return None

    attr = self.fid.attrs[attr_name]
    if attr.shape is None:
        return None

    return attr
src/pys5p/l1b_io.py
get_attr
rmvanhees/pys5p
10
python
8cebf5becd45e824ffdfbb60aa554c79403787cd0d77670998873d0678ae7c03
def get_orbit(self):
    """
    Returns absolute orbit number
    """
    res = self.get_attr('orbit')
    if res is None:
        return None

    return int(res)
src/pys5p/l1b_io.py
get_orbit
rmvanhees/pys5p
10
python
84fe905dc79d32d37a43761f4e804059ae9d5d3a8732c42f5cc0743099b0eff5
def get_processor_version(self):
    """
    Returns version of the L01b processor
    """
    attr = self.get_attr('processor_version')
    if attr is None:
        return None

    return attr.decode('ascii')
src/pys5p/l1b_io.py
get_processor_version
rmvanhees/pys5p
10
python
b75a6e4230de265a80abf91e3ce7292b442b33ffe29bb6d35ec0bcc384888548
def get_coverage_time(self):
    """
    Returns start and end of the measurement coverage time
    """
    attr_start = self.get_attr('time_coverage_start')
    if attr_start is None:
        return None

    attr_end = self.get_attr('time_coverage_end')
    if attr_end is None:
        return None

    return (attr_start.decode('ascii'), attr_end.decode('ascii'))
src/pys5p/l1b_io.py
get_coverage_time
rmvanhees/pys5p
10
python
052e7187d24235786bf1e3729f0f53bbca9d2be91bdc86a936950954a87b7d46
def get_creation_time(self):
    """
    Returns datetime when the L1b product was created
    """
    grp = self.fid['/METADATA/ESA_METADATA/earth_explorer_header']
    dset = grp['fixed_header/source']
    # 'Creation_Date' is stored as an attribute of the fixed_header/source
    # dataset, so it has to be looked up there
    if 'Creation_Date' in dset.attrs:
        attr = dset.attrs['Creation_Date']
        if isinstance(attr, bytes):
            return attr.decode('ascii')
        return attr

    return None
src/pys5p/l1b_io.py
get_creation_time
rmvanhees/pys5p
10
python
936dbca41789c9158520ee769e3a6574a6a210c3caff4651d787c2667851b90d
def get_ref_time(self):
    """
    Returns reference start time of measurements
    """
    return self.fid['reference_time'][0].astype(int)
src/pys5p/l1b_io.py
get_ref_time
rmvanhees/pys5p
10
python
e00c0d888fadfa63fcbb15bf3759a9d10eef8a1c2032657c0891dea16de5d056
def get_delta_time(self):
    """
    Returns offset from the reference start time of measurement
    """
    return self.fid['/MSMTSET/msmtset']['delta_time'][:].astype(int)
src/pys5p/l1b_io.py
get_delta_time
rmvanhees/pys5p
10
python
1e3976f9e63dea9b291c8e06d2e19a2f5c13629ade4a06d940a1c9108a40f742
def get_msmtset(self):
    """
    Returns L1B_ENG_DB/SATELLITE_INFO/satellite_pos
    """
    return self.fid['/SATELLITE_INFO/satellite_pos'][:]
src/pys5p/l1b_io.py
get_msmtset
rmvanhees/pys5p
10
python
88966e903d64a58757477c96373f7d9f7ff17da476e942ef9c2c3f6dbeaa49d5
def get_msmtset_db(self):
    """
    Returns compressed msmtset from L1B_ENG_DB/MSMTSET/msmtset

    Notes
    -----
    This function is used to fill the SQLite product databases
    """
    dtype_msmt_db = np.dtype([('meta_id', np.int32),
                              ('ic_id', np.uint16),
                              ('ic_version', np.uint8),
                              ('class', np.uint8),
                              ('repeats', np.uint16),
                              ('exp_per_mcp', np.uint16),
                              ('exp_time_us', np.uint32),
                              ('mcp_us', np.uint32),
                              ('delta_time_start', np.int32),
                              ('delta_time_end', np.int32)])

    # read the full measurement-set table and locate the ICID changes
    msmtset = self.fid['/MSMTSET/msmtset'][:]
    icid = msmtset['icid']
    indx = (np.diff(icid) != 0).nonzero()[0] + 1
    indx = np.insert(indx, 0, 0)
    indx = np.append(indx, -1)

    # one compressed entry per constant-ICID block
    msmt = np.zeros(indx.size - 1, dtype=dtype_msmt_db)
    msmt['ic_id'][:] = msmtset['icid'][indx[0:-1]]
    msmt['ic_version'][:] = msmtset['icv'][indx[0:-1]]
    msmt['class'][:] = msmtset['class'][indx[0:-1]]
    msmt['delta_time_start'][:] = msmtset['delta_time'][indx[0:-1]]
    msmt['delta_time_end'][:] = msmtset['delta_time'][indx[1:]]

    # detector timing of the last measurement in each block
    timing = self.fid['/DETECTOR4/timing'][:]
    msmt['mcp_us'][:] = timing['mcp_us'][indx[1:] - 1]
    msmt['exp_time_us'][:] = timing['exp_time_us'][indx[1:] - 1]
    msmt['exp_per_mcp'][:] = timing['exp_per_mcp'][indx[1:] - 1]

    # derive repeat counts from block duration and master-cycle period
    duration = 1000 * (msmt['delta_time_end'] - msmt['delta_time_start'])
    mask = msmt['mcp_us'] > 0
    msmt['repeats'][mask] = (duration[mask]
                             / msmt['mcp_us'][mask]).astype(np.uint16)

    return msmt
src/pys5p/l1b_io.py
get_msmtset_db
rmvanhees/pys5p
10
python
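get_msmtset_db compresses the measurement set by run-length encoding on the ICID column; the block boundaries come from np.diff. The same trick on a toy sequence:

import numpy as np

icid = np.array([2, 2, 2, 5, 5, 9, 9, 9, 9])
indx = (np.diff(icid) != 0).nonzero()[0] + 1   # first index of every new block
indx = np.insert(indx, 0, 0)                   # prepend the start of the first block
print(indx)          # [0 3 5]
print(icid[indx])    # [2 5 9]  -> one database entry per constant-ICID block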
370b6ca40997292535016fcd4a2d687d8151bd3160535b1bb6a73aeb28f1dd16
def get_swir_hk_db(self, stats=None, fill_as_nan=False):
    """
    Returns the most important SWIR housekeeping parameters

    Parameters
    ----------
    stats : {None, 'median', 'range'}
        Return all records (None), their biweight medians ('median')
        or their minima and maxima ('range')
    fill_as_nan : boolean
        Replace (float) FillValues with NaN's, when True

    Notes
    -----
    This function is used to fill the SQLite product database and
    HDF5 monitoring database
    """
    dtype_hk_db = np.dtype([('detector_temp', np.float32),
                            ('grating_temp', np.float32),
                            ('imager_temp', np.float32),
                            ('obm_temp', np.float32),
                            ('calib_unit_temp', np.float32),
                            ('fee_inner_temp', np.float32),
                            ('fee_board_temp', np.float32),
                            ('fee_ref_volt_temp', np.float32),
                            ('fee_video_amp_temp', np.float32),
                            ('fee_video_adc_temp', np.float32),
                            ('detector_heater', np.float32),
                            ('obm_heater_cycle', np.float32),
                            ('fee_box_heater_cycle', np.float32),
                            ('obm_heater', np.float32),
                            ('fee_box_heater', np.float32)])

    num_eng_pkts = self.fid['nr_of_engdat_pkts'].size
    swir_hk = np.empty(num_eng_pkts, dtype=dtype_hk_db)

    hk_tbl = self.fid['/DETECTOR4/DETECTOR_HK/temperature_info'][:]
    swir_hk['detector_temp'] = hk_tbl['temp_det_ts2']
    swir_hk['fee_inner_temp'] = hk_tbl['temp_d1_box']
    swir_hk['fee_board_temp'] = hk_tbl['temp_d5_cold']
    swir_hk['fee_ref_volt_temp'] = hk_tbl['temp_a3_vref']
    swir_hk['fee_video_amp_temp'] = hk_tbl['temp_d6_vamp']
    swir_hk['fee_video_adc_temp'] = hk_tbl['temp_d4_vadc']

    hk_tbl = self.fid['/NOMINAL_HK/TEMPERATURES/hires_temperatures'][:]
    swir_hk['grating_temp'] = hk_tbl['hires_temp_1']

    hk_tbl = self.fid['/NOMINAL_HK/TEMPERATURES/instr_temperatures'][:]
    swir_hk['imager_temp'] = hk_tbl['instr_temp_29']
    swir_hk['obm_temp'] = hk_tbl['instr_temp_28']
    swir_hk['calib_unit_temp'] = hk_tbl['instr_temp_25']

    hk_tbl = self.fid['/DETECTOR4/DETECTOR_HK/heater_data'][:]
    swir_hk['detector_heater'] = hk_tbl['det_htr_curr']

    hk_tbl = self.fid['/NOMINAL_HK/HEATERS/heater_data'][:]
    swir_hk['obm_heater'] = hk_tbl['meas_cur_val_htr12']
    swir_hk['obm_heater_cycle'] = hk_tbl['last_pwm_val_htr12']
    swir_hk['fee_box_heater'] = hk_tbl['meas_cur_val_htr13']
    swir_hk['fee_box_heater_cycle'] = hk_tbl['last_pwm_val_htr13']

    if fill_as_nan:
        for key in dtype_hk_db.names:
            swir_hk[key][swir_hk[key] == 999.0] = np.nan

    if stats is None:
        return swir_hk

    if stats == 'median':
        hk_median = np.empty(1, dtype=dtype_hk_db)
        for key in dtype_hk_db.names:
            if np.all(np.isnan(swir_hk[key])):
                hk_median[key][0] = np.nan
            elif np.nanmin(swir_hk[key]) == np.nanmax(swir_hk[key]):
                hk_median[key][0] = swir_hk[key][0]
            else:
                hk_median[key][0] = biweight(swir_hk[key])
        return hk_median

    if stats == 'range':
        hk_min = np.empty(1, dtype=dtype_hk_db)
        hk_max = np.empty(1, dtype=dtype_hk_db)
        for key in dtype_hk_db.names:
            if np.all(np.isnan(swir_hk[key])):
                hk_min[key][0] = np.nan
                hk_max[key][0] = np.nan
            elif np.nanmin(swir_hk[key]) == np.nanmax(swir_hk[key]):
                hk_min[key][0] = swir_hk[key][0]
                hk_max[key][0] = swir_hk[key][0]
            else:
                hk_min[key][0] = np.nanmin(swir_hk[key])
                hk_max[key][0] = np.nanmax(swir_hk[key])
        return (hk_min, hk_max)

    return None
src/pys5p/l1b_io.py
get_swir_hk_db
rmvanhees/pys5p
10
python
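The stats argument of get_swir_hk_db selects between three return shapes. A usage sketch, where engdb stands for a hypothetical open engineering-database object exposing this method:

hk_all = engdb.get_swir_hk_db(fill_as_nan=True)                    # full record array
hk_med = engdb.get_swir_hk_db(stats='median', fill_as_nan=True)    # one biweight record
hk_min, hk_max = engdb.get_swir_hk_db(stats='range', fill_as_nan=True)
print(hk_med['detector_temp'][0], hk_min['obm_temp'][0], hk_max['obm_temp'][0])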
a8b1c1700715be0c0808e339578ed936e80dbe319b4a5e111421b34ba29440e4
def reset(self):
    """reset game to default"""
    self.actions = []
    self.tie = False
    self.board = [0 for i in range(42)]
    self.player = 1
    self.isDone = self.getIsDone()
    self.ends = []
    self.getAllowedActions()
Clients/pyClient/game.py
reset
JulianWww/AlphaZero
1
python
05af15e7548fe42b2c00452357fc273e233e7eaa8ca4658f6e0e0528e033cc9a
def actionModifier(self, action):
    """for the console client: convert the entered action to the internal game action"""
    for potAction in self.allowedActions:
        if potAction % 7 == action:
            return potAction
    return -1
Clients/pyClient/game.py
actionModifier
JulianWww/AlphaZero
1
python
26e3b18fa87ccbfcb938b89b248ebf44db3cfa9612cf410ac5b23315cfd90e70
@staticmethod
def encodeAction(x, y):
    """convert position to action"""
    return x + 7 * y
Clients/pyClient/game.py
encodeAction
JulianWww/AlphaZero
1
python
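encodeAction flattens board coordinates on the 7-wide grid as x + 7*y; divmod gives the inverse. A self-contained check; decode_action is an illustration, not part of the repository:

def encode_action(x, y):              # mirrors the staticmethod above
    return x + 7 * y

def decode_action(action):            # hypothetical inverse
    y, x = divmod(action, 7)
    return x, y

assert encode_action(3, 2) == 17
assert decode_action(17) == (3, 2)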
9c7a7a4089b30e64be1e79837b7cd46b6398ab00501ca3c00ca10f0d5c4fcc3b
def getAllowedActions(self):
    """get the allowed actions and write them to the allowedActions list"""
    self.allowedActions = []
    for x in range(7):
        if self.board[self.encodeAction(x, 0)] == 0:
            hasPassed = False
            for y in range(5):
                if (self.board[self.encodeAction(x, y)] == 0
                        and self.board[self.encodeAction(x, y + 1)] != 0):
                    self.allowedActions.append(self.encodeAction(x, y))
                    hasPassed = True
                    break
            if not hasPassed:
                self.allowedActions.append(self.encodeAction(x, 5))
Clients/pyClient/game.py
getAllowedActions
JulianWww/AlphaZero
1
python
994343f76c22823e99695c2011ee13a7fde2a899462aa4cbd064e1479ec4ebcc
def takeAction(self, action):
    """if action is valid (in allowedActions) modify game to perform move"""
    if action in self.allowedActions:
        self.actions.append(action)
        self.board[action] = self.player
        self.isDone = self.getIsDone()
        self.player = -self.player
        self.getAllowedActions()
Clients/pyClient/game.py
takeAction
JulianWww/AlphaZero
1
python
f5cba0753bc753831a2211be7a99115d12285d2c709d2c715bce4fe16566d8a7
def consoleRender(self):
    """render state to Console"""
    for y in range(6):
        for x in range(7):
            if self.encodeAction(x, y) in self.allowedActions:
                print('+ ', end='')
            else:
                print(self.pieces[self.board[x + y * 7]], end=' ')
        print('')
    print('')
    print(0, 1, 2, 3, 4, 5, 6)
    print(f'player {self.pieces[self.player]} is up')
Clients/pyClient/game.py
consoleRender
JulianWww/AlphaZero
1
python
2b1f2bb14fba3998502ba38d8afbb8631d6cb4db851f5b550798c3e4fbcf7e13
def toServerProtocol(self):
    """convert to binary int array to send to server"""
    out = [0] * 85
    out[-1] = self.player
    for idx, val in enumerate(self.board):
        if val == 1:
            out[idx] = 1
        elif val == -1:
            out[idx + len(self.board)] = 1
    return out
Clients/pyClient/game.py
toServerProtocol
JulianWww/AlphaZero
1
python
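toServerProtocol emits an 85-integer one-hot layout: 42 slots for player +1's pieces, 42 for player -1's, and a final slot for the player to move. A standalone check of that layout:

board = [0] * 42
board[3], board[10] = 1, -1          # one piece per player

out = [0] * 85
out[-1] = 1                          # player +1 to move
for idx, val in enumerate(board):
    if val == 1:
        out[idx] = 1                 # first 42 slots: player +1
    elif val == -1:
        out[idx + 42] = 1            # next 42 slots: player -1

assert out[3] == 1 and out[52] == 1 and out[84] == 1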
71a9bbd0082b604b0df7dda3c75a5eb43a6a2d0f1922492d100c2009bcc12476
def getIsDone(self):
    """check if game is done"""
    if self.board.count(0) == 0:
        self.tie = True
        return True
    done = False
    for option in winStates:
        val = 0
        for pos in option:
            val += self.board[pos]
        if val == 4 * self.player:
            done = True
            self.ends.append((option[0], option[-1]))
    return done
Clients/pyClient/game.py
getIsDone
JulianWww/AlphaZero
1
python
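getIsDone sums board values over each candidate line in winStates, which is defined elsewhere in game.py and not shown in this row. One plausible way to enumerate those 4-in-a-row index quadruples on the 7x6 board (an assumption, not the repository's code):

winStates = []
for y in range(6):
    for x in range(7):
        for dx, dy in ((1, 0), (0, 1), (1, 1), (1, -1)):
            if 0 <= x + 3 * dx < 7 and 0 <= y + 3 * dy < 6:
                winStates.append([(x + i * dx) + 7 * (y + i * dy) for i in range(4)])
assert len(winStates) == 69   # the standard count of connect-four lines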
bf2626ddb4c44936175d5050d1b53bc4bc1464b5ad83f58037a3caab8a1c09d7
def _initialize_actors(self, config):
    """
    Custom initialization
    """
    self._distance = random.sample(range(10, 80), 3)
    self._distance = sorted(self._distance)
    self._dist_prop = [x - 2 for x in self._distance]

    self.first_location, _ = get_location_in_distance_from_wp(
        self._reference_waypoint, self._dist_prop[0])
    self.second_location, _ = get_location_in_distance_from_wp(
        self._reference_waypoint, self._dist_prop[1])
    self.third_location, _ = get_location_in_distance_from_wp(
        self._reference_waypoint, self._dist_prop[2])

    self.first_transform = carla.Transform(self.first_location)
    self.second_transform = carla.Transform(self.second_location)
    self.third_transform = carla.Transform(self.third_location)
    self.first_transform = carla.Transform(carla.Location(
        self.first_location.x, self.first_location.y, self.first_location.z))
    self.second_transform = carla.Transform(carla.Location(
        self.second_location.x, self.second_location.y, self.second_location.z))
    self.third_transform = carla.Transform(carla.Location(
        self.third_location.x, self.third_location.y, self.third_location.z))

    first_debris = CarlaDataProvider.request_new_actor(
        'static.prop.dirtdebris01', self.first_transform)
    second_debris = CarlaDataProvider.request_new_actor(
        'static.prop.dirtdebris01', self.second_transform)
    third_debris = CarlaDataProvider.request_new_actor(
        'static.prop.dirtdebris01', self.third_transform)

    first_debris.set_transform(self.first_transform)
    second_debris.set_transform(self.second_transform)
    third_debris.set_transform(self.third_transform)

    self.object.extend([first_debris, second_debris, third_debris])
    for debris in self.object:
        debris.set_simulate_physics(False)

    self.other_actors.append(first_debris)
    self.other_actors.append(second_debris)
    self.other_actors.append(third_debris)
core/simulators/srunner/scenarios/control_loss_new.py
_initialize_actors
AkiraHero/DI-drive
219
python
d82deee9067da8db0988ec68c114a06335e1732c916bfe603c980528fb08b2bc
def _create_test_criteria(self):
    """
    A list of all test criteria will be created, which is later used
    in the parallel behavior tree.
    """
    criteria = []
    collision_criterion = CollisionTest(self.ego_vehicles[0])
    criteria.append(collision_criterion)
    return criteria
core/simulators/srunner/scenarios/control_loss_new.py
_create_test_criteria
AkiraHero/DI-drive
219
python
a11bb4b6ff1db3aa3d0113a457c28c65029003f0bef2e3186d0540ece6737aeb
def change_control(self, control):
    """
    This is a function that changes the control based on the scenario determination

    :param control: a carla vehicle control
    :return: a control to be changed by the scenario.
    """
    control.steer += self._current_steer_noise[0]
    control.throttle += self._current_throttle_noise[0]
    return control
core/simulators/srunner/scenarios/control_loss_new.py
change_control
AkiraHero/DI-drive
219
python
cf15b0f862b93c0d598fc2054b8f613860b921f335a9d5a01a9c514ee92f6d77
def __del__(self):
    """
    Remove all actors upon deletion
    """
    self.remove_all_actors()
core/simulators/srunner/scenarios/control_loss_new.py
__del__
AkiraHero/DI-drive
219
python
8fa4ddef3f0c7ba97fa2f631f323c45d5691d312491a01d030e8f30ffd14c612
def generate_markdown(source_file, dest_dir):
    """generates a new html file in the dest directory, returns the name of the
    newly-created file"""
    md = ''
    with open(source_file, 'r') as opened_file:
        md = opened_file.read()
    html = content_to_html(md)
    new_name = os.path.split(source_file)[1].replace('md', 'html')
    new_path = os.path.join(dest_dir, new_name)
    with open(new_path, 'w+') as opened_file:
        opened_file.write(html)
    return new_name
markdown-journal.py
generate_markdown
fire-wally/markdown-notebook
0
python
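A usage sketch for generate_markdown; content_to_html is defined elsewhere in markdown-journal.py and the paths here are made up:

new_name = generate_markdown('journal/2021-01-05.md', 'site')
print(new_name)        # '2021-01-05.html', written into site/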
a4e6b98532dcc61ab440814c25e40fa9bee84282f1b722ba0372b9c0562fd7f2
def makeMeasures(self, network, exclude):
    """Make the network measures"""
    g = network.g
    gu = network.gu
    timings = []

    T = t.time()
    self.N = network.g.number_of_nodes()
    self.E = network.g.number_of_edges()
    self.E_ = network.gu.number_of_edges()
    self.edges = g.edges(data=True)
    self.nodes = g.nodes(data=True)
    timings.append((t.time() - T, 'edges and nodes'))

    T = t.time()
    self.degrees = dict(g.degree())
    self.nodes_ = sorted(g.nodes(), key=lambda x: self.degrees[x])
    self.degrees_ = [self.degrees[i] for i in self.nodes_]
    self.in_degrees = dict(g.in_degree())
    self.in_degrees_ = [self.in_degrees[i] for i in self.nodes_]
    self.out_degrees = dict(g.out_degree())
    self.out_degrees_ = [self.out_degrees[i] for i in self.nodes_]
    timings.append((t.time() - T, 'in_out_total_degrees'))

    T = t.time()
    self.strengths = dict(g.degree(weight='weight'))
    self.nodes__ = sorted(g.nodes(), key=lambda x: self.strengths[x])
    self.strengths_ = [self.strengths[i] for i in self.nodes_]
    self.in_strengths = dict(g.in_degree(weight='weight'))
    self.in_strengths_ = [self.in_strengths[i] for i in self.nodes_]
    self.out_strengths = dict(g.out_degree(weight='weight'))
    self.out_strengths_ = [self.out_strengths[i] for i in self.nodes_]
    timings.append((t.time() - T, 'in_out_total_strengths'))

    # per-node (in - out) / total of degrees (asymmetry) and of strengths
    # (disequilibrium), plus per-edge means and standard deviations
    self.asymmetries = asymmetries = []
    self.disequilibrium = disequilibriums = []
    self.asymmetries_edge_mean = asymmetries_edge_mean = []
    self.asymmetries_edge_std = asymmetries_edge_std = []
    self.disequilibrium_edge_mean = disequilibrium_edge_mean = []
    self.disequilibrium_edge_std = disequilibrium_edge_std = []
    for node in self.nodes_:
        if not self.degrees[node]:
            asymmetries.append(0.)
            disequilibriums.append(0.)
            asymmetries_edge_mean.append(0.)
            asymmetries_edge_std.append(0.)
            disequilibrium_edge_mean.append(0.)
            disequilibrium_edge_std.append(0.)
        else:
            asymmetries.append((self.in_degrees[node] - self.out_degrees[node])
                               / self.degrees[node])
            disequilibriums.append((self.in_strengths[node] - self.out_strengths[node])
                                   / self.strengths[node])
            edge_asymmetries = ea = []
            edge_disequilibriums = ed = []
            predecessors = list(g.predecessors(node))
            successors = list(g.successors(node))
            for pred in predecessors:
                if pred in successors:
                    ea.append(0.)
                    ed.append((g[pred][node]['weight'] - g[node][pred]['weight'])
                              / self.strengths[node])
                else:
                    ea.append(1.)
                    ed.append(g[pred][node]['weight'] / self.strengths[node])
            for suc in successors:
                if suc not in predecessors:
                    ea.append(-1.)
                    ed.append(-g[node][suc]['weight'] / self.strengths[node])
            asymmetries_edge_mean.append(n.mean(ea))
            asymmetries_edge_std.append(n.std(ea))
            disequilibrium_edge_mean.append(n.mean(ed))
            disequilibrium_edge_std.append(n.std(ed))

    if 'weighted_directed_betweenness' not in exclude:
        T = t.time()
        self.weighted_directed_betweenness = x.betweenness_centrality(
            g, weight='weight')
        self.weighted_directed_betweenness_ = [
            self.weighted_directed_betweenness[i] for i in self.nodes_]
        timings.append((t.time() - T, 'weighted_directed_betweenness'))
    if 'unweighted_directed_betweenness' not in exclude:
        T = t.time()
        self.unweighted_directed_betweenness = x.betweenness_centrality(g)
        timings.append((t.time() - T, 'unweighted_directed_betweenness'))
    if 'weighted_undirected_betweenness' not in exclude:
        T = t.time()
        self.weighted_undirected_betweenness = x.betweenness_centrality(
            gu, weight='weight')
        timings.append((t.time() - T, 'weighted_undirected_betweenness'))
    if 'unweighted_undirected_betweenness' not in exclude:
        T = t.time()
        self.unweighted_undirected_betweenness = x.betweenness_centrality(gu)
        timings.append((t.time() - T, 'unweighted_undirected_betweenness'))
    if 'wiener' not in exclude:
        T = t.time()
        self.wiener = x.wiener_index(g, weight='weight')
        timings.append((t.time() - T, 'wiener'))
    if 'closeness' not in exclude:
        T = t.time()
        self.closeness = x.vitality.closeness_vitality(g, weight='weight')
        timings.append((t.time() - T, 'closeness'))
    if 'transitivity' not in exclude:
        T = t.time()
        self.transitivity = x.transitivity(g)
        timings.append((t.time() - T, 'transitivity'))
    if 'rich_club' not in exclude:
        T = t.time()
        self.rich_club = x.rich_club_coefficient(gu)
        timings.append((t.time() - T, 'rich_club'))
    if 'weighted_clustering' not in exclude:
        T = t.time()
        self.weighted_clusterings = x.clustering(network.gu, weight='weight')
        self.weighted_clusterings_ = [
            self.weighted_clusterings[i] for i in self.nodes_]
        timings.append((t.time() - T, 'weighted_clustering'))
    if 'clustering' not in exclude:
        T = t.time()
        self.clusterings = x.clustering(network.gu)
        self.clusterings_ = [self.clusterings[i] for i in self.nodes_]
        timings.append((t.time() - T, 'clustering'))
    if 'triangles' not in exclude:
        T = t.time()
        self.triangles = x.triangles(gu)
        timings.append((t.time() - T, 'triangles'))
    if 'n_weakly_connected_components' not in exclude:
        T = t.time()
        self.n_weakly_connected_components = \
            x.number_weakly_connected_components(g)
        timings.append((t.time() - T, 'n_weakly_connected_components'))
    if 'n_strongly_connected_components' not in exclude:
        T = t.time()
        self.n_strongly_connected_components = \
            x.number_strongly_connected_components(g)
        timings.append((t.time() - T, 'n_strongly_connected_components'))

    # largest connected component of the undirected graph
    T = t.time()
    foo = [i for i in x.connected_component_subgraphs(gu)]
    bar = sorted(foo, key=lambda x: x.number_of_nodes(), reverse=True)
    self.component = c = bar[0]
    timings.append((t.time() - T, 'component'))

    T = t.time()
    self.diameter = x.diameter(c)
    self.radius = x.radius(c)
    self.center = x.center(c)
    self.periphery = x.periphery(c)
    timings.append((t.time() - T, 'radius_diameter_center_periphery'))
    self.timings = timings

    T = t.time()
    self.n_connected_components = x.number_connected_components(gu)
    nodes = []
    nodes_components = [foo.nodes() for foo in
                        x.connected_component_subgraphs(gu)][:1]
    for nodes_ in nodes_components:
        nodes += nodes_
    self.periphery_ = nodes
    self.timings = timings
Make the network measures
gmaneLegacy/networkMeasures.py
makeMeasures
ttm/gmaneLegacy
1
python
def makeMeasures(self, network, exclude): g = network.g gu = network.gu timings = [] T = t.time() self.N = network.g.number_of_nodes() self.E = network.g.number_of_edges() self.E_ = network.gu.number_of_edges() self.edges = g.edges(data=True) self.nodes = g.nodes(data=True) timings.append(((t.time() - T), 'edges and nodes')) T = t.time() self.degrees = dict(g.degree()) self.nodes_ = sorted(g.nodes(), key=(lambda x: self.degrees[x])) self.degrees_ = [self.degrees[i] for i in self.nodes_] self.in_degrees = dict(g.in_degree()) self.in_degrees_ = [self.in_degrees[i] for i in self.nodes_] self.out_degrees = dict(g.out_degree()) self.out_degrees_ = [self.out_degrees[i] for i in self.nodes_] timings.append(((t.time() - T), 'in_out_total_degrees')) T = t.time() self.strengths = dict(g.degree(weight='weight')) self.nodes__ = sorted(g.nodes(), key=(lambda x: self.strengths[x])) self.strengths_ = [self.strengths[i] for i in self.nodes_] self.in_strengths = dict(g.in_degree(weight='weight')) self.in_strengths_ = [self.in_strengths[i] for i in self.nodes_] self.out_strengths = dict(g.out_degree(weight='weight')) self.out_strengths_ = [self.out_strengths[i] for i in self.nodes_] timings.append(((t.time() - T), 'in_out_total_strengths')) self.asymmetries = asymmetries = [] self.disequilibrium = disequilibriums = [] self.asymmetries_edge_mean = asymmetries_edge_mean = [] self.asymmetries_edge_std = asymmetries_edge_std = [] self.disequilibrium_edge_mean = disequilibrium_edge_mean = [] self.disequilibrium_edge_std = disequilibrium_edge_std = [] for node in self.nodes_: if (not self.degrees[node]): asymmetries.append(0.0) disequilibriums.append(0.0) asymmetries_edge_mean.append(0.0) asymmetries_edge_std.append(0.0) disequilibrium_edge_mean.append(0.0) disequilibrium_edge_std.append(0.0) else: asymmetries.append(((self.in_degrees[node] - self.out_degrees[node]) / self.degrees[node])) disequilibriums.append(((self.in_strengths[node] - self.out_strengths[node]) / self.strengths[node])) edge_asymmetries = ea = [] edge_disequilibriums = ed = [] predecessors = g.predecessors(node) successors = g.successors(node) for pred in predecessors: if (pred in successors): ea.append(0.0) ed.append(((g[pred][node]['weight'] - g[node][pred]['weight']) / self.strengths[node])) else: ea.append(1.0) ed.append((g[pred][node]['weight'] / self.strengths[node])) for suc in successors: if (suc in predecessors): pass else: ea.append((- 1.0)) ed.append(((- g[node][suc]['weight']) / self.strengths[node])) asymmetries_edge_mean.append(n.mean(ea)) asymmetries_edge_std.append(n.std(ea)) disequilibrium_edge_mean.append(n.mean(ed)) disequilibrium_edge_std.append(n.std(ed)) if ('weighted_directed_betweenness' not in exclude): T = t.time() self.weighted_directed_betweenness = x.betweenness_centrality(g, weight='weight') self.weighted_directed_betweenness_ = [self.weighted_directed_betweenness[i] for i in self.nodes_] timings.append(((t.time() - T), 'weighted_directed_betweenness')) if ('unweighted_directed_betweenness' not in exclude): T = t.time() self.unweighted_directed_betweenness = x.betweenness_centrality(g) timings.append(((t.time() - T), 'unweighted_directed_betweenness')) if ('weighted_undirected_betweenness' not in exclude): T = t.time() self.weighted_undirected_betweenness = x.betweenness_centrality(gu, weight='weight') timings.append(((t.time() - T), 'weighted_undirected_betweenness')) if ('unweighted_undirected_betweenness' not in exclude): T = t.time() self.unweighted_undirected_betweenness = x.betweenness_centrality(gu) timings.append(((t.time() - T), 'unweighted_undirected_betweenness')) if ('wiener' not in exclude): T = t.time() self.wiener = x.wiener_index(g, weight='weight') timings.append(((t.time() - T), 'wiener')) if ('closeness' not in exclude): T = t.time() self.closeness = x.vitality.closeness_vitality(g, weight='weight') timings.append(((t.time() - T), 'closeness')) if ('transitivity' not in exclude): T = t.time() self.transitivity = x.transitivity(g) timings.append(((t.time() - T), 'transitivity')) if ('rich_club' not in exclude): T = t.time() self.rich_club = x.rich_club_coefficient(gu) timings.append(((t.time() - T), 'rich_club')) if ('weighted_clustering' not in exclude): T = t.time() self.weighted_clusterings = x.clustering(network.gu, weight='weight') self.weighted_clusterings_ = [self.weighted_clusterings[i] for i in self.nodes_] timings.append(((t.time() - T), 'weighted_clustering')) if ('clustering' not in exclude): T = t.time() self.clusterings = x.clustering(network.gu) self.clusterings_ = [self.clusterings[i] for i in self.nodes_] timings.append(((t.time() - T), 'clustering')) if ('triangles' not in exclude): T = t.time() self.triangles = x.triangles(gu) timings.append(((t.time() - T), 'triangles')) if ('n_weakly_connected_components' not in exclude): T = t.time() self.n_weakly_connected_components = x.number_weakly_connected_components(g) timings.append(((t.time() - T), 'n_weakly_connected_components')) if ('n_strongly_connected_components' not in exclude): T = t.time() self.n_strongly_connected_components = x.number_strongly_connected_components(g) timings.append(((t.time() - T), 'n_strongly_connected_components')) T = t.time() foo = [i for i in x.connected_component_subgraphs(gu)] bar = sorted(foo, key=(lambda x: x.number_of_nodes()), reverse=True) self.component = c = bar[0] timings.append(((t.time() - T), 'component')) T = t.time() self.diameter = x.diameter(c) self.radius = x.radius(c) self.center = x.center(c) self.periphery = x.periphery(c) timings.append(((t.time() - T), 'radius_diameter_center_periphery')) self.timings = timings T = t.time() self.n_connected_components = x.number_connected_components(gu) nodes = [] nodes_components = [foo.nodes() for foo in x.connected_component_subgraphs(gu)][:1] for nodes_ in nodes_components: nodes += nodes_ self.periphery_ = nodes self.timings = timings
def makeMeasures(self, network, exclude): g = network.g gu = network.gu timings = [] T = t.time() self.N = network.g.number_of_nodes() self.E = network.g.number_of_edges() self.E_ = network.gu.number_of_edges() self.edges = g.edges(data=True) self.nodes = g.nodes(data=True) timings.append(((t.time() - T), 'edges and nodes')) T = t.time() self.degrees = dict(g.degree()) self.nodes_ = sorted(g.nodes(), key=(lambda x: self.degrees[x])) self.degrees_ = [self.degrees[i] for i in self.nodes_] self.in_degrees = dict(g.in_degree()) self.in_degrees_ = [self.in_degrees[i] for i in self.nodes_] self.out_degrees = dict(g.out_degree()) self.out_degrees_ = [self.out_degrees[i] for i in self.nodes_] timings.append(((t.time() - T), 'in_out_total_degrees')) T = t.time() self.strengths = dict(g.degree(weight='weight')) self.nodes__ = sorted(g.nodes(), key=(lambda x: self.strengths[x])) self.strengths_ = [self.strengths[i] for i in self.nodes_] self.in_strengths = dict(g.in_degree(weight='weight')) self.in_strengths_ = [self.in_strengths[i] for i in self.nodes_] self.out_strengths = dict(g.out_degree(weight='weight')) self.out_strengths_ = [self.out_strengths[i] for i in self.nodes_] timings.append(((t.time() - T), 'in_out_total_strengths')) self.asymmetries = asymmetries = [] self.disequilibrium = disequilibriums = [] self.asymmetries_edge_mean = asymmetries_edge_mean = [] self.asymmetries_edge_std = asymmetries_edge_std = [] self.disequilibrium_edge_mean = disequilibrium_edge_mean = [] self.disequilibrium_edge_std = disequilibrium_edge_std = [] for node in self.nodes_: if (not self.degrees[node]): asymmetries.append(0.0) disequilibriums.append(0.0) asymmetries_edge_mean.append(0.0) asymmetries_edge_std.append(0.0) disequilibrium_edge_mean.append(0.0) disequilibrium_edge_std.append(0.0) else: asymmetries.append(((self.in_degrees[node] - self.out_degrees[node]) / self.degrees[node])) disequilibriums.append(((self.in_strengths[node] - self.out_strengths[node]) / self.strengths[node])) edge_asymmetries = ea = [] edge_disequilibriums = ed = [] predecessors = g.predecessors(node) successors = g.successors(node) for pred in predecessors: if (pred in successors): ea.append(0.0) ed.append(((g[pred][node]['weight'] - g[node][pred]['weight']) / self.strengths[node])) else: ea.append(1.0) ed.append((g[pred][node]['weight'] / self.strengths[node])) for suc in successors: if (suc in predecessors): pass else: ea.append((- 1.0)) ed.append(((- g[node][suc]['weight']) / self.strengths[node])) asymmetries_edge_mean.append(n.mean(ea)) asymmetries_edge_std.append(n.std(ea)) disequilibrium_edge_mean.append(n.mean(ed)) disequilibrium_edge_std.append(n.std(ed)) if ('weighted_directed_betweenness' not in exclude): T = t.time() self.weighted_directed_betweenness = x.betweenness_centrality(g, weight='weight') self.weighted_directed_betweenness_ = [self.weighted_directed_betweenness[i] for i in self.nodes_] timings.append(((t.time() - T), 'weighted_directed_betweenness')) if ('unweighted_directed_betweenness' not in exclude): T = t.time() self.unweighted_directed_betweenness = x.betweenness_centrality(g) timings.append(((t.time() - T), 'unweighted_directed_betweenness')) if ('weighted_undirected_betweenness' not in exclude): T = t.time() self.weighted_undirected_betweenness = x.betweenness_centrality(gu, weight='weight') timings.append(((t.time() - T), 'weighted_undirected_betweenness')) if ('unweighted_undirected_betweenness' not in exclude): T = t.time() self.unweighted_undirected_betweenness = x.betweenness_centrality(gu) timings.append(((t.time() - T), 'unweighted_undirected_betweenness')) if ('wiener' not in exclude): T = t.time() self.wiener = x.wiener_index(g, weight='weight') timings.append(((t.time() - T), 'wiener')) if ('closeness' not in exclude): T = t.time() self.closeness = x.vitality.closeness_vitality(g, weight='weight') timings.append(((t.time() - T), 'closeness')) if ('transitivity' not in exclude): T = t.time() self.transitivity = x.transitivity(g) timings.append(((t.time() - T), 'transitivity')) if ('rich_club' not in exclude): T = t.time() self.rich_club = x.rich_club_coefficient(gu) timings.append(((t.time() - T), 'rich_club')) if ('weighted_clustering' not in exclude): T = t.time() self.weighted_clusterings = x.clustering(network.gu, weight='weight') self.weighted_clusterings_ = [self.weighted_clusterings[i] for i in self.nodes_] timings.append(((t.time() - T), 'weighted_clustering')) if ('clustering' not in exclude): T = t.time() self.clusterings = x.clustering(network.gu) self.clusterings_ = [self.clusterings[i] for i in self.nodes_] timings.append(((t.time() - T), 'clustering')) if ('triangles' not in exclude): T = t.time() self.triangles = x.triangles(gu) timings.append(((t.time() - T), 'triangles')) if ('n_weakly_connected_components' not in exclude): T = t.time() self.n_weakly_connected_components = x.number_weakly_connected_components(g) timings.append(((t.time() - T), 'n_weakly_connected_components')) if ('n_strongly_connected_components' not in exclude): T = t.time() self.n_strongly_connected_components = x.number_strongly_connected_components(g) timings.append(((t.time() - T), 'n_strongly_connected_components')) T = t.time() foo = [i for i in x.connected_component_subgraphs(gu)] bar = sorted(foo, key=(lambda x: x.number_of_nodes()), reverse=True) self.component = c = bar[0] timings.append(((t.time() - T), 'component')) T = t.time() self.diameter = x.diameter(c) self.radius = x.radius(c) self.center = x.center(c) self.periphery = x.periphery(c) timings.append(((t.time() - T), 'radius_diameter_center_periphery')) self.timings = timings T = t.time() self.n_connected_components = x.number_connected_components(gu) nodes = [] nodes_components = [foo.nodes() for foo in x.connected_component_subgraphs(gu)][:1] for nodes_ in nodes_components: nodes += nodes_ self.periphery_ = nodes self.timings = timings<|docstring|>Make the network measures<|endoftext|>
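The record above leans entirely on networkx and numpy, which the repo aliases as x and n. A minimal self-contained sketch of the same degree/strength/betweenness bookkeeping, on a made-up weighted digraph:

import time as t
import networkx as x  # aliased as in the record
import numpy as n

g = x.DiGraph()
g.add_weighted_edges_from([('a', 'b', 2.0), ('b', 'a', 1.0), ('b', 'c', 3.0)])

T = t.time()
degrees = dict(g.degree())
strengths = dict(g.degree(weight='weight'))
betweenness = x.betweenness_centrality(g, weight='weight')
# node asymmetry as computed in the record: (in_degree - out_degree) / degree
asymmetries = {v: (g.in_degree(v) - g.out_degree(v)) / degrees[v] for v in g}
print(degrees, strengths, betweenness, asymmetries, t.time() - T)

The n.mean/n.std calls over the per-edge lists and the component measures (diameter, radius, center, periphery) follow the same pattern; note that connected_component_subgraphs was removed in networkx 2.4, so recent versions need (gu.subgraph(c) for c in x.connected_components(gu)) instead.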
d7ce5866cc8172df9e6a3fafada155aa2e1d22c5dcf20f776b2ac78516f5f25e
@contextmanager def session_context(self): 'Provide a transactional scope around a series of operations.' session = self.Session() try: (yield session) session.commit() except: session.rollback() raise finally: session.close()
Provide a transactional scope around a series of operations.
models/base.py
session_context
etoews/google-photos-sync-check
4
python
@contextmanager def session_context(self): session = self.Session() try: (yield session) session.commit() except: session.rollback() raise finally: session.close()
@contextmanager def session_context(self): session = self.Session() try: (yield session) session.commit() except: session.rollback() raise finally: session.close()<|docstring|>Provide a transactional scope around a series of operations.<|endoftext|>
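A usage sketch for the context manager above, assuming self.Session is a SQLAlchemy sessionmaker; the Photo model and DB wrapper are hypothetical, not this repo's real classes:

from contextlib import contextmanager
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Photo(Base):  # hypothetical model for illustration
    __tablename__ = 'photos'
    id = Column(Integer, primary_key=True)
    name = Column(String)

class DB:
    def __init__(self):
        engine = create_engine('sqlite://')
        Base.metadata.create_all(engine)
        self.Session = sessionmaker(bind=engine)

    @contextmanager
    def session_context(self):  # same shape as the record above
        session = self.Session()
        try:
            yield session
            session.commit()
        except:
            session.rollback()
            raise
        finally:
            session.close()

db = DB()
with db.session_context() as session:
    session.add(Photo(name='IMG_0001.jpg'))
# commit happens on clean exit; an exception inside the block rolls back instead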
4898f1976ab49a67c209851f4b3ca0a8d50b49411ebf61763ca0d694a6a2c882
def missingNumber(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' sum = reduce((lambda x, y: (x + y)), nums) multiply = ((len(nums) * (len(nums) + 1)) / 2) return (multiply - sum)
:type nums: List[int] :rtype: int
hudeven/P268.py
missingNumber
hudeven/Algorithms
0
python
def missingNumber(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' sum = reduce((lambda x, y: (x + y)), nums) multiply = ((len(nums) * (len(nums) + 1)) / 2) return (multiply - sum)
def missingNumber(self, nums): '\n :type nums: List[int]\n :rtype: int\n ' sum = reduce((lambda x, y: (x + y)), nums) multiply = ((len(nums) * (len(nums) + 1)) / 2) return (multiply - sum)<|docstring|>:type nums: List[int] :rtype: int<|endoftext|>
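The method computes the Gauss sum 0 + 1 + ... + n = n(n + 1)/2 over the expected range and subtracts the actual sum, so the difference is the missing number. On Python 3 the record would also need functools.reduce and, strictly, integer division; a corrected standalone sketch:

from functools import reduce  # reduce is not a builtin on Python 3

def missing_number(nums):
    total = reduce(lambda x, y: x + y, nums)     # sum of the given numbers
    expected = len(nums) * (len(nums) + 1) // 2  # 0 + 1 + ... + n
    return expected - total

assert missing_number([3, 0, 1]) == 2
assert missing_number([0, 1]) == 2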
395792d7c68cb5c7d1fc43a34851fd50fa7ea9fc9b132bc08dd07f2eedace47e
def __init__(self, rag, feature_names): '\n Parameters\n ----------\n rag:\n The rag.\n \n feature_names\n A list of feature names to compute with this accumulator.\n ' pass
Parameters ---------- rag: The rag. feature_names A list of feature names to compute with this accumulator.
ilastikrag/accumulators/base/base_sp_accumulator.py
__init__
ilastik/ilastikrag
0
python
def __init__(self, rag, feature_names): '\n Parameters\n ----------\n rag:\n The rag.\n \n feature_names\n A list of feature names to compute with this accumulator.\n ' pass
def __init__(self, rag, feature_names): '\n Parameters\n ----------\n rag:\n The rag.\n \n feature_names\n A list of feature names to compute with this accumulator.\n ' pass<|docstring|>Parameters ---------- rag: The rag. feature_names A list of feature names to compute with this accumulator.<|endoftext|>
efb34bee3fa5cdc7fe33daf82fb0f8d41617bd09df8cca9b14cc8684bb1847b0
def cleanup(self): '\n Called by the Rag to indicate that processing has completed, and\n the accumulator should discard all cached data and intermediate results.\n Subclasses must reimplement this function.\n ' raise NotImplementedError
Called by the Rag to indicate that processing has completed, and the accumulator should discard all cached data and intermediate results. Subclasses must reimplement this function.
ilastikrag/accumulators/base/base_sp_accumulator.py
cleanup
ilastik/ilastikrag
0
python
def cleanup(self): '\n Called by the Rag to indicate that processing has completed, and\n the accumulator should discard all cached data and intermediate results.\n Subclasses must reimplement this function.\n ' raise NotImplementedError
def cleanup(self): '\n Called by the Rag to indicate that processing has completed, and\n the accumulator should discard all cached data and intermediate results.\n Subclasses must reimplement this function.\n ' raise NotImplementedError<|docstring|>Called by the Rag to indicate that processing has completed, and the accumulator should discard all cached data and intermediate results. Subclasses must reimplement this function.<|endoftext|>
c48d904eaebbbc29010d6204f2a63e8352fd53a0e8ce1b7beddfa247d25d9f16
@classmethod def supported_features(cls, rag): '\n Returns the list of feature names that can be computed for the given Rag.\n ' raise NotImplementedError
Returns the list of feature names that can be computed for the given Rag.
ilastikrag/accumulators/base/base_sp_accumulator.py
supported_features
ilastik/ilastikrag
0
python
@classmethod def supported_features(cls, rag): '\n \n ' raise NotImplementedError
@classmethod def supported_features(cls, rag): '\n \n ' raise NotImplementedError<|docstring|>Returns the list of feature names that can be computed for the given Rag.<|endoftext|>
55a29d9ade8897d3e27130b76226b11973e5748a3cb8690b5ee9a9895a961746
def ingest_values(self, rag, value_img): '\n Ingest the given (single-channel) pixel values, using the superpixels stored in ``rag.label_img``.\n \n Parameters\n ----------\n rag\n *Rag*\n \n value_img\n *VigraArray*, same shape as ``rag.label_img``\n ' raise NotImplementedError
Ingest the given (single-channel) pixel values, using the superpixels stored in ``rag.label_img``. Parameters ---------- rag *Rag* value_img *VigraArray*, same shape as ``rag.label_img``
ilastikrag/accumulators/base/base_sp_accumulator.py
ingest_values
ilastik/ilastikrag
0
python
def ingest_values(self, rag, value_img): '\n Ingest the given (single-channel) pixel values, using the superpixels stored in ``rag.label_img``.\n \n Parameters\n ----------\n rag\n *Rag*\n \n value_img\n *VigraArray*, same shape as ``rag.label_img``\n ' raise NotImplementedError
def ingest_values(self, rag, value_img): '\n Ingest the given (single-channel) pixel values, using the superpixels stored in ``rag.label_img``.\n \n Parameters\n ----------\n rag\n *Rag*\n \n value_img\n *VigraArray*, same shape as ``rag.label_img``\n ' raise NotImplementedError<|docstring|>Ingest the given (single-channel) pixel values, using the superpixels stored in ``rag.label_img``. Parameters ---------- rag *Rag* value_img *VigraArray*, same shape as ``rag.label_img``<|endoftext|>
8796f9fcf7e362a0b6e109ac1e50ee991d642c8f0e90fb93dcda4fe8335a138c
def append_edge_features_to_df(self, edge_df): '\n Called by the Rag after ``ingest_values()``.\n\n Merges the features of ingested data into a final set of edge\n feature columns, and appends those columns to the given\n ``pandas.DataFrame`` object.\n \n This involves converting pairs superpixel features into edge features,\n typically by taking the sum and/or difference between the features of\n each superpixel in an adjacent pair.\n ' raise NotImplementedError
Called by the Rag after ``ingest_values()``. Merges the features of ingested data into a final set of edge feature columns, and appends those columns to the given ``pandas.DataFrame`` object. This involves converting pairs superpixel features into edge features, typically by taking the sum and/or difference between the features of each superpixel in an adjacent pair.
ilastikrag/accumulators/base/base_sp_accumulator.py
append_edge_features_to_df
ilastik/ilastikrag
0
python
def append_edge_features_to_df(self, edge_df): '\n Called by the Rag after ``ingest_values()``.\n\n Merges the features of ingested data into a final set of edge\n feature columns, and appends those columns to the given\n ``pandas.DataFrame`` object.\n \n This involves converting pairs superpixel features into edge features,\n typically by taking the sum and/or difference between the features of\n each superpixel in an adjacent pair.\n ' raise NotImplementedError
def append_edge_features_to_df(self, edge_df): '\n Called by the Rag after ``ingest_values()``.\n\n Merges the features of ingested data into a final set of edge\n feature columns, and appends those columns to the given\n ``pandas.DataFrame`` object.\n \n This involves converting pairs superpixel features into edge features,\n typically by taking the sum and/or difference between the features of\n each superpixel in an adjacent pair.\n ' raise NotImplementedError<|docstring|>Called by the Rag after ``ingest_values()``. Merges the features of ingested data into a final set of edge feature columns, and appends those columns to the given ``pandas.DataFrame`` object. This involves converting pairs superpixel features into edge features, typically by taking the sum and/or difference between the features of each superpixel in an adjacent pair.<|endoftext|>
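Taken together, the five records above (__init__, cleanup, supported_features, ingest_values, append_edge_features_to_df) define the BaseSpAccumulator contract: accumulate per-superpixel statistics, then turn superpixel pairs into edge features. A toy subclass sketch; the feature names, the 'sp1'/'sp2' columns, and the stand-in rag are illustrative assumptions, not ilastikrag's real accumulators:

import numpy as np
import pandas as pd
from types import SimpleNamespace

class MeanSpAccumulator:  # would subclass BaseSpAccumulator in the real package
    def __init__(self, rag, feature_names):
        self._feature_names = feature_names
        self._sp_means = None

    @classmethod
    def supported_features(cls, rag):
        return ['sp_mean_sum', 'sp_mean_difference']

    def ingest_values(self, rag, value_img):
        # mean pixel value per superpixel label
        labels = np.asarray(rag.label_img).ravel()
        values = np.asarray(value_img).ravel()
        sums = np.bincount(labels, weights=values)
        counts = np.bincount(labels)
        self._sp_means = sums / np.maximum(counts, 1)

    def append_edge_features_to_df(self, edge_df):
        # pair of superpixel features -> edge features via sum and difference
        u = self._sp_means[edge_df['sp1'].values]
        v = self._sp_means[edge_df['sp2'].values]
        edge_df['sp_mean_sum'] = u + v
        edge_df['sp_mean_difference'] = np.abs(u - v)

    def cleanup(self):
        self._sp_means = None

rag = SimpleNamespace(label_img=np.array([[0, 0], [1, 1]]))  # stand-in rag
acc = MeanSpAccumulator(rag, ['sp_mean_sum', 'sp_mean_difference'])
acc.ingest_values(rag, np.array([[1.0, 3.0], [5.0, 7.0]]))
edge_df = pd.DataFrame({'sp1': [0], 'sp2': [1]})
acc.append_edge_features_to_df(edge_df)  # sum = 8.0, difference = 4.0
acc.cleanup()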
e5f3d3638cb8576e402a26760c9f80ba80a1356a4e45e5f164b76794c1df3da2
def add(self, predictions: List[float], targets: List[float]): '\n Function adds predictions and targets for computation of regression metrics.\n\n Args:\n predictions (List[float]):\n targets (List[float]):\n\n Raises:\n NotImplementedError: in case targets do not fall into continuous support\n ValueError: in case of missing validation or predictions\n ' tgt_type = type_of_target(targets) if (tgt_type not in 'continuous'): raise NotImplementedError(f'target type: {tgt_type} not supported for these metrics') for (idx, target) in enumerate(targets): self.sum_abs_diff += abs((predictions[idx] - target)) self.sum_diff += (predictions[idx] - target) self.sum2_diff += ((predictions[idx] - target) ** 2) self.count += 1
Function adds predictions and targets for computation of regression metrics. Args: predictions (List[float]): targets (List[float]): Raises: NotImplementedError: in case targets do not fall into continuous support ValueError: in case of missing validation or predictions
src/whylogs/core/metrics/regression_metrics.py
add
valer-whylabs/whylogs
0
python
def add(self, predictions: List[float], targets: List[float]): '\n Function adds predictions and targets for computation of regression metrics.\n\n Args:\n predictions (List[float]):\n targets (List[float]):\n\n Raises:\n NotImplementedError: in case targets do not fall into continuous support\n ValueError: in case of missing validation or predictions\n ' tgt_type = type_of_target(targets) if (tgt_type not in 'continuous'): raise NotImplementedError(f'target type: {tgt_type} not supported for these metrics') for (idx, target) in enumerate(targets): self.sum_abs_diff += abs((predictions[idx] - target)) self.sum_diff += (predictions[idx] - target) self.sum2_diff += ((predictions[idx] - target) ** 2) self.count += 1
def add(self, predictions: List[float], targets: List[float]): '\n Function adds predictions and targets for computation of regression metrics.\n\n Args:\n predictions (List[float]):\n targets (List[float]):\n\n Raises:\n NotImplementedError: in case targets do not fall into continuous support\n ValueError: in case of missing validation or predictions\n ' tgt_type = type_of_target(targets) if (tgt_type not in 'continuous'): raise NotImplementedError(f'target type: {tgt_type} not supported for these metrics') for (idx, target) in enumerate(targets): self.sum_abs_diff += abs((predictions[idx] - target)) self.sum_diff += (predictions[idx] - target) self.sum2_diff += ((predictions[idx] - target) ** 2) self.count += 1<|docstring|>Function adds predictions and targets for computation of regression metrics. Args: predictions (List[float]): targets (List[float]): Raises: NotImplementedError: in case targets do not fall into continuous support ValueError: in case of missing validation or predictions<|endoftext|>
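From the three running sums kept by add(), the usual error metrics fall out in O(1); the accessor names below are mine, not necessarily whylogs' real API:

import math
from types import SimpleNamespace

def derived_metrics(m):
    mae = m.sum_abs_diff / m.count   # mean absolute error
    bias = m.sum_diff / m.count      # mean signed error
    mse = m.sum2_diff / m.count      # mean squared error
    return mae, bias, mse, math.sqrt(mse)

m = SimpleNamespace(count=4, sum_abs_diff=2.0, sum_diff=-1.0, sum2_diff=1.5)
print(derived_metrics(m))  # (0.5, -0.25, 0.375, 0.612...)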
68062f36a1d11a2aa562c006875de139cb9065c0f7b7ea744974cce09c0569c4
def merge(self, other): '\n Merge two separate regression metrics which may or may not overlap in labels.\n\n Args:\n other : regression metrics to merge with self\n Returns:\n RegressionMetrics: merged regression metrics\n ' if (other is None): return self if (self.count == 0): return other if (other.count == 0): return self if (self.prediction_field != other.prediction_field): raise ValueError('prediction fields differ') if (self.target_field != other.target_field): raise ValueError('target fields differ') new_reg = RegressionMetrics(prediction_field=self.prediction_field, target_field=self.target_field) new_reg.count = (self.count + other.count) new_reg.sum_abs_diff = (self.sum_abs_diff + other.sum_abs_diff) new_reg.sum_diff = (self.sum_diff + other.sum_diff) new_reg.sum2_diff = (self.sum2_diff + other.sum2_diff) return new_reg
Merge two separate regression metrics which may or may not overlap in labels. Args: other : regression metrics to merge with self Returns: RegressionMetrics: merged regression metrics
src/whylogs/core/metrics/regression_metrics.py
merge
valer-whylabs/whylogs
0
python
def merge(self, other): '\n Merge two separate regression metrics which may or may not overlap in labels.\n\n Args:\n other : regression metrics to merge with self\n Returns:\n RegressionMetrics: merged regression metrics\n ' if (other is None): return self if (self.count == 0): return other if (other.count == 0): return self if (self.prediction_field != other.prediction_field): raise ValueError('prediction fields differ') if (self.target_field != other.target_field): raise ValueError('target fields differ') new_reg = RegressionMetrics(prediction_field=self.prediction_field, target_field=self.target_field) new_reg.count = (self.count + other.count) new_reg.sum_abs_diff = (self.sum_abs_diff + other.sum_abs_diff) new_reg.sum_diff = (self.sum_diff + other.sum_diff) new_reg.sum2_diff = (self.sum2_diff + other.sum2_diff) return new_reg
def merge(self, other): '\n Merge two separate regression metrics which may or may not overlap in labels.\n\n Args:\n other : regression metrics to merge with self\n Returns:\n RegressionMetrics: merged regression metrics\n ' if (other is None): return self if (self.count == 0): return other if (other.count == 0): return self if (self.prediction_field != other.prediction_field): raise ValueError('prediction fields differ') if (self.target_field != other.target_field): raise ValueError('target fields differ') new_reg = RegressionMetrics(prediction_field=self.prediction_field, target_field=self.target_field) new_reg.count = (self.count + other.count) new_reg.sum_abs_diff = (self.sum_abs_diff + other.sum_abs_diff) new_reg.sum_diff = (self.sum_diff + other.sum_diff) new_reg.sum2_diff = (self.sum2_diff + other.sum2_diff) return new_reg<|docstring|>Merge two separate regression metrics which may or may not overlap in labels. Args: other : regression metrics to merge with self Returns: RegressionMetrics: merged regression metrics<|endoftext|>
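Because merging just sums the accumulators, metrics computed shard-by-shard and merged equal metrics over the concatenated data. A quick sanity sketch, assuming the import path matches the record's file and that fresh instances start their sums at zero, as merge() implies:

from whylogs.core.metrics.regression_metrics import RegressionMetrics

a = RegressionMetrics(prediction_field='pred', target_field='y')
a.add([1.0, 2.0], [1.5, 2.0])   # sum_abs_diff = 0.5
b = RegressionMetrics(prediction_field='pred', target_field='y')
b.add([3.0], [2.5])             # sum_abs_diff = 0.5
merged = a.merge(b)
assert merged.count == 3
assert merged.sum_abs_diff == a.sum_abs_diff + b.sum_abs_diff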
8373133f03ecaa0e7f4ebaf61a2db0d675611c5d974e57b7012354e0c18a6ec0
def to_protobuf(self): '\n Convert to protobuf\n\n Returns:\n TYPE: Protobuf Message\n ' return RegressionMetricsMessage(prediction_field=self.prediction_field, target_field=self.target_field, count=self.count, sum_abs_diff=self.sum_abs_diff, sum_diff=self.sum_diff, sum2_diff=self.sum2_diff)
Convert to protobuf Returns: TYPE: Protobuf Message
src/whylogs/core/metrics/regression_metrics.py
to_protobuf
valer-whylabs/whylogs
0
python
def to_protobuf(self): '\n Convert to protobuf\n\n Returns:\n TYPE: Protobuf Message\n ' return RegressionMetricsMessage(prediction_field=self.prediction_field, target_field=self.target_field, count=self.count, sum_abs_diff=self.sum_abs_diff, sum_diff=self.sum_diff, sum2_diff=self.sum2_diff)
def to_protobuf(self): '\n Convert to protobuf\n\n Returns:\n TYPE: Protobuf Message\n ' return RegressionMetricsMessage(prediction_field=self.prediction_field, target_field=self.target_field, count=self.count, sum_abs_diff=self.sum_abs_diff, sum_diff=self.sum_diff, sum2_diff=self.sum2_diff)<|docstring|>Convert to protobuf Returns: TYPE: Protobuf Message<|endoftext|>
08a6565d4b6bd3e83fe192e71f5e3fbd7e1028fada16aae66ca6d579027fc8ee
def load(self): 'load config with default mutable setting loaded from self._overload.\n\n Default is not overriding env vars\n ' self.silo = self.loader.load(override=self._overload) return self.silo
load config with default mutable setting loaded from self._overload. Default is not overriding env vars
dotenv/env.py
load
widnyana/py-dotenv
1
python
def load(self): 'load config with default mutable setting loaded from self._overload.\n\n Default is not overriding env vars\n ' self.silo = self.loader.load(override=self._overload) return self.silo
def load(self): 'load config with default mutable setting loaded from self._overload.\n\n Default is not overriding env vars\n ' self.silo = self.loader.load(override=self._overload) return self.silo<|docstring|>load config with default mutable setting loaded from self._overload. Default is not overriding env vars<|endoftext|>
d04bfec066589578cff5da895b558849575c75410f00a59bda8edf12718b7963
def override(self): 'load config and override os environment variable' self.silo = self.loader.load(override=False) return self.silo
load config and override os environment variable
dotenv/env.py
override
widnyana/py-dotenv
1
python
def override(self): self.silo = self.loader.load(override=False) return self.silo
def override(self): self.silo = self.loader.load(override=False) return self.silo<|docstring|>load config and override os environment variable<|endoftext|>
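Since the Env wrapper hosting load()/override() is not shown in the records, here is a self-contained analogue of the load-versus-override semantics with a plain dict standing in for the parsed .env file:

import os

def load_dotenv(parsed, override=False):
    # parsed: dict read from a .env file; override decides who wins on conflicts
    for key, value in parsed.items():
        if override or key not in os.environ:
            os.environ[key] = value

os.environ['MODE'] = 'prod'
load_dotenv({'MODE': 'dev', 'DEBUG': '1'})   # keeps MODE=prod, adds DEBUG
load_dotenv({'MODE': 'dev'}, override=True)  # now MODE=dev
print(os.environ['MODE'], os.environ['DEBUG'])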
50afcdcf52a020236e3690e98cf593194ba92ad6fa0668f3dfc440dbea428ca1
async def send(self, message): "\n Simulates sending a message by appending a message object to the\n channel's log list.\n " msg_obj = MockMessage(random.randint(1, 1000), message) self._log.append(msg_obj) return msg_obj
Simulates sending a message by appending a message object to the channel's log list.
tests/mocks.py
send
enkwolf/ITEE-discord-bot
0
python
async def send(self, message): "\n Simulates sending a message by appending a message object to the\n channel's log list.\n " msg_obj = MockMessage(random.randint(1, 1000), message) self._log.append(msg_obj) return msg_obj
async def send(self, message): "\n Simulates sending a message by appending a message object to the\n channel's log list.\n " msg_obj = MockMessage(random.randint(1, 1000), message) self._log.append(msg_obj) return msg_obj<|docstring|>Simulates sending a message by appending a message object to the channel's log list.<|endoftext|>
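A test-side sketch of the mock; MockMessage's fields are an assumption, since only its constructor call appears in the record:

import asyncio
import random

class MockMessage:  # minimal stand-in matching how send() constructs it
    def __init__(self, msg_id, content):
        self.id = msg_id
        self.content = content

class MockChannel:
    def __init__(self):
        self._log = []

    async def send(self, message):  # same shape as the record above
        msg_obj = MockMessage(random.randint(1, 1000), message)
        self._log.append(msg_obj)
        return msg_obj

async def main():
    channel = MockChannel()
    msg = await channel.send('hello')
    assert channel._log[-1] is msg  # the message landed in the channel log

asyncio.run(main())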
8423c3ece564ff5c371e08d33f72675462d254cd1f44bfaa09518aee60635b41
def create_role(self, role_id): '\n Creates and returns a new mock role object with the given ID. The role\n is stored internally with its ID in the roles dictionary.\n \n * role_id (int) - ID number for the new role\n ' role = MockRole(role_id) self._roles[role_id] = role return role
Creates and returns a new mock role object with the given ID. The role is stored internally with its ID in the roles dictionary. * role_id (int) - ID number for the new role
tests/mocks.py
create_role
enkwolf/ITEE-discord-bot
0
python
def create_role(self, role_id): '\n Creates and returns a new mock role object with the given ID. The role\n is stored internally with its ID in the roles dictionary.\n \n * role_id (int) - ID number for the new role\n ' role = MockRole(role_id) self._roles[role_id] = role return role
def create_role(self, role_id): '\n Creates and returns a new mock role object with the given ID. The role\n is stored internally with its ID in the roles dictionary.\n \n * role_id (int) - ID number for the new role\n ' role = MockRole(role_id) self._roles[role_id] = role return role<|docstring|>Creates and returns a new mock role object with the given ID. The role is stored internally with its ID in the roles dictionary. * role_id (int) - ID number for the new role<|endoftext|>
b9acd1bba66614ce2ed9f03e51b627fef59a51c4a19587b85e82629d585455c5
def create_member(self, user_id): '\n Creates and returns a mock member object with the given ID. The member is\n stored internally with its ID in the members dictionary.\n \n * user_id (int) - ID number for the new member\n ' member = MockMember(user_id) self._members[user_id] = member return member
Creates and returns a mock member object with the given ID. The member is stored internally with its ID in the members dictionary. * user_id (int) - ID number for the new member
tests/mocks.py
create_member
enkwolf/ITEE-discord-bot
0
python
def create_member(self, user_id): '\n Creates and returns a mock member object with the given ID. The member is\n stored internally with its ID in the members dictionary.\n \n * user_id (int) - ID number for the new member\n ' member = MockMember(user_id) self._members[user_id] = member return member
def create_member(self, user_id): '\n Creates and returns a mock member object with the given ID. The member is\n stored internally with its ID in the members dictionary.\n \n * user_id (int) - ID number for the new member\n ' member = MockMember(user_id) self._members[user_id] = member return member<|docstring|>Creates and returns a mock member object with the given ID. The member is stored internally with its ID in the members dictionary. * user_id (int) - ID number for the new member<|endoftext|>
91cfd3912536595f6b1ec9f8fb55a915fd89866ce5a389c76c59439e48060db7
def get_role(self, role_id): '\n Returns a role object that corresponds to role_id\n \n * role_id (int) - ID number for role lookup\n ' return self._roles[role_id]
Returns a role object that corresponds to role_id * role_id (int) - ID number for role lookup
tests/mocks.py
get_role
enkwolf/ITEE-discord-bot
0
python
def get_role(self, role_id): '\n Returns a role object that corresponds to role_id\n \n * role_id (int) - ID number for role lookup\n ' return self._roles[role_id]
def get_role(self, role_id): '\n Returns a role object that corresponds to role_id\n \n * role_id (int) - ID number for role lookup\n ' return self._roles[role_id]<|docstring|>Returns a role object that corresponds to role_id * role_id (int) - ID number for role lookup<|endoftext|>
0c9ebd8a034ffe974f34aff7d6ef45a437c6064894a5066e80760c2a472179d8
def get_member(self, user_id): '\n Returns a member object that corresponds to user_id\n \n * user_id (int) - ID number for member lookup\n ' return self._members[user_id]
Returns a member object that corresponds to user_id * user_id (int) - ID number for member lookup
tests/mocks.py
get_member
enkwolf/ITEE-discord-bot
0
python
def get_member(self, user_id): '\n Returns a member object that corresponds to user_id\n \n * user_id (int) - ID number for member lookup\n ' return self._members[user_id]
def get_member(self, user_id): '\n Returns a member object that corresponds to user_id\n \n * user_id (int) - ID number for member lookup\n ' return self._members[user_id]<|docstring|>Returns a member object that corresponds to user_id * user_id (int) - ID number for member lookup<|endoftext|>
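The four guild records compose into a simple create-then-look-up workflow; MockGuild below is a hypothetical host class for the recorded methods, with MockRole/MockMember reduced to bare stand-ins:

class MockRole:
    def __init__(self, role_id):
        self.id = role_id

class MockMember:
    def __init__(self, user_id):
        self.id = user_id

class MockGuild:  # hypothetical host for the four recorded methods
    def __init__(self):
        self._roles = {}
        self._members = {}

    def create_role(self, role_id):
        role = MockRole(role_id)
        self._roles[role_id] = role
        return role

    def create_member(self, user_id):
        member = MockMember(user_id)
        self._members[user_id] = member
        return member

    def get_role(self, role_id):
        return self._roles[role_id]

    def get_member(self, user_id):
        return self._members[user_id]

guild = MockGuild()
admin = guild.create_role(1)
alice = guild.create_member(100)
assert guild.get_role(1) is admin and guild.get_member(100) is alice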
4a5384358023d0b41f577984e106bf5f65689db2c7d9392195f7866ce5518011
async def ci_user(token: str=Depends(oauth2_scheme)) -> ci.CompassInterface: 'Returns an initialised ci.CompassInterface object.\n\n Note `Depends` adds the oAuth2 integration with OpenAPI.\n TODO: manual integration without depends?\n ' return (await get_current_user(token))
Returns an initialised ci.CompassInterface object. Note `Depends` adds the oAuth2 integration with OpenAPI. TODO: manual integration without depends?
compass/api/util/oauth2.py
ci_user
the-scouts/compass-interface
8
python
async def ci_user(token: str=Depends(oauth2_scheme)) -> ci.CompassInterface: 'Returns an initialised ci.CompassInterface object.\n\n Note `Depends` adds the oAuth2 integration with OpenAPI.\n TODO: manual integration without depends?\n ' return (await get_current_user(token))
async def ci_user(token: str=Depends(oauth2_scheme)) -> ci.CompassInterface: 'Returns an initialised ci.CompassInterface object.\n\n Note `Depends` adds the oAuth2 integration with OpenAPI.\n TODO: manual integration without depends?\n ' return (await get_current_user(token))<|docstring|>Returns an initialised ci.CompassInterface object. Note `Depends` adds the oAuth2 integration with OpenAPI. TODO: manual integration without depends?<|endoftext|>
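A route sketch showing how the dependency would typically be consumed; the endpoint and response shape are assumptions, not compass-interface's real API:

from fastapi import Depends, FastAPI
from compass.api.util.oauth2 import ci_user  # import path taken from the record

app = FastAPI()

@app.get('/whoami')
async def whoami(user=Depends(ci_user)):
    # user is the initialised ci.CompassInterface for the bearer token
    return {'authenticated': user is not None}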
bd79b4788f0faa3ccfc4857807acc9867f07f0d667ca04b939a8a7342e1db9cf
def has_fun(e): 'Return true if e has a function.' if (e.ty in (CONST, VAR)): return False elif (e.ty == OP): return any((has_fun(arg) for arg in e.args)) elif ((e.ty == FUN) and (e.func_name != 'sqrt')): return True else: return False
Return true if e has a function.
integral/slagle.py
has_fun
crisperdue/holpy
22
python
def has_fun(e): if (e.ty in (CONST, VAR)): return False elif (e.ty == OP): return any((has_fun(arg) for arg in e.args)) elif ((e.ty == FUN) and (e.func_name != 'sqrt')): return True else: return False
def has_fun(e): if (e.ty in (CONST, VAR)): return False elif (e.ty == OP): return any((has_fun(arg) for arg in e.args)) elif ((e.ty == FUN) and (e.func_name != 'sqrt')): return True else: return False<|docstring|>Return true if e has a function.<|endoftext|>
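Since CONST/VAR/OP/FUN are holpy-internal node tags, here is a self-contained analogue of the same recursion using Python's ast module, including the sqrt carve-out:

import ast

def has_call(node, allowed=('sqrt',)):
    # walk the tree; flag any function call whose name is not whitelisted
    if isinstance(node, ast.Call):
        name = getattr(node.func, 'id', None)
        if name not in allowed:
            return True
    return any(has_call(child, allowed) for child in ast.iter_child_nodes(node))

assert has_call(ast.parse('sin(x) + 1')) is True
assert has_call(ast.parse('sqrt(x) + 1')) is False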
17efb325adddd7a0986f9ba1a8ad437b7f01e09944db91da3c9969dfdd8d4d53
def is_mono(var, e, lower, upper): 'Determine whether an expression is monotonic in the given interval.' e_deriv = deriv(var, e) zeros = solveset(sympy_style(e_deriv), sympy_style(var), Interval(sympy_style(lower), sympy_style(upper), left_open=True, right_open=True)) return list([holpy_style(z) for z in zeros])
Determine whether an expression is monotonic in the given interval.
integral/slagle.py
is_mono
crisperdue/holpy
22
python
def is_mono(var, e, lower, upper): e_deriv = deriv(var, e) zeros = solveset(sympy_style(e_deriv), sympy_style(var), Interval(sympy_style(lower), sympy_style(upper), left_open=True, right_open=True)) return list([holpy_style(z) for z in zeros])
def is_mono(var, e, lower, upper): e_deriv = deriv(var, e) zeros = solveset(sympy_style(e_deriv), sympy_style(var), Interval(sympy_style(lower), sympy_style(upper), left_open=True, right_open=True)) return list([holpy_style(z) for z in zeros])<|docstring|>Determine whether an expression is monotonic in the given interval.<|endoftext|>
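The record is a thin wrapper over sympy's solveset on an open interval: it returns the zeros of the derivative, i.e. the points where monotonicity can break. The same call with plain sympy objects, for x**3 - 3*x whose derivative vanishes at ±1:

from sympy import Interval, Symbol, diff, solveset

x = Symbol('x')
e = x**3 - 3*x
zeros = solveset(diff(e, x), x, Interval(-2, 2, left_open=True, right_open=True))
print(zeros)  # {-1, 1}; no zeros would mean e is monotonic on the interval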
37f3d101e1d34bc9f6b3bd208e9ce9b9c5969c1c6722b471d7fc5be7f51e529d
def timeout(max_timeout): 'Timeout decorator, parameter in seconds.' def timeout_decorator(item): 'Wrap the original function.' @functools.wraps(item) def func_wrapper(*args, **kwargs): 'Closure for function.' pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res return func_wrapper return timeout_decorator
Timeout decorator, parameter in seconds.
integral/slagle.py
timeout
crisperdue/holpy
22
python
def timeout(max_timeout): def timeout_decorator(item): 'Wrap the original function.' @functools.wraps(item) def func_wrapper(*args, **kwargs): 'Closure for function.' pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res return func_wrapper return timeout_decorator
def timeout(max_timeout): def timeout_decorator(item): 'Wrap the original function.' @functools.wraps(item) def func_wrapper(*args, **kwargs): 'Closure for function.' pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res return func_wrapper return timeout_decorator<|docstring|>Timeout decorator, parameter in seconds.<|endoftext|>
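Usage sketch, assuming the decorator above is in scope; apply_async(...).get(max_timeout) raises multiprocessing.TimeoutError once the budget is exceeded, while the worker thread keeps running in the background:

import multiprocessing
import time

@timeout(1.0)
def slow(n):
    time.sleep(n)
    return n

print(slow(0.1))  # well inside the budget, returns 0.1
try:
    slow(5)       # exceeds 1.0 s
except multiprocessing.TimeoutError:
    print('timed out')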
d97fc477633550351c30acf3418cc8e628ec19b4b0d52adf357fc93368b2eec1
def perform_steps(node): '\n Perform the real solving steps. \n ' real_steps = [] current = node.root for step in node.trace(): loc = step.loc if (step.reason == 'Simplification'): rule = rules.FullSimplify() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'location': str(loc)}) elif (step.reason == 'Substitution'): rule = rules.Substitution1(step.var_name, step.var_subst) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc), 'params': {'f': str(step.f), 'g': str(step.var_subst), 'var_name': step.var_name}, '_latex_reason': ('Substitute \\(%s\\) for \\(%s\\)' % (latex.convert_expr(Var(step.var_name)), latex.convert_expr(step.var_subst))), 'reason': step.reason}) elif (step.reason == 'Integrate by parts'): rule = rules.IntegrationByParts(step.u, step.v) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc), 'reason': step.reason, '_latex_reason': ('Integrate by parts, \\(u = %s, v = %s\\)' % (latex.convert_expr(step.u), latex.convert_expr(step.v))), 'params': {'parts_u': str(step.u), 'parts_v': str(step.v)}}) elif (step.reason == 'Rewrite trigonometric'): rule = rules.RewriteTrigonometric(step.rule_name) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'reason': step.reason, 'text': str(current), 'latex': latex.convert_expr(current), 'params': {'rule': step.rule_name}, '_latex_reason': ('Rewrite trigonometric \\(%s\\) to \\(%s\\)' % (latex.convert_expr(step.before_trig), latex.convert_expr(step.after_trig))), 'location': str(step.loc)}) elif (step.reason == 'Elim abs'): rule = rules.ElimAbs() current = rules.OnLocation(rule, loc).eval(current) info = {'reason': step.reason, 'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc)} if (step.zero_point is not None): info['params'] = {'c': str(step.zero_point)} real_steps.append(info) elif (step.reason == 'Substitution inverse'): rule = rules.Substitution2(step.var_name, step.var_subst) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), '_latex_reason': ('Substitute \\(%s\\) for \\(%s\\)' % (latex.convert_expr(Var(step.var_name)), latex.convert_expr(step.var_subst))), 'reason': step.reason, 'location': str(loc), 'params': {'a': str(step.e.lower), 'b': str(step.e.upper), 'g': str(step.var_subst), 'var_name': str(step.var_name)}}) elif (step.reason == 'Unfold power'): rule = rules.UnfoldPower() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': 'Unfold power', 'location': str(loc)}) elif (step.reason == 'Rewrite fraction'): rule = rules.PolynomialDivision() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'params': {'rhs': str(step.rhs), 'denom': str(step.denom)}, 'location': str(step.loc)}) elif (step.reason == 'Split region'): rule = rules.SplitRegion(step.zero_point) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'location': str(step.loc), 'params': {'c': str(step.zero_point)}}) else: raise NotImplementedError(step.reason) last_expr = parse_expr(real_steps[(- 1)]['text']) if (last_expr.is_constant() and (last_expr.normalize() == last_expr)): return real_steps final_expr = rules.FullSimplify().eval(last_expr) real_steps.append({'text': str(final_expr), 'latex': latex.convert_expr(final_expr), 'reason': 'Simplification', 'location': '.'}) return real_steps
Perform the real solving steps.
integral/slagle.py
perform_steps
crisperdue/holpy
22
python
def perform_steps(node): '\n \n ' real_steps = [] current = node.root for step in node.trace(): loc = step.loc if (step.reason == 'Simplification'): rule = rules.FullSimplify() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'location': str(loc)}) elif (step.reason == 'Substitution'): rule = rules.Substitution1(step.var_name, step.var_subst) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc), 'params': {'f': str(step.f), 'g': str(step.var_subst), 'var_name': step.var_name}, '_latex_reason': ('Substitute \\(%s\\) for \\(%s\\)' % (latex.convert_expr(Var(step.var_name)), latex.convert_expr(step.var_subst))), 'reason': step.reason}) elif (step.reason == 'Integrate by parts'): rule = rules.IntegrationByParts(step.u, step.v) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc), 'reason': step.reason, '_latex_reason': ('Integrate by parts, \\(u = %s, v = %s\\)' % (latex.convert_expr(step.u), latex.convert_expr(step.v))), 'params': {'parts_u': str(step.u), 'parts_v': str(step.v)}}) elif (step.reason == 'Rewrite trigonometric'): rule = rules.RewriteTrigonometric(step.rule_name) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'reason': step.reason, 'text': str(current), 'latex': latex.convert_expr(current), 'params': {'rule': step.rule_name}, '_latex_reason': ('Rewrite trigonometric \\(%s\\) to \\(%s\\)' % (latex.convert_expr(step.before_trig), latex.convert_expr(step.after_trig))), 'location': str(step.loc)}) elif (step.reason == 'Elim abs'): rule = rules.ElimAbs() current = rules.OnLocation(rule, loc).eval(current) info = {'reason': step.reason, 'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc)} if (step.zero_point is not None): info['params'] = {'c': str(step.zero_point)} real_steps.append(info) elif (step.reason == 'Substitution inverse'): rule = rules.Substitution2(step.var_name, step.var_subst) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), '_latex_reason': ('Substitute \\(%s\\) for \\(%s\\)' % (latex.convert_expr(Var(step.var_name)), latex.convert_expr(step.var_subst))), 'reason': step.reason, 'location': str(loc), 'params': {'a': str(step.e.lower), 'b': str(step.e.upper), 'g': str(step.var_subst), 'var_name': str(step.var_name)}}) elif (step.reason == 'Unfold power'): rule = rules.UnfoldPower() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': 'Unfold power', 'location': str(loc)}) elif (step.reason == 'Rewrite fraction'): rule = rules.PolynomialDivision() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'params': {'rhs': str(step.rhs), 'denom': str(step.denom)}, 'location': str(step.loc)}) elif (step.reason == 'Split region'): rule = rules.SplitRegion(step.zero_point) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'location': str(step.loc), 'params': {'c': str(step.zero_point)}}) else: raise NotImplementedError(step.reason) last_expr = parse_expr(real_steps[(- 1)]['text']) if (last_expr.is_constant() and (last_expr.normalize() == last_expr)): return real_steps final_expr = rules.FullSimplify().eval(last_expr) real_steps.append({'text': str(final_expr), 'latex': latex.convert_expr(final_expr), 'reason': 'Simplification', 'location': '.'}) return real_steps
def perform_steps(node): '\n \n ' real_steps = [] current = node.root for step in node.trace(): loc = step.loc if (step.reason == 'Simplification'): rule = rules.FullSimplify() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'location': str(loc)}) elif (step.reason == 'Substitution'): rule = rules.Substitution1(step.var_name, step.var_subst) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc), 'params': {'f': str(step.f), 'g': str(step.var_subst), 'var_name': step.var_name}, '_latex_reason': ('Substitute \\(%s\\) for \\(%s\\)' % (latex.convert_expr(Var(step.var_name)), latex.convert_expr(step.var_subst))), 'reason': step.reason}) elif (step.reason == 'Integrate by parts'): rule = rules.IntegrationByParts(step.u, step.v) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc), 'reason': step.reason, '_latex_reason': ('Integrate by parts, \\(u = %s, v = %s\\)' % (latex.convert_expr(step.u), latex.convert_expr(step.v))), 'params': {'parts_u': str(step.u), 'parts_v': str(step.v)}}) elif (step.reason == 'Rewrite trigonometric'): rule = rules.RewriteTrigonometric(step.rule_name) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'reason': step.reason, 'text': str(current), 'latex': latex.convert_expr(current), 'params': {'rule': step.rule_name}, '_latex_reason': ('Rewrite trigonometric \\(%s\\) to \\(%s\\)' % (latex.convert_expr(step.before_trig), latex.convert_expr(step.after_trig))), 'location': str(step.loc)}) elif (step.reason == 'Elim abs'): rule = rules.ElimAbs() current = rules.OnLocation(rule, loc).eval(current) info = {'reason': step.reason, 'text': str(current), 'latex': latex.convert_expr(current), 'location': str(loc)} if (step.zero_point is not None): info['params'] = {'c': str(step.zero_point)} real_steps.append(info) elif (step.reason == 'Substitution inverse'): rule = rules.Substitution2(step.var_name, step.var_subst) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), '_latex_reason': ('Substitute \\(%s\\) for \\(%s\\)' % (latex.convert_expr(Var(step.var_name)), latex.convert_expr(step.var_subst))), 'reason': step.reason, 'location': str(loc), 'params': {'a': str(step.e.lower), 'b': str(step.e.upper), 'g': str(step.var_subst), 'var_name': str(step.var_name)}}) elif (step.reason == 'Unfold power'): rule = rules.UnfoldPower() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': 'Unfold power', 'location': str(loc)}) elif (step.reason == 'Rewrite fraction'): rule = rules.PolynomialDivision() current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'params': {'rhs': str(step.rhs), 'denom': str(step.denom)}, 'location': str(step.loc)}) elif (step.reason == 'Split region'): rule = rules.SplitRegion(step.zero_point) current = rules.OnLocation(rule, loc).eval(current) real_steps.append({'text': str(current), 'latex': latex.convert_expr(current), 'reason': step.reason, 'location': str(step.loc), 'params': {'c': str(step.zero_point)}}) else: raise NotImplementedError(step.reason) last_expr = parse_expr(real_steps[(- 1)]['text']) if (last_expr.is_constant() and (last_expr.normalize() == last_expr)): return real_steps final_expr = rules.FullSimplify().eval(last_expr) real_steps.append({'text': str(final_expr), 'latex': latex.convert_expr(final_expr), 'reason': 'Simplification', 'location': '.'}) return real_steps<|docstring|>Perform the real solving steps.<|endoftext|>
e869dfcd66aa5c2502235b4bf4a722729063f9b86076d0d3812db15454be2cfc
def eval(self, e): 'Algorithmic transformation of e.\n\n Parameters:\n e: original integral.\n\n Returns:\n If succeed, returns the new integral. Otherwise return e.\n\n ' pass
Algorithmic transformation of e. Parameters: e: original integral. Returns: If succeed, returns the new integral. Otherwise return e.
integral/slagle.py
eval
crisperdue/holpy
22
python
def eval(self, e): 'Algorithmic transformation of e.\n\n Parameters:\n e: original integral.\n\n Returns:\n If succeed, returns the new integral. Otherwise return e.\n\n ' pass
def eval(self, e): 'Algorithmic transformation of e.\n\n Parameters:\n e: original integral.\n\n Returns:\n If succeed, returns the new integral. Otherwise return e.\n\n ' pass<|docstring|>Algorithmic transformation of e. Parameters: e: original integral. Returns: If succeed, returns the new integral. Otherwise return e.<|endoftext|>
8657ce5657e35819a50a0ab0f5e0e1956f00875a24603656bf0ddb2acf127bde
def eval(self, e): 'Heuristic transformation of e.\n\n Parameters:\n e: original integral.\n\n Returns:\n A list of possible new integrals. Each of which should equal e.\n\n ' pass
Heuristic transformation of e. Parameters: e: original integral. Returns: A list of possible new integrals. Each of which should equal e.
integral/slagle.py
eval
crisperdue/holpy
22
python
def eval(self, e): 'Heuristic transformation of e.\n\n Parameters:\n e: original integral.\n\n Returns:\n A list of possible new integrals. Each of which should equal e.\n\n ' pass
def eval(self, e): 'Heuristic transformation of e.\n\n Parameters:\n e: original integral.\n\n Returns:\n A list of possible new integrals. Each of which should equal e.\n\n ' pass<|docstring|>Heuristic transformation of e. Parameters: e: original integral. Returns: A list of possible new integrals. Each of which should equal e.<|endoftext|>
75b8bd43c24b1b97a121c5277d7c2d9fb434a1dbdac416122f86e3319620c166
def sin_cos(self, e): '1) Transform to sine and cosine.\n\n a) tan(x) => sin(x)/cos(x)\n b) cot(x) => cos(x)/sin(x)\n c) sec(x) => 1/cos(x)\n d) csc(x) => 1/sin(x)\n\n TR1, TR2\n \n ' x = Symbol('x', [OP, CONST, VAR, FUN]) tan_pat = tan(x) cot_pat = cot(x) sec_pat = sec(x) csc_pat = csc(x) tan_expr = find_pattern(e, tan_pat) cot_expr = find_pattern(e, cot_pat) sec_expr = find_pattern(e, sec_pat) csc_expr = find_pattern(e, csc_pat) steps = [] reason = 'sine cosine' for (t, loc, _) in tan_expr: e = e.replace_trig(t, (sin(t.args[0]) / cos(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR2', t, (sin(t.args[0]) / cos(t.args[0])), loc)) for (t, loc, _) in cot_expr: e = e.replace_trig(t, (cos(t.args[0]) / sin(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR2', t, (cos(t.args[0]) / sin(t.args[0])), loc)) for (t, loc, _) in sec_expr: e = e.replace_trig(t, (Const(1) / cos(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR1', t, (Const(1) / cos(t.args[0])), loc)) for (t, loc, _) in csc_expr: e = e.replace_trig(t, (Const(1) / sin(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR1', t, (Const(1) / sin(t.args[0])), loc)) return (e, steps)
1) Transform to sine and cosine. a) tan(x) => sin(x)/cos(x) b) cot(x) => cos(x)/sin(x) c) sec(x) => 1/cos(x) d) csc(x) => 1/sin(x) TR1, TR2
integral/slagle.py
sin_cos
crisperdue/holpy
22
python
def sin_cos(self, e): '1) Transform to sine and cosine.\n\n a) tan(x) => sin(x)/cos(x)\n b) cot(x) => cos(x)/sin(x)\n c) sec(x) => 1/cos(x)\n d) csc(x) => 1/sin(x)\n\n TR1, TR2\n \n ' x = Symbol('x', [OP, CONST, VAR, FUN]) tan_pat = tan(x) cot_pat = cot(x) sec_pat = sec(x) csc_pat = csc(x) tan_expr = find_pattern(e, tan_pat) cot_expr = find_pattern(e, cot_pat) sec_expr = find_pattern(e, sec_pat) csc_expr = find_pattern(e, csc_pat) steps = [] reason = 'sine cosine' for (t, loc, _) in tan_expr: e = e.replace_trig(t, (sin(t.args[0]) / cos(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR2', t, (sin(t.args[0]) / cos(t.args[0])), loc)) for (t, loc, _) in cot_expr: e = e.replace_trig(t, (cos(t.args[0]) / sin(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR2', t, (cos(t.args[0]) / sin(t.args[0])), loc)) for (t, loc, _) in sec_expr: e = e.replace_trig(t, (Const(1) / cos(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR1', t, (Const(1) / cos(t.args[0])), loc)) for (t, loc, _) in csc_expr: e = e.replace_trig(t, (Const(1) / sin(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR1', t, (Const(1) / sin(t.args[0])), loc)) return (e, steps)
def sin_cos(self, e): '1) Transform to sine and cosine.\n\n a) tan(x) => sin(x)/cos(x)\n b) cot(x) => cos(x)/sin(x)\n c) sec(x) => 1/cos(x)\n d) csc(x) => 1/sin(x)\n\n TR1, TR2\n \n ' x = Symbol('x', [OP, CONST, VAR, FUN]) tan_pat = tan(x) cot_pat = cot(x) sec_pat = sec(x) csc_pat = csc(x) tan_expr = find_pattern(e, tan_pat) cot_expr = find_pattern(e, cot_pat) sec_expr = find_pattern(e, sec_pat) csc_expr = find_pattern(e, csc_pat) steps = [] reason = 'sine cosine' for (t, loc, _) in tan_expr: e = e.replace_trig(t, (sin(t.args[0]) / cos(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR2', t, (sin(t.args[0]) / cos(t.args[0])), loc)) for (t, loc, _) in cot_expr: e = e.replace_trig(t, (cos(t.args[0]) / sin(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR2', t, (cos(t.args[0]) / sin(t.args[0])), loc)) for (t, loc, _) in sec_expr: e = e.replace_trig(t, (Const(1) / cos(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR1', t, (Const(1) / cos(t.args[0])), loc)) for (t, loc, _) in csc_expr: e = e.replace_trig(t, (Const(1) / sin(t.args[0]))) steps.append(calc.TrigIdentityStep(e, 'TR1', t, (Const(1) / sin(t.args[0])), loc)) return (e, steps)<|docstring|>1) Transform to sine and cosine. a) tan(x) => sin(x)/cos(x) b) cot(x) => cos(x)/sin(x) c) sec(x) => 1/cos(x) d) csc(x) => 1/sin(x) TR1, TR2<|endoftext|>
750428d5093ed0ae71ae3d77b179e0f132b0a4e0923bd67e006a604e91a6151e
def tan_sec(self, e): '2) Transform to tangent and secant.\n\n        a) sin(x) => tan(x)/sec(x)\n        b) cos(x) => 1/sec(x)\n        c) cot(x) => 1/tan(x)\n        d) csc(x) => sec(x)/tan(x)\n        \n        ' x = Symbol('x', [OP, CONST, VAR, FUN]) sin_pat = sin(x) cos_pat = cos(x) cot_pat = cot(x) csc_pat = csc(x) sin_expr = find_pattern(e, sin_pat) cos_expr = find_pattern(e, cos_pat) cot_expr = find_pattern(e, cot_pat) csc_expr = find_pattern(e, csc_pat) steps = [] reason = 'tangent secant' for (t, loc, _) in sin_expr: e = e.replace_trig(t, (tan(t.args[0]) / sec(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (tan(t.args[0]) / sec(t.args[0])), reason)) for (t, loc, _) in cos_expr: e = e.replace_trig(t, (Const(1) / sec(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / sec(t.args[0])), reason)) for (t, loc, _) in cot_expr: e = e.replace_trig(t, (Const(1) / tan(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / tan(t.args[0])), reason)) for (t, loc, _) in csc_expr: e = e.replace_trig(t, (sec(t.args[0]) / tan(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (sec(t.args[0]) / tan(t.args[0])), reason)) return (e, steps)
2) Transform to tangent and secant. a) sin(x) => tan(x)/sec(x) b) cos(x) => 1/sec(x) c) cot(x) => 1/tan(x) d) csc(x) => sec(x)/tan(x)
integral/slagle.py
tan_sec
crisperdue/holpy
22
python
def tan_sec(self, e): '2) Transform to tangent and secant.\n\n        a) sin(x) => tan(x)/sec(x)\n        b) cos(x) => 1/sec(x)\n        c) cot(x) => 1/tan(x)\n        d) csc(x) => sec(x)/tan(x)\n        \n        ' x = Symbol('x', [OP, CONST, VAR, FUN]) sin_pat = sin(x) cos_pat = cos(x) cot_pat = cot(x) csc_pat = csc(x) sin_expr = find_pattern(e, sin_pat) cos_expr = find_pattern(e, cos_pat) cot_expr = find_pattern(e, cot_pat) csc_expr = find_pattern(e, csc_pat) steps = [] reason = 'tangent secant' for (t, loc, _) in sin_expr: e = e.replace_trig(t, (tan(t.args[0]) / sec(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (tan(t.args[0]) / sec(t.args[0])), reason)) for (t, loc, _) in cos_expr: e = e.replace_trig(t, (Const(1) / sec(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / sec(t.args[0])), reason)) for (t, loc, _) in cot_expr: e = e.replace_trig(t, (Const(1) / tan(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / tan(t.args[0])), reason)) for (t, loc, _) in csc_expr: e = e.replace_trig(t, (sec(t.args[0]) / tan(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (sec(t.args[0]) / tan(t.args[0])), reason)) return (e, steps)
def tan_sec(self, e): '2) Transform to tangent and secant.\n\n        a) sin(x) => tan(x)/sec(x)\n        b) cos(x) => 1/sec(x)\n        c) cot(x) => 1/tan(x)\n        d) csc(x) => sec(x)/tan(x)\n        \n        ' x = Symbol('x', [OP, CONST, VAR, FUN]) sin_pat = sin(x) cos_pat = cos(x) cot_pat = cot(x) csc_pat = csc(x) sin_expr = find_pattern(e, sin_pat) cos_expr = find_pattern(e, cos_pat) cot_expr = find_pattern(e, cot_pat) csc_expr = find_pattern(e, csc_pat) steps = [] reason = 'tangent secant' for (t, loc, _) in sin_expr: e = e.replace_trig(t, (tan(t.args[0]) / sec(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (tan(t.args[0]) / sec(t.args[0])), reason)) for (t, loc, _) in cos_expr: e = e.replace_trig(t, (Const(1) / sec(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / sec(t.args[0])), reason)) for (t, loc, _) in cot_expr: e = e.replace_trig(t, (Const(1) / tan(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / tan(t.args[0])), reason)) for (t, loc, _) in csc_expr: e = e.replace_trig(t, (sec(t.args[0]) / tan(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (sec(t.args[0]) / tan(t.args[0])), reason)) return (e, steps)<|docstring|>2) Transform to tangent and secant. a) sin(x) => tan(x)/sec(x) b) cos(x) => 1/sec(x) c) cot(x) => 1/tan(x) d) csc(x) => sec(x)/tan(x)<|endoftext|>
259a63c2003c10a211b81145944be0ee301cc3212568bc7b30189bbc9bfe4bd3
def cot_csc(self, e): '3) Transform to cotangent and cosecant.\n \n a) sin(x) => 1/csc(x)\n b) cos(x) => cot(x)/csc(x)\n c) tan(x) => 1/cot(x)\n d) sec(x) => csc(x)/cot(x)\n ' x = Symbol('x', [OP, CONST, VAR, FUN]) sin_pat = sin(x) cos_pat = cos(x) tan_pat = tan(x) sec_pat = sec(x) sin_expr = find_pattern(e, sin_pat) cos_expr = find_pattern(e, cos_pat) tan_expr = find_pattern(e, tan_pat) sec_expr = find_pattern(e, sec_pat) steps = [] reason = 'cotangent cosecant' for (t, loc, _) in sin_expr: e = e.replace_trig(t, (Const(1) / csc(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / csc(t.args[0])), reason)) for (t, loc, _) in cos_expr: e = e.replace_trig(t, (cot(t.args[0]) / csc(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (cot(t.args[0]) / csc(t.args[0])), reason)) for (t, loc, _) in tan_expr: e = e.replace_trig(t, (Const(1) / cot(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / cot(t.args[0])), reason)) for (t, loc, _) in sec_expr: e = e.replace_trig(t, (csc(t.args[0]) / cot(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (csc(t.args[0]) / cot(t.args[0])), reason)) return (e, steps)
3) Transform to cotangent and cosecant. a) sin(x) => 1/csc(x) b) cos(x) => cot(x)/csc(x) c) tan(x) => 1/cot(x) d) sec(x) => csc(x)/cot(x)
integral/slagle.py
cot_csc
crisperdue/holpy
22
python
def cot_csc(self, e): '3) Transform to cotangent and cosecant.\n \n a) sin(x) => 1/csc(x)\n b) cos(x) => cot(x)/csc(x)\n c) tan(x) => 1/cot(x)\n d) sec(x) => csc(x)/cot(x)\n ' x = Symbol('x', [OP, CONST, VAR, FUN]) sin_pat = sin(x) cos_pat = cos(x) tan_pat = tan(x) sec_pat = sec(x) sin_expr = find_pattern(e, sin_pat) cos_expr = find_pattern(e, cos_pat) tan_expr = find_pattern(e, tan_pat) sec_expr = find_pattern(e, sec_pat) steps = [] reason = 'cotangent cosecant' for (t, loc, _) in sin_expr: e = e.replace_trig(t, (Const(1) / csc(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / csc(t.args[0])), reason)) for (t, loc, _) in cos_expr: e = e.replace_trig(t, (cot(t.args[0]) / csc(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (cot(t.args[0]) / csc(t.args[0])), reason)) for (t, loc, _) in tan_expr: e = e.replace_trig(t, (Const(1) / cot(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / cot(t.args[0])), reason)) for (t, loc, _) in sec_expr: e = e.replace_trig(t, (csc(t.args[0]) / cot(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (csc(t.args[0]) / cot(t.args[0])), reason)) return (e, steps)
def cot_csc(self, e): '3) Transform to cotangent and cosecant.\n \n a) sin(x) => 1/csc(x)\n b) cos(x) => cot(x)/csc(x)\n c) tan(x) => 1/cot(x)\n d) sec(x) => csc(x)/cot(x)\n ' x = Symbol('x', [OP, CONST, VAR, FUN]) sin_pat = sin(x) cos_pat = cos(x) tan_pat = tan(x) sec_pat = sec(x) sin_expr = find_pattern(e, sin_pat) cos_expr = find_pattern(e, cos_pat) tan_expr = find_pattern(e, tan_pat) sec_expr = find_pattern(e, sec_pat) steps = [] reason = 'cotangent cosecant' for (t, loc, _) in sin_expr: e = e.replace_trig(t, (Const(1) / csc(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / csc(t.args[0])), reason)) for (t, loc, _) in cos_expr: e = e.replace_trig(t, (cot(t.args[0]) / csc(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (cot(t.args[0]) / csc(t.args[0])), reason)) for (t, loc, _) in tan_expr: e = e.replace_trig(t, (Const(1) / cot(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (Const(1) / cot(t.args[0])), reason)) for (t, loc, _) in sec_expr: e = e.replace_trig(t, (csc(t.args[0]) / cot(t.args[0]))) steps.append(calc.TrigSubstitutionStep(e, loc, t, (csc(t.args[0]) / cot(t.args[0])), reason)) return (e, steps)<|docstring|>3) Transform to cotangent and cosecant. a) sin(x) => 1/csc(x) b) cos(x) => cot(x)/csc(x) c) tan(x) => 1/cot(x) d) sec(x) => csc(x)/cot(x)<|endoftext|>
d7e686be3a5d1b4a8f99f1e8b5053b58afd8d7290f3d24281e052802aab9a824
def trace(self): 'Give computation trace for resolved integration.' assert (self.resolved == True), ("%s haven't been solved" % self.root) return self.resolved_steps
Give computation trace for resolved integration.
integral/slagle.py
trace
crisperdue/holpy
22
python
def trace(self): assert (self.resolved == True), ("%s haven't been solved" % self.root) return self.resolved_steps
def trace(self): assert (self.resolved == True), ("%s haven't been solved" % self.root) return self.resolved_steps<|docstring|>Give computation trace for resolved integration.<|endoftext|>
fceadbc13b25ff711a0d1537effa720740865d21833700ac3e77523b5619ba73
def expand(self, not_solved_integral): 'Expand the current node.\n\n        This tries all algorithm rules. If the result is itself an integral, then\n        apply each of the heuristic rules and collect the results. If the\n        result is a linear combination of integrals, then put a single AndNode\n        as the child nodes.\n\n        Any new integral produced by a transformation is stored in a set, to\n        avoid repeatedly trying to solve the same integral (trigonometric\n        functions can transform back into themselves).\n\n        ' cur_integral = self.root algo_steps = [] not_solved_integral.add(cur_integral) for rule in algorithm_rules: (cur_integral, cur_steps) = rule().eval(cur_integral) if cur_steps: for step in cur_steps: step.prepend_loc(self.loc) algo_steps.append(step) if (rule == AlgoNonLinearSubstitution): continue norm_integral = rules.FullSimplify().eval(cur_integral) if (norm_integral != cur_integral): algo_steps.append(calc.SimplifyStep(norm_integral, self.loc)) cur_integral = norm_integral if (cur_integral.ty == INTEGRAL): for rule in heuristic_rules: res = rule().eval(cur_integral) for (r, steps) in res: if steps: for step in steps: step.prepend_loc(self.loc) norm_r = rules.FullSimplify().eval(r) if (norm_r != r): steps.append(calc.SimplifyStep(norm_r, self.loc)) if ((norm_r.ty == INTEGRAL) and (norm_r not in not_solved_integral)): self.children.append(OrNode(norm_r, loc=self.loc, parent=self, steps=(algo_steps + steps))) elif (norm_r not in not_solved_integral): self.children.append(AndNode(norm_r, loc=self.loc, parent=self, steps=(algo_steps + steps))) else: not_solved_integral.remove(self.root) self.children.append(AndNode(cur_integral, loc=self.loc, parent=self, steps=algo_steps)) self.compute_resolved()
Expand the current node. This tries all algorithm rules. If the result is itself an integral, then apply each of the heuristic rules and collect the results. If the result is a linear combination of integrals, then put a single AndNode as the child nodes. Any new integral produced by a transformation is stored in a set, to avoid repeatedly trying to solve the same integral (trigonometric functions can transform back into themselves).
integral/slagle.py
expand
crisperdue/holpy
22
python
def expand(self, not_solved_integral): 'Expand the current node.\n\n        This tries all algorithm rules. If the result is itself an integral, then\n        apply each of the heuristic rules and collect the results. If the\n        result is a linear combination of integrals, then put a single AndNode\n        as the child nodes.\n\n        Any new integral produced by a transformation is stored in a set, to\n        avoid repeatedly trying to solve the same integral (trigonometric\n        functions can transform back into themselves).\n\n        ' cur_integral = self.root algo_steps = [] not_solved_integral.add(cur_integral) for rule in algorithm_rules: (cur_integral, cur_steps) = rule().eval(cur_integral) if cur_steps: for step in cur_steps: step.prepend_loc(self.loc) algo_steps.append(step) if (rule == AlgoNonLinearSubstitution): continue norm_integral = rules.FullSimplify().eval(cur_integral) if (norm_integral != cur_integral): algo_steps.append(calc.SimplifyStep(norm_integral, self.loc)) cur_integral = norm_integral if (cur_integral.ty == INTEGRAL): for rule in heuristic_rules: res = rule().eval(cur_integral) for (r, steps) in res: if steps: for step in steps: step.prepend_loc(self.loc) norm_r = rules.FullSimplify().eval(r) if (norm_r != r): steps.append(calc.SimplifyStep(norm_r, self.loc)) if ((norm_r.ty == INTEGRAL) and (norm_r not in not_solved_integral)): self.children.append(OrNode(norm_r, loc=self.loc, parent=self, steps=(algo_steps + steps))) elif (norm_r not in not_solved_integral): self.children.append(AndNode(norm_r, loc=self.loc, parent=self, steps=(algo_steps + steps))) else: not_solved_integral.remove(self.root) self.children.append(AndNode(cur_integral, loc=self.loc, parent=self, steps=algo_steps)) self.compute_resolved()
def expand(self, not_solved_integral): 'Expand the current node.\n\n        This tries all algorithm rules. If the result is itself an integral, then\n        apply each of the heuristic rules and collect the results. If the\n        result is a linear combination of integrals, then put a single AndNode\n        as the child nodes.\n\n        Any new integral produced by a transformation is stored in a set, to\n        avoid repeatedly trying to solve the same integral (trigonometric\n        functions can transform back into themselves).\n\n        ' cur_integral = self.root algo_steps = [] not_solved_integral.add(cur_integral) for rule in algorithm_rules: (cur_integral, cur_steps) = rule().eval(cur_integral) if cur_steps: for step in cur_steps: step.prepend_loc(self.loc) algo_steps.append(step) if (rule == AlgoNonLinearSubstitution): continue norm_integral = rules.FullSimplify().eval(cur_integral) if (norm_integral != cur_integral): algo_steps.append(calc.SimplifyStep(norm_integral, self.loc)) cur_integral = norm_integral if (cur_integral.ty == INTEGRAL): for rule in heuristic_rules: res = rule().eval(cur_integral) for (r, steps) in res: if steps: for step in steps: step.prepend_loc(self.loc) norm_r = rules.FullSimplify().eval(r) if (norm_r != r): steps.append(calc.SimplifyStep(norm_r, self.loc)) if ((norm_r.ty == INTEGRAL) and (norm_r not in not_solved_integral)): self.children.append(OrNode(norm_r, loc=self.loc, parent=self, steps=(algo_steps + steps))) elif (norm_r not in not_solved_integral): self.children.append(AndNode(norm_r, loc=self.loc, parent=self, steps=(algo_steps + steps))) else: not_solved_integral.remove(self.root) self.children.append(AndNode(cur_integral, loc=self.loc, parent=self, steps=algo_steps)) self.compute_resolved()<|docstring|>Expand the current node. This tries all algorithm rules. If the result is itself an integral, then apply each of the heuristic rules and collect the results. If the result is a linear combination of integrals, then put a single AndNode as the child nodes. Any new integral produced by a transformation is stored in a set, to avoid repeatedly trying to solve the same integral (trigonometric functions can transform back into themselves).<|endoftext|>
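Aside on the expand() record above: the not_solved_integral set is a cycle guard rather than an optimization — trigonometric identities can map an integrand back to an equivalent form of itself, so without the set the search could loop forever. A minimal self-contained sketch of the same guard in a generic recursive search (transform and the integer goals are stand-ins for the rule set and the integrals; not part of the dataset):

def transform(goal):
    # Stand-in for the slagle.py rule set: here we just shrink an integer.
    return [goal - 1] if goal > 0 else []

def solve(goal, visited):
    if goal == 0:
        return 0                # base case: goal is solved outright
    if goal in visited:
        return None             # cycle guard: this goal is already being tried
    visited.add(goal)
    for subgoal in transform(goal):
        if solve(subgoal, visited) is not None:
            return goal
    visited.discard(goal)       # backtrack so other branches may retry it
    return None

print(solve(3, set()))  # -> 3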
e8be5326c19e004b9b5ba39fba6cb152b3f66a1ab620a37dbbff939c71faec92
def timeout_decorator(item): 'Wrap the original function.' @functools.wraps(item) def func_wrapper(*args, **kwargs): 'Closure for function.' pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res return func_wrapper
Wrap the original function.
integral/slagle.py
timeout_decorator
crisperdue/holpy
22
python
def timeout_decorator(item): @functools.wraps(item) def func_wrapper(*args, **kwargs): 'Closure for function.' pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res return func_wrapper
def timeout_decorator(item): @functools.wraps(item) def func_wrapper(*args, **kwargs): 'Closure for function.' pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res return func_wrapper<|docstring|>Wrap the original function.<|endoftext|>
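The timeout_decorator record above is a generic thread-pool timeout pattern. A hedged usage sketch, assuming a module-level max_timeout constant as the record implies (slow_add is a hypothetical function introduced for this example):

import functools
import multiprocessing.pool
import time

max_timeout = 2.0  # assumed module-level constant, as in the record

def timeout_decorator(item):
    @functools.wraps(item)
    def func_wrapper(*args, **kwargs):
        pool = multiprocessing.pool.ThreadPool(processes=1)
        async_result = pool.apply_async(item, args, kwargs)
        # get() raises multiprocessing.TimeoutError if the call runs too long
        res = async_result.get(max_timeout)
        pool.close()
        return res
    return func_wrapper

@timeout_decorator
def slow_add(a, b):
    time.sleep(1)
    return a + b

print(slow_add(2, 3))  # prints 5; sleeping past the 2 s budget would raise TimeoutError

One caveat the sketch inherits from the record: when get() times out, pool.close() is never reached, so a try/finally around the get would be tidier.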
79b6fdf662485a65be66e835902447032196e3e7e5df54a2fb24efa58729f763
def is_pat1(e): 'elf{sin(v),cos^2(v)}cos^{2n+1}(v)' v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = ((cos(v) ^ n) * sin(v)) pat2 = (cos(v) * sin(v)) pat3 = (cos(v) ^ n) if match(e, pat1): n_value = e.args[0].args[1].val if ((n_value % 2) == 0): return (False, None) return ((True, e.args[1].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): n = e.args[1].val if ((n % 2) == 0): return (False, None) return (True, e.args[0].args[0]) else: return (False, None)
elf{sin(v),cos^2(v)}cos^{2n+1}(v)
integral/slagle.py
is_pat1
crisperdue/holpy
22
python
def is_pat1(e): v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = ((cos(v) ^ n) * sin(v)) pat2 = (cos(v) * sin(v)) pat3 = (cos(v) ^ n) if match(e, pat1): n_value = e.args[0].args[1].val if ((n_value % 2) == 0): return (False, None) return ((True, e.args[1].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): n = e.args[1].val if ((n % 2) == 0): return (False, None) return (True, e.args[0].args[0]) else: return (False, None)
def is_pat1(e): v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = ((cos(v) ^ n) * sin(v)) pat2 = (cos(v) * sin(v)) pat3 = (cos(v) ^ n) if match(e, pat1): n_value = e.args[0].args[1].val if ((n_value % 2) == 0): return (False, None) return ((True, e.args[1].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): n = e.args[1].val if ((n % 2) == 0): return (False, None) return (True, e.args[0].args[0]) else: return (False, None)<|docstring|>elf{sin(v),cos^2(v)}cos^{2n+1}(v)<|endoftext|>
2ac09b68a937c0cbbff36f994bcf480cbbffc3d9af63c11b07b649f79c4f66f8
def is_pat2(e): 'elf{cos(v),sin^2(v)}sin^{2n+1}(v)' v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = (cos(v) * (sin(v) ^ n)) pat2 = (cos(v) * sin(v)) pat3 = (sin(v) ^ n) if match(e, pat1): n_value = e.args[1].args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
elf{cos(v),sin^2(v)}sin^{2n+1}(v)
integral/slagle.py
is_pat2
crisperdue/holpy
22
python
def is_pat2(e): v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = (cos(v) * (sin(v) ^ n)) pat2 = (cos(v) * sin(v)) pat3 = (sin(v) ^ n) if match(e, pat1): n_value = e.args[1].args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
def is_pat2(e): v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = (cos(v) * (sin(v) ^ n)) pat2 = (cos(v) * sin(v)) pat3 = (sin(v) ^ n) if match(e, pat1): n_value = e.args[1].args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)<|docstring|>elf{cos(v),sin^2(v)}sin^{2n+1}(v)<|endoftext|>
94bec98d4a6f4c681280f7dee49d26bc4f75937aaf0ff7c574f4561831417705
def is_pat3(e): 'elf{tan(v),sec^2(v)}' v = Symbol('v', [VAR, OP, FUN]) pat1 = tan(v) pat2 = (sec(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
elf{tan(v),sec^2(v)}
integral/slagle.py
is_pat3
crisperdue/holpy
22
python
def is_pat3(e): v = Symbol('v', [VAR, OP, FUN]) pat1 = tan(v) pat2 = (sec(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
def is_pat3(e): v = Symbol('v', [VAR, OP, FUN]) pat1 = tan(v) pat2 = (sec(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)<|docstring|>elf{tan(v),sec^2(v)}<|endoftext|>
7494d223446c280b9555ddaf0c2cf953378f790d58e08afbc43282a57a292c60
def is_pat4(e): 'elf{cot(v),csc^2(v)}' v = Symbol('v', [VAR, OP, FUN]) pat1 = cot(v) pat2 = (csc(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
elf{cot(v),csc^2(v)}
integral/slagle.py
is_pat4
crisperdue/holpy
22
python
def is_pat4(e): v = Symbol('v', [VAR, OP, FUN]) pat1 = cot(v) pat2 = (csc(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
def is_pat4(e): v = Symbol('v', [VAR, OP, FUN]) pat1 = cot(v) pat2 = (csc(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)<|docstring|>elf{cot(v),csc^2(v)}<|endoftext|>
45700215feb3b9a61263766f9cc0202397fedcf9e1855b5e4af41f19047ef690
def is_pat5(e): 'elf{sec(v),tan^2(v)}tan^{2n+1}(v)' v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = (sec(v) * (tan(v) ^ n)) pat2 = (sec(v) * tan(v)) pat3 = (tan(v) ^ n) if match(e, pat1): n_value = e.args[1].args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
elf{sec(v),tan^2(v)}tan^{2n+1}(v)
integral/slagle.py
is_pat5
crisperdue/holpy
22
python
def is_pat5(e): v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = (sec(v) * (tan(v) ^ n)) pat2 = (sec(v) * tan(v)) pat3 = (tan(v) ^ n) if match(e, pat1): n_value = e.args[1].args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
def is_pat5(e): v = Symbol('v', [VAR, OP, FUN]) n = Symbol('n', [CONST]) pat1 = (sec(v) * (tan(v) ^ n)) pat2 = (sec(v) * tan(v)) pat3 = (tan(v) ^ n) if match(e, pat1): n_value = e.args[1].args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat3): n_value = e.args[1].val return ((True, e.args[0].args[0]) if ((n_value % 2) == 1) else (False, None)) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)<|docstring|>elf{sec(v),tan^2(v)}tan^{2n+1}(v)<|endoftext|>
9ce2c26269f03de50f9f48b7f2c418e2eefc72ad7f2b318d3af4fa9ee1d6b9bd
def is_pat6(e): 'elf{csc(v),cot^2(v)}' v = Symbol('v', [VAR, OP, FUN]) pat1 = csc(v) pat2 = (cot(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
elf{csc(v),cot^2(v)}
integral/slagle.py
is_pat6
crisperdue/holpy
22
python
def is_pat6(e): v = Symbol('v', [VAR, OP, FUN]) pat1 = csc(v) pat2 = (cot(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)
def is_pat6(e): v = Symbol('v', [VAR, OP, FUN]) pat1 = csc(v) pat2 = (cot(v) ^ Const(2)) if match(e, pat1): return (True, e.args[0]) elif match(e, pat2): return (True, e.args[0].args[0]) else: return (False, None)<|docstring|>elf{csc(v),cot^2(v)}<|endoftext|>
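The 'elf{...}' notation in the six is_pat docstrings above is Slagle's shorthand for 'elementary function of': each predicate tests whether the integrand licenses a standard u-substitution, which is why each returns the inner argument v on success — v becomes the substitution variable. Two representative identities from standard calculus, stated here for context (not taken from the records):

    with u = sin(v), du = cos(v) dv:    ∫ f(sin v) · cos^(2n+1)(v) dv = ∫ f(u) · (1 − u²)^n du
    with u = tan(v), du = sec²(v) dv:   ∫ f(tan v) · sec²(v) dv = ∫ f(u) du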
339b1b2fa332e672e7551a761c5084ef10178d4ad3dddee73eb4858387659bb1
@functools.wraps(item) def func_wrapper(*args, **kwargs): 'Closure for function.' pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res
Closure for function.
integral/slagle.py
func_wrapper
crisperdue/holpy
22
python
@functools.wraps(item) def func_wrapper(*args, **kwargs): pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res
@functools.wraps(item) def func_wrapper(*args, **kwargs): pool = multiprocessing.pool.ThreadPool(processes=1) async_result = pool.apply_async(item, args, kwargs) res = async_result.get(max_timeout) pool.close() return res<|docstring|>Closure for function.<|endoftext|>
9446db6e7889f7ab5dd70224db40b558c93bc5b9d63a7ad134f9826edb466e85
def __init__(self, *args, **kwargs): '\n Add placeholders and classes, remove auto-generated\n labels and set autofocus on first field\n ' super().__init__(*args, **kwargs) placeholders = {'full_name': 'Full Name', 'email': 'Email Address', 'phone_number': 'Phone Number', 'street_address1': 'Street Address 1', 'street_address2': 'Street Address 2', 'town_or_city': 'Town or City', 'county': 'County, State or Locality', 'postcode': 'Postal Code'} self.fields['full_name'].widget.attrs['autofocus'] = True for field in self.fields: if (field != 'country'): if self.fields[field].required: placeholder = f'{placeholders[field]} *' else: placeholder = placeholders[field] self.fields[field].widget.attrs['placeholder'] = placeholder self.fields[field].widget.attrs['class'] = 'border-green text-green' self.fields[field].label = False self.fields['phone_number'].widget.attrs['class'] = 'number' self.fields['full_name'].widget.attrs['class'] = 'letters'
Add placeholders and classes, remove auto-generated labels and set autofocus on first field
checkout/forms.py
__init__
FraL96/Monstera-MS4
0
python
def __init__(self, *args, **kwargs): '\n Add placeholders and classes, remove auto-generated\n labels and set autofocus on first field\n ' super().__init__(*args, **kwargs) placeholders = {'full_name': 'Full Name', 'email': 'Email Address', 'phone_number': 'Phone Number', 'street_address1': 'Street Address 1', 'street_address2': 'Street Address 2', 'town_or_city': 'Town or City', 'county': 'County, State or Locality', 'postcode': 'Postal Code'} self.fields['full_name'].widget.attrs['autofocus'] = True for field in self.fields: if (field != 'country'): if self.fields[field].required: placeholder = f'{placeholders[field]} *' else: placeholder = placeholders[field] self.fields[field].widget.attrs['placeholder'] = placeholder self.fields[field].widget.attrs['class'] = 'border-green text-green' self.fields[field].label = False self.fields['phone_number'].widget.attrs['class'] = 'number' self.fields['full_name'].widget.attrs['class'] = 'letters'
def __init__(self, *args, **kwargs): '\n Add placeholders and classes, remove auto-generated\n labels and set autofocus on first field\n ' super().__init__(*args, **kwargs) placeholders = {'full_name': 'Full Name', 'email': 'Email Address', 'phone_number': 'Phone Number', 'street_address1': 'Street Address 1', 'street_address2': 'Street Address 2', 'town_or_city': 'Town or City', 'county': 'County, State or Locality', 'postcode': 'Postal Code'} self.fields['full_name'].widget.attrs['autofocus'] = True for field in self.fields: if (field != 'country'): if self.fields[field].required: placeholder = f'{placeholders[field]} *' else: placeholder = placeholders[field] self.fields[field].widget.attrs['placeholder'] = placeholder self.fields[field].widget.attrs['class'] = 'border-green text-green' self.fields[field].label = False self.fields['phone_number'].widget.attrs['class'] = 'number' self.fields['full_name'].widget.attrs['class'] = 'letters'<|docstring|>Add placeholders and classes, remove auto-generated labels and set autofocus on first field<|endoftext|>
a29f808550d44b6a940ce2eb55299a4564e6d5d518933e0f7fab002cbb399795
def editor(arg): 'Do the editing.' (fn, scenario, multiplier) = arg newfn = fn.replace('/i/0/', ('/i/%s/' % (scenario,))) newdir = os.path.dirname(newfn) if (not os.path.isdir(newdir)): try: os.makedirs(newdir) except FileExistsError: pass fp = open(newfn, 'w') for line in open(fn): tokens = line.split() if (len(tokens) != 2): fp.write(line) continue try: fp.write(('%s %.2f\n' % (tokens[0], (float(tokens[1]) * multiplier)))) except Exception as exp: print(('Editing %s hit exp: %s' % (fn, exp))) sys.exit() fp.close()
Do the editing.
scripts/cligen/arb_precip_delta.py
editor
jarad/dep
0
python
def editor(arg): (fn, scenario, multiplier) = arg newfn = fn.replace('/i/0/', ('/i/%s/' % (scenario,))) newdir = os.path.dirname(newfn) if (not os.path.isdir(newdir)): try: os.makedirs(newdir) except FileExistsError: pass fp = open(newfn, 'w') for line in open(fn): tokens = line.split() if (len(tokens) != 2): fp.write(line) continue try: fp.write(('%s %.2f\n' % (tokens[0], (float(tokens[1]) * multiplier)))) except Exception as exp: print(('Editing %s hit exp: %s' % (fn, exp))) sys.exit() fp.close()
def editor(arg): (fn, scenario, multiplier) = arg newfn = fn.replace('/i/0/', ('/i/%s/' % (scenario,))) newdir = os.path.dirname(newfn) if (not os.path.isdir(newdir)): try: os.makedirs(newdir) except FileExistsError: pass fp = open(newfn, 'w') for line in open(fn): tokens = line.split() if (len(tokens) != 2): fp.write(line) continue try: fp.write(('%s %.2f\n' % (tokens[0], (float(tokens[1]) * multiplier)))) except Exception as exp: print(('Editing %s hit exp: %s' % (fn, exp))) sys.exit() fp.close()<|docstring|>Do the editing.<|endoftext|>
da823a346ba7f56ce7e939e6c7bcf0ccaac582ce85e2d3770a7bbcb5c50a7664
def finder(scenario, multiplier): 'Return what we can find.' res = [] for (dirname, _dirpath, filenames) in os.walk('/i/0/cli'): for fn in filenames: res.append([('%s/%s' % (dirname, fn)), scenario, multiplier]) return res
Return what we can find.
scripts/cligen/arb_precip_delta.py
finder
jarad/dep
0
python
def finder(scenario, multiplier): res = [] for (dirname, _dirpath, filenames) in os.walk('/i/0/cli'): for fn in filenames: res.append([('%s/%s' % (dirname, fn)), scenario, multiplier]) return res
def finder(scenario, multiplier): res = [] for (dirname, _dirpath, filenames) in os.walk('/i/0/cli'): for fn in filenames: res.append([('%s/%s' % (dirname, fn)), scenario, multiplier]) return res<|docstring|>Return what we can find.<|endoftext|>
b36db0ad9c2b0330919172809f97829197c3ed8c554038924483774d92922483
def main(argv): 'Go Main Go.' scenario = int(argv[1]) if (scenario == 0): print('NO!') return multiplier = float(argv[2]) queue = finder(scenario, multiplier) print(('Applying %.2f multiplier for scenario %s' % (multiplier, scenario))) pool = Pool() for _ in tqdm(pool.imap_unordered(editor, queue), total=len(queue)): pass
Go Main Go.
scripts/cligen/arb_precip_delta.py
main
jarad/dep
0
python
def main(argv): scenario = int(argv[1]) if (scenario == 0): print('NO!') return multiplier = float(argv[2]) queue = finder(scenario, multiplier) print(('Applying %.2f multiplier for scenario %s' % (multiplier, scenario))) pool = Pool() for _ in tqdm(pool.imap_unordered(editor, queue), total=len(queue)): pass
def main(argv): scenario = int(argv[1]) if (scenario == 0): print('NO!') return multiplier = float(argv[2]) queue = finder(scenario, multiplier) print(('Applying %.2f multiplier for scenario %s' % (multiplier, scenario))) pool = Pool() for _ in tqdm(pool.imap_unordered(editor, queue), total=len(queue)): pass<|docstring|>Go Main Go.<|endoftext|>
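main() above uses the standard Pool + imap_unordered + tqdm idiom for embarrassingly parallel file edits. A minimal self-contained sketch of the same idiom (square and the integer queue are stand-ins for editor and the climate-file queue; not part of the dataset):

from multiprocessing import Pool

from tqdm import tqdm

def square(x):
    return x * x

if __name__ == '__main__':
    queue = list(range(100))
    with Pool() as pool:
        # imap_unordered yields results as workers finish, which keeps the
        # progress bar moving instead of stalling on the slowest chunk.
        for _ in tqdm(pool.imap_unordered(square, queue), total=len(queue)):
            pass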
fd230741e582f4d580d3a3932965c16bdc7d005412fa13bb06cab83dd64b4535
def _assert_task_ids_match_states(self, dr, task_ids_to_states): 'Helper that asserts task instances with a given id are in a given state' tis = dr.get_task_instances() for ti in tis: try: expected_state = task_ids_to_states[ti.task_id] except KeyError: raise ValueError(f'Invalid task id {ti.task_id} found!') else: self.assertEqual(ti.state, expected_state, f'Task {ti.task_id} has state {ti.state} instead of expected {expected_state}')
Helper that asserts task instances with a given id are in a given state
tests/operators/test_weekday.py
_assert_task_ids_match_states
jiantao01/airflow
15,947
python
def _assert_task_ids_match_states(self, dr, task_ids_to_states): tis = dr.get_task_instances() for ti in tis: try: expected_state = task_ids_to_states[ti.task_id] except KeyError: raise ValueError(f'Invalid task id {ti.task_id} found!') else: self.assertEqual(ti.state, expected_state, f'Task {ti.task_id} has state {ti.state} instead of expected {expected_state}')
def _assert_task_ids_match_states(self, dr, task_ids_to_states): tis = dr.get_task_instances() for ti in tis: try: expected_state = task_ids_to_states[ti.task_id] except KeyError: raise ValueError(f'Invalid task id {ti.task_id} found!') else: self.assertEqual(ti.state, expected_state, f'Task {ti.task_id} has state {ti.state} instead of expected {expected_state}')<|docstring|>Helper that asserts task instances with a given id are in a given state<|endoftext|>
1f49a9084b1eb43d462ea71721187f477c3775589ff3cf92b37196000b32f41f
@parameterized.expand([('with-string', 'Monday'), ('with-enum', WeekDay.MONDAY), ('with-enum-set', {WeekDay.MONDAY}), ('with-enum-list', [WeekDay.MONDAY]), ('with-enum-dict', {WeekDay.MONDAY: 'some_value'}), ('with-enum-set-2-items', {WeekDay.MONDAY, WeekDay.FRIDAY}), ('with-enum-list-2-items', [WeekDay.MONDAY, WeekDay.FRIDAY]), ('with-enum-dict-2-items', {WeekDay.MONDAY: 'some_value', WeekDay.FRIDAY: 'some_value_2'}), ('with-string-set', {'Monday'}), ('with-string-set-2-items', {'Monday', 'Friday'}), ('with-set-mix-types', {'Monday', WeekDay.FRIDAY}), ('with-list-mix-types', ['Monday', WeekDay.FRIDAY]), ('with-dict-mix-types', {'Monday': 'some_value', WeekDay.FRIDAY: 'some_value_2'})]) @freeze_time('2021-01-25') def test_branch_follow_true(self, _, weekday): 'Checks if BranchDayOfWeekOperator follows true branch' print(datetime.datetime.now()) branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true=['branch_1', 'branch_2'], follow_task_ids_if_false='branch_3', week_day=weekday, dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.branch_3 = DummyOperator(task_id='branch_3', dag=self.dag) self.branch_3.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.NONE, 'branch_2': State.NONE, 'branch_3': State.SKIPPED})
Checks if BranchDayOfWeekOperator follows true branch
tests/operators/test_weekday.py
test_branch_follow_true
jiantao01/airflow
15,947
python
@parameterized.expand([('with-string', 'Monday'), ('with-enum', WeekDay.MONDAY), ('with-enum-set', {WeekDay.MONDAY}), ('with-enum-list', [WeekDay.MONDAY]), ('with-enum-dict', {WeekDay.MONDAY: 'some_value'}), ('with-enum-set-2-items', {WeekDay.MONDAY, WeekDay.FRIDAY}), ('with-enum-list-2-items', [WeekDay.MONDAY, WeekDay.FRIDAY]), ('with-enum-dict-2-items', {WeekDay.MONDAY: 'some_value', WeekDay.FRIDAY: 'some_value_2'}), ('with-string-set', {'Monday'}), ('with-string-set-2-items', {'Monday', 'Friday'}), ('with-set-mix-types', {'Monday', WeekDay.FRIDAY}), ('with-list-mix-types', ['Monday', WeekDay.FRIDAY]), ('with-dict-mix-types', {'Monday': 'some_value', WeekDay.FRIDAY: 'some_value_2'})]) @freeze_time('2021-01-25') def test_branch_follow_true(self, _, weekday): print(datetime.datetime.now()) branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true=['branch_1', 'branch_2'], follow_task_ids_if_false='branch_3', week_day=weekday, dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.branch_3 = DummyOperator(task_id='branch_3', dag=self.dag) self.branch_3.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.NONE, 'branch_2': State.NONE, 'branch_3': State.SKIPPED})
@parameterized.expand([('with-string', 'Monday'), ('with-enum', WeekDay.MONDAY), ('with-enum-set', {WeekDay.MONDAY}), ('with-enum-list', [WeekDay.MONDAY]), ('with-enum-dict', {WeekDay.MONDAY: 'some_value'}), ('with-enum-set-2-items', {WeekDay.MONDAY, WeekDay.FRIDAY}), ('with-enum-list-2-items', [WeekDay.MONDAY, WeekDay.FRIDAY]), ('with-enum-dict-2-items', {WeekDay.MONDAY: 'some_value', WeekDay.FRIDAY: 'some_value_2'}), ('with-string-set', {'Monday'}), ('with-string-set-2-items', {'Monday', 'Friday'}), ('with-set-mix-types', {'Monday', WeekDay.FRIDAY}), ('with-list-mix-types', ['Monday', WeekDay.FRIDAY]), ('with-dict-mix-types', {'Monday': 'some_value', WeekDay.FRIDAY: 'some_value_2'})]) @freeze_time('2021-01-25') def test_branch_follow_true(self, _, weekday): print(datetime.datetime.now()) branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true=['branch_1', 'branch_2'], follow_task_ids_if_false='branch_3', week_day=weekday, dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.branch_3 = DummyOperator(task_id='branch_3', dag=self.dag) self.branch_3.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.NONE, 'branch_2': State.NONE, 'branch_3': State.SKIPPED})<|docstring|>Checks if BranchDayOfWeekOperator follows true branch<|endoftext|>
bb96d076cc2b8f1690b4d2dc8a450915df58b0ea8817244e686237c381aac877
@freeze_time('2021-01-25') def test_branch_follow_true_with_execution_date(self): 'Checks if BranchDayOfWeekOperator follows true branch when use_task_execution_day is set' branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Wednesday', use_task_execution_day=True, dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.NONE, 'branch_2': State.SKIPPED})
Checks if BranchDayOfWeekOperator follows true branch when use_task_execution_day is set
tests/operators/test_weekday.py
test_branch_follow_true_with_execution_date
jiantao01/airflow
15,947
python
@freeze_time('2021-01-25') def test_branch_follow_true_with_execution_date(self): branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Wednesday', use_task_execution_day=True, dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.NONE, 'branch_2': State.SKIPPED})
@freeze_time('2021-01-25') def test_branch_follow_true_with_execution_date(self): branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Wednesday', use_task_execution_day=True, dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.NONE, 'branch_2': State.SKIPPED})<|docstring|>Checks if BranchDayOfWeekOperator follows true branch when use_task_execution_day is set<|endoftext|>
1723e77675e2fe85b00039e905df98d38d536e438dc62263a28d9d50bf2dafbc
@freeze_time('2021-01-25') def test_branch_follow_false(self): 'Checks if BranchDayOfWeekOperator follows false branch' branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Sunday', dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.SKIPPED, 'branch_2': State.NONE})
Checks if BranchDayOfWeekOperator follows false branch
tests/operators/test_weekday.py
test_branch_follow_false
jiantao01/airflow
15,947
python
@freeze_time('2021-01-25') def test_branch_follow_false(self): branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Sunday', dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.SKIPPED, 'branch_2': State.NONE})
@freeze_time('2021-01-25') def test_branch_follow_false(self): branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Sunday', dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) self._assert_task_ids_match_states(dr, {'make_choice': State.SUCCESS, 'branch_1': State.SKIPPED, 'branch_2': State.NONE})<|docstring|>Checks if BranchDayOfWeekOperator follows false branch<|endoftext|>
fbc7781aefadc307ae24322895a2222f9ffefcc022049bf018f0081aaa511d27
def test_branch_with_no_weekday(self): 'Check if BranchDayOfWeekOperator raises exception on missing weekday' with self.assertRaises(AirflowException): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', dag=self.dag)
Check if BranchDayOfWeekOperator raises exception on missing weekday
tests/operators/test_weekday.py
test_branch_with_no_weekday
jiantao01/airflow
15,947
python
def test_branch_with_no_weekday(self): with self.assertRaises(AirflowException): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', dag=self.dag)
def test_branch_with_no_weekday(self): with self.assertRaises(AirflowException): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', dag=self.dag)<|docstring|>Check if BranchDayOfWeekOperator raises exception on missing weekday<|endoftext|>
b46d9c67a0a149e5a0f4157e6268a61def57321d8cf02dbf8e680f42b9545b0c
def test_branch_with_invalid_type(self): 'Check if BranchDayOfWeekOperator raises exception on unsupported weekday type' invalid_week_day = 5 with pytest.raises(TypeError, match=f'Unsupported Type for week_day parameter: {type(invalid_week_day)}.Input should be iterable type:str, set, list, dict or Weekday enum type'): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day=invalid_week_day, dag=self.dag)
Check if BranchDayOfWeekOperator raises exception on unsupported weekday type
tests/operators/test_weekday.py
test_branch_with_invalid_type
jiantao01/airflow
15,947
python
def test_branch_with_invalid_type(self): invalid_week_day = 5 with pytest.raises(TypeError, match=f'Unsupported Type for week_day parameter: {type(invalid_week_day)}.Input should be iterable type:str, set, list, dict or Weekday enum type'): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day=invalid_week_day, dag=self.dag)
def test_branch_with_invalid_type(self): invalid_week_day = 5 with pytest.raises(TypeError, match=f'Unsupported Type for week_day parameter: {type(invalid_week_day)}.Input should be iterable type:str, set, list, dict or Weekday enum type'): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day=invalid_week_day, dag=self.dag)<|docstring|>Check if BranchDayOfWeekOperator raises exception on unsupported weekday type<|endoftext|>
9f0afe0c5bb1f8d5382ce38a4faa26870a05e48fc41f580dfa047918fa8ade93
@parameterized.expand([('string', 'Thsday', 'Thsday'), ('list', ['Monday', 'Thsday'], 'Thsday'), ('set', {WeekDay.MONDAY, 'Thsday'}, 'Thsday')]) def test_weekday_branch_invalid_weekday_value(self, _, week_day, fail_msg): 'Check if BranchDayOfWeekOperator raises exception on wrong value of weekday' with pytest.raises(AttributeError, match=f'Invalid Week Day passed: "{fail_msg}"'): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day=week_day, dag=self.dag)
Check if BranchDayOfWeekOperator raises exception on wrong value of weekday
tests/operators/test_weekday.py
test_weekday_branch_invalid_weekday_value
jiantao01/airflow
15,947
python
@parameterized.expand([('string', 'Thsday', 'Thsday'), ('list', ['Monday', 'Thsday'], 'Thsday'), ('set', {WeekDay.MONDAY, 'Thsday'}, 'Thsday')]) def test_weekday_branch_invalid_weekday_value(self, _, week_day, fail_msg): with pytest.raises(AttributeError, match=f'Invalid Week Day passed: "{fail_msg}"'): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day=week_day, dag=self.dag)
@parameterized.expand([('string', 'Thsday', 'Thsday'), ('list', ['Monday', 'Thsday'], 'Thsday'), ('set', {WeekDay.MONDAY, 'Thsday'}, 'Thsday')]) def test_weekday_branch_invalid_weekday_value(self, _, week_day, fail_msg): with pytest.raises(AttributeError, match=f'Invalid Week Day passed: "{fail_msg}"'): BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day=week_day, dag=self.dag)<|docstring|>Check if BranchDayOfWeekOperator raises exception on wrong value of weekday<|endoftext|>
bea75568018ddfb7d5a2f45bffe0e0ae460897362b1cdd987c7ea23961bea810
@freeze_time('2021-01-25') def test_branch_xcom_push_true_branch(self): 'Check if BranchDayOfWeekOperator pushes the value of follow_task_ids_if_true to XCom' branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Monday', dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) tis = dr.get_task_instances() for ti in tis: if (ti.task_id == 'make_choice'): assert (ti.xcom_pull(task_ids='make_choice') == 'branch_1')
Check if BranchDayOfWeekOperator pushes the value of follow_task_ids_if_true to XCom
tests/operators/test_weekday.py
test_branch_xcom_push_true_branch
jiantao01/airflow
15,947
python
@freeze_time('2021-01-25') def test_branch_xcom_push_true_branch(self): branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Monday', dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) tis = dr.get_task_instances() for ti in tis: if (ti.task_id == 'make_choice'): assert (ti.xcom_pull(task_ids='make_choice') == 'branch_1')
@freeze_time('2021-01-25') def test_branch_xcom_push_true_branch(self): branch_op = BranchDayOfWeekOperator(task_id='make_choice', follow_task_ids_if_true='branch_1', follow_task_ids_if_false='branch_2', week_day='Monday', dag=self.dag) self.branch_1.set_upstream(branch_op) self.branch_2.set_upstream(branch_op) self.dag.clear() dr = self.dag.create_dagrun(run_id='manual__', start_date=timezone.utcnow(), execution_date=DEFAULT_DATE, state=State.RUNNING) branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) tis = dr.get_task_instances() for ti in tis: if (ti.task_id == 'make_choice'): assert (ti.xcom_pull(task_ids='make_choice') == 'branch_1')<|docstring|>Check if BranchDayOfWeekOperator pushes the value of follow_task_ids_if_true to XCom<|endoftext|>
8db3b0858974b2ff54be0ee7401712c72f08a22460c444b85a9105ecb0e88c80
def get_player(nickname): 'Retrieve single player instance from database.' player = Player.get_player_stats(nickname) if (player is None): return (jsonify({'message': 'Player instance could not be found.'}), 404) response = player.to_dict() return (jsonify(response), 200)
Retrieve single player instance from database.
src/app/views/players.py
get_player
PetrushynskyiOleksii/shooter-stats
0
python
def get_player(nickname): player = Player.get_player_stats(nickname) if (player is None): return (jsonify({'message': 'Player instance could not be found.'}), 404) response = player.to_dict() return (jsonify(response), 200)
def get_player(nickname): player = Player.get_player_stats(nickname) if (player is None): return (jsonify({'message': 'Player instance could not be found.'}), 404) response = player.to_dict() return (jsonify(response), 200)<|docstring|>Retrieve single player instance from database.<|endoftext|>
d6a5c167cd7e830eb79777909eae88d483ca549c62e517cab94c33fdc7d02709
def create_player(): 'Create new player instance.' json_data = request.get_json() if (not json_data): return (jsonify({'error': 'No required input data provided.'}), 400) (data, errors) = Player.from_dict(json_data) if errors: return (jsonify(errors), 400) player = Player.get(data.get('nickname')) if player: return (jsonify({'error': 'Player with this nickname already exists.'}), 409) player = Player(data) response = player.to_dict() return (jsonify(response), 201)
Create new player instance.
src/app/views/players.py
create_player
PetrushynskyiOleksii/shooter-stats
0
python
def create_player(): json_data = request.get_json() if (not json_data): return (jsonify({'error': 'No required input data provided.'}), 400) (data, errors) = Player.from_dict(json_data) if errors: return (jsonify(errors), 400) player = Player.get(data.get('nickname')) if player: return (jsonify({'error': 'Player with this nickname already exists.'}), 409) player = Player(data) response = player.to_dict() return (jsonify(response), 201)
def create_player(): json_data = request.get_json() if (not json_data): return (jsonify({'error': 'No required input data provided.'}), 400) (data, errors) = Player.from_dict(json_data) if errors: return (jsonify(errors), 400) player = Player.get(data.get('nickname')) if player: return (jsonify({'error': 'Player with this nickname already exists.'}), 409) player = Player(data) response = player.to_dict() return (jsonify(response), 201)<|docstring|>Create new player instance.<|endoftext|>
cde363f26f8e02533a26ab47eb8506e72680907e79900047cc993386e85fd2b7
def get_server_players(endpoint): 'Return list of players on server.' page = request.args.get('page', 1, type=int) order_by = request.args.get('order_by') players = Player.get_all(order_by=order_by, endpoint=endpoint) response = paginate_response(players, page=page) return (jsonify(response), 200)
Return list of players on server.
src/app/views/players.py
get_server_players
PetrushynskyiOleksii/shooter-stats
0
python
def get_server_players(endpoint): page = request.args.get('page', 1, type=int) order_by = request.args.get('order_by') players = Player.get_all(order_by=order_by, endpoint=endpoint) response = paginate_response(players, page=page) return (jsonify(response), 200)
def get_server_players(endpoint): page = request.args.get('page', 1, type=int) order_by = request.args.get('order_by') players = Player.get_all(order_by=order_by, endpoint=endpoint) response = paginate_response(players, page=page) return (jsonify(response), 200)<|docstring|>Return list of players on server.<|endoftext|>
7ad23f72634cc0960b05a92ba64404862859eb152817231a93df88f4cce0af62
def get_players(): 'Return all existing players in database.' page = request.args.get('page', 1, type=int) order_by = request.args.get('order_by') players = Player.get_all(order_by=order_by) response = paginate_response(players, page=page) return (jsonify(response), 200)
Return all existing players in database.
src/app/views/players.py
get_players
PetrushynskyiOleksii/shooter-stats
0
python
def get_players(): page = request.args.get('page', 1, type=int) order_by = request.args.get('order_by') players = Player.get_all(order_by=order_by) response = paginate_response(players, page=page) return (jsonify(response), 200)
def get_players(): page = request.args.get('page', 1, type=int) order_by = request.args.get('order_by') players = Player.get_all(order_by=order_by) response = paginate_response(players, page=page) return (jsonify(response), 200)<|docstring|>Return all existing players in database.<|endoftext|>
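The four shooter-stats view records above all funnel query results through a paginate_response helper whose body is not included in this dump. A purely hypothetical sketch of a helper matching that call shape — the real implementation and response keys may differ; items are only assumed to expose to_dict(), as the Player records above do:

def paginate_response(items, page, per_page=10):
    # Slice one page out of an already-ordered result set and serialize it.
    start = (page - 1) * per_page
    chunk = items[start:start + per_page]
    return {
        'page': page,
        'per_page': per_page,
        'total': len(items),
        'objects': [item.to_dict() for item in chunk],
    }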
52bfe408f1490cfdbad4316635b8b44f625e154bd81c4cf4e3b95c62fade77a1
def __init__(self, lam, validate_args=False, allow_nan_stats=True, name='Poisson'): 'Construct Poisson distributions.\n\n Args:\n lam: Floating point tensor, the rate parameter of the\n distribution(s). `lam` must be positive.\n validate_args: `Boolean`, default `False`. Whether to assert that\n `lam > 0` as well as inputs to pmf computations are non-negative\n integers. If validate_args is `False`, then `pmf` computations might\n return `NaN`, but can be evaluated at any real value.\n allow_nan_stats: `Boolean`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: A name for this distribution.\n ' parameters = locals() parameters.pop('self') with ops.name_scope(name, values=[lam]) as ns: with ops.control_dependencies(([check_ops.assert_positive(lam)] if validate_args else [])): self._lam = array_ops.identity(lam, name='lam') super(Poisson, self).__init__(dtype=self._lam.dtype, is_continuous=False, is_reparameterized=False, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._lam], name=ns)
Construct Poisson distributions. Args: lam: Floating point tensor, the rate parameter of the distribution(s). `lam` must be positive. validate_args: `Boolean`, default `False`. Whether to assert that `lam > 0` as well as inputs to pmf computations are non-negative integers. If validate_args is `False`, then `pmf` computations might return `NaN`, but can be evaluated at any real value. allow_nan_stats: `Boolean`, default `True`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: A name for this distribution.
tensorflow/contrib/distributions/python/ops/poisson.py
__init__
HowieYang0/notmnist-ex
101
python
def __init__(self, lam, validate_args=False, allow_nan_stats=True, name='Poisson'): 'Construct Poisson distributions.\n\n Args:\n lam: Floating point tensor, the rate parameter of the\n distribution(s). `lam` must be positive.\n validate_args: `Boolean`, default `False`. Whether to assert that\n `lam > 0` as well as inputs to pmf computations are non-negative\n integers. If validate_args is `False`, then `pmf` computations might\n return `NaN`, but can be evaluated at any real value.\n allow_nan_stats: `Boolean`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: A name for this distribution.\n ' parameters = locals() parameters.pop('self') with ops.name_scope(name, values=[lam]) as ns: with ops.control_dependencies(([check_ops.assert_positive(lam)] if validate_args else [])): self._lam = array_ops.identity(lam, name='lam') super(Poisson, self).__init__(dtype=self._lam.dtype, is_continuous=False, is_reparameterized=False, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._lam], name=ns)
def __init__(self, lam, validate_args=False, allow_nan_stats=True, name='Poisson'): 'Construct Poisson distributions.\n\n Args:\n lam: Floating point tensor, the rate parameter of the\n distribution(s). `lam` must be positive.\n validate_args: `Boolean`, default `False`. Whether to assert that\n `lam > 0` as well as inputs to pmf computations are non-negative\n integers. If validate_args is `False`, then `pmf` computations might\n return `NaN`, but can be evaluated at any real value.\n allow_nan_stats: `Boolean`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: A name for this distribution.\n ' parameters = locals() parameters.pop('self') with ops.name_scope(name, values=[lam]) as ns: with ops.control_dependencies(([check_ops.assert_positive(lam)] if validate_args else [])): self._lam = array_ops.identity(lam, name='lam') super(Poisson, self).__init__(dtype=self._lam.dtype, is_continuous=False, is_reparameterized=False, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._lam], name=ns)<|docstring|>Construct Poisson distributions. Args: lam: Floating point tensor, the rate parameter of the distribution(s). `lam` must be positive. validate_args: `Boolean`, default `False`. Whether to assert that `lam > 0` as well as inputs to pmf computations are non-negative integers. If validate_args is `False`, then `pmf` computations might return `NaN`, but can be evaluated at any real value. allow_nan_stats: `Boolean`, default `True`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. name: A name for this distribution.<|endoftext|>
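A minimal construction sketch for this Poisson class, assuming a TensorFlow 1.x environment where tf.contrib.distributions still exists (contrib was removed in TF 2.x); the method names below follow the contrib API of that era.

import tensorflow as tf  # assumes TF 1.x with contrib available

poisson = tf.contrib.distributions.Poisson(lam=3.0, validate_args=True)
with tf.Session() as sess:
    print(sess.run(poisson.mean()))   # equals the rate parameter, 3.0
    print(sess.run(poisson.pmf(2.)))  # P(X == 2) under rate 3.0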
d173330f2f8a510b3f59f9a82e65c77c7f98f91cb1ba8fc943f5b9165a66a080
@property def lam(self): 'Rate parameter.' return self._lam
Rate parameter.
tensorflow/contrib/distributions/python/ops/poisson.py
lam
HowieYang0/notmnist-ex
101
python
@property def lam(self): return self._lam
@property def lam(self): return self._lam<|docstring|>Rate parameter.<|endoftext|>
d781ce9148ed7189c38a90a1950c52b708afb351628bb90789216bf7be400c71
def __init__(self, ec_address=None, fct_address=None, host=None, username=None, password=None, certfile=None): '\n Instantiate a new API client.\n\n Args:\n ec_address (str): A default entry credit address to use for\n transactions. Credits will be spent from this address.\n fct_address (str): A default factoid address to use for\n transactions.\n host (str): Hostname, including http(s)://, of the node\n username (str): RPC username for protected APIs.\n password (str): RPC password for protected APIs.\n certfile (str): Path to certificate file to verify for TLS\n connections (mostly untested).\n ' self.ec_address = ec_address self.fct_address = fct_address self.version = 'v1' if host: self.host = host self.session = APISession() if (username and password): self.session.init_basic_auth(username, password) if certfile: self.session.init_tls(certfile)
Instantiate a new API client. Args: ec_address (str): A default entry credit address to use for transactions. Credits will be spent from this address. fct_address (str): A default factoid address to use for transactions. host (str): Hostname, including http(s)://, of the node username (str): RPC username for protected APIs. password (str): RPC password for protected APIs. certfile (str): Path to certificate file to verify for TLS connections (mostly untested).
pegnet_py/client.py
__init__
pegnet/pegnet-py
2
python
def __init__(self, ec_address=None, fct_address=None, host=None, username=None, password=None, certfile=None): '\n Instantiate a new API client.\n\n Args:\n ec_address (str): A default entry credit address to use for\n transactions. Credits will be spent from this address.\n fct_address (str): A default factoid address to use for\n transactions.\n host (str): Hostname, including http(s)://, of the node\n username (str): RPC username for protected APIs.\n password (str): RPC password for protected APIs.\n certfile (str): Path to certificate file to verify for TLS\n connections (mostly untested).\n ' self.ec_address = ec_address self.fct_address = fct_address self.version = 'v1' if host: self.host = host self.session = APISession() if (username and password): self.session.init_basic_auth(username, password) if certfile: self.session.init_tls(certfile)
def __init__(self, ec_address=None, fct_address=None, host=None, username=None, password=None, certfile=None): '\n Instantiate a new API client.\n\n Args:\n ec_address (str): A default entry credit address to use for\n transactions. Credits will be spent from this address.\n fct_address (str): A default factoid address to use for\n transactions.\n host (str): Hostname, including http(s)://, of the node\n username (str): RPC username for protected APIs.\n password (str): RPC password for protected APIs.\n certfile (str): Path to certificate file to verify for TLS\n connections (mostly untested).\n ' self.ec_address = ec_address self.fct_address = fct_address self.version = 'v1' if host: self.host = host self.session = APISession() if (username and password): self.session.init_basic_auth(username, password) if certfile: self.session.init_tls(certfile)<|docstring|>Instantiate a new API client. Args: ec_address (str): A default entry credit address to use for transactions. Credits will be spent from this address. fct_address (str): A default factoid address to use for transactions. host (str): Hostname, including http(s)://, of the node username (str): RPC username for protected APIs. password (str): RPC password for protected APIs. certfile (str): Path to certificate file to verify for TLS connections (mostly untested).<|endoftext|>
c9fcf5340b542d4b4a5e26baee3188ebd302bce5ba5f1479f70a213f02c69679
def get_sync_status(self): 'Retrieve the current sync status of the node.' return self._request('get-sync-status')
Retrieve the current sync status of the node.
pegnet_py/client.py
get_sync_status
pegnet/pegnet-py
2
python
def get_sync_status(self): return self._request('get-sync-status')
def get_sync_status(self): return self._request('get-sync-status')<|docstring|>Retrieve the current sync status of the node.<|endoftext|>
8050811788d319d002495b97bee3a6504e450be617e214c1305fa79a2e33a7bc
def get_balances(self, address: FactoidAddress): 'Retrieve all current pegnet balances for the given address' return self._request('get-pegnet-balances', {'address': address.to_string()})
Retrieve all current pegnet balances for the given address
pegnet_py/client.py
get_balances
pegnet/pegnet-py
2
python
def get_balances(self, address: FactoidAddress): return self._request('get-pegnet-balances', {'address': address.to_string()})
def get_balances(self, address: FactoidAddress): return self._request('get-pegnet-balances', {'address': address.to_string()})<|docstring|>Retrieve all current pegnet balances for the given address<|endoftext|>
eb6bcf9cf23964e195bb0e3640d967df65ec9d1c54a19973cf2dc0f643a70f9e
def get_issuance(self): 'Retrieve the token issuance for all pegnet assets' return self._request('get-pegnet-issuance')
Retrieve the token issuance for all pegnet assets
pegnet_py/client.py
get_issuance
pegnet/pegnet-py
2
python
def get_issuance(self): return self._request('get-pegnet-issuance')
def get_issuance(self): return self._request('get-pegnet-issuance')<|docstring|>Retrieve the token issuance for all pegnet assets<|endoftext|>
da6bb3734ffa12f3f409b5e93d6567be1f3eb4fd98be7ac84317fd4299b7513c
def get_rates(self, height: int): 'Retrieve the PegNet conversion rates for a given height' return self._request('get-pegnet-rates', {'height': height})
Retrieve the PegNet conversion rates for a given height
pegnet_py/client.py
get_rates
pegnet/pegnet-py
2
python
def get_rates(self, height: int): return self._request('get-pegnet-rates', {'height': height})
def get_rates(self, height: int): return self._request('get-pegnet-rates', {'height': height})<|docstring|>Retrieve the PegNet conversion rates for a given height<|endoftext|>
b4e15de6e9939306b2ad2705142d4677ecd803a22963f90e4acecc8e9ed8146f
def get_tx_status(self, entry_hash: Union[(bytes, str)]): 'Retrieve the status for a PegNet transaction' return self._request('get-transaction-status', {'entryhash': (entry_hash.hex() if (type(entry_hash) is bytes) else entry_hash)})
Retrieve the status for a PegNet transaction
pegnet_py/client.py
get_tx_status
pegnet/pegnet-py
2
python
def get_tx_status(self, entry_hash: Union[(bytes, str)]): return self._request('get-transaction-status', {'entryhash': (entry_hash.hex() if (type(entry_hash) is bytes) else entry_hash)})
def get_tx_status(self, entry_hash: Union[(bytes, str)]): return self._request('get-transaction-status', {'entryhash': (entry_hash.hex() if (type(entry_hash) is bytes) else entry_hash)})<|docstring|>Retrieve the status for a PegNet transaction<|endoftext|>
10f2089f4a5ef1f39a0741d4a3ddaeea98e419137bba7e3e93b1e81d2e590661
def get_txs(self, entry_hash: Union[(bytes, str)]=None, address: str=None, height: int=None, offset: int=0, desc: bool=False, transfer: bool=True, conversion: bool=True, coinbase: bool=True, burn: bool=True): 'Retrieve the transactions associated with the provided entry_hash, address, or height' request_params = {} if entry_hash: request_params['entryhash'] = (entry_hash.hex() if (type(entry_hash) is bytes) else entry_hash) elif address: request_params['address'] = address elif height: request_params['height'] = height else: raise ValueError('One of entry_hash, address or height must be specified') request_params.update({'offset': offset, 'desc': desc, 'transfer': transfer, 'conversion': conversion, 'coinbase': coinbase, 'burn': burn}) return self._request('get-transactions', request_params)
Retrieve the transactions associated with the provided entry_hash, address, or height
pegnet_py/client.py
get_txs
pegnet/pegnet-py
2
python
def get_txs(self, entry_hash: Union[(bytes, str)]=None, address: str=None, height: int=None, offset: int=0, desc: bool=False, transfer: bool=True, conversion: bool=True, coinbase: bool=True, burn: bool=True): request_params = {} if entry_hash: request_params['entryhash'] = (entry_hash.hex() if (type(entry_hash) is bytes) else entry_hash) elif address: request_params['address'] = address elif height: request_params['height'] = height else: raise ValueError('One of entry_hash, address or height must be specified') request_params.update({'offset': offset, 'desc': desc, 'transfer': transfer, 'conversion': conversion, 'coinbase': coinbase, 'burn': burn}) return self._request('get-transactions', request_params)
def get_txs(self, entry_hash: Union[(bytes, str)]=None, address: str=None, height: int=None, offset: int=0, desc: bool=False, transfer: bool=True, conversion: bool=True, coinbase: bool=True, burn: bool=True): request_params = {} if entry_hash: request_params['entryhash'] = (entry_hash.hex() if (type(entry_hash) is bytes) else entry_hash) elif address: request_params['address'] = address elif height: request_params['height'] = height else: raise ValueError('One of entry_hash, address or height must be specified') request_params.update({'offset': offset, 'desc': desc, 'transfer': transfer, 'conversion': conversion, 'coinbase': coinbase, 'burn': burn}) return self._request('get-transactions', request_params)<|docstring|>Retrieve the transactions associated with the provided entry_hash, address, or height<|endoftext|>
e337f5d7a29b64c14cf334fba16ab2838e8f7a38ef5a8bfb9d4f39cbc6dbcbac
def send_transaction(self, chain_id: bytes, ext_ids: List[bytes], content: bytes): 'Send a transaction with the specified external ids and content' return self._request('send-transaction', {'chainid': chain_id.hex(), 'extids': [x.hex() for x in ext_ids], 'content': content.hex()})
Send a transaction with the specified external ids and content
pegnet_py/client.py
send_transaction
pegnet/pegnet-py
2
python
def send_transaction(self, chain_id: bytes, ext_ids: List[bytes], content: bytes): return self._request('send-transaction', {'chainid': chain_id.hex(), 'extids': [x.hex() for x in ext_ids], 'content': content.hex()})
def send_transaction(self, chain_id: bytes, ext_ids: List[bytes], content: bytes): return self._request('send-transaction', {'chainid': chain_id.hex(), 'extids': [x.hex() for x in ext_ids], 'content': content.hex()})<|docstring|>Send a transaction with the specified external ids and content<|endoftext|>
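The eight pegnet_py/client.py records above all belong to one API client, so a composed usage sketch may help. The class name, import, host URL, and argument values are assumptions; only the method bodies come from the records.

from pegnet_py.client import Client  # assumed class name, not shown in the records

client = Client(host='http://localhost:8070')  # placeholder node URL
print(client.get_sync_status())
print(client.get_rates(height=213000))         # illustrative block height
print(client.get_tx_status(bytes(32)))         # all-zero placeholder entry hash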
f22540f7b0f38716049dc92735a68c09c5c3ffa1d99770e44bbab922d698d61b
def dot(colour, interval): 'Continually light a randomly-chosen pixel with a weighted interval.' while True: index = random.randint(0, (COUNT - 1)) payload = json.dumps({'colour': colour}) requests.post(f'http://{LIGHTSHOST}:5000/lights/single/{index}', data=payload) time.sleep((random.random() * interval))
Continually light a randomly-chosen pixel with a weighted interval.
xmas/scripts/lamplighter.py
dot
pikesley/christmas-pixels
0
python
def dot(colour, interval): while True: index = random.randint(0, (COUNT - 1)) payload = json.dumps({'colour': colour}) requests.post(f'http://{LIGHTSHOST}:5000/lights/single/{index}', data=payload) time.sleep((random.random() * interval))
def dot(colour, interval): while True: index = random.randint(0, (COUNT - 1)) payload = json.dumps({'colour': colour}) requests.post(f'http://{LIGHTSHOST}:5000/lights/single/{index}', data=payload) time.sleep((random.random() * interval))<|docstring|>Continually light a randomly-chosen pixel with a weighted interval.<|endoftext|>
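dot() loops forever, so driving more than one colour means one thread per call; a sketch follows. The colour names and intervals are illustrative, and COUNT/LIGHTSHOST are module-level names the function assumes.

import threading  # sketch: run each endless dot() loop on its own daemon thread

for colour, interval in [('red', 1.0), ('green', 2.0)]:
    threading.Thread(target=dot, args=(colour, interval), daemon=True).start()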
325218d8e65ab592ae9ce87b345c9ae0ff8a3db26951b071569aa7f400543374
@rutils.log_task_wrapper(LOG.info, _('Enter context: `EC2 creds`')) def setup(self): 'This method is called before the task starts.' try: for user in self.context['users']: osclient = osclients.Clients(user['endpoint']) keystone = osclient.keystone() creds = keystone.ec2.list(user['id']) if (not creds): creds = keystone.ec2.create(user['id'], user['tenant_id']) else: creds = creds[0] url = keystone.service_catalog.url_for(service_type='ec2') url_parts = url.rpartition(':') nova_url = ((url_parts[0] + ':8773/') + url_parts[2].partition('/')[2]) self.context['users'][0]['ec2args'] = {'region': 'RegionOne', 'url': url, 'nova_url': nova_url, 'access': creds.access, 'secret': creds.secret} if (self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON): for (user, tenant_id) in rutils.iterate_per_tenants(self.context['users']): body = {'quota': {'router': (- 1), 'floatingip': (- 1)}} self.net_wrapper.client.update_quota(tenant_id, body) network = self.net_wrapper.create_network(tenant_id, add_router=True, subnets_num=1) self.context['tenants'][tenant_id]['network'] = network except Exception as e: msg = ("Can't prepare ec2 client: %s" % e.message) if logging.is_debug(): LOG.exception(msg) else: LOG.warning(msg)
This method is called before the task starts.
rally-scenarios/plugins/context_plugin_ec2_creds.py
setup
JioCloudVPC/compute-ec2-api-vagrant
0
python
@rutils.log_task_wrapper(LOG.info, _('Enter context: `EC2 creds`')) def setup(self): try: for user in self.context['users']: osclient = osclients.Clients(user['endpoint']) keystone = osclient.keystone() creds = keystone.ec2.list(user['id']) if (not creds): creds = keystone.ec2.create(user['id'], user['tenant_id']) else: creds = creds[0] url = keystone.service_catalog.url_for(service_type='ec2') url_parts = url.rpartition(':') nova_url = ((url_parts[0] + ':8773/') + url_parts[2].partition('/')[2]) self.context['users'][0]['ec2args'] = {'region': 'RegionOne', 'url': url, 'nova_url': nova_url, 'access': creds.access, 'secret': creds.secret} if (self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON): for (user, tenant_id) in rutils.iterate_per_tenants(self.context['users']): body = {'quota': {'router': (- 1), 'floatingip': (- 1)}} self.net_wrapper.client.update_quota(tenant_id, body) network = self.net_wrapper.create_network(tenant_id, add_router=True, subnets_num=1) self.context['tenants'][tenant_id]['network'] = network except Exception as e: msg = ("Can't prepare ec2 client: %s" % e.message) if logging.is_debug(): LOG.exception(msg) else: LOG.warning(msg)
@rutils.log_task_wrapper(LOG.info, _('Enter context: `EC2 creds`')) def setup(self): try: for user in self.context['users']: osclient = osclients.Clients(user['endpoint']) keystone = osclient.keystone() creds = keystone.ec2.list(user['id']) if (not creds): creds = keystone.ec2.create(user['id'], user['tenant_id']) else: creds = creds[0] url = keystone.service_catalog.url_for(service_type='ec2') url_parts = url.rpartition(':') nova_url = ((url_parts[0] + ':8773/') + url_parts[2].partition('/')[2]) self.context['users'][0]['ec2args'] = {'region': 'RegionOne', 'url': url, 'nova_url': nova_url, 'access': creds.access, 'secret': creds.secret} if (self.net_wrapper.SERVICE_IMPL == consts.Service.NEUTRON): for (user, tenant_id) in rutils.iterate_per_tenants(self.context['users']): body = {'quota': {'router': (- 1), 'floatingip': (- 1)}} self.net_wrapper.client.update_quota(tenant_id, body) network = self.net_wrapper.create_network(tenant_id, add_router=True, subnets_num=1) self.context['tenants'][tenant_id]['network'] = network except Exception as e: msg = ("Can't prepare ec2 client: %s" % e.message) if logging.is_debug(): LOG.exception(msg) else: LOG.warning(msg)<|docstring|>This method is called before the task starts.<|endoftext|>
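The nova_url construction in setup() is easiest to follow with a concrete value; a worked sketch with an illustrative Keystone-issued EC2 URL:

url = 'http://10.0.0.5:8788/services/Cloud'  # illustrative input, not from the record
url_parts = url.rpartition(':')              # ('http://10.0.0.5', ':', '8788/services/Cloud')
nova_url = (url_parts[0] + ':8773/') + url_parts[2].partition('/')[2]
print(nova_url)                              # http://10.0.0.5:8773/services/Cloud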
20ca38b19b648ae6df3481e1701164b7f89e88de05eea9d95bbc42bb3f99f918
def _get_meta(exception): 'Convert an unhandled error into a managed error.' meta = EXCEPTION_MAP.get(type(exception)) if meta: return meta for (exception_type, meta) in EXCEPTION_MAP.items(): if isinstance(exception, exception_type): return meta return EXCEPTION_MAP.get(UnhandledException)
Convert an unhandled error into a managed error.
via/views/exceptions.py
_get_meta
mattdricker/via-1
0
python
def _get_meta(exception): meta = EXCEPTION_MAP.get(type(exception)) if meta: return meta for (exception_type, meta) in EXCEPTION_MAP.items(): if isinstance(exception, exception_type): return meta return EXCEPTION_MAP.get(UnhandledException)
def _get_meta(exception): meta = EXCEPTION_MAP.get(type(exception)) if meta: return meta for (exception_type, meta) in EXCEPTION_MAP.items(): if isinstance(exception, exception_type): return meta return EXCEPTION_MAP.get(UnhandledException)<|docstring|>Convert an unhandled error into a managed error.<|endoftext|>
0aa7d74512e53f4cb5cfc0a4a00711f38d5c7483f0d32ff90f20805e69793bb3
@exception_view_config(Exception, renderer='via:templates/exception.html.jinja2') @exception_view_config(HTTPError, renderer='via:templates/exception.html.jinja2') def all_exceptions(exc, request): 'Catch all errors (Pyramid or Python) and display an HTML page.' try: status_code = exc.status_int except AttributeError: status_code = HTTPExpectationFailed.code request.response.status_int = status_code exception_meta = _get_meta(exc) exception_meta.update({'class': exc.__class__.__name__, 'details': str(exc)}) return {'status_code': status_code, 'exception': exception_meta, 'url': {'original': request.GET.get('url', None), 'retry': request.url}, 'static_url': request.static_url}
Catch all errors (Pyramid or Python) and display an HTML page.
via/views/exceptions.py
all_exceptions
mattdricker/via-1
0
python
@exception_view_config(Exception, renderer='via:templates/exception.html.jinja2') @exception_view_config(HTTPError, renderer='via:templates/exception.html.jinja2') def all_exceptions(exc, request): try: status_code = exc.status_int except AttributeError: status_code = HTTPExpectationFailed.code request.response.status_int = status_code exception_meta = _get_meta(exc) exception_meta.update({'class': exc.__class__.__name__, 'details': str(exc)}) return {'status_code': status_code, 'exception': exception_meta, 'url': {'original': request.GET.get('url', None), 'retry': request.url}, 'static_url': request.static_url}
@exception_view_config(Exception, renderer='via:templates/exception.html.jinja2') @exception_view_config(HTTPError, renderer='via:templates/exception.html.jinja2') def all_exceptions(exc, request): try: status_code = exc.status_int except AttributeError: status_code = HTTPExpectationFailed.code request.response.status_int = status_code exception_meta = _get_meta(exc) exception_meta.update({'class': exc.__class__.__name__, 'details': str(exc)}) return {'status_code': status_code, 'exception': exception_meta, 'url': {'original': request.GET.get('url', None), 'retry': request.url}, 'static_url': request.static_url}<|docstring|>Catch all errors (Pyramid or Python) and display an HTML page.<|endoftext|>
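Both views above consume an EXCEPTION_MAP defined elsewhere in via/views/exceptions.py; a sketch of the shape _get_meta implies follows. The metadata field names are assumptions inferred from how the dict is read, not the module's real keys.

EXCEPTION_MAP = {
    # each value must be a dict: all_exceptions() update()s it with
    # 'class' and 'details' before rendering the template
    UnhandledException: {
        'title': 'Something went wrong',                 # assumed field
        'description': 'An unexpected error occurred.',  # assumed field
    },
}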
a036caed3e047b63ef5dc48cddc80722225fe993b6ddf313044f66bcfed35c3c
def data_cleaning(fname, platforms=[], merge_keywords=[], keywords=[], del_keywords=[], start_year=2004): '\n Filtering out unwanted game data.\n \n Args:\n :param fname: string. Name of data file.\n :param platforms: list of strings. Filtering out games other than these platforms.\n :param merge_keywords: list containing lists with two elements. Fill the first element with the value of the second element.\n :param keywords: list of strings. Filtering out games lacking these values.\n :param del_keywords: list of strings. Deleting columns.\n :param start_year: integer. Filtering out games released before this year.\n Return:\n A cleaned dataframe\n ' assert isinstance(fname, str), 'fname is not a string' assert isinstance(platforms, list), 'platforms is not a list' assert isinstance(merge_keywords, list), 'merge_keywords is not a list' assert isinstance(keywords, list), 'keywords is not a list' assert isinstance(del_keywords, list), 'del_keywords is not a list' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape print(f'There are {nrow} rows and {ncol} columns in raw data') df.drop(del_keywords, axis=1, inplace=True) for i_merge_keywords in merge_keywords: for i in range(nrow): if pd.isna(df[i_merge_keywords[0]][i]): df.loc[(i, i_merge_keywords[0])] = df.loc[(i, i_merge_keywords[1])] del_line = [] for i in range(nrow): if (df.Year[i] < start_year): del_line.append(i) elif (df.Platform[i] not in platforms): del_line.append(i) else: for i_keywords in keywords: if (pd.isna(df[i_keywords][i]) or (df[i_keywords][i] == 'Unknown') or (df[i_keywords][i] == 'NaN') or (df[i_keywords][i] is None) or (df[i_keywords][i] == '') or (df[i_keywords][i] == 'nan')): del_line.append(i) break df.drop(list(set(del_line)), inplace=True) (nrow, ncol) = df.shape print(f'There are {nrow} rows and {ncol} columns in refined data') df.to_csv('../../../conf/video_games/output/vgsales-refined-data.csv', index=False) print('Genre includes', df['Genre'].value_counts().to_dict()) print('ESRB_rating includes', df['ESRB_Rating'].value_counts().to_dict()) print('Platform includes', df['Platform'].value_counts().to_dict()) print('Publisher includes', df['Publisher'].value_counts().to_dict()) print('Year includes', df['Year'].value_counts().to_dict()) return df
Filtering out unwanted game data. Args: :param fname: string. Name of data file. :param platforms: list of strings. Filtering out games other than these platforms. :param merge_keywords: list containing lists with two elements. Fill the first element with the value of the second element. :param keywords: list of strings. Filtering out games lacking these values. :param del_keywords: list of strings. Deleting columns. :param start_year: integer. Filtering out games released before this year. Return: A cleaned dataframe
src/analytics/video_games/data_preprocessing.py
data_cleaning
manjotms10/google-trends-analytics
6
python
def data_cleaning(fname, platforms=[], merge_keywords=[], keywords=[], del_keywords=[], start_year=2004): assert isinstance(fname, str), 'fname is not a string' assert isinstance(platforms, list), 'platforms is not a list' assert isinstance(merge_keywords, list), 'merge_keywords is not a list' assert isinstance(keywords, list), 'keywords is not a list' assert isinstance(del_keywords, list), 'del_keywords is not a list' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape print(f'There are {nrow} rows and {ncol} columns in raw data') df.drop(del_keywords, axis=1, inplace=True) for i_merge_keywords in merge_keywords: for i in range(nrow): if pd.isna(df[i_merge_keywords[0]][i]): df.loc[(i, i_merge_keywords[0])] = df.loc[(i, i_merge_keywords[1])] del_line = [] for i in range(nrow): if (df.Year[i] < start_year): del_line.append(i) elif (df.Platform[i] not in platforms): del_line.append(i) else: for i_keywords in keywords: if (pd.isna(df[i_keywords][i]) or (df[i_keywords][i] == 'Unknown') or (df[i_keywords][i] == 'NaN') or (df[i_keywords][i] is None) or (df[i_keywords][i] == '') or (df[i_keywords][i] == 'nan')): del_line.append(i) break df.drop(list(set(del_line)), inplace=True) (nrow, ncol) = df.shape print(f'There are {nrow} rows and {ncol} columns in refined data') df.to_csv('../../../conf/video_games/output/vgsales-refined-data.csv', index=False) print('Genre includes', df['Genre'].value_counts().to_dict()) print('ESRB_rating includes', df['ESRB_Rating'].value_counts().to_dict()) print('Platform includes', df['Platform'].value_counts().to_dict()) print('Publisher includes', df['Publisher'].value_counts().to_dict()) print('Year includes', df['Year'].value_counts().to_dict()) return df
def data_cleaning(fname, platforms=[], merge_keywords=[], keywords=[], del_keywords=[], start_year=2004): '\n Filtering out unwanted game data.\n \n Args:\n :param fname: string. Name of data file.\n :param platforms: list of strings. Filtering out games other than these platforms.\n :param merge_keywords: list containing lists with two elements. Fill the first element with the value of the second element.\n :param keywords: list of strings. Filtering out games lacking these values.\n :param del_keywords: list of strings. Deleting columns.\n :param start_year: integer. Filtering out games released before this year.\n Return:\n A cleaned dataframe\n ' assert isinstance(fname, str), 'fname is not a string' assert isinstance(platforms, list), 'platforms is not a list' assert isinstance(merge_keywords, list), 'merge_keywords is not a list' assert isinstance(keywords, list), 'keywords is not a list' assert isinstance(del_keywords, list), 'del_keywords is not a list' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape print(f'There are {nrow} rows and {ncol} columns in raw data') df.drop(del_keywords, axis=1, inplace=True) for i_merge_keywords in merge_keywords: for i in range(nrow): if pd.isna(df[i_merge_keywords[0]][i]): df.loc[(i, i_merge_keywords[0])] = df.loc[(i, i_merge_keywords[1])] del_line = [] for i in range(nrow): if (df.Year[i] < start_year): del_line.append(i) elif (df.Platform[i] not in platforms): del_line.append(i) else: for i_keywords in keywords: if (pd.isna(df[i_keywords][i]) or (df[i_keywords][i] == 'Unknown') or (df[i_keywords][i] == 'NaN') or (df[i_keywords][i] is None) or (df[i_keywords][i] == '') or (df[i_keywords][i] == 'nan')): del_line.append(i) break df.drop(list(set(del_line)), inplace=True) (nrow, ncol) = df.shape print(f'There are {nrow} rows and {ncol} columns in refined data') df.to_csv('../../../conf/video_games/output/vgsales-refined-data.csv', index=False) print('Genre includes', df['Genre'].value_counts().to_dict()) print('ESRB_rating includes', df['ESRB_Rating'].value_counts().to_dict()) print('Platform includes', df['Platform'].value_counts().to_dict()) print('Publisher includes', df['Publisher'].value_counts().to_dict()) print('Year includes', df['Year'].value_counts().to_dict()) return df<|docstring|>Filtering out unwanted game data. Args: :param fname: string. Name of data file. :param platforms: list of strings. Filtering out games other than these platforms. :param merge_keywords: list containing lists with two elements. Fill the first element with the value of the second element. :param keywords: list of strings. Filtering out games lacking these values. :param del_keywords: list of strings. Deleting columns. :param start_year: integer. Filtering out games released before this year. Return: A cleaned dataframe<|endoftext|>
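A minimal call sketch for data_cleaning(); the input file name and all keyword lists below are illustrative assumptions, not values from the record. Note the function also writes its result to a hard-coded CSV path under conf/video_games/output/.

df = data_cleaning(
    'vgsales.csv',                                       # assumed input file
    platforms=['PS4', 'XOne', 'NS'],
    merge_keywords=[['Global_Sales', 'Total_Shipped']],  # fill column 0 from column 1
    keywords=['Genre', 'Publisher', 'ESRB_Rating'],
    del_keywords=['Rank'],
    start_year=2010,
)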