Dataset schema (one row per function):
body_hash: string (length 64) | body: string (23 to 109k chars) | docstring: string (1 to 57k chars) | path: string (4 to 198 chars) | name: string (1 to 115 chars) | repository_name: string (7 to 111 chars) | repository_stars: float64 (0 to 191k) | lang: 1 class ("python") | body_without_docstring: string (14 to 108k chars) | unified: string (45 to 133k chars)

The body_without_docstring and unified columns duplicate the body (with the docstring stripped, or re-attached via <|docstring|>/<|endoftext|> markers, respectively). Each record below gives the row metadata followed by the function.
# body_hash: 0eeb6ed1fec646ea9834661ae1fd15fc7d2dbb729b6e1f013155448e47921428
# path: industry/industry.py | name: parse_table | repo: linusqzdeng/macroind-crawler | stars: 0 | lang: python
def parse_table(table, idx_map: dict):
    """Remove unnecessary rows and convert
    undesirable string formats to float.
    """
    # Drop rows that are entirely NaN.
    nan_rows = table.isnull().all(axis=1)
    table = table.loc[~nan_rows]
    # Normalize the index: full-width -> half-width, then map known labels.
    idx = table.index.to_list()
    idx = DBC2SBC(idx)
    for i, ix in enumerate(idx):
        if ix in idx_map.keys():
            idx[i] = idx_map[ix]
    for i, ix in enumerate(idx):
        try:
            # Keep only the Chinese text after the colon, when present.
            idx[i] = re.search(':([一-龥].*)', ix).group(1)
        except AttributeError:
            idx[i] = ix
    table.index = idx
    # Try the four-column layout first; fall back to two columns on mismatch.
    try:
        rename_cols = ['本月值', '本月同比', '累计值', '累计同比']
        if len(table.columns) == len(rename_cols) + 1:
            table = table.iloc[:, 1:]
        table.columns = rename_cols
    except ValueError:
        rename_cols = ['累计值', '累计同比']
        if len(table.columns) == len(rename_cols) + 1:
            table = table.iloc[:, 1:]
        table.columns = rename_cols
    # Extract the signed numeric part of each cell and cast to float.
    for col in table.columns:
        table[col] = table[col].str.extract(r'((\-|\+)?\d+(\.\d+)?)')[0]
        table[col] = table[col].astype('float64')
    return table
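A self-contained illustration of the final extraction-and-cast step above (the toy DataFrame and values are made up, not crawler output):

import pandas as pd

df = pd.DataFrame({'累计同比': ['+5.3%', '-0.7%', '增长2.1']})
# Pull out the signed number, drop the surrounding text, and cast to float.
df['累计同比'] = df['累计同比'].str.extract(r'((\-|\+)?\d+(\.\d+)?)')[0].astype('float64')
print(df['累计同比'].tolist())  # [5.3, -0.7, 2.1]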
# body_hash: d95c5656419f741b1052006a88179e44bd6d22883a0ff5724d5d2fc86122242a
# path: industry/industry.py | name: DBC2SBC | repo: linusqzdeng/macroind-crawler | stars: 0 | lang: python
def DBC2SBC(ustring_list):
    """全角转半角: convert full-width characters to half-width."""
    ustring_list = list(map(lambda x: x.strip(), ustring_list))
    normal_str_list = []
    for i in range(len(ustring_list)):
        rstring = ''
        for uchar in ustring_list[i]:
            if uchar == ' ':
                continue
            inside_code = ord(uchar)
            if inside_code == 12288:  # full-width space -> ASCII space
                inside_code = 32
            else:
                inside_code -= 65248  # shift full-width forms into the ASCII range
            if not (33 <= inside_code <= 126):
                # Not a full-width ASCII variant; keep the original character.
                rstring += uchar
                continue
            rstring += chr(inside_code)
        normal_str_list.append(rstring)
    return normal_str_list
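Illustrative call with made-up strings: full-width letters, digits, and punctuation fold to their ASCII counterparts, while CJK characters pass through unchanged.

print(DBC2SBC(['ABC123:累计', ' (同比) ']))
# -> ['ABC123:累计', '(同比)']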
# body_hash: 01a4885f0f1e8b85ecc06c3d8ac8ef43e697a8fcc69e28475a7dd54f8fd7a55c
# path: industry/industry.py | name: main | repo: linusqzdeng/macroind-crawler | stars: 0 | lang: python
def main(page_num: int, bypass_pages: list = None):
    """Main loop of the crawler.

    Params
    ------
    - page_num: int
        Total number of pages to scrape
    - bypass_pages: list
        List of page numbers that you wish the program to ignore
    """
    ua = UserAgent()
    headers = {'User-Agent': ua.random}
    search_page_url = 'http://www.stats.gov.cn/was5/web/search?page=1&channelid=288041&orderby=-DOCRELTIME&was_custom_expr=DOCTITLE%3D%28like%28%E5%B7%A5%E4%B8%9A%E5%A2%9E%E5%8A%A0%E5%80%BC%29%2Fsen%29&perpage=10&outlinepage=10'
    schema = 'http://www.stats.gov.cn/was5/web/'
    skip_pages = []
    for i in range(page_num):
        results = pd.DataFrame()
        print(f'Fetching urls in page {i + 1}...')
        search_page_html = get_html(search_page_url, headers)
        urllist, next_page_url = parse_search_page(search_page_html)
        search_page_url = schema + next_page_url
        if bypass_pages and (i + 1) in bypass_pages:
            print(f'Bypassing page {i + 1}')
            continue
        for n, url in enumerate(urllist[:2]):
            print(f'Page number: {n + 1}')
            table_html = get_html(url, headers)
            title, table, release_dt, report_dt = parse_html(table_html)
            try:
                table = parse_table(table, INDEX_MAP)
                output = redesign_table(table, release_dt, report_dt)
                results = pd.concat([results, output], ignore_index=True)
            except Exception as e:
                print('===Errors detected:', e)
                print(f'Skipping the page {title}')
                skip_pages.append(title)
                continue
        print('Collected all tables, going to next page...')
        results.to_csv(f'../data/industry_growth_page{i + 1}.csv', index=False)
    print('All done!')
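Hypothetical invocation; the module-level helpers (get_html, parse_search_page, parse_html, redesign_table, INDEX_MAP) and imports (pandas as pd, presumably fake_useragent's UserAgent) are assumed to exist in industry/industry.py:

if __name__ == '__main__':
    # Scrape three result pages, skipping the second one.
    main(page_num=3, bypass_pages=[2])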
# body_hash: 3684315eb7b5ba7b26320dac54e161c5c4584632cda87b0043f7b076af2a8815
# path: applications/MeshMovingApplication/python_scripts/mesh_solver_base.py | name: _create_mesh_motion_solving_strategy | repo: lkusch/Kratos | stars: 778 | lang: python
def _create_mesh_motion_solving_strategy(self):
    """Create the mesh motion solving strategy.

    The mesh motion solving strategy must provide the functions defined in SolutionStrategy.
    """
    raise Exception('Mesh motion solving strategy must be created by the derived class.')
# body_hash: 08b3ee57d10d44e14d598f357a25d9e58b85012ada840dc8ba8d0ecca9925a7f
# path: applications/MeshMovingApplication/python_scripts/mesh_solver_base.py | name: _set_and_fill_buffer | repo: lkusch/Kratos | stars: 778 | lang: python
def _set_and_fill_buffer(self):
    """Prepare nodal solution step data containers and time step information."""
    buffer_size = self.GetMinimumBufferSize()
    self.mesh_model_part.SetBufferSize(buffer_size)
    # Rewind time by buffer_size steps, then clone forward to fill the buffer.
    delta_time = self.mesh_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]
    time = self.mesh_model_part.ProcessInfo[KratosMultiphysics.TIME]
    step = -buffer_size
    time = time - delta_time * buffer_size
    self.mesh_model_part.ProcessInfo.SetValue(KratosMultiphysics.TIME, time)
    for i in range(0, buffer_size):
        step = step + 1
        time = time + delta_time
        self.mesh_model_part.ProcessInfo.SetValue(KratosMultiphysics.STEP, step)
        self.mesh_model_part.CloneTimeStep(time)
    self.mesh_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED] = False
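Tracing the rewind arithmetic outside of Kratos, with illustrative numbers: buffer_size = 3, delta_time = 0.1, and a current time of 1.0 fill the buffer at t = 0.8, 0.9, 1.0 with steps -2, -1, 0.

buffer_size, delta_time, time = 3, 0.1, 1.0
time -= delta_time * buffer_size          # rewind to 0.7
for step in range(-buffer_size + 1, 1):   # steps -2, -1, 0
    time += delta_time
    print(step, round(time, 1))           # (-2, 0.8), (-1, 0.9), (0, 1.0)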
# body_hash: ae3d1a5b28a20e919984313d7d449f6213510550f7c03a4a1b266f8a2c2f3954
# path: applications/MeshMovingApplication/python_scripts/mesh_solver_base.py | name: __CreateTimeIntegratorHelper | repo: lkusch/Kratos | stars: 778 | lang: python
def __CreateTimeIntegratorHelper(self):
    """Initialize the helper class for the time integration."""
    mesh_vel_calc_setting = self.settings['mesh_velocity_calculation']
    time_scheme = mesh_vel_calc_setting['time_scheme'].GetString()
    if time_scheme == 'bdf1':
        self.time_int_helper = KratosMultiphysics.TimeDiscretization.BDF1()
    elif time_scheme == 'bdf2':
        self.time_int_helper = KratosMultiphysics.TimeDiscretization.BDF2()
    elif time_scheme == 'newmark':
        self.time_int_helper = KratosMultiphysics.TimeDiscretization.Newmark()
    elif time_scheme == 'bossak':
        # alpha_m is optional for Bossak; the default is used when absent.
        if mesh_vel_calc_setting.Has('alpha_m'):
            alpha_m = mesh_vel_calc_setting['alpha_m'].GetDouble()
            self.time_int_helper = KratosMultiphysics.TimeDiscretization.Bossak(alpha_m)
        else:
            self.time_int_helper = KratosMultiphysics.TimeDiscretization.Bossak()
    elif time_scheme == 'generalized_alpha':
        alpha_m = mesh_vel_calc_setting['alpha_m'].GetDouble()
        alpha_f = mesh_vel_calc_setting['alpha_f'].GetDouble()
        self.time_int_helper = KratosMultiphysics.TimeDiscretization.GeneralizedAlpha(alpha_m, alpha_f)
    else:
        err_msg = 'The requested time scheme "{}" is not available for the calculation of the mesh velocity!\n'.format(time_scheme)
        err_msg += 'Available options are: "bdf1", "bdf2", "newmark", "bossak", "generalized_alpha"'
        raise Exception(err_msg)
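A sketch of the settings shape this dispatcher reads; the keys mirror the accesses above, but the concrete values (and wrapping them as Kratos Parameters) are assumptions:

mesh_solver_settings = {
    "mesh_velocity_calculation": {
        "time_scheme": "generalized_alpha",
        "alpha_m": -0.3,   # placeholder value
        "alpha_f": -0.1    # placeholder value
    }
}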
# body_hash: 59761d93770f6cf7c62d5d5383bfa3dd028b658040eb336c8b303b7a14af819d
# path: erddapy/utilities.py | name: urlopen | repo: jmunroe/erddapy | stars: 0 | lang: python
def urlopen(url, params=None, **kwargs):
    """Thin wrapper around requests get content.

    See requests.get docs for the `params` and `kwargs` options.
    """
    return io.BytesIO(requests.get(url, params=params, **kwargs).content)
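Usage sketch with a hypothetical URL; since urlopen returns a BytesIO, the result is file-like and can be fed directly to pandas:

import pandas as pd

buf = urlopen('https://example.com/erddap/tabledap/dataset.csv')  # hypothetical URL
df = pd.read_csv(buf)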
# body_hash: 4b645b62d96812221995c4176d4062559f86bacc082f40ab50abccc662a441d2
# path: erddapy/utilities.py | name: _check_url_response | repo: jmunroe/erddapy | stars: 0 | lang: python
@functools.lru_cache(maxsize=None)
def _check_url_response(url):
    """Shortcut to `raise_for_status` instead of fetching the whole content."""
    r = requests.head(url)
    r.raise_for_status()
    return url
# body_hash: cc48637b25c2c9d5e9afbbfd387552ff1d74743fce1e4a377efe03c85fe66002
# path: erddapy/utilities.py | name: _clean_response | repo: jmunroe/erddapy | stars: 0 | lang: python
def _clean_response(response):
    """Allow for `ext` or `.ext` format.

    The user can, for example, use either `.csv` or `csv` in the response kwarg.
    """
    return response.lstrip('.')
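Both spellings of the response extension normalize to the same value:

assert _clean_response('.csv') == _clean_response('csv') == 'csv'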
# body_hash: c9175f1e5cba725fc0fac5ae99acfeff88c2db98ab10b9143ca03a29c97a70cb
# path: erddapy/utilities.py | name: parse_dates | repo: jmunroe/erddapy | stars: 0 | lang: python
def parse_dates(date_time):
    """
    The ERDDAP RESTful API standardizes the representation of dates as either ISO
    strings or seconds since 1970, but internally erddapy uses datetime-like
    objects. `timestamp` returns the expected strings in seconds since 1970.
    """
    date_time = parse_time_string(date_time)
    if isinstance(date_time, tuple):
        # Some versions of parse_time_string return a (datetime, resolution) tuple.
        date_time = date_time[0]
    if not date_time.tzinfo:
        date_time = pytz.utc.localize(date_time)
    else:
        date_time = date_time.astimezone(pytz.utc)
    return date_time.timestamp()
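Illustrative round trip, assuming `parse_time_string` (imported at module level in erddapy) parses ISO 8601 strings: midnight 2000-01-01 UTC is 946684800 seconds past the Unix epoch, however the timezone is spelled.

assert parse_dates('2000-01-01T00:00:00Z') == 946684800.0
assert parse_dates('2000-01-01 01:00:00+01:00') == 946684800.0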
# body_hash: 404021ef0fa8436b1ecdd6248cf1056a8a33cf8a7798853d0ae68d61b85745a5
# path: erddapy/utilities.py | name: quote_string_constraints | repo: jmunroe/erddapy | stars: 0 | lang: python
def quote_string_constraints(kwargs):
    """
    For constraints of String variables,
    the right-hand-side value must be surrounded by double quotes.
    """
    return {k: f'"{v}"' if isinstance(v, str) else v for k, v in kwargs.items()}
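Only string-valued right-hand sides get quoted; numeric constraints pass through unchanged (the constraint names below are hypothetical):

constraints = {'station_id=': 'A01', 'depth>': 5}
print(quote_string_constraints(constraints))
# {'station_id=': '"A01"', 'depth>': 5}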
# body_hash: b50aaaa1b9539cab047f56d22efd2986e065c8926491b0723030ee8dc3201598
# path: hw4/controllers.py | name: __init__ | repo: zhenghaoz/cs294 | stars: 4 | lang: python
def __init__(self, env):
    """YOUR CODE HERE"""
    super().__init__()
    self.env = env
# body_hash: 76c48bc864efb36b3e7a28c8bd5a55c880d0e525b51c49021feed67b98f886e1
# path: hw4/controllers.py | name: get_action | repo: zhenghaoz/cs294 | stars: 4 | lang: python
def get_action(self, state):
    """YOUR CODE HERE"""
    # Randomly sample an action uniformly from the action space.
    return self.env.action_space.sample()
# body_hash: 9f0289c112b603e90f632082f3c8968933d0499756fc905dd89eda952736c3c1
# path: hw4/controllers.py | name: get_action | repo: zhenghaoz/cs294 | stars: 4 | lang: python
def get_action(self, state):
    """YOUR CODE HERE"""
    # Note: be careful to batch your simulations through the model for speed.
    action_dim = self.env.action_space.shape[0]
    state_dim = self.env.observation_space.shape[0]
    path_actions = np.zeros([self.horizon, self.num_simulated_paths, action_dim])
    path_states = np.zeros([self.horizon, self.num_simulated_paths, state_dim])
    path_next_states = np.zeros([self.horizon, self.num_simulated_paths, state_dim])
    # Start every simulated path from the current state.
    states = np.ones([self.num_simulated_paths, state_dim]) * state.reshape([-1, state_dim])
    for i in range(self.horizon):
        # Record the pre-step state batch, sample random actions, and step the model.
        path_states[i] = states
        path_actions[i] = np.asarray([self.env.action_space.sample() for _ in range(self.num_simulated_paths)])
        states = self.dyn_model.predict(states, path_actions[i])
        path_next_states[i] = states
    # Score every sampled trajectory and return the first action of the cheapest one.
    path_costs = trajectory_cost_fn(self.cost_fn, path_states, path_actions, path_next_states)
    best = np.argmin(path_costs)
    return path_actions[0, best]
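For context, this is random-shooting MPC: sample many action sequences, roll them through the learned dynamics model, and execute only the first action of the cheapest rollout. A plausible sketch of the trajectory_cost_fn helper it relies on (the homework defines the real one; this is an assumption about its contract):

import numpy as np

def trajectory_cost_fn(cost_fn, states, actions, next_states):
    # states/actions/next_states: [horizon, num_paths, dim] arrays.
    horizon = states.shape[0]
    costs = np.zeros(states.shape[1])
    for t in range(horizon):
        # cost_fn is assumed to be vectorized over the batch of paths.
        costs += cost_fn(states[t], actions[t], next_states[t])
    return costs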
# body_hash: c810c4178dfbd489e2591001b6fe3ace18c87675a19109e571c33bff8de4f168
# path: pycseg/__init__.py | name: process_sentence | repo: lizonghai/pycseg | stars: 4 | lang: python
def process_sentence(self, sentence):
    """
    Process a sentence; return word segmentation and POS-tagging results.
    Return format: [(word, pos), (word, pos) ... (word, pos)]
    """
    seg = Segment(sentence, d_store=self.d_store)
    seg.atom_segment()
    seg.word_match()
    words_graph = seg.get_words_graph()
    # Detect out-of-vocabulary words before building the word DAG.
    detection = OOVDetection(words_graph, self.d_store)
    detection.oov_detection()
    words_graph.generate_words_dag(self.d_store.bigram_dct)
    seg_words_result = words_graph.words_segment()
    # Keep the candidate segmentation whose tagging has the highest probability.
    pre_poss = 0
    best_words, best_tags = None, None
    for seg_result in seg_words_result:
        words = seg_result['words']
        tagging = POSTagging()
        tags = tagging.generate_pos_tags(words, self.d_store.core_dct, self.d_store.lexical_ctx)
        poss = self.compute_possibility(words, tags, self.d_store)
        if poss > pre_poss:
            pre_poss = poss
            best_words = words
            best_tags = tags
    best_words = [w.content for w in best_words]
    # Strip the sentinel begin/end tokens.
    return {'words': best_words[1:-1], 'tags': best_tags[1:-1]}
# body_hash: dec84e664d9fe00551a5558c7db61d407bce93450d755316a3fc0cfad8bc9f11
# path: pycseg/__init__.py | name: process | repo: lizonghai/pycseg | stars: 4 | lang: python
def process(self, content):
    """
    Process a text; return word segmentation and POS-tagging results.
    Return format: [(word, pos), (word, pos) ... (word, pos)]
    """
    sentences = self._split_by(content, definitions.SEPERATOR_C_SENTENCE, contains_delimiter=True)
    results = {'words': [], 'tags': []}
    for sentence in sentences:
        result = self.process_sentence(sentence)
        results['words'].extend(result['words'])
        results['tags'].extend(result['tags'])
    return results
# body_hash: 49ebd6712111c4046d7daa7576c9749e3137470210a3456689c0248d79c43f58
# path: pycseg/__init__.py | name: process_file | repo: lizonghai/pycseg | stars: 4 | lang: python
def process_file(self, filename, out_filename=None):
    """
    Process a file; write the results to a file, or return them.
    """
    results = {'words': [], 'tags': []}
    with codecs.open(filename, 'r', 'utf-8') as input_file:
        for line in input_file:
            print('PROCESS LINE:{}'.format(line))
            result = self.process(line.strip())
            print(self.format_result(result))
            results['words'].extend(result['words'])
            results['tags'].extend(result['tags'])
    if out_filename is None:
        return results
    else:
        with codecs.open(out_filename, 'w', 'utf-8') as output_file:
            output_file.write(self.format_result(results))
            output_file.write('\n')
# body_hash: 4ed3677338d1738588e3b8026d04429dc95e6b2b13650c00311395ba52497c23
# path: typed_python/Codebase.py | name: __init__ | repo: APrioriInvestments/typed_python | stars: 105 | lang: python
def __init__(self, rootDirectory, filesToContents, rootModuleNames):
    """Initialize a codebase.

    Args:
        rootDirectory - the path to the root where the filesystem lives.
            For instance, if the code is in /home/ubuntu/code/typed_python,
            this would be '/home/ubuntu/code'
        filesToContents - a dict containing the filename (relative to
            rootDirectory) of each file, mapping to the byte contents.
        rootModuleNames - a list of root-level module names
    """
    self.rootDirectory = rootDirectory
    self.filesToContents = filesToContents
    self.rootModuleNames = rootModuleNames
    self._sha_hash = None
# body_hash: e5bb064c4e8057da73ac6eb6e56c76cf30b38e7c6bcfc8f2e78c154bcadc5437
# path: typed_python/Codebase.py | name: allModuleLevelValues | repo: APrioriInvestments/typed_python | stars: 105 | lang: python
def allModuleLevelValues(self):
    """Iterate over all module-level values. Yields (name, object) pairs."""
    for moduleName, module in self.importModulesByName(self.moduleNames).items():
        for item in dir(module):
            yield moduleName + '.' + item, getattr(module, item)
# body_hash: 8ec0411523affbbc996ec1e0097fbed6afa1bcb95e85fb8ad7afff2d876d1cce
# path: typed_python/Codebase.py | name: markNative | repo: APrioriInvestments/typed_python | stars: 105 | lang: python
def markNative(self):
    """Indicate that this codebase is already instantiated."""
    with _lock:
        for mname in self.rootModuleNames:
            _installed_rootlevel_modules[mname] = self
        _installed_codebases[self.sha_hash] = self
# body_hash: e09327afed0b7845ae87a20ed9822b07af9aea3aa651aaf3c756434ca309652a
# path: typed_python/Codebase.py | name: FromRootlevelPath | repo: APrioriInvestments/typed_python | stars: 105 | lang: python
@staticmethod
def FromRootlevelPath(rootPath, prefix=None, extensions=('.py',),
                      maxTotalBytes=100 * 1024 * 1024, suppressFun=None):
    """Build a codebase from the path to the root directory containing a module.

    Args:
        rootPath (str) - the root path we're going to pull in. This should point
            to a directory with the name of the python module this codebase
            will represent.
        extensions (tuple of strings) - a list of file extensions for the files
            we want to grab
        maxTotalBytes - a maximum bytecount before we'll throw an exception
        suppressFun - a function from module path (a dotted name) that returns
            True if we should stop walking into the path.
    """
    root, files, rootModuleNames = Codebase._walkDiskRepresentation(
        rootPath, prefix=prefix, extensions=extensions,
        maxTotalBytes=maxTotalBytes, suppressFun=suppressFun)
    return Codebase(root, files, rootModuleNames)
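Usage sketch with a hypothetical path, matching the docstring's own example layout:

cb = Codebase.FromRootlevelPath('/home/ubuntu/code/typed_python')
print(cb.rootDirectory)     # '/home/ubuntu/code'
print(cb.rootModuleNames)   # {'typed_python'}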
# body_hash: 813a53da7dc2d08523a72d5eb117b0268dbb44b16441bbfb2967bda76929cfca
# path: typed_python/Codebase.py | name: _walkDiskRepresentation | repo: APrioriInvestments/typed_python | stars: 105 | lang: python
@staticmethod
def _walkDiskRepresentation(rootPath, prefix=None, extensions=('.py',),
                            maxTotalBytes=100 * 1024 * 1024, suppressFun=None):
    """Utility method that collects the code for a given root module.

    Parameters:
    -----------
    rootPath : str
        the root path for which to gather code

    suppressFun : a function(path) that returns True if the module path shouldn't
        be included in the codebase.

    Returns:
    --------
    tuple(parentDir:str, files:dict(str->str), rootModuleNames:set(str))
        parentDir:str is the path of the parent directory of the module
        files:dict(str->str) maps file paths (relative to the parentDir) to their contents
        rootModuleNames:set(str) is the set of root-level module names
    """
    parentDir, moduleDir = os.path.split(rootPath)
    files = {}
    total_bytes = [0]

    def walkDisk(path, so_far):
        if so_far.startswith('.'):
            # Ignore hidden directories such as .git.
            return
        if suppressFun is not None:
            if suppressFun(so_far):
                return
        for name in os.listdir(path):
            fullpath = os.path.join(path, name)
            so_far_with_name = os.path.join(so_far, name) if so_far else name
            if os.path.isdir(fullpath):
                walkDisk(fullpath, so_far_with_name)
            elif os.path.splitext(name)[1] in extensions:
                with open(fullpath, 'r', encoding='utf-8') as f:
                    try:
                        contents = f.read()
                    except UnicodeDecodeError:
                        raise Exception(f'Failed to parse code in {fullpath} because of a unicode error.')
                total_bytes[0] += len(contents)
                if total_bytes[0] > maxTotalBytes:
                    raise Exception('exceeded bytecount with %s of size %s' % (fullpath, len(contents)))
                files[so_far_with_name] = contents

    walkDisk(os.path.abspath(rootPath), moduleDir)
    modules_by_name = Codebase.filesToModuleNames(files, prefix)
    rootLevelModuleNames = set(x.split('.')[0] for x in modules_by_name)
    return parentDir, files, rootLevelModuleNames
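A suppressFun sketch: the callback receives the relative path walked so far, so skipping test trees could look like this (the predicate and path are illustrative):

import os

def skip_tests(so_far):
    # so_far is the path walked so far, relative to the module's parent directory.
    return 'tests' in so_far.split(os.sep)

cb = Codebase.FromRootlevelPath('/home/ubuntu/code/typed_python',  # hypothetical path
                                suppressFun=skip_tests)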
# body_hash: 2719045f456f2ef91e0976ec227ff10a43dbde4670c52e0399574c050b45c269
# path: typed_python/Codebase.py | name: instantiate | repo: APrioriInvestments/typed_python | stars: 105 | lang: python
def instantiate(self, rootDirectory=None):
    """Instantiate a codebase on disk.

    Args:
        rootDirectory - if None, then pick a directory. Otherwise,
            this is where to put the code. This directory must be
            persistent for the life of the process.
    """
    if self.isInstantiated():
        return
    if self.rootDirectory is not None:
        raise Exception('Codebase is already instantiated, but not marked as such?')
    with _lock:
        if self.sha_hash in _installed_codebases:
            return
        for rootMod in self.rootModuleNames:
            if rootMod in _installed_rootlevel_modules:
                raise Exception(f'Module {rootMod} is instantiated in another codebase already')
        if rootDirectory is None:
            rootDirectory = tempfile.TemporaryDirectory().name
        # Write every file in the codebase to disk under rootDirectory.
        for fpath, fcontents in self.filesToContents.items():
            path, name = os.path.split(fpath)
            fullpath = os.path.join(rootDirectory, path)
            if not os.path.exists(fullpath):
                os.makedirs(fullpath)
            with open(os.path.join(fullpath, name), 'wb') as f:
                f.write(fcontents.encode('utf-8'))
        # Put the new root on sys.path so the modules become importable.
        sys.path = [rootDirectory] + sys.path
        for rootMod in self.rootModuleNames:
            _installed_rootlevel_modules[rootMod] = self
        _installed_codebases[self.sha_hash] = self
        self.rootDirectory = rootDirectory
# body_hash: a4e9c5a6d29944f523fa7a1b217ba9548a7780803f80884e5902fe94f0ba472a
# path: typed_python/Codebase.py | name: importModulesByName | repo: APrioriInvestments/typed_python | stars: 105 | lang: python
@staticmethod
def importModulesByName(modules_by_name):
    """Returns a dict mapping module names (str) to modules."""
    modules = {}
    for mname in sorted(modules_by_name):
        try:
            modules[mname] = importlib.import_module(mname)
        except Exception as e:
            logging.getLogger(__name__).warning("Error importing module '%s' from codebase: %s", mname, e)
    return modules
a794ab8e7f09338ba237a2b808c1dae3a203b08213c784e8774ab80b982ddc18 | def sample_gaussian(mu, Sigma, N=1):
'\n Draw N random row vectors from a Gaussian distribution\n\n Args:\n mu (numpy array [n x 1]): expected value vector\n Sigma (numpy array [n x n]): covariance matrix\n N (int): scalar number of samples\n\n Returns:\n M (numpy array [n x N]): samples from Gaussian distribtion\n\n '
N = int(N)
n = len(mu)
(U, s, V) = np.linalg.svd(Sigma)
S = np.zeros(Sigma.shape)
for i in range(min(Sigma.shape)):
S[(i, i)] = math.sqrt(s[i])
M = np.random.normal(size=(n, N))
M = (np.dot(np.dot(U, S), M) + mu)
return M | Draw N random row vectors from a Gaussian distribution
Args:
mu (numpy array [n x 1]): expected value vector
Sigma (numpy array [n x n]): covariance matrix
N (int): scalar number of samples
Returns:
M (numpy array [n x N]): samples from Gaussian distribtion | estimators.py | sample_gaussian | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def sample_gaussian(mu, Sigma, N=1):
'\n Draw N random row vectors from a Gaussian distribution\n\n Args:\n mu (numpy array [n x 1]): expected value vector\n Sigma (numpy array [n x n]): covariance matrix\n N (int): scalar number of samples\n\n Returns:\n M (numpy array [n x N]): samples from Gaussian distribtion\n\n '
N = int(N)
n = len(mu)
(U, s, V) = np.linalg.svd(Sigma)
S = np.zeros(Sigma.shape)
for i in range(min(Sigma.shape)):
S[(i, i)] = math.sqrt(s[i])
M = np.random.normal(size=(n, N))
M = (np.dot(np.dot(U, S), M) + mu)
return M | def sample_gaussian(mu, Sigma, N=1):
'\n Draw N random row vectors from a Gaussian distribution\n\n Args:\n mu (numpy array [n x 1]): expected value vector\n Sigma (numpy array [n x n]): covariance matrix\n N (int): scalar number of samples\n\n Returns:\n M (numpy array [n x N]): samples from Gaussian distribtion\n\n '
N = int(N)
n = len(mu)
(U, s, V) = np.linalg.svd(Sigma)
S = np.zeros(Sigma.shape)
for i in range(min(Sigma.shape)):
S[(i, i)] = math.sqrt(s[i])
M = np.random.normal(size=(n, N))
M = (np.dot(np.dot(U, S), M) + mu)
return M<|docstring|>Draw N random column vectors from a Gaussian distribution
Args:
mu (numpy array [n x 1]): expected value vector
Sigma (numpy array [n x n]): covariance matrix
N (int): scalar number of samples
Returns:
M (numpy array [n x N]): samples from Gaussian distribution<|endoftext|>
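A quick sanity check of sample_gaussian (an illustrative sketch; it assumes the function above is importable and that numpy is available):

import numpy as np

np.random.seed(0)
mu = np.array([[1.0], [-2.0]])              # n x 1 expected value
Sigma = np.array([[2.0, 0.3], [0.3, 1.0]])  # n x n covariance
M = sample_gaussian(mu, Sigma, N=100000)    # one sample per column
print(M.shape)            # (2, 100000)
print(M.mean(axis=1))     # approaches [1.0, -2.0] as N grows
print(np.cov(M))          # approaches Sigma as N grows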
9a2587ac779f761b87cb6cabed6cecd6fa8f65ad61c1b82997f08aa640a43212 | def kinematic_state_observer(initial_cond, yaw_rates, inertial_accs, long_vs, T, alpha):
'\n Not working yet!\n '
num_sol = len(T)
states = np.zeros((2, num_sol))
states[:, 0] = np.squeeze(initial_cond[3:5])
C = np.array([1, 0])
B = np.identity(2)
A = np.zeros((2, 2))
for i in range(1, num_sol):
yaw_rate = yaw_rates[(i - 1)]
A[(0, 1)] = yaw_rate
A[(1, 0)] = (- yaw_rate)
K = (1.0 * np.array([((2 * alpha) * math.fabs(yaw_rate)), (((alpha ** 2) - 1) * yaw_rate)]))
states_dot = ((np.matmul((A - np.matmul(K, C)), states[:, (i - 1)]) + np.matmul(B, inertial_accs[:, (i - 1)])) + (K * long_vs[(i - 1)]))
dt = (T[i] - T[(i - 1)])
states[:, i] = (states[:, (i - 1)] + (dt * states_dot))
return states | Not working yet! | estimators.py | kinematic_state_observer | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def kinematic_state_observer(initial_cond, yaw_rates, inertial_accs, long_vs, T, alpha):
'\n \n '
num_sol = len(T)
states = np.zeros((2, num_sol))
states[:, 0] = np.squeeze(initial_cond[3:5])
C = np.array([1, 0])
B = np.identity(2)
A = np.zeros((2, 2))
for i in range(1, num_sol):
yaw_rate = yaw_rates[(i - 1)]
A[(0, 1)] = yaw_rate
A[(1, 0)] = (- yaw_rate)
K = (1.0 * np.array([((2 * alpha) * math.fabs(yaw_rate)), (((alpha ** 2) - 1) * yaw_rate)]))
states_dot = ((np.matmul((A - np.matmul(K, C)), states[:, (i - 1)]) + np.matmul(B, inertial_accs[:, (i - 1)])) + (K * long_vs[(i - 1)]))
dt = (T[i] - T[(i - 1)])
states[:, i] = (states[:, (i - 1)] + (dt * states_dot))
return states | def kinematic_state_observer(initial_cond, yaw_rates, inertial_accs, long_vs, T, alpha):
'\n \n '
num_sol = len(T)
states = np.zeros((2, num_sol))
states[:, 0] = np.squeeze(initial_cond[3:5])
C = np.array([1, 0])
B = np.identity(2)
A = np.zeros((2, 2))
for i in range(1, num_sol):
yaw_rate = yaw_rates[(i - 1)]
A[(0, 1)] = yaw_rate
A[(1, 0)] = (- yaw_rate)
K = (1.0 * np.array([((2 * alpha) * math.fabs(yaw_rate)), (((alpha ** 2) - 1) * yaw_rate)]))
states_dot = ((np.matmul((A - np.matmul(K, C)), states[:, (i - 1)]) + np.matmul(B, inertial_accs[:, (i - 1)])) + (K * long_vs[(i - 1)]))
dt = (T[i] - T[(i - 1)])
states[:, i] = (states[:, (i - 1)] + (dt * states_dot))
return states<|docstring|>Not working yet!<|endoftext|> |
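The docstring flags this observer as unfinished, but the calling convention can still be read off the body; a shape-only sketch with hypothetical inputs (illustrative, not a validation of the estimator):

import numpy as np

nt = 100
T = np.linspace(0.0, 1.0, nt)
initial_cond = np.zeros((6, 1))     # only entries 3:5 (the velocities) are read
yaw_rates = 0.1 * np.ones(nt)       # rad/s
inertial_accs = np.zeros((2, nt))   # body-frame accelerations
long_vs = np.ones(nt)               # measured longitudinal speed
states = kinematic_state_observer(initial_cond, yaw_rates,
                                  inertial_accs, long_vs, T, alpha=1.5)
print(states.shape)                 # (2, nt): estimated velocities over time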
ef8048f2947caa0f23aafa5763edd23cb9be7a0574b5425f6b50aa0efc14ab04 | def findCombinationsUtil(arr, index, num, reducedNum, output):
'\n Find all combinations of < n numbers from 1 to num with repetition that add up to reducedNum \n\n Args:\n arr (list size n): current items that add up to <= reducedNum (in the 0th recursion)\n index (int): index of the next slot of arr list\n num (int): limit of what numbers to be chosen from -> [1, num]\n reducedNum (int): remaining number to add up to required sum\n output (list): for appending the results to\n\n '
if (reducedNum < 0):
return
if (reducedNum == 0):
output.append(arr[:index])
return
prev = (1 if (index == 0) else arr[(index - 1)])
for k in range(prev, (num + 1)):
arr[index] = k
findCombinationsUtil(arr, (index + 1), num, (reducedNum - k), output) | Find all combinations of < n numbers from 1 to num with repetition that add up to reducedNum
Args:
arr (list size n): current items that add up to <= reducedNum (in the 0th recursion)
index (int): index of the next slot of arr list
num (int): limit of what numbers to be chosen from -> [1, num]
reducedNum (int): remaining number to add up to required sum
output (list): for appending the results to | estimators.py | findCombinationsUtil | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def findCombinationsUtil(arr, index, num, reducedNum, output):
'\n Find all combinations of < n numbers from 1 to num with repetition that add up to reducedNum \n\n Args:\n arr (list size n): current items that add up to <= reducedNum (in the 0th recursion)\n index (int): index of the next slot of arr list\n num (int): limit of what numbers to be chosen from -> [1, num]\n reducedNum (int): remaining number to add up to required sum\n output (list): for appending the results to\n\n '
if (reducedNum < 0):
return
if (reducedNum == 0):
output.append(arr[:index])
return
prev = (1 if (index == 0) else arr[(index - 1)])
for k in range(prev, (num + 1)):
arr[index] = k
findCombinationsUtil(arr, (index + 1), num, (reducedNum - k), output) | def findCombinationsUtil(arr, index, num, reducedNum, output):
'\n Find all combinations of < n numbers from 1 to num with repetition that add up to reducedNum \n\n Args:\n arr (list size n): current items that add up to <= reducedNum (in the 0th recursion)\n index (int): index of the next slot of arr list\n num (int): limit of what numbers to be chosen from -> [1, num]\n reducedNum (int): remaining number to add up to required sum\n output (list): for appending the results to\n\n '
if (reducedNum < 0):
return
if (reducedNum == 0):
output.append(arr[:index])
return
prev = (1 if (index == 0) else arr[(index - 1)])
for k in range(prev, (num + 1)):
arr[index] = k
findCombinationsUtil(arr, (index + 1), num, (reducedNum - k), output)<|docstring|>Find all combinations of < n numbers from 1 to num with repetition that add up to reducedNum
Args:
arr (list size n): current items that add up to <= reducedNum (in the 0th recursion)
index (int): index of the next slot of arr list
num (int): limit of what numbers to be chosen from -> [1, num]
reducedNum (int): remaining number to add up to required sum
output (list): for appending the results to<|endoftext|> |
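A quick illustration of the recursion (not part of the record): enumerating the partitions of 4 into non-decreasing parts drawn from 1..4:

output = []
arr = [0] * 4   # scratch buffer; at most 4 parts are needed to sum to 4
findCombinationsUtil(arr, 0, 4, 4, output)
print(output)   # [[1, 1, 1, 1], [1, 1, 2], [1, 3], [2, 2], [4]]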
dba86f3e1c143f0e7fff8105a601631c65508d427934a617ac5958d756676e83 | def fit_data_rover_dynobj(dynamic_obj, vy=np.array([]), back_rotate=False):
'\n Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9) using a dynamic object.\n\n Args:\n dynamic_obj (RoverPartialDynEst or RoverDyn obj): dynamic object\n vy (numpy array [nt]): optionally, lateral velocity if observed; defaults to empty\n back_rotate (bool): produce linear and lateral velocities from rotating state coordinates? defaults to False\n\n Returns:\n parameters (list): consists of parameters c1-c9 in that order\n '
parameters = ([0] * 9)
if ((dynamic_obj.state_dict['vx'] not in dynamic_obj.state_indices) or back_rotate):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
vx = dynamic_obj.cal_vxvy_from_coord(output=True)[0, :]
else:
vx = dynamic_obj.outputs[dynamic_obj.state_indices.index(dynamic_obj.state_dict['vx']), :]
if (dynamic_obj.state_dict['vx'] in dynamic_obj.state_dot_indices):
vx_dot_ind = (len(dynamic_obj.state_indices) + dynamic_obj.state_dot_indices.index(dynamic_obj.state_dict['vx']))
vxdot = dynamic_obj.outputs[vx_dot_ind, :]
else:
dts = np.diff(dynamic_obj.T)
if (len(vx) < len(dynamic_obj.T)):
dts = dts[:(len(vx) - 1)]
vxdot = (np.diff(vx) / dts)
last_ind = min(len(vx), len(vxdot))
diff = np.reshape((vx[:last_ind] - dynamic_obj.U[1, :last_ind]), [-1, 1])
A_long_accel = np.concatenate((np.ones((len(vxdot[:last_ind]), 1)), diff, np.square(diff)), axis=1)
parameters[4:7] = np.linalg.lstsq(A_long_accel, vxdot[:last_ind, np.newaxis], rcond=None)[0][:, 0].tolist()
if (dynamic_obj.state_dict['theta'] in dynamic_obj.state_dot_indices):
theta_dot_ind = (len(dynamic_obj.state_indices) + dynamic_obj.state_dot_indices.index(dynamic_obj.state_dict['theta']))
thetadot = dynamic_obj.outputs[theta_dot_ind, :]
else:
theta_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['theta'])
thetadot = (np.diff(dynamic_obj.outputs[theta_ind, :]) / np.diff(dynamic_obj.T))
last_ind = min(len(thetadot), len(vx))
def nls_yawrate(x, yaw_rate, steering_cmd, vx):
return (yaw_rate - ((np.tan(((x[0] * steering_cmd) + x[1])) * vx) / (x[2] + (x[3] * (vx ** 2)))))
x0 = np.array([1, 0, 1.775, 0])
res_l = least_squares(nls_yawrate, x0, args=(thetadot[:last_ind], dynamic_obj.U[0, :last_ind], vx[:last_ind]))
parameters[:4] = res_l.x
if ((vy.shape[0] == 0) and back_rotate):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
vy = dynamic_obj.cal_vxvy_from_coord_wrapper(output=True)[1, :]
if (vy.shape[0] == 0):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
x_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['x'])
y_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['y'])
theta_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['theta'])
xdot = (np.diff(dynamic_obj.outputs[x_ind, :]) / np.diff(dynamic_obj.T))
ydot = (np.diff(dynamic_obj.outputs[y_ind, :]) / np.diff(dynamic_obj.T))
theta = dynamic_obj.outputs[theta_ind, :]
last_ind = min(len(xdot), len(vx), len(theta), len(thetadot))
def nls_xy(params, xdot, ydot, vx, yaw, yaw_rate):
vy = (yaw_rate * (params[0] + (params[1] * (vx ** 2))))
res_x = (xdot - ((vx * np.cos(yaw)) - (vy * np.sin(yaw))))
res_y = (ydot - ((vx * np.sin(yaw)) + (vy * np.cos(yaw))))
return np.concatenate((res_x, res_y)).flatten()
x0 = np.array([0.1, 0.1])
res_l = least_squares(nls_xy, x0, args=(xdot[:last_ind], ydot[:last_ind], vx[:last_ind], theta[:last_ind], thetadot[:last_ind]))
parameters[7:9] = res_l.x
else:
last_ind = min(len(thetadot), len(vx), len(vy))
prod = (thetadot[:last_ind] * (vx[:last_ind] ** 2))
A_lat_vel = np.concatenate((thetadot[:last_ind, np.newaxis], prod[:, np.newaxis]), axis=1)
parameters[7:9] = np.linalg.lstsq(A_lat_vel, vy[:last_ind, np.newaxis], rcond=None)[0][:, 0].tolist()
return parameters | Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9) using a dynamic object.
Args:
dynamic_obj (RoverPartialDynEst or RoverDyn obj): dynamic object
vy (numpy array [nt]): optionally, lateral velocity if observed; defaults to empty
back_rotate (bool): produce linear and lateral velocities from rotating state coordinates? defaults to False
Returns:
parameters (list): consists of parameters c1-c9 in that order | estimators.py | fit_data_rover_dynobj | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def fit_data_rover_dynobj(dynamic_obj, vy=np.array([]), back_rotate=False):
'\n Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9) using a dynamic object.\n\n Args:\n dynamic_obj (RoverPartialDynEst or RoverDyn obj): dynamic object\n vy (numpy array [nt]): optionally, lateral velocity if observed; defaults to empty\n back_rotate (bool): produce linear and lateral velocities from rotating state coordinates? defaults to False\n\n Returns:\n parameters (list): consists of parameters c1-c9 in that order\n '
parameters = ([0] * 9)
if ((dynamic_obj.state_dict['vx'] not in dynamic_obj.state_indices) or back_rotate):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
vx = dynamic_obj.cal_vxvy_from_coord(output=True)[0, :]
else:
vx = dynamic_obj.outputs[dynamic_obj.state_indices.index(dynamic_obj.state_dict['vx']), :]
if (dynamic_obj.state_dict['vx'] in dynamic_obj.state_dot_indices):
vx_dot_ind = (len(dynamic_obj.state_indices) + dynamic_obj.state_dot_indices.index(dynamic_obj.state_dict['vx']))
vxdot = dynamic_obj.outputs[vx_dot_ind, :]
else:
dts = np.diff(dynamic_obj.T)
if (len(vx) < len(dynamic_obj.T)):
dts = dts[:(len(vx) - 1)]
vxdot = (np.diff(vx) / dts)
last_ind = min(len(vx), len(vxdot))
diff = np.reshape((vx[:last_ind] - dynamic_obj.U[1, :last_ind]), [-1, 1])
A_long_accel = np.concatenate((np.ones((len(vxdot[:last_ind]), 1)), diff, np.square(diff)), axis=1)
parameters[4:7] = np.linalg.lstsq(A_long_accel, vxdot[:last_ind, np.newaxis], rcond=None)[0][:, 0].tolist()
if (dynamic_obj.state_dict['theta'] in dynamic_obj.state_dot_indices):
theta_dot_ind = (len(dynamic_obj.state_indices) + dynamic_obj.state_dot_indices.index(dynamic_obj.state_dict['theta']))
thetadot = dynamic_obj.outputs[theta_dot_ind, :]
else:
theta_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['theta'])
thetadot = (np.diff(dynamic_obj.outputs[theta_ind, :]) / np.diff(dynamic_obj.T))
last_ind = min(len(thetadot), len(vx))
def nls_yawrate(x, yaw_rate, steering_cmd, vx):
return (yaw_rate - ((np.tan(((x[0] * steering_cmd) + x[1])) * vx) / (x[2] + (x[3] * (vx ** 2)))))
x0 = np.array([1, 0, 1.775, 0])
res_l = least_squares(nls_yawrate, x0, args=(thetadot[:last_ind], dynamic_obj.U[0, :last_ind], vx[:last_ind]))
parameters[:4] = res_l.x
if ((vy.shape[0] == 0) and back_rotate):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
vy = dynamic_obj.cal_vxvy_from_coord_wrapper(output=True)[1, :]
if (vy.shape[0] == 0):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
x_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['x'])
y_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['y'])
theta_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['theta'])
xdot = (np.diff(dynamic_obj.outputs[x_ind, :]) / np.diff(dynamic_obj.T))
ydot = (np.diff(dynamic_obj.outputs[y_ind, :]) / np.diff(dynamic_obj.T))
theta = dynamic_obj.outputs[theta_ind, :]
last_ind = min(len(xdot), len(vx), len(theta), len(thetadot))
def nls_xy(params, xdot, ydot, vx, yaw, yaw_rate):
vy = (yaw_rate * (params[0] + (params[1] * (vx ** 2))))
res_x = (xdot - ((vx * np.cos(yaw)) - (vy * np.sin(yaw))))
res_y = (ydot - ((vx * np.sin(yaw)) + (vy * np.cos(yaw))))
return np.concatenate((res_x, res_y)).flatten()
x0 = np.array([0.1, 0.1])
res_l = least_squares(nls_xy, x0, args=(xdot[:last_ind], ydot[:last_ind], vx[:last_ind], theta[:last_ind], thetadot[:last_ind]))
parameters[7:9] = res_l.x
else:
last_ind = min(len(thetadot), len(vx), len(vy))
prod = (thetadot[:last_ind] * (vx[:last_ind] ** 2))
A_lat_vel = np.concatenate((thetadot[:last_ind, np.newaxis], prod[:, np.newaxis]), axis=1)
parameters[7:9] = np.linalg.lstsq(A_lat_vel, vy[:last_ind, np.newaxis], rcond=None)[0][:, 0].tolist()
return parameters | def fit_data_rover_dynobj(dynamic_obj, vy=np.array([]), back_rotate=False):
'\n Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9) using a dynamic object.\n\n Args:\n dynamic_obj (RoverPartialDynEst or RoverDyn obj): dynamic object\n vy (numpy array [nt]): optionally, lateral velocity if observed; defaults to empty\n back_rotate (bool): produce linear and lateral velocities from rotating state coordinates? defaults to False\n\n Returns:\n parameters (list): consists of parameters c1-c9 in that order\n '
parameters = ([0] * 9)
if ((dynamic_obj.state_dict['vx'] not in dynamic_obj.state_indices) or back_rotate):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
vx = dynamic_obj.cal_vxvy_from_coord(output=True)[0, :]
else:
vx = dynamic_obj.outputs[dynamic_obj.state_indices.index(dynamic_obj.state_dict['vx']), :]
if (dynamic_obj.state_dict['vx'] in dynamic_obj.state_dot_indices):
vx_dot_ind = (len(dynamic_obj.state_indices) + dynamic_obj.state_dot_indices.index(dynamic_obj.state_dict['vx']))
vxdot = dynamic_obj.outputs[vx_dot_ind, :]
else:
dts = np.diff(dynamic_obj.T)
if (len(vx) < len(dynamic_obj.T)):
dts = dts[:(len(vx) - 1)]
vxdot = (np.diff(vx) / dts)
last_ind = min(len(vx), len(vxdot))
diff = np.reshape((vx[:last_ind] - dynamic_obj.U[1, :last_ind]), [-1, 1])
A_long_accel = np.concatenate((np.ones((len(vxdot[:last_ind]), 1)), diff, np.square(diff)), axis=1)
parameters[4:7] = np.linalg.lstsq(A_long_accel, vxdot[:last_ind, np.newaxis], rcond=None)[0][:, 0].tolist()
if (dynamic_obj.state_dict['theta'] in dynamic_obj.state_dot_indices):
theta_dot_ind = (len(dynamic_obj.state_indices) + dynamic_obj.state_dot_indices.index(dynamic_obj.state_dict['theta']))
thetadot = dynamic_obj.outputs[theta_dot_ind, :]
else:
theta_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['theta'])
thetadot = (np.diff(dynamic_obj.outputs[theta_ind, :]) / np.diff(dynamic_obj.T))
last_ind = min(len(thetadot), len(vx))
def nls_yawrate(x, yaw_rate, steering_cmd, vx):
return (yaw_rate - ((np.tan(((x[0] * steering_cmd) + x[1])) * vx) / (x[2] + (x[3] * (vx ** 2)))))
x0 = np.array([1, 0, 1.775, 0])
res_l = least_squares(nls_yawrate, x0, args=(thetadot[:last_ind], dynamic_obj.U[0, :last_ind], vx[:last_ind]))
parameters[:4] = res_l.x
if ((vy.shape[0] == 0) and back_rotate):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
vy = dynamic_obj.cal_vxvy_from_coord_wrapper(output=True)[1, :]
if (vy.shape[0] == 0):
assert ((dynamic_obj.state_dict['x'] in dynamic_obj.state_indices) and (dynamic_obj.state_dict['y'] in dynamic_obj.state_indices)), 'No source for vehicle coordinates from output data'
x_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['x'])
y_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['y'])
theta_ind = dynamic_obj.state_indices.index(dynamic_obj.state_dict['theta'])
xdot = (np.diff(dynamic_obj.outputs[x_ind, :]) / np.diff(dynamic_obj.T))
ydot = (np.diff(dynamic_obj.outputs[y_ind, :]) / np.diff(dynamic_obj.T))
theta = dynamic_obj.outputs[theta_ind, :]
last_ind = min(len(xdot), len(vx), len(theta), len(thetadot))
def nls_xy(params, xdot, ydot, vx, yaw, yaw_rate):
vy = (yaw_rate * (params[0] + (params[1] * (vx ** 2))))
res_x = (xdot - ((vx * np.cos(yaw)) - (vy * np.sin(yaw))))
res_y = (ydot - ((vx * np.sin(yaw)) + (vy * np.cos(yaw))))
return np.concatenate((res_x, res_y)).flatten()
x0 = np.array([0.1, 0.1])
res_l = least_squares(nls_xy, x0, args=(xdot[:last_ind], ydot[:last_ind], vx[:last_ind], theta[:last_ind], thetadot[:last_ind]))
parameters[7:9] = res_l.x
else:
last_ind = min(len(thetadot), len(vx), len(vy))
prod = (thetadot[:last_ind] * (vx[:last_ind] ** 2))
A_lat_vel = np.concatenate((thetadot[:last_ind, np.newaxis], prod[:, np.newaxis]), axis=1)
parameters[7:9] = np.linalg.lstsq(A_lat_vel, vy[:last_ind, np.newaxis], rcond=None)[0][:, 0].tolist()
return parameters<|docstring|>Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9) using a dynamic object.
Args:
dynamic_obj (RoverPartialDynEst or RoverDyn obj): dynamic object
vy (numpy array [nt]): optionally, lateral velocity if observed; defaults to empty
back_rotate (bool): produce linear and lateral velocities from rotating state coordinates? defaults to False
Returns:
parameters (list): consists of parameters c1-c9 in that order<|endoftext|> |
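A sketch of driving fit_data_rover_dynobj with a hand-built stand-in for the dynamic object (illustrative only; the attribute names are read off the body above, and the data itself is arbitrary):

import numpy as np
from types import SimpleNamespace

nt = 200
T = np.linspace(0.0, 10.0, nt)
dt = T[1] - T[0]
steer = 0.05 * np.sin(T)                      # steering command
v_cmd = 1.0 + 0.5 * np.sin(0.5 * T)           # commanded velocity
vx = v_cmd + 0.1 * np.cos(T)                  # pretend measured speed
theta = np.cumsum(0.1 * steer) * dt           # pretend heading
x = np.cumsum(vx * np.cos(theta)) * dt
y = np.cumsum(vx * np.sin(theta)) * dt
dyn = SimpleNamespace(
    state_dict={'x': 0, 'y': 1, 'theta': 2, 'vx': 3},
    state_indices=[0, 1, 2, 3],
    state_dot_indices=[],
    outputs=np.vstack((x, y, theta, vx)),
    U=np.vstack((steer, v_cmd)),
    T=T)
vy = 0.01 * np.ones(nt)                       # pretend lateral-velocity data
params = fit_data_rover_dynobj(dyn, vy=vy)
print(len(params))                            # 9 fitted constants, c1..c9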
17aeede8a37b93e2a669bad974a9c0cc25d65c14a5606595d93ec973c5c06995 | def fit_data_rover(states, U, dt, vxdot=np.array([]), yawrate=np.array([]), vy=np.array([])):
'\n Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9).\n\n Args:\n states (numpy array [4 x nt]): rover states consisting of x, y, theta and vx at different time instances\n U (numpy array [2 x nt]): input to the model at different time instances consisting of steering angle and commanded velocity\n vxdot (numpy array [nt]): optionally, linear longitudinal acceleration at different time instances\n yawrate (numpy array [nt]): optionally, yaw rate at different time instances\n vy (numpy array [nt]): optionally, lateral velocity if observed\n\n Returns:\n parameters (list): consists of parameters c1-c9 in that order\n\n '
parameters = ([0] * 9)
if (vxdot.shape[0] == 0):
vxdot = (np.diff(states[3, :]) / dt)
else:
vxdot = vxdot[:(- 1)]
diff = np.reshape((states[3, :-1] - U[1, :-1]), [-1, 1])
A_long_accel = np.concatenate((np.ones((len(vxdot), 1)), diff, np.square(diff)), axis=1)
parameters[4:7] = np.linalg.lstsq(A_long_accel, vxdot[:, np.newaxis], rcond=None)[0][:, 0].tolist()
if (yawrate.shape[0] == 0):
yawrate = (np.diff(states[2, :]) / dt)
else:
yawrate = yawrate[:(- 1)]
def nls_yawrate(x, yaw_rate, steering_cmd, vx):
return (yaw_rate - ((np.tan(((x[0] * steering_cmd) + x[1])) * vx) / (x[2] + (x[3] * (vx ** 2)))))
x0 = np.array([1, 0, 1.775, 0])
res_l = least_squares(nls_yawrate, x0, args=(yawrate, U[0, :-1], states[3, :-1]))
parameters[:4] = res_l.x
if (vy.shape[0] == 0):
xdot = (np.diff(states[0, :]) / dt)
ydot = (np.diff(states[1, :]) / dt)
def nls_xy(params, xdot, ydot, vx, yaw, yaw_rate):
vy = (yaw_rate * (params[0] + (params[1] * (vx ** 2))))
res_x = (xdot - ((vx * np.cos(yaw)) - (vy * np.sin(yaw))))
res_y = (ydot - ((vx * np.sin(yaw)) + (vy * np.cos(yaw))))
return np.concatenate((res_x, res_y)).flatten()
x0 = np.array([0.1, 0.1])
res_l = least_squares(nls_xy, x0, args=(xdot, ydot, states[3, :-1], states[2, :-1], yawrate))
parameters[7:9] = res_l.x
else:
prod = (yawrate * (states[3, :-1] ** 2))
A_lat_vel = np.concatenate((yawrate[:, np.newaxis], prod[:, np.newaxis]), axis=1)
parameters[7:9] = np.linalg.lstsq(A_lat_vel, vy[:-1, np.newaxis], rcond=None)[0][:, 0].tolist()
return parameters | Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9).
Args:
states (numpy array [4 x nt]): rover states consisting of x, y, theta and vx at different time instances
U (numpy array [2 x nt]): input to the model at different time instances consisting of steering angle and commanded velocity
vxdot (numpy array [nt]): optionally, linear longitudinal acceleration at different time instances
yawrate (numpy array [nt]): optionally, yaw rate at different time instances
vy (numpy array [nt]): optionally, lateral velocity if observed
Returns:
parameters (list): consists of parameters c1-c9 in that order | estimators.py | fit_data_rover | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def fit_data_rover(states, U, dt, vxdot=np.array([]), yawrate=np.array([]), vy=np.array([])):
'\n Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9).\n\n Args:\n states (numpy array [4 x nt]): rover states consisting of x, y, theta and vx at different time instances\n U (numpy array [2 x nt]): input to the model at different time instances consisting of steering angle and commanded velocity\n vxdot (numpy array [nt]): optionally, linear longitudinal acceleration at different time instances\n yawrate (numpy array [nt]): optionally, yaw rate at different time instances\n vy (numpy array [nt]): optionally, lateral velocity if observed\n\n Returns:\n parameters (list): consists of parameters c1-c9 in that order\n\n '
parameters = ([0] * 9)
if (vxdot.shape[0] == 0):
vxdot = (np.diff(states[3, :]) / dt)
else:
vxdot = vxdot[:(- 1)]
diff = np.reshape((states[3, :-1] - U[1, :-1]), [-1, 1])
A_long_accel = np.concatenate((np.ones((len(vxdot), 1)), diff, np.square(diff)), axis=1)
parameters[4:7] = np.linalg.lstsq(A_long_accel, vxdot[:, np.newaxis], rcond=None)[0][:, 0].tolist()
if (yawrate.shape[0] == 0):
yawrate = (np.diff(states[2, :]) / dt)
else:
yawrate = yawrate[:(- 1)]
def nls_yawrate(x, yaw_rate, steering_cmd, vx):
return (yaw_rate - ((np.tan(((x[0] * steering_cmd) + x[1])) * vx) / (x[2] + (x[3] * (vx ** 2)))))
x0 = np.array([1, 0, 1.775, 0])
res_l = least_squares(nls_yawrate, x0, args=(yawrate, U[0, :-1], states[3, :-1]))
parameters[:4] = res_l.x
if (vy.shape[0] == 0):
xdot = (np.diff(states[0, :]) / dt)
ydot = (np.diff(states[1, :]) / dt)
def nls_xy(params, xdot, ydot, vx, yaw, yaw_rate):
vy = (yaw_rate * (params[0] + (params[1] * (vx ** 2))))
res_x = (xdot - ((vx * np.cos(yaw)) - (vy * np.sin(yaw))))
res_y = (ydot - ((vx * np.sin(yaw)) + (vy * np.cos(yaw))))
return np.concatenate((res_x, res_y)).flatten()
x0 = np.array([0.1, 0.1])
res_l = least_squares(nls_xy, x0, args=(xdot, ydot, states[3, :-1], states[2, :-1], yawrate))
parameters[7:9] = res_l.x
else:
prod = (yawrate * (states[3, :-1] ** 2))
A_lat_vel = np.concatenate((yawrate[:, np.newaxis], prod[:, np.newaxis]), axis=1)
parameters[7:9] = np.linalg.lstsq(A_lat_vel, vy[:-1, np.newaxis], rcond=None)[0][:, 0].tolist()
return parameters | def fit_data_rover(states, U, dt, vxdot=np.array([]), yawrate=np.array([]), vy=np.array([])):
'\n Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9).\n\n Args:\n states (numpy array [4 x nt]): rover states consisting of x, y, theta and vx at different time instances\n U (numpy array [2 x nt]): input to the model at different time instances consisting of steering angle and commanded velocity\n vxdot (numpy array [nt]): optionally, linear longitudinal acceleration at different time instances\n yawrate (numpy array [nt]): optionally, yaw rate at different time instances\n vy (numpy array [nt]): optionally, lateral velocity if observed\n\n Returns:\n parameters (list): consists of parameters c1-c9 in that order\n\n '
parameters = ([0] * 9)
if (vxdot.shape[0] == 0):
vxdot = (np.diff(states[3, :]) / dt)
else:
vxdot = vxdot[:(- 1)]
diff = np.reshape((states[3, :-1] - U[1, :-1]), [-1, 1])
A_long_accel = np.concatenate((np.ones((len(vxdot), 1)), diff, np.square(diff)), axis=1)
parameters[4:7] = np.linalg.lstsq(A_long_accel, vxdot[:, np.newaxis], rcond=None)[0][:, 0].tolist()
if (yawrate.shape[0] == 0):
yawrate = (np.diff(states[2, :]) / dt)
else:
yawrate = yawrate[:(- 1)]
def nls_yawrate(x, yaw_rate, steering_cmd, vx):
return (yaw_rate - ((np.tan(((x[0] * steering_cmd) + x[1])) * vx) / (x[2] + (x[3] * (vx ** 2)))))
x0 = np.array([1, 0, 1.775, 0])
res_l = least_squares(nls_yawrate, x0, args=(yawrate, U[0, :-1], states[3, :-1]))
parameters[:4] = res_l.x
if (vy.shape[0] == 0):
xdot = (np.diff(states[0, :]) / dt)
ydot = (np.diff(states[1, :]) / dt)
def nls_xy(params, xdot, ydot, vx, yaw, yaw_rate):
vy = (yaw_rate * (params[0] + (params[1] * (vx ** 2))))
res_x = (xdot - ((vx * np.cos(yaw)) - (vy * np.sin(yaw))))
res_y = (ydot - ((vx * np.sin(yaw)) + (vy * np.cos(yaw))))
return np.concatenate((res_x, res_y)).flatten()
x0 = np.array([0.1, 0.1])
res_l = least_squares(nls_xy, x0, args=(xdot, ydot, states[3, :-1], states[2, :-1], yawrate))
parameters[7:9] = res_l.x
else:
prod = (yawrate * (states[3, :-1] ** 2))
A_lat_vel = np.concatenate((yawrate[:, np.newaxis], prod[:, np.newaxis]), axis=1)
parameters[7:9] = np.linalg.lstsq(A_lat_vel, vy[:-1, np.newaxis], rcond=None)[0][:, 0].tolist()
return parameters<|docstring|>Perform LS and NLS fitting for parameter estimation of the rover dynamics (c1-c9).
Args:
states (numpy array [4 x nt]): rover states consisting of x, y, theta and vx at different time instances
U (numpy array [2 x nt]): input to the model at different time instances consisting of steering angle and commanded velocity
vxdot (numpy array [nt]): optionally, linear longitudinal acceleration at different time instances
yawrate (numpy array [nt]): optionally, yaw rate at different time instances
vy (numpy array [nt]): optionally, lateral velocity if observed
Returns:
parameters (list): consists of parameters c1-c9 in that order<|endoftext|> |
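A synthetic-data sketch for fit_data_rover (illustrative; the trajectories are fabricated and serve only to exercise the documented interface):

import numpy as np

nt, dt = 200, 0.05
t = np.arange(nt) * dt
vx = 1.0 + 0.3 * np.sin(0.4 * t)
theta = np.cumsum(0.2 * np.sin(t)) * dt
x = np.cumsum(vx * np.cos(theta)) * dt
y = np.cumsum(vx * np.sin(theta)) * dt
states = np.vstack((x, y, theta, vx))           # 4 x nt: x, y, theta, vx
U = np.vstack((0.1 * np.sin(t),                 # steering angle
               vx - 0.1 * np.cos(0.7 * t)))     # commanded velocity
params = fit_data_rover(states, U, dt)
print(params)                                   # fitted c1..c9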
def196033f688bf0d8256afb0b0089d900bcfaadacf1ae013069abd74d1384e2 | def sample_nlds(z0, U, nt, f, h, num_out, Q=None, P0=None, R=None, Qu=None, additional_args_pm=[], additional_args_om=[], overwrite_inds=[], overwrite_vals=[]):
'\n Retrieve ground truth, initial and output data (SNLDS: Stochastic non-linear dynamic system)\n\n Args:\n z0 (numpy array [n x 1]): initial ground truth condition\n U (numpy array [nu x nt]): inputs for the process and observation model\n nt (int): number of simulation steps\n f (function): function handle for one-time step forward propagating the state; expected signature f(state, input, noise, ...)\n h (function): function handle for retrieving the outputs of the system as a function of system states;\n expected signature h(state, input, noise, ...)\n num_out (int): number of outputs from function h\n Q (numpy array [nq x nq]): noise covariance matrix involved in the stochastic model f\n P0 (numpy array [n x n]): initial covariance for the initial estimate around the ground truth\n R (numpy array [nr x nr]): covariance matrix of the noise involved in h function\n Qu (numpy array [nqu x nqu]): noise covariance matrix involved in the input to the stochastic model f\n additional_args_pm (list): list of additional arguments to be passed to function f\n additional_args_om (list): list of additional arguments to be passed to function h\n overwrite_inds (list): list of state indices to be overwritten\n overwrite_vals (list): list of ground truth values to overwrite state propagation\n\n Returns:\n gt_states (numpy array [n x nt]): ground truth states at different time instances\n initial_cond (numpy array [n x 1]): initial condition from Gaussian distribution with mean z0 and covariance P0\n outputs (numpy array [num_out x nt]): simulated outputs of the system\n additional_args_pm_list (2d list [len(additional_args_pm) x nt]): additional arguments to be passed to \n function f at each time instant\n additional_args_om_list (2d list [len(additional_args_om) x nt]): additional arguments to be passed to\n function h at each time instant\n\n '
if (not len(U)):
U = np.zeros((0, nt))
if (Q is None):
Q = np.zeros((len(z0), len(z0)))
if (Qu is None):
Qu = np.zeros((U.shape[0], U.shape[0]))
if (P0 is None):
P0 = np.zeros((len(z0), len(z0)))
if (R is None):
R = np.zeros((num_out, num_out))
assert (U.shape[1] == nt), 'Expected input for all {} time instances but only received {}'.format(nt, U.shape[1])
assert (Q.shape == (len(z0), len(z0))), 'Inconsistent size of process noise matrix'
assert (Qu.shape == (U.shape[0], U.shape[0])), 'Inconsistent size of input noise matrix'
assert (P0.shape == (len(z0), len(z0))), 'Inconsistent size of initial covariance matrix'
assert (R.shape == (num_out, num_out)), 'Inconsistent size of observation noise matrix'
additional_args_pm_list = np.zeros((len(additional_args_pm), nt)).tolist()
additional_args_om_list = np.zeros((len(additional_args_om), nt)).tolist()
for (i, argument) in enumerate(additional_args_pm):
if (not isinstance(argument, Iterable)):
additional_args_pm_list[i] = ([argument] * nt)
else:
assert (len(argument) == nt), 'If iterable argument for pm is provided, it should have the length of nt'
additional_args_pm_list[i] = argument
for (i, argument) in enumerate(additional_args_om):
if (not isinstance(argument, Iterable)):
additional_args_om_list[i] = ([argument] * nt)
else:
assert (len(argument) == nt), 'If iterable argument for om is provided, it should have the length of nt'
additional_args_om_list[i] = argument
assert (len(overwrite_inds) == len(overwrite_vals)), 'Inconsistent sizes of information to be overwritten'
for ind in overwrite_inds:
assert ((ind >= 0) and (ind < len(z0))), 'Overwrite index not within range [{},{})'.format(0, len(z0))
overwrite_vals_array = np.zeros((len(overwrite_inds), nt))
for (i, val) in enumerate(overwrite_vals):
if isinstance(val, Iterable):
assert (len(val) == nt), 'Iterable information should have the length of nt'
overwrite_vals_array[i] = val
state_noise_samples = sample_gaussian(np.zeros(z0.shape), Q, nt)
input_noise_samples = sample_gaussian(np.zeros((Qu.shape[0], 1)), Qu, nt)
obs_noise_samples = sample_gaussian(np.zeros((num_out, 1)), R, nt)
gt_states = np.zeros((z0.shape[0], nt))
gt_states[:, 0:1] = z0
initial_cond = sample_gaussian(z0, P0, 1)
outputs = np.zeros((num_out, nt))
outputs[:, 0] = h(gt_states[:, 0], U[:, 0], obs_noise_samples[:, 0], *[sub[0] for sub in additional_args_om_list])
for i in range(1, nt):
gt_states[:, i] = f(gt_states[:, (i - 1)], U[:, (i - 1)], state_noise_samples[:, (i - 1)], input_noise_samples[:, (i - 1)], *[sub[(i - 1)] for sub in additional_args_pm_list])
gt_states[overwrite_inds, i] = overwrite_vals_array[:, i]
outputs[:, i] = h(gt_states[:, i], U[:, i], obs_noise_samples[:, i], *[sub[i] for sub in additional_args_om_list])
return (gt_states, initial_cond, outputs, additional_args_pm_list, additional_args_om_list) | Retrieve ground truth, initial and output data (SNLDS: Stochastic non-linear dynamic system)
Args:
z0 (numpy array [n x 1]): initial ground truth condition
U (numpy array [nu x nt]): inputs for the process and observation model
nt (int): number of simulation steps
f (function): function handle for one-time step forward propagating the state; expected signature f(state, input, noise, ...)
h (function): function handle for retrieving the outputs of the system as a function of system states;
expected signature h(state, input, noise, ...)
num_out (int): number of outputs from function h
Q (numpy array [nq x nq]): noise covariance matrix involved in the stochastic model f
P0 (numpy array [n x n]): initial covariance for the initial estimate around the ground truth
R (numpy array [nr x nr]): covariance matrix of the noise involved in h function
Qu (numpy array [nqu x nqu]): noise covariance matrix involved in the input to the stochastic model f
additional_args_pm (list): list of additional arguments to be passed to function f
additional_args_om (list): list of additional arguments to be passed to function h
overwrite_inds (list): list of state indices to be overwritten
overwrite_vals (list): list of ground truth values to overwrite state propagation
Returns:
gt_states (numpy array [n x nt]): ground truth states at different time instances
initial_cond (numpy array [n x 1]): initial condition from Gaussian distribution with mean z0 and covariance P0
outputs (numpy array [num_out x nt]): simulated outputs of the system
additional_args_pm_list (2d list [len(additional_args_pm) x nt]): additional arguments to be passed to
function f at each time instant
additional_args_om_list (2d list [len(additional_args_om) x nt]): additional arguments to be passed to
function h at each time instant | estimators.py | sample_nlds | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def sample_nlds(z0, U, nt, f, h, num_out, Q=None, P0=None, R=None, Qu=None, additional_args_pm=[], additional_args_om=[], overwrite_inds=[], overwrite_vals=[]):
'\n Retrieve ground truth, initial and output data (SNLDS: Stochastic non-linear dynamic system)\n\n Args:\n z0 (numpy array [n x 1]): initial ground truth condition\n U (numpy array [nu x nt]): inputs for the process and observation model\n nt (int): number of simulation steps\n f (function): function handle for one-time step forward propagating the state; expected signature f(state, input, noise, ...)\n h (function): function handle for retrieving the outputs of the system as a function of system states;\n expected signature h(state, input, noise, ...)\n num_out (int): number of outputs from function h\n Q (numpy array [nq x nq]): noise covariance matrix involved in the stochastic model f\n P0 (numpy array [n x n]): initial covariance for the initial estimate around the ground truth\n R (numpy array [nr x nr]): covariance matrix of the noise involved in h function\n Qu (numpy array [nqu x nqu]): noise covariance matrix involved in the input to the stochastic model f\n additional_args_pm (list): list of additional arguments to be passed to function f\n additional_args_om (list): list of additional arguments to be passed to function h\n overwrite_inds (list): list of state indices to be overwritten\n overwrite_vals (list): list of ground truth values to overwrite state propagation\n\n Returns:\n gt_states (numpy array [n x nt]): ground truth states at different time instances\n initial_cond (numpy array [n x 1]): initial condition from Gaussian distribution with mean z0 and covariance P0\n outputs (numpy array [num_out x nt]): simulated outputs of the system\n additional_args_pm_list (2d list [len(additional_args_pm) x nt]): additional arguments to be passed to \n function f at each time instant\n additional_args_om_list (2d list [len(additional_args_om) x nt]): additional arguments to be passed to\n function h at each time instant\n\n '
if (not len(U)):
U = np.zeros((0, nt))
if (Q is None):
Q = np.zeros((len(z0), len(z0)))
if (Qu is None):
Qu = np.zeros((U.shape[0], U.shape[0]))
if (P0 is None):
P0 = np.zeros((len(z0), len(z0)))
if (R is None):
R = np.zeros((num_out, num_out))
assert (U.shape[1] == nt), 'Expected input for all {} time instances but only received {}'.format(nt, U.shape[1])
assert (Q.shape == (len(z0), len(z0))), 'Inconsistent size of process noise matrix'
assert (Qu.shape == (U.shape[0], U.shape[0])), 'Inconsistent size of input noise matrix'
assert (P0.shape == (len(z0), len(z0))), 'Inconsistent size of initial covariance matrix'
assert (R.shape == (num_out, num_out)), 'Inconsistent size of observation noise matrix'
additional_args_pm_list = np.zeros((len(additional_args_pm), nt)).tolist()
additional_args_om_list = np.zeros((len(additional_args_om), nt)).tolist()
for (i, argument) in enumerate(additional_args_pm):
if (not isinstance(argument, Iterable)):
additional_args_pm_list[i] = ([argument] * nt)
else:
assert (len(argument) == nt), 'If iterable argument for pm is provided, it should have the length of nt'
additional_args_pm_list[i] = argument
for (i, argument) in enumerate(additional_args_om):
if (not isinstance(argument, Iterable)):
additional_args_om_list[i] = ([argument] * nt)
else:
assert (len(argument) == nt), 'If iterable argument for om is provided, it should have the length of nt'
additional_args_om_list[i] = argument
assert (len(overwrite_inds) == len(overwrite_vals)), 'Inconsistent sizes of information to be overwritten'
for ind in overwrite_inds:
assert ((ind >= 0) and (ind < len(z0))), 'Overwrite index not within range [{},{})'.format(0, len(z0))
overwrite_vals_array = np.zeros((len(overwrite_inds), nt))
for (i, val) in enumerate(overwrite_vals):
if isinstance(val, Iterable):
assert (len(val) == nt), 'Iterable information should have the length of nt'
overwrite_vals_array[i] = val
state_noise_samples = sample_gaussian(np.zeros(z0.shape), Q, nt)
input_noise_samples = sample_gaussian(np.zeros((Qu.shape[0], 1)), Qu, nt)
obs_noise_samples = sample_gaussian(np.zeros((num_out, 1)), R, nt)
gt_states = np.zeros((z0.shape[0], nt))
gt_states[:, 0:1] = z0
initial_cond = sample_gaussian(z0, P0, 1)
outputs = np.zeros((num_out, nt))
outputs[:, 0] = h(gt_states[:, 0], U[:, 0], obs_noise_samples[:, 0], *[sub[0] for sub in additional_args_om_list])
for i in range(1, nt):
gt_states[:, i] = f(gt_states[:, (i - 1)], U[:, (i - 1)], state_noise_samples[:, (i - 1)], input_noise_samples[:, (i - 1)], *[sub[(i - 1)] for sub in additional_args_pm_list])
gt_states[overwrite_inds, i] = overwrite_vals_array[:, i]
outputs[:, i] = h(gt_states[:, i], U[:, i], obs_noise_samples[:, i], *[sub[i] for sub in additional_args_om_list])
return (gt_states, initial_cond, outputs, additional_args_pm_list, additional_args_om_list) | def sample_nlds(z0, U, nt, f, h, num_out, Q=None, P0=None, R=None, Qu=None, additional_args_pm=[], additional_args_om=[], overwrite_inds=[], overwrite_vals=[]):
'\n Retrieve ground truth, initial and output data (SNLDS: Stochastic non-linear dynamic system)\n\n Args:\n z0 (numpy array [n x 1]): initial ground truth condition\n U (numpy array [nu x nt]): inputs for the process and observation model\n nt (int): number of simulation steps\n f (function): function handle for one-time step forward propagating the state; expected signature f(state, input, noise, ...)\n h (function): function handle for retrieving the outputs of the system as a function of system states;\n expected signature h(state, input, noise, ...)\n num_out (int): number of outputs from function h\n Q (numpy array [nq x nq]): noise covariance matrix involved in the stochastic model f\n P0 (numpy array [n x n]): initial covariance for the initial estimate around the ground truth\n R (numpy array [nr x nr]): covariance matrix of the noise involved in h function\n Qu (numpy array [nqu x nqu]): noise covariance matrix involved in the input to the stochastic model f\n additional_args_pm (list): list of additional arguments to be passed to function f\n additional_args_om (list): list of additional arguments to be passed to function h\n overwrite_inds (list): list of state indices to be overwritten\n overwrite_vals (list): list of ground truth values to overwrite state propagation\n\n Returns:\n gt_states (numpy array [n x nt]): ground truth states at different time instances\n initial_cond (numpy array [n x 1]): initial condition from Gaussian distribution with mean z0 and covariance P0\n outputs (numpy array [num_out x nt]): simulated outputs of the system\n additional_args_pm_list (2d list [len(additional_args_pm) x nt]): additional arguments to be passed to \n function f at each time instant\n additional_args_om_list (2d list [len(additional_args_om) x nt]): additional arguments to be passed to\n function h at each time instant\n\n '
if (not len(U)):
U = np.zeros((0, nt))
if (Q is None):
Q = np.zeros((len(z0), len(z0)))
if (Qu is None):
Qu = np.zeros((U.shape[0], U.shape[0]))
if (P0 is None):
P0 = np.zeros((len(z0), len(z0)))
if (R is None):
R = np.zeros((num_out, num_out))
assert (U.shape[1] == nt), 'Expected input for all {} time instances but only received {}'.format(nt, U.shape[1])
assert (Q.shape == (len(z0), len(z0))), 'Inconsistent size of process noise matrix'
assert (Qu.shape == (U.shape[0], U.shape[0])), 'Inconsistent size of input noise matrix'
assert (P0.shape == (len(z0), len(z0))), 'Inconsistent size of initial covariance matrix'
assert (R.shape == (num_out, num_out)), 'Inconsistent size of observation noise matrix'
additional_args_pm_list = np.zeros((len(additional_args_pm), nt)).tolist()
additional_args_om_list = np.zeros((len(additional_args_om), nt)).tolist()
for (i, argument) in enumerate(additional_args_pm):
if (not isinstance(argument, Iterable)):
additional_args_pm_list[i] = ([argument] * nt)
else:
assert (len(argument) == nt), 'If iterable argument for pm is provided, it should have the length of nt'
additional_args_pm_list[i] = argument
for (i, argument) in enumerate(additional_args_om):
if (not isinstance(argument, Iterable)):
additional_args_om_list[i] = ([argument] * nt)
else:
assert (len(argument) == nt), 'If iterable argument for om is provided, it should have the length of nt'
additional_args_om_list[i] = argument
assert (len(overwrite_inds) == len(overwrite_vals)), 'Inconsistent sizes of information to be overwritten'
for ind in overwrite_inds:
assert ((ind >= 0) and (ind < len(z0))), 'Overwrite index not within range [{},{})'.format(0, len(z0))
overwrite_vals_array = np.zeros((len(overwrite_inds), nt))
for (i, val) in enumerate(overwrite_vals):
if isinstance(val, Iterable):
assert (len(val) == nt), 'Iterable information should have the length of nt'
overwrite_vals_array[i] = val
state_noise_samples = sample_gaussian(np.zeros(z0.shape), Q, nt)
input_noise_samples = sample_gaussian(np.zeros((Qu.shape[0], 1)), Qu, nt)
obs_noise_samples = sample_gaussian(np.zeros((num_out, 1)), R, nt)
gt_states = np.zeros((z0.shape[0], nt))
gt_states[:, 0:1] = z0
initial_cond = sample_gaussian(z0, P0, 1)
outputs = np.zeros((num_out, nt))
outputs[:, 0] = h(gt_states[:, 0], U[:, 0], obs_noise_samples[:, 0], *[sub[0] for sub in additional_args_om_list])
for i in range(1, nt):
gt_states[:, i] = f(gt_states[:, (i - 1)], U[:, (i - 1)], state_noise_samples[:, (i - 1)], input_noise_samples[:, (i - 1)], *[sub[(i - 1)] for sub in additional_args_pm_list])
gt_states[overwrite_inds, i] = overwrite_vals_array[:, i]
outputs[:, i] = h(gt_states[:, i], U[:, i], obs_noise_samples[:, i], *[sub[i] for sub in additional_args_om_list])
return (gt_states, initial_cond, outputs, additional_args_pm_list, additional_args_om_list)<|docstring|>Retrieve ground truth, initial and output data (SNLDS: Stochastic non-linear dynamic system)
Args:
z0 (numpy array [n x 1]): initial ground truth condition
U (numpy array [nu x nt]): inputs for the process and observation model
nt (int): number of simulation steps
f (function): function handle for one-time step forward propagating the state; expected signature f(state, input, noise, ...)
h (function): function handle for retrieving the outputs of the system as a function of system states;
expected signature h(state, input, noise, ...)
num_out (int): number of outputs from function h
Q (numpy array [nq x nq]): noise covariance matrix involved in the stochastic model f
P0 (numpy array [n x n]): initial covariance for the initial estimate around the ground truth
R (numpy array [nr x nr]): covariance matrix of the noise involved in h function
Qu (numpy array [nqu x nqu]): noise covariance matrix involved in the input to the stochastic model f
additional_args_pm (list): list of additional arguments to be passed to function f
additional_args_om (list): list of additional arguments to be passed to function h
overwrite_inds (list): list of state indices to be overwritten
overwrite_vals (list): list of ground truth values to overwrite state propagation
Returns:
gt_states (numpy array [n x nt]): ground truth states at different time instances
initial_cond (numpy array [n x 1]): initial condition from Gaussian distribution with mean z0 and covariance P0
outputs (numpy array [num_out x nt]): simulated outputs of the system
additional_args_pm_list (2d list [len(additional_args_pm) x nt]): additional arguments to be passed to
function f at each time instant
additional_args_om_list (2d list [len(additional_args_om) x nt]): additional arguments to be passed to
function h at each time instant<|endoftext|> |
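A minimal end-to-end call of sample_nlds (an illustrative sketch: a scalar random walk observed in noise, matching the documented signatures of f and h):

import numpy as np

def f(x, u, w, wu):   # process model: random walk driven by process noise
    return x + w

def h(x, u, v):       # observation model: direct noisy read-out of the state
    return x + v

z0 = np.array([[0.0]])
gt, x0, ys, _, _ = sample_nlds(z0, [], 100, f, h, num_out=1,
                               Q=np.array([[0.01]]), P0=np.array([[1.0]]),
                               R=np.array([[0.1]]))
print(gt.shape, ys.shape)   # (1, 100) (1, 100)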
d143079353303749c0a3f0787b75fe5776dfc1c57871ecc831f09b93da2ba4e7 | def test_pbgf_linear(n=10, m=5, nt=10):
'\n Test the PointBasedFilter against KF when the problem is linear. Raises an error when the mean and covariance from\n PBGF differ from those of KF.\n\n Args:\n n (int): dimensionality of problem; defaults to 10\n m (int): number of outputs which are randomly selected from the states; defaults to 5\n nt (int): number of filtering iterations; defaults to 10\n\n '
np.random.seed(0)
X = (5.0 * np.random.randn(n, 1))
P = (10.0 * np.random.randn(n, n))
P = np.matmul(P, P.T)
dt = 0.05
J = (np.eye(n) + (dt * ((((- 2.0) * np.eye(n)) + np.diag(np.ones((n - 1)), 1)) + np.diag(np.ones((n - 1)), (- 1)))))
def process_model(x, u, noise, input_noise):
return (np.matmul(J, x) + noise)
Q = (5.0 * np.eye(n))
out_loc = np.random.permutation(n)[:m]
R = (1.0 * np.eye(m))
H = np.zeros((m, n))
l_ind = (out_loc + (np.arange(m) * n))
H.flat[l_ind] = 1.0
def observation_model(x, u, noise):
return (np.matmul(H, x) + noise)
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, m, Q, P, R)[0:3]
pbgf = PointBasedFilter('CKF', 2)
X1 = x0.copy()
X2 = x0.copy()
P1 = P.copy()
P2 = P.copy()
mse = np.zeros((nt, 1))
mse[0] = np.mean(((X1 - x_gt[:, 0]) ** 2))
trace = np.zeros(mse.shape)
trace[0] = np.trace(P1)
for i in range(1, nt):
X1 = process_model(X1, [], 0.0, 0.0)
P1 = (np.matmul(np.matmul(J, P1), J.T) + Q)
z = (outputs[:, i:(i + 1)] - observation_model(X1, [], 0.0))
S = (np.matmul(np.matmul(H, P1), H.T) + R)
K = np.matmul(np.matmul(P1, H.T), np.linalg.inv(S))
X1 += np.matmul(K, z)
P1 -= np.matmul(np.matmul(K, H), P1)
(X2, P2) = pbgf.predict_and_or_update(X2, P2, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
assert np.allclose(P1, P2), 'Covariance from KF and PBGF should be the same as problem is linear'
assert np.allclose(X1, X2), 'Expected Value from KF and PBGF should be the same as problem is linear'
mse[i] = np.mean(((X1 - x_gt[:, i]) ** 2))
trace[i] = np.trace(P1)
import matplotlib.pyplot as plt
plt.plot(mse, marker='x', label='mse')
plt.plot(trace, marker='o', label='trace')
plt.grid(True, 'both')
plt.legend()
plt.show() | Test the PointBasedFilter against KF when the problem is linear. Raises an error when the mean and covariance from
PBGF differ from those of KF.
Args:
n (int): dimensionality of problem; defaults to 10
m (int): number of outputs which are randomly selected from the states; defaults to 5
nt (int): number of filtering iterations; defaults to 10 | estimators.py | test_pbgf_linear | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def test_pbgf_linear(n=10, m=5, nt=10):
'\n Test the PointBasedFilter against KF when the problem is linear. Raises an error when the mean and covariance from\n PBGF differ from those of KF.\n\n Args:\n n (int): dimensionality of problem; defaults to 10\n m (int): number of outputs which are randomly selected from the states; defaults to 5\n nt (int): number of filtering iterations; defaults to 10\n\n '
np.random.seed(0)
X = (5.0 * np.random.randn(n, 1))
P = (10.0 * np.random.randn(n, n))
P = np.matmul(P, P.T)
dt = 0.05
J = (np.eye(n) + (dt * ((((- 2.0) * np.eye(n)) + np.diag(np.ones((n - 1)), 1)) + np.diag(np.ones((n - 1)), (- 1)))))
def process_model(x, u, noise, input_noise):
return (np.matmul(J, x) + noise)
Q = (5.0 * np.eye(n))
out_loc = np.random.permutation(n)[:m]
R = (1.0 * np.eye(m))
H = np.zeros((m, n))
l_ind = (out_loc + (np.arange(m) * n))
H.flat[l_ind] = 1.0
def observation_model(x, u, noise):
return (np.matmul(H, x) + noise)
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, m, Q, P, R)[0:3]
pbgf = PointBasedFilter('CKF', 2)
X1 = x0.copy()
X2 = x0.copy()
P1 = P.copy()
P2 = P.copy()
mse = np.zeros((nt, 1))
mse[0] = np.mean(((X1 - x_gt[:, 0]) ** 2))
trace = np.zeros(mse.shape)
trace[0] = np.trace(P1)
for i in range(1, nt):
X1 = process_model(X1, [], 0.0, 0.0)
P1 = (np.matmul(np.matmul(J, P1), J.T) + Q)
z = (outputs[:, i:(i + 1)] - observation_model(X1, [], 0.0))
S = (np.matmul(np.matmul(H, P1), H.T) + R)
K = np.matmul(np.matmul(P1, H.T), np.linalg.inv(S))
X1 += np.matmul(K, z)
P1 -= np.matmul(np.matmul(K, H), P1)
(X2, P2) = pbgf.predict_and_or_update(X2, P2, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
assert np.allclose(P1, P2), 'Covariance from KF and PBGF should be the same as problem is linear'
assert np.allclose(X1, X2), 'Expected Value from KF and PBGF should be the same as problem is linear'
mse[i] = np.mean(((X1 - x_gt[:, i]) ** 2))
trace[i] = np.trace(P1)
import matplotlib.pyplot as plt
plt.plot(mse, marker='x', label='mse')
plt.plot(trace, marker='o', label='trace')
plt.grid(True, 'both')
plt.legend()
plt.show() | def test_pbgf_linear(n=10, m=5, nt=10):
'\n Test the PointBasedFilter against KF when the problem is linear. Raises an error when the mean and covariance from\n PBGF differ from those of KF.\n\n Args:\n n (int): dimensionality of problem; defaults to 10\n m (int): number of outputs which are randomly selected from the states; defaults to 5\n nt (int): number of filtering iterations; defaults to 10\n\n '
np.random.seed(0)
X = (5.0 * np.random.randn(n, 1))
P = (10.0 * np.random.randn(n, n))
P = np.matmul(P, P.T)
dt = 0.05
J = (np.eye(n) + (dt * ((((- 2.0) * np.eye(n)) + np.diag(np.ones((n - 1)), 1)) + np.diag(np.ones((n - 1)), (- 1)))))
def process_model(x, u, noise, input_noise):
return (np.matmul(J, x) + noise)
Q = (5.0 * np.eye(n))
out_loc = np.random.permutation(n)[:m]
R = (1.0 * np.eye(m))
H = np.zeros((m, n))
l_ind = (out_loc + (np.arange(m) * n))
H.flat[l_ind] = 1.0
def observation_model(x, u, noise):
return (np.matmul(H, x) + noise)
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, m, Q, P, R)[0:3]
pbgf = PointBasedFilter('CKF', 2)
X1 = x0.copy()
X2 = x0.copy()
P1 = P.copy()
P2 = P.copy()
mse = np.zeros((nt, 1))
mse[0] = np.mean(((X1 - x_gt[:, 0]) ** 2))
trace = np.zeros(mse.shape)
trace[0] = np.trace(P1)
for i in range(1, nt):
X1 = process_model(X1, [], 0.0, 0.0)
P1 = (np.matmul(np.matmul(J, P1), J.T) + Q)
z = (outputs[:, i:(i + 1)] - observation_model(X1, [], 0.0))
S = (np.matmul(np.matmul(H, P1), H.T) + R)
K = np.matmul(np.matmul(P1, H.T), np.linalg.inv(S))
X1 += np.matmul(K, z)
P1 -= np.matmul(np.matmul(K, H), P1)
(X2, P2) = pbgf.predict_and_or_update(X2, P2, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
assert np.allclose(P1, P2), 'Covariance from KF and PBGF should be the same as problem is linear'
assert np.allclose(X1, X2), 'Expected Value from KF and PBGF should be the same as problem is linear'
mse[i] = np.mean(((X1 - x_gt[:, i]) ** 2))
trace[i] = np.trace(P1)
import matplotlib.pyplot as plt
plt.plot(mse, marker='x', label='mse')
plt.plot(trace, marker='o', label='trace')
plt.grid(True, 'both')
plt.legend()
plt.show()<|docstring|>Test the PointBasedFilter against KF when the problem is linear. Raises an error when the mean and covariance from
PBGF differ from those of KF.
Args:
n (int): dimensionality of problem; defaults to 10
m (int): number of outputs which are randomly selected from the states; defaults to 5
nt (int): number of filtering iterations; defaults to 10<|endoftext|> |
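Both test functions appear self-contained apart from estimators.py's own imports (numpy, math, matplotlib and the PointBasedFilter class presumably defined alongside them), so a smoke test is just a direct call, e.g.:

test_pbgf_linear(n=4, m=2, nt=25)   # asserts KF/PBGF agreement, then plots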
ccbd9b4e442c2dcbb6f839fae5b5538f2f2f655411945343431f032951430590 | def test_pbgf_1d_linear(gt_const=10.0, initial_cov=10.0, q_cov=0.01, r_cov=1.0, nt=50):
'\n Test the PBGF against KF when the problem is linear. This problem is a one-dimensional estimate of a random constant.\n\n Args:\n gt_const (float): parameter to be estimated; defaults to 10.0\n initial_cov (float): initial uncertainty of gt_const; defaults to 10.0\n q_cov (float): stochastic noise for evolution of the parameter; defaults to 1e-2, value of 0 implies parameter is constant\n r_cov (float): observation noise of the parameter; defaults to 1.0\n nt (int): number of filtering iterations; defaults to 50\n\n '
np.random.seed(0)
X = np.array([[gt_const]])
P = (initial_cov * np.ones((1, 1)))
def process_model(x, u=[], noise=0.0, input_noise=0.0):
return (x + noise)
def observation_model(x, u=[], noise=0.0):
return (x + noise)
R = np.array([[r_cov]])
Q = np.array([[q_cov]])
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, 1, Q, P, R)[0:3]
pbgf = PointBasedFilter('CKF', 2)
X1 = x0.copy()
X2 = x0.copy()
P1 = P.copy()
P2 = P.copy()
est_history = np.zeros((nt, 1))
est_history[0] = x0.copy()
mse = np.zeros((nt, 1))
mse[0] = np.mean(((X1 - x_gt[:, 0]) ** 2))
trace = np.zeros(mse.shape)
trace[0] = np.trace(P1)
for i in range(1, nt):
P1 = (P1 + Q)
z = (outputs[:, i:(i + 1)] - X1)
S = (P1 + R)
K = np.matmul(P1, np.linalg.inv(S))
X1 += np.matmul(K, z)
P1 -= np.matmul(K, P1)
(X2, P2) = pbgf.predict_and_or_update(X2, P2, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
assert np.allclose(P1, P2), 'Covariance from KF and PBGF should be the same as problem is linear'
assert np.allclose(X1, X2), 'Expected Value from KF and PBGF should be the same as problem is linear'
mse[i] = np.mean(((X1 - x_gt[:, i]) ** 2))
trace[i] = np.trace(P1)
est_history[i] = X1[:].copy()
import matplotlib.pyplot as plt
plt.plot(est_history, label='est_voltage')
plt.plot(x_gt[0, :], linestyle='--', label='real_voltage')
plt.plot(mse, marker='x', label='mse')
plt.plot(trace, marker='o', label='trace')
plt.grid(True, 'both')
plt.legend()
plt.show() | Test the PBGF against KF when the problem is linear. This problem is a one-dimensional estimate of a random constant.
Args:
gt_const (float): parameter to be estimated; defaults to 10.0
initial_cov (float): initial uncertainty of gt_const; defaults to 10.0
q_cov (float): stochastic noise for evolution of the parameter; defaults to 1e-2, value of 0 implies parameter is constant
r_cov (float): observation noise of the parameter; defaults to 1.0
nt (int): number of filtering iterations; defaults to 50 | estimators.py | test_pbgf_1d_linear | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def test_pbgf_1d_linear(gt_const=10.0, initial_cov=10.0, q_cov=0.01, r_cov=1.0, nt=50):
'\n Test the PBGF against KF when the problem is linear. This problem is a one-dimensional estimate of a random constant.\n\n Args:\n gt_const (float): parameter to be estimated; defaults to 10.0\n initial_cov (float): initial uncertainty of gt_const; defaults to 10.0\n q_cov (float): stochastic noise for evolution of the parameter; defaults to 1e-2, value of 0 implies parameter is constant\n r_cov (float): observation noise of the parameter; defaults to 1.0\n nt (int): number of filtering iterations; defaults to 50\n\n '
np.random.seed(0)
X = np.array([[gt_const]])
P = (initial_cov * np.ones((1, 1)))
def process_model(x, u=[], noise=0.0, input_noise=0.0):
return (x + noise)
def observation_model(x, u=[], noise=0.0):
return (x + noise)
R = np.array([[r_cov]])
Q = np.array([[q_cov]])
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, 1, Q, P, R)[0:3]
pbgf = PointBasedFilter('CKF', 2)
X1 = x0.copy()
X2 = x0.copy()
P1 = P.copy()
P2 = P.copy()
est_history = np.zeros((nt, 1))
est_history[0] = x0.copy()
mse = np.zeros((nt, 1))
mse[0] = np.mean(((X1 - x_gt[:, 0]) ** 2))
trace = np.zeros(mse.shape)
trace[0] = np.trace(P1)
for i in range(1, nt):
P1 = (P1 + Q)
z = (outputs[:, i:(i + 1)] - X1)
S = (P1 + R)
K = np.matmul(P1, np.linalg.inv(S))
X1 += np.matmul(K, z)
P1 -= np.matmul(K, P1)
(X2, P2) = pbgf.predict_and_or_update(X2, P2, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
assert np.allclose(P1, P2), 'Covariance from KF and PBGF should be the same as problem is linear'
assert np.allclose(X1, X2), 'Expected Value from KF and PBGF should be the same as problem is linear'
mse[i] = np.mean(((X1 - x_gt[:, i]) ** 2))
trace[i] = np.trace(P1)
est_history[i] = X1[:].copy()
import matplotlib.pyplot as plt
plt.plot(est_history, label='est_voltage')
plt.plot(x_gt[0, :], linestyle='--', label='real_voltage')
plt.plot(mse, marker='x', label='mse')
plt.plot(trace, marker='o', label='trace')
plt.grid(True, 'both')
plt.legend()
plt.show() | def test_pbgf_1d_linear(gt_const=10.0, initial_cov=10.0, q_cov=0.01, r_cov=1.0, nt=50):
'\n Test the PBGF against KF when problem is linear. This problem is one-dimensional estimate of a random constant.\n\n Args:\n gt_const (float): parameter to be estimated; defaults to 10.0\n initial_cov (float): initial uncertainty of gt_const; defaults to 10.0\n q_cov (float): stochastic noise for evolution of the parameter; defaults to 1e-2, value of 0 implies parameter is constant\n r_cov (float): observation noise of the parameter; defaults to 1.0\n nt (int): number of filtering iterations; defaults to 50\n\n '
np.random.seed(0)
X = np.array([[gt_const]])
P = (initial_cov * np.ones((1, 1)))
def process_model(x, u=[], noise=0.0, input_noise=0.0):
return (x + noise)
def observation_model(x, u=[], noise=0.0):
return (x + noise)
R = np.array([[r_cov]])
Q = np.array([[q_cov]])
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, 1, Q, P, R)[0:3]
pbgf = PointBasedFilter('CKF', 2)
X1 = x0.copy()
X2 = x0.copy()
P1 = P.copy()
P2 = P.copy()
est_history = np.zeros((nt, 1))
est_history[0] = x0.copy()
mse = np.zeros((nt, 1))
mse[0] = np.mean(((X1 - x_gt[:, 0]) ** 2))
trace = np.zeros(mse.shape)
trace[0] = np.trace(P1)
for i in range(1, nt):
P1 = (P1 + Q)
z = (outputs[:, i:(i + 1)] - X1)
S = (P1 + R)
K = np.matmul(P1, np.linalg.inv(S))
X1 += np.matmul(K, z)
P1 -= np.matmul(K, P1)
(X2, P2) = pbgf.predict_and_or_update(X2, P2, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
assert np.allclose(P1, P2), 'Covariance from KF and PBGF should be the same as problem is linear'
assert np.allclose(X1, X2), 'Expected Value from KF and PBGF should be the same as problem is linear'
mse[i] = np.mean(((X1 - x_gt[:, i]) ** 2))
trace[i] = np.trace(P1)
est_history[i] = X1[:].copy()
import matplotlib.pyplot as plt
plt.plot(est_history, label='est_voltage')
plt.plot(x_gt[0, :], linestyle='--', label='real_voltage')
plt.plot(mse, marker='x', label='mse')
plt.plot(trace, marker='o', label='trace')
plt.grid(True, 'both')
plt.legend()
plt.show()<|docstring|>Test the PBGF against KF when problem is linear. This problem is one-dimensional estimate of a random constant.
Args:
gt_const (float): parameter to be estimated; defaults to 10.0
initial_cov (float): initial uncertainty of gt_const; defaults to 10.0
q_cov (float): stochastic noise for evolution of the parameter; defaults to 1e-2, value of 0 implies parameter is constant
r_cov (float): observation noise of the parameter; defaults to 1.0
nt (int): number of filtering iterations; defaults to 50<|endoftext|> |
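The hand-rolled loop in test_pbgf_1d_linear is exactly the scalar Kalman filter. Below is a self-contained sketch of that recursion (my own condensation, assuming additive Gaussian noise; the q/r parameters mirror the test's q_cov/r_cov):

import numpy as np

def scalar_kf(meas, x0, p0, q, r):
    # Scalar Kalman filter for a random-walk parameter, matching the loop above.
    x, p = float(x0), float(p0)
    history = []
    for yk in meas:
        p += q                    # predict: variance grows by process noise
        k = p / (p + r)           # Kalman gain
        x += k * (yk - x)         # update the mean with the innovation
        p *= (1.0 - k)            # posterior variance
        history.append(x)
    return np.array(history)

rng = np.random.default_rng(0)
est = scalar_kf(10.0 + rng.standard_normal(50), 0.0, 10.0, 1e-2, 1.0)
print(est[-1])  # converges close to the true constant 10.0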
05b3fd24879b4342d456d9c879be399145cfc7beb2f0fcc0ca87cd13e90a2144 | def test_pbgf_fixed_lag_smoothing_linear(n=10, m=5, nt=10, lag_interval=5):
'\n Test the PBGF smoothed estimate against filtered estimate. This problem is the same as that of test_pbgf_linear.\n\n Args:\n n (int): dimensionality of problem; defaults to 10\n m (int): number of outputs which are randomly selected from the states; defaults to 5\n nt (int): number of filtering iterations; defaults to 10\n lag_interval (int): lag interval for producing smoothed estimate\n\n '
np.random.seed(0)
X = (5.0 * np.random.randn(n, 1))
P = (10.0 * np.random.randn(n, n))
P = np.matmul(P, P.T)
dt = 0.05
J = (np.eye(n) + (dt * ((((- 2.0) * np.eye(n)) + np.diag(np.ones((n - 1)), 1)) + np.diag(np.ones((n - 1)), (- 1)))))
def process_model(x, u, noise, input_noise):
return (np.matmul(J, x) + noise)
Q = (5.0 * np.eye(n))
out_loc = np.random.permutation(n)[:m]
R = (1.0 * np.eye(m))
H = np.zeros((m, n))
l_ind = (out_loc + (np.arange(m) * n))
H.flat[l_ind] = 1.0
def observation_model(x, u, noise):
return (np.matmul(H, x) + noise)
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, m, Q, P, R)[0:3]
pbgf_filt = PointBasedFilter('CKF', 2)
pbgf_smooth = PointBasedFixedLagSmoother('CKF', 2, lag_interval)
pbgf_smooth_aug = PointBasedFixedLagSmootherAugmented('CKF', 2, lag_interval)
X_filt = x0.copy()
X_smooth = x0.copy()
P_filt = P.copy()
P_smooth = P.copy()
pbgf_smooth.set_initial_cond(X_smooth, P_smooth)
pbgf_smooth_aug.set_initial_cond(X_smooth, P_smooth)
X_filt_hist = np.zeros((X_filt.shape[0], nt))
X_filt_hist[:, 0:1] = X_filt.copy()
X_smooth_hist = np.zeros((X_smooth.shape[0], nt))
X_smooth_hist[:, 0:1] = X_smooth.copy()
mse_filt = np.zeros((nt, 1))
error = (X_filt - x_gt[:, 0:1])
mse_filt[0] = np.mean((error ** 2))
nees_filt = np.zeros((nt, 1))
nees_filt[0] = np.matmul(np.matmul(error.T, np.linalg.inv(P_filt)), error)
mse_smooth = np.zeros((nt, 1))
error = (X_smooth - x_gt[:, 0:1])
mse_smooth[0] = np.mean((error ** 2))
nees_smooth = np.zeros((nt, 1))
nees_smooth[0] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth)), error)
dentropy_filt = np.zeros((nt, 1))
dentropy_filt[0] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_filt))))
dentropy_smooth = np.zeros((nt, 1))
dentropy_smooth[0] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth))))
for i in range(1, nt):
(X_filt, P_filt) = pbgf_filt.predict_and_or_update(X_filt, P_filt, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
X_filt_hist[:, i:(i + 1)] = X_filt.copy()
error = (X_filt - x_gt[:, i:(i + 1)])
mse_filt[i] = np.mean((error ** 2))
nees_filt[i] = np.matmul(np.matmul(error.T, np.linalg.inv(P_filt)), error)
dentropy_filt[i] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_filt))))
(X_smooth_aug, P_smooth_aug) = pbgf_smooth_aug.predict_and_or_update(process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
(X_smooth_fi, P_smooth_fi, smooth_flag) = pbgf_smooth.predict_and_or_update(process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
if (smooth_flag and ((i - lag_interval) >= 0)):
assert np.allclose(X_smooth_aug, X_smooth_fi[0]), 'Backward pass smoother mean is not equivalent to that from augmented implementation'
assert np.allclose(P_smooth_aug, P_smooth_fi[0]), 'Backward pass smoother covariance is not equivalent to that from augmented implementation'
X_smooth_hist[:, (i - lag_interval):((i - lag_interval) + 1)] = X_smooth_fi[0].copy()
error = (X_smooth_fi[0] - x_gt[:, (i - lag_interval):((i - lag_interval) + 1)])
mse_smooth[(i - lag_interval)] = np.mean((error ** 2))
nees_smooth[(i - lag_interval)] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth_fi[0])), error)
dentropy_smooth[(i - lag_interval)] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth_fi[0]))))
if (i == (nt - 1)):
for k in range(1, len(X_smooth_fi)):
error = (X_smooth_fi[k] - x_gt[:, ((i - lag_interval) + k):(((i - lag_interval) + k) + 1)])
mse_smooth[((i - lag_interval) + k)] = np.mean((error ** 2))
nees_smooth[((i - lag_interval) + k)] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth_fi[k])), error)
dentropy_smooth[((i - lag_interval) + k)] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth_fi[k]))))
X_smooth_hist[:, ((i - lag_interval) + k):(((i - lag_interval) + k) + 1)] = X_smooth_fi[k].copy()
assert np.allclose(X_smooth_fi[-1], X_filt), 'Filtered mean from smoothing and filtering are not the same'
assert np.allclose(P_smooth_fi[-1], P_filt), 'Filtered covariance from smoothing and filtering are not the same'
for i in range((nt - 1)):
assert (dentropy_smooth[i] <= dentropy_filt[i]), 'Smoothed entropy should be lower than filtered estimate'
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.plot(mse_filt, marker='o', label='filtered')
plt.plot(mse_smooth, marker='o', label='smoothed')
plt.legend()
plt.xlabel('Time instance')
plt.ylabel('MSE')
plt.grid(True, 'both')
plt.subplot(2, 1, 2)
plt.plot(nees_filt, marker='o', label='filtered')
plt.plot(nees_smooth, marker='o', label='smoothed')
plt.legend()
plt.xlabel('Time instance')
plt.ylabel('NEES')
plt.grid(True, 'both')
plt.show() | Test the PBGF smoothed estimate against filtered estimate. This problem is the same as that of test_pbgf_linear.
Args:
n (int): dimensionality of problem; defaults to 10
m (int): number of outputs which are randomly selected from the states; defaults to 5
nt (int): number of filtering iterations; defaults to 10
lag_interval (int): lag interval for producing smoothed estimate | estimators.py | test_pbgf_fixed_lag_smoothing_linear | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def test_pbgf_fixed_lag_smoothing_linear(n=10, m=5, nt=10, lag_interval=5):
'\n Test the PBGF smoothed estimate against filtered estimate. This problem is the same as that of test_pbgf_linear.\n\n Args:\n n (int): dimensionality of problem; defaults to 10\n m (int): number of outputs which are randomly selected from the states; defaults to 5\n nt (int): number of filtering iterations; defaults to 10\n lag_interval (int): lag interval for producing smoothed estimate\n\n '
np.random.seed(0)
X = (5.0 * np.random.randn(n, 1))
P = (10.0 * np.random.randn(n, n))
P = np.matmul(P, P.T)
dt = 0.05
J = (np.eye(n) + (dt * ((((- 2.0) * np.eye(n)) + np.diag(np.ones((n - 1)), 1)) + np.diag(np.ones((n - 1)), (- 1)))))
def process_model(x, u, noise, input_noise):
return (np.matmul(J, x) + noise)
Q = (5.0 * np.eye(n))
out_loc = np.random.permutation(n)[:m]
R = (1.0 * np.eye(m))
H = np.zeros((m, n))
l_ind = (out_loc + (np.arange(m) * n))
H.flat[l_ind] = 1.0
def observation_model(x, u, noise):
return (np.matmul(H, x) + noise)
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, m, Q, P, R)[0:3]
pbgf_filt = PointBasedFilter('CKF', 2)
pbgf_smooth = PointBasedFixedLagSmoother('CKF', 2, lag_interval)
pbgf_smooth_aug = PointBasedFixedLagSmootherAugmented('CKF', 2, lag_interval)
X_filt = x0.copy()
X_smooth = x0.copy()
P_filt = P.copy()
P_smooth = P.copy()
pbgf_smooth.set_initial_cond(X_smooth, P_smooth)
pbgf_smooth_aug.set_initial_cond(X_smooth, P_smooth)
X_filt_hist = np.zeros((X_filt.shape[0], nt))
X_filt_hist[:, 0:1] = X_filt.copy()
X_smooth_hist = np.zeros((X_smooth.shape[0], nt))
X_smooth_hist[:, 0:1] = X_smooth.copy()
mse_filt = np.zeros((nt, 1))
error = (X_filt - x_gt[:, 0:1])
mse_filt[0] = np.mean((error ** 2))
nees_filt = np.zeros((nt, 1))
nees_filt[0] = np.matmul(np.matmul(error.T, np.linalg.inv(P_filt)), error)
mse_smooth = np.zeros((nt, 1))
error = (X_smooth - x_gt[:, 0:1])
mse_smooth[0] = np.mean((error ** 2))
nees_smooth = np.zeros((nt, 1))
nees_smooth[0] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth)), error)
dentropy_filt = np.zeros((nt, 1))
dentropy_filt[0] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_filt))))
dentropy_smooth = np.zeros((nt, 1))
dentropy_smooth[0] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth))))
for i in range(1, nt):
(X_filt, P_filt) = pbgf_filt.predict_and_or_update(X_filt, P_filt, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
X_filt_hist[:, i:(i + 1)] = X_filt.copy()
error = (X_filt - x_gt[:, i:(i + 1)])
mse_filt[i] = np.mean((error ** 2))
nees_filt[i] = np.matmul(np.matmul(error.T, np.linalg.inv(P_filt)), error)
dentropy_filt[i] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_filt))))
(X_smooth_aug, P_smooth_aug) = pbgf_smooth_aug.predict_and_or_update(process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
(X_smooth_fi, P_smooth_fi, smooth_flag) = pbgf_smooth.predict_and_or_update(process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
if (smooth_flag and ((i - lag_interval) >= 0)):
assert np.allclose(X_smooth_aug, X_smooth_fi[0]), 'Backward pass smoother mean is not equivalent to that from augmented implementation'
assert np.allclose(P_smooth_aug, P_smooth_fi[0]), 'Backward pass smoother covariance is not equivalent to that from augmented implementation'
X_smooth_hist[:, (i - lag_interval):((i - lag_interval) + 1)] = X_smooth_fi[0].copy()
error = (X_smooth_fi[0] - x_gt[:, (i - lag_interval):((i - lag_interval) + 1)])
mse_smooth[(i - lag_interval)] = np.mean((error ** 2))
nees_smooth[(i - lag_interval)] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth_fi[0])), error)
dentropy_smooth[(i - lag_interval)] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth_fi[0]))))
if (i == (nt - 1)):
for k in range(1, len(X_smooth_fi)):
error = (X_smooth_fi[k] - x_gt[:, ((i - lag_interval) + k):(((i - lag_interval) + k) + 1)])
mse_smooth[((i - lag_interval) + k)] = np.mean((error ** 2))
nees_smooth[((i - lag_interval) + k)] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth_fi[k])), error)
dentropy_smooth[((i - lag_interval) + k)] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth_fi[k]))))
X_smooth_hist[:, ((i - lag_interval) + k):(((i - lag_interval) + k) + 1)] = X_smooth_fi[k].copy()
assert np.allclose(X_smooth_fi[-1], X_filt), 'Filtered mean from smoothing and filtering are not the same'
assert np.allclose(P_smooth_fi[-1], P_filt), 'Filtered covariance from smoothing and filtering are not the same'
for i in range((nt - 1)):
assert (dentropy_smooth[i] <= dentropy_filt[i]), 'Smoothed entropy should be lower than filtered estimate'
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.plot(mse_filt, marker='o', label='filtered')
plt.plot(mse_smooth, marker='o', label='smoothed')
plt.legend()
plt.xlabel('Time instance')
plt.ylabel('MSE')
plt.grid(True, 'both')
plt.subplot(2, 1, 2)
plt.plot(nees_filt, marker='o', label='filtered')
plt.plot(nees_smooth, marker='o', label='smoothed')
plt.legend()
plt.xlabel('Time instance')
plt.ylabel('NEES')
plt.grid(True, 'both')
plt.show() | def test_pbgf_fixed_lag_smoothing_linear(n=10, m=5, nt=10, lag_interval=5):
'\n Test the PBGF smoothed estimate against filtered estimate. This problem is the same as that of test_pbgf_linear.\n\n Args:\n n (int): dimensionality of problem; defaults to 10\n m (int): number of outputs which are randomly selected from the states; defaults to 5\n nt (int): number of filtering iterations; defaults to 10\n lag_interval (int): lag interval for producing smoothed estimate\n\n '
np.random.seed(0)
X = (5.0 * np.random.randn(n, 1))
P = (10.0 * np.random.randn(n, n))
P = np.matmul(P, P.T)
dt = 0.05
J = (np.eye(n) + (dt * ((((- 2.0) * np.eye(n)) + np.diag(np.ones((n - 1)), 1)) + np.diag(np.ones((n - 1)), (- 1)))))
def process_model(x, u, noise, input_noise):
return (np.matmul(J, x) + noise)
Q = (5.0 * np.eye(n))
out_loc = np.random.permutation(n)[:m]
R = (1.0 * np.eye(m))
H = np.zeros((m, n))
l_ind = (out_loc + (np.arange(m) * n))
H.flat[l_ind] = 1.0
def observation_model(x, u, noise):
return (np.matmul(H, x) + noise)
(x_gt, x0, outputs) = sample_nlds(X, [], nt, process_model, observation_model, m, Q, P, R)[0:3]
pbgf_filt = PointBasedFilter('CKF', 2)
pbgf_smooth = PointBasedFixedLagSmoother('CKF', 2, lag_interval)
pbgf_smooth_aug = PointBasedFixedLagSmootherAugmented('CKF', 2, lag_interval)
X_filt = x0.copy()
X_smooth = x0.copy()
P_filt = P.copy()
P_smooth = P.copy()
pbgf_smooth.set_initial_cond(X_smooth, P_smooth)
pbgf_smooth_aug.set_initial_cond(X_smooth, P_smooth)
X_filt_hist = np.zeros((X_filt.shape[0], nt))
X_filt_hist[:, 0:1] = X_filt.copy()
X_smooth_hist = np.zeros((X_smooth.shape[0], nt))
X_smooth_hist[:, 0:1] = X_smooth.copy()
mse_filt = np.zeros((nt, 1))
error = (X_filt - x_gt[:, 0:1])
mse_filt[0] = np.mean((error ** 2))
nees_filt = np.zeros((nt, 1))
nees_filt[0] = np.matmul(np.matmul(error.T, np.linalg.inv(P_filt)), error)
mse_smooth = np.zeros((nt, 1))
error = (X_smooth - x_gt[:, 0:1])
mse_smooth[0] = np.mean((error ** 2))
nees_smooth = np.zeros((nt, 1))
nees_smooth[0] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth)), error)
dentropy_filt = np.zeros((nt, 1))
dentropy_filt[0] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_filt))))
dentropy_smooth = np.zeros((nt, 1))
dentropy_smooth[0] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth))))
for i in range(1, nt):
(X_filt, P_filt) = pbgf_filt.predict_and_or_update(X_filt, P_filt, process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
X_filt_hist[:, i:(i + 1)] = X_filt.copy()
error = (X_filt - x_gt[:, i:(i + 1)])
mse_filt[i] = np.mean((error ** 2))
nees_filt[i] = np.matmul(np.matmul(error.T, np.linalg.inv(P_filt)), error)
dentropy_filt[i] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_filt))))
(X_smooth_aug, P_smooth_aug) = pbgf_smooth_aug.predict_and_or_update(process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
(X_smooth_fi, P_smooth_fi, smooth_flag) = pbgf_smooth.predict_and_or_update(process_model, observation_model, Q, R, np.array([]), outputs[:, i:(i + 1)])
if (smooth_flag and ((i - lag_interval) >= 0)):
assert np.allclose(X_smooth_aug, X_smooth_fi[0]), 'Backward pass smoother mean is not equivalent to that from augmented implementation'
assert np.allclose(P_smooth_aug, P_smooth_fi[0]), 'Backward pass smoother covariance is not equivalent to that from augmented implementation'
X_smooth_hist[:, (i - lag_interval):((i - lag_interval) + 1)] = X_smooth_fi[0].copy()
error = (X_smooth_fi[0] - x_gt[:, (i - lag_interval):((i - lag_interval) + 1)])
mse_smooth[(i - lag_interval)] = np.mean((error ** 2))
nees_smooth[(i - lag_interval)] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth_fi[0])), error)
dentropy_smooth[(i - lag_interval)] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth_fi[0]))))
if (i == (nt - 1)):
for k in range(1, len(X_smooth_fi)):
error = (X_smooth_fi[k] - x_gt[:, ((i - lag_interval) + k):(((i - lag_interval) + k) + 1)])
mse_smooth[((i - lag_interval) + k)] = np.mean((error ** 2))
nees_smooth[((i - lag_interval) + k)] = np.matmul(np.matmul(error.T, np.linalg.inv(P_smooth_fi[k])), error)
dentropy_smooth[((i - lag_interval) + k)] = (((0.5 * n) * (1.0 + np.log((2 * math.pi)))) + (0.5 * np.log(np.linalg.det(P_smooth_fi[k]))))
X_smooth_hist[:, ((i - lag_interval) + k):(((i - lag_interval) + k) + 1)] = X_smooth_fi[k].copy()
assert np.allclose(X_smooth_fi[-1], X_filt), 'Filtered mean from smoothing and filtering are not the same'
assert np.allclose(P_smooth_fi[-1], P_filt), 'Filtered covariance from smoothing and filtering are not the same'
for i in range((nt - 1)):
assert (dentropy_smooth[i] <= dentropy_filt[i]), 'Smoothed entropy should be lower than filtered estimate'
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.plot(mse_filt, marker='o', label='filtered')
plt.plot(mse_smooth, marker='o', label='smoothed')
plt.legend()
plt.xlabel('Time instance')
plt.ylabel('MSE')
plt.grid(True, 'both')
plt.subplot(2, 1, 2)
plt.plot(nees_filt, marker='o', label='filtered')
plt.plot(nees_smooth, marker='o', label='smoothed')
plt.legend()
plt.xlabel('Time instance')
plt.ylabel('NEES')
plt.grid(True, 'both')
plt.show()<|docstring|>Test the PBGF smoothed estimate against filtered estimate. This problem is the same as that of test_pbgf_linear.
Args:
n (int): dimensionality of problem; defaults to 10
m (int): number of outputs which are randomly selected from the states; defaults to 5
nt (int): number of filtering iterations; defaults to 10
lag_interval (int): lag interval for producing smoothed estimate<|endoftext|> |
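The smoother test scores estimates with three quantities; the helper below (a sketch distilled from the formulas above, not a repository function) makes the definitions explicit:

import numpy as np

def consistency_metrics(x_est, P, x_true):
    # MSE, NEES and Gaussian differential entropy, as computed in the test.
    e = x_est - x_true                        # (n, 1) estimation error
    n = e.shape[0]
    mse = float(np.mean(e ** 2))
    nees = float(e.T @ np.linalg.inv(P) @ e)  # ~chi-square with n dof if consistent
    entropy = 0.5 * n * (1.0 + np.log(2.0 * np.pi)) + 0.5 * np.log(np.linalg.det(P))
    return mse, nees, entropy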
b6c200801a91d433ac12624956004c85406a4cf5b36ca3b536faaad31f455a77 | def predict_and_or_update(self, X, P, f, h, Q, R, u, y, u_next=None, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update.\n algorithm reference: Algorithm 5.1, page 104 of "Compressed Estimation in Coupled High-dimensional Processes"\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n u_next (*): next input required for function h, defaults to None which will take values of u\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X (numpy array [n x 1]): expected value of the states after prediction and update\n P (numpy array [n x n]): covariance of the states after prediction and update\n\n '
n = len(X)
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
if self.use_torch_tensor:
Qu = torch.zeros((nqu, nqu), dtype=X.dtype, device=self.tensor_device)
else:
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
if self.use_torch_tensor:
X1 = torch.cat((X, torch.zeros((((nq + nqu) + nr), 1), dtype=X.dtype, device=self.tensor_device)), dim=0)
P1 = torch.block_diag(P, Q, Qu, R)
else:
X1 = np.concatenate((X, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(P, Q, Qu, R)
if (u_next is None):
u_next = u
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
ia = np.arange(n)
if predict_flag:
ib = np.arange(n, (n + nq))
ic = np.arange((n + nq), ((n + nq) + nqu))
(X2, x2, P2, x2_cent) = self.unscented_transformF(x, W, WeightMat, L, f, u, ia, ib, ic, additional_args_pm)
else:
X2 = X
P2 = P
x2 = x
x2_cent = (x[ia, :] - X)
if len(y):
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange(((n + nq) + nqu), (((n + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u_next, ia, ip, len(y), additional_args_om)
if self.use_torch_tensor:
Pxy = torch.matmul(torch.matmul(x2_cent, WeightMat), z2.T)
K = torch.matmul(Pxy, torch.linalg.inv(Pz))
else:
Pxy = np.matmul(np.matmul(x2_cent, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[key, :] = innovation_bound_func[key](innovation[key, :])
if self.use_torch_tensor:
X3 = (X2 + torch.matmul(K, innovation))
P3 = (P2 - torch.matmul(K, Pxy.T))
else:
X3 = (X2 + np.matmul(K, innovation))
P3 = (P2 - np.matmul(K, Pxy.T))
else:
X3 = X2
P3 = P2
return (X3, P3) | Perform one iteration of prediction and/or update.
algorithm reference: Algorithm 5.1, page 104 of "Compressed Estimation in Coupled High-dimensional Processes"
Args:
X (numpy array [n x 1]): expected value of the states
P (numpy array [n x n]): covariance of the states
f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)
h (function): function handle for the observation model; expected signature h(state, input, noise, ...)
Q (numpy array [nq x nq]): process model noise covariance in the prediction step
R (numpy array [nr x nr]): observation model noise covariance in the update step
u (*): current input required for function f & possibly function h
y (numpy array [nu x 1]): current measurement/output of the system
u_next (*): next input required for function h, defaults to None which will take values of u
Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step
additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step
additional_args_om (list): list of additional arguments to be passed to the observation model during the update step
innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound
innovation when needed
predict_flag (bool): perform prediction? defaults to true
Returns:
X (numpy array [n x 1]): expected value of the states after prediction and update
P (numpy array [n x n]): covariance of the states after prediction and update | estimators.py | predict_and_or_update | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def predict_and_or_update(self, X, P, f, h, Q, R, u, y, u_next=None, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update.\n algorithm reference: Algorithm 5.1, page 104 of "Compressed Estimation in Coupled High-dimensional Processes"\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n u_next (*): next input required for function h, defaults to None which will take values of u\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X (numpy array [n x 1]): expected value of the states after prediction and update\n P (numpy array [n x n]): covariance of the states after prediction and update\n\n '
n = len(X)
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
if self.use_torch_tensor:
Qu = torch.zeros((nqu, nqu), dtype=X.dtype, device=self.tensor_device)
else:
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
if self.use_torch_tensor:
X1 = torch.cat((X, torch.zeros((((nq + nqu) + nr), 1), dtype=X.dtype, device=self.tensor_device)), dim=0)
P1 = torch.block_diag(P, Q, Qu, R)
else:
X1 = np.concatenate((X, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(P, Q, Qu, R)
if (u_next is None):
u_next = u
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
ia = np.arange(n)
if predict_flag:
ib = np.arange(n, (n + nq))
ic = np.arange((n + nq), ((n + nq) + nqu))
(X2, x2, P2, x2_cent) = self.unscented_transformF(x, W, WeightMat, L, f, u, ia, ib, ic, additional_args_pm)
else:
X2 = X
P2 = P
x2 = x
x2_cent = (x[ia, :] - X)
if len(y):
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange(((n + nq) + nqu), (((n + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u_next, ia, ip, len(y), additional_args_om)
if self.use_torch_tensor:
Pxy = torch.matmul(torch.matmul(x2_cent, WeightMat), z2.T)
K = torch.matmul(Pxy, torch.linalg.inv(Pz))
else:
Pxy = np.matmul(np.matmul(x2_cent, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[key, :] = innovation_bound_func[key](innovation[key, :])
if self.use_torch_tensor:
X3 = (X2 + torch.matmul(K, innovation))
P3 = (P2 - torch.matmul(K, Pxy.T))
else:
X3 = (X2 + np.matmul(K, innovation))
P3 = (P2 - np.matmul(K, Pxy.T))
else:
X3 = X2
P3 = P2
return (X3, P3) | def predict_and_or_update(self, X, P, f, h, Q, R, u, y, u_next=None, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update.\n algorithm reference: Algorithm 5.1, page 104 of "Compressed Estimation in Coupled High-dimensional Processes"\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n u_next (*): next input required for function h, defaults to None which will take values of u\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X (numpy array [n x 1]): expected value of the states after prediction and update\n P (numpy array [n x n]): covariance of the states after prediction and update\n\n '
n = len(X)
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
if self.use_torch_tensor:
Qu = torch.zeros((nqu, nqu), dtype=X.dtype, device=self.tensor_device)
else:
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
if self.use_torch_tensor:
X1 = torch.cat((X, torch.zeros((((nq + nqu) + nr), 1), dtype=X.dtype, device=self.tensor_device)), dim=0)
P1 = torch.block_diag(P, Q, Qu, R)
else:
X1 = np.concatenate((X, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(P, Q, Qu, R)
if (u_next is None):
u_next = u
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
ia = np.arange(n)
if predict_flag:
ib = np.arange(n, (n + nq))
ic = np.arange((n + nq), ((n + nq) + nqu))
(X2, x2, P2, x2_cent) = self.unscented_transformF(x, W, WeightMat, L, f, u, ia, ib, ic, additional_args_pm)
else:
X2 = X
P2 = P
x2 = x
x2_cent = (x[ia, :] - X)
if len(y):
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange(((n + nq) + nqu), (((n + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u_next, ia, ip, len(y), additional_args_om)
if self.use_torch_tensor:
Pxy = torch.matmul(torch.matmul(x2_cent, WeightMat), z2.T)
K = torch.matmul(Pxy, torch.linalg.inv(Pz))
else:
Pxy = np.matmul(np.matmul(x2_cent, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[key, :] = innovation_bound_func[key](innovation[key, :])
if self.use_torch_tensor:
X3 = (X2 + torch.matmul(K, innovation))
P3 = (P2 - torch.matmul(K, Pxy.T))
else:
X3 = (X2 + np.matmul(K, innovation))
P3 = (P2 - np.matmul(K, Pxy.T))
else:
X3 = X2
P3 = P2
return (X3, P3)<|docstring|>Perform one iteration of prediction and/or update.
algorithm reference: Algorithm 5.1, page 104 of "Compressed Estimation in Coupled High-dimensional Processes"
Args:
X (numpy array [n x 1]): expected value of the states
P (numpy array [n x n]): covariance of the states
f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)
h (function): function handle for the observation model; expected signature h(state, input, noise, ...)
Q (numpy array [nq x nq]): process model noise covariance in the prediction step
R (numpy array [nr x nr]): observation model noise covariance in the update step
u (*): current input required for function f & possibly function h
y (numpy array [nu x 1]): current measurement/output of the system
u_next (*): next input required for function h, defaults to None which will take values of u
Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step
additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step
additional_args_om (list): list of additional arguments to be passed to the observation model during the update step
innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound
innovation when needed
predict_flag (bool): perform prediction? defaults to true
Returns:
X (numpy array [n x 1]): expected value of the states after prediction and update
P (numpy array [n x n]): covariance of the states after prediction and update<|endoftext|> |
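For intuition, here is the moment-form measurement update that predict_and_or_update performs, written standalone for a linear observation model (an editorial sketch with my own variable names; for linear h, the cross- and output covariances reduce to the familiar Kalman quantities):

import numpy as np

rng = np.random.default_rng(1)
n, m = 4, 2
X = rng.standard_normal((n, 1))               # prior mean
A = rng.standard_normal((n, n))
P = A @ A.T + n * np.eye(n)                   # prior covariance (SPD)
H = rng.standard_normal((m, n))               # linear observation model
R = np.eye(m)
y = rng.standard_normal((m, 1))               # measurement

Z = H @ X                                     # predicted output
Pz = H @ P @ H.T + R                          # output covariance
Pxy = P @ H.T                                 # state-output cross-covariance
K = Pxy @ np.linalg.inv(Pz)                   # gain, K = Pxy Pz^-1 as in the method
X_post = X + K @ (y - Z)                      # mean update (innovation bounding omitted)
P_post = P - K @ Pxy.T                        # covariance update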
f020104f7d90076d0c3159a7dc9b31b3c7560b0dd99de0c3c1642884707c1ad2 | def unscented_transformH(self, x, W, WeightMat, L, f, u, ia, iq, n, additional_args):
'\n Function to propagate sigma/cubature points through observation function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]): 1D Weight array\n WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal\n L (int): number of points\n f (function): function handle for the observation model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points\n iq (numpy array [n_q x 1]): row indices of the observation noise in sigma/cubature points\n n (int): dimensionality of output or return from function f\n additional_args (list): list of additional arguments to be passed to the observation model\n\n Returns:\n Y (numpy array [n x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n x L]): Transformed sigma/cubature points\n P (numpy array [n x n]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n x L]): zero-mean Transformed sigma/cubature points\n\n '
if self.use_torch_tensor:
Y = torch.zeros((n, 1), dtype=x.dtype, device=self.tensor_device)
y = torch.zeros((n, L), dtype=x.dtype, device=self.tensor_device)
else:
Y = np.zeros((n, 1))
y = np.zeros((n, L))
for k in range(L):
y[:, k] = f(x[ia, k], u, x[iq, k], *additional_args)
if self.use_torch_tensor:
Y += (W[0, k] * y[:, k:(k + 1)])
else:
Y += (W.flat[k] * y[:, k:(k + 1)])
y1 = (y - Y)
if self.use_torch_tensor:
P = torch.matmul(torch.matmul(y1, WeightMat), y1.T)
else:
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | Function to propagate sigma/cubature points through observation function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array
WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal
L (int): number of points
f (function): function handle for the observation model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points
iq (numpy array [n_q x 1]): row indices of the observation noise in sigma/cubature points
n (int): dimensionality of output or return from function f
additional_args (list): list of additional arguments to be passed to the observation model
Returns:
Y (numpy array [n x 1]): Expected value vector of the result from transformation function f
y (numpy array [n x L]): Transformed sigma/cubature points
P (numpy array [n x n]): Covariance matrix of the result from transformation function f
y1 (numpy array [n x L]): zero-mean Transformed sigma/cubature points | estimators.py | unscented_transformH | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def unscented_transformH(self, x, W, WeightMat, L, f, u, ia, iq, n, additional_args):
'\n Function to propagate sigma/cubature points through observation function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]): 1D Weight array\n WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal\n L (int): number of points\n f (function): function handle for the observation model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points\n iq (numpy array [n_q x 1]): row indices of the observation noise in sigma/cubature points\n n (int): dimensionality of output or return from function f\n additional_args (list): list of additional arguments to be passed to the observation model\n\n Returns:\n Y (numpy array [n x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n x L]): Transformed sigma/cubature points\n P (numpy array [n x n]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n x L]): zero-mean Transformed sigma/cubature points\n\n '
if self.use_torch_tensor:
Y = torch.zeros((n, 1), dtype=x.dtype, device=self.tensor_device)
y = torch.zeros((n, L), dtype=x.dtype, device=self.tensor_device)
else:
Y = np.zeros((n, 1))
y = np.zeros((n, L))
for k in range(L):
y[:, k] = f(x[ia, k], u, x[iq, k], *additional_args)
if self.use_torch_tensor:
Y += (W[0, k] * y[:, k:(k + 1)])
else:
Y += (W.flat[k] * y[:, k:(k + 1)])
y1 = (y - Y)
if self.use_torch_tensor:
P = torch.matmul(torch.matmul(y1, WeightMat), y1.T)
else:
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | def unscented_transformH(self, x, W, WeightMat, L, f, u, ia, iq, n, additional_args):
'\n Function to propagate sigma/cubature points through observation function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]): 1D Weight array\n WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal\n L (int): number of points\n f (function): function handle for the observation model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points\n iq (numpy array [n_q x 1]): row indices of the observation noise in sigma/cubature points\n n (int): dimensionality of output or return from function f\n additional_args (list): list of additional arguments to be passed to the observation model\n\n Returns:\n Y (numpy array [n x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n x L]): Transformed sigma/cubature points\n P (numpy array [n x n]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n x L]): zero-mean Transformed sigma/cubature points\n\n '
if self.use_torch_tensor:
Y = torch.zeros((n, 1), dtype=x.dtype, device=self.tensor_device)
y = torch.zeros((n, L), dtype=x.dtype, device=self.tensor_device)
else:
Y = np.zeros((n, 1))
y = np.zeros((n, L))
for k in range(L):
y[:, k] = f(x[ia, k], u, x[iq, k], *additional_args)
if self.use_torch_tensor:
Y += (W[0, k] * y[:, k:(k + 1)])
else:
Y += (W.flat[k] * y[:, k:(k + 1)])
y1 = (y - Y)
if self.use_torch_tensor:
P = torch.matmul(torch.matmul(y1, WeightMat), y1.T)
else:
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1)<|docstring|>Function to propagate sigma/cubature points through observation function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array
WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal
L (int): number of points
f (function): function handle for the observation model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points
iq (numpy array [n_q x 1]): row indices of the observation noise in sigma/cubature points
n (int): dimensionality of output or return from function f
additional_args (list): list of additional arguments to be passed to the observation model
Returns:
Y (numpy array [n x 1]): Expected value vector of the result from transformation function f
y (numpy array [n x L]): Transformed sigma/cubature points
P (numpy array [n x n]): Covariance matrix of the result from transformation function f
y1 (numpy array [n x L]): zero-mean Transformed sigma/cubature points<|endoftext|> |
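The core of unscented_transformH is a weighted sample mean and covariance over the propagated points. A compact sketch of that computation (assuming a 1-D weight vector w in place of the filter's W/WeightMat pair):

import numpy as np

def weighted_stats(points, w):
    # points: (n, L) transformed sigma/cubature points; w: (L,) weights.
    Y = points @ w.reshape(-1, 1)             # weighted mean, the returned Y
    centered = points - Y                     # zero-mean points, the returned y1
    P = centered @ np.diag(w) @ centered.T    # same product as y1 * WeightMat * y1.T
    return Y, centered, P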
a6a4e09e5d2cc14a4ca89cd6126d08606b66c6d757282558f8b7a55fd2830a83 | def unscented_transformF(self, x, W, WeightMat, L, f, u, ia, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
order = len(ia)
if self.use_torch_tensor:
Y = torch.zeros((order, 1), dtype=x.dtype, device=self.tensor_device)
else:
Y = np.zeros((order, 1))
y = x
for k in range(L):
if len(iqu):
y[ia, k] = f(x[ia, k], u, x[iq, k], x[iqu, k], *additional_args)
else:
y[ia, k] = f(x[ia, k], u, x[iq, k], (torch.zeros(u.shape, dtype=x.dtype, device=self.tensor_device) if self.use_torch_tensor else np.zeros(u.shape)), *additional_args)
if self.use_torch_tensor:
Y += (W[0, k] * y[np.arange(order), k:(k + 1)])
else:
Y += (W.flat[k] * y[np.arange(order), k:(k + 1)])
y1 = (y[np.arange(order), :] - Y)
if self.use_torch_tensor:
P = torch.matmul(torch.matmul(y1, WeightMat), y1.T)
else:
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | Function to propagate sigma/cubature points through process model function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal
L (int): number of points
f (function): function handle for the process model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points
iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points
iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points
additional_args (list): list of additional arguments to be passed to the process model
Returns:
Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f
y (numpy array [n_a x L]): Transformed sigma/cubature points
P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f
y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points | estimators.py | unscented_transformF | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def unscented_transformF(self, x, W, WeightMat, L, f, u, ia, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
order = len(ia)
if self.use_torch_tensor:
Y = torch.zeros((order, 1), dtype=x.dtype, device=self.tensor_device)
else:
Y = np.zeros((order, 1))
y = x
for k in range(L):
if len(iqu):
y[ia, k] = f(x[ia, k], u, x[iq, k], x[iqu, k], *additional_args)
else:
y[ia, k] = f(x[ia, k], u, x[iq, k], (torch.zeros(u.shape, dtype=x.dtype, device=self.tensor_device) if self.use_torch_tensor else np.zeros(u.shape)), *additional_args)
if self.use_torch_tensor:
Y += (W[0, k] * y[np.arange(order), k:(k + 1)])
else:
Y += (W.flat[k] * y[np.arange(order), k:(k + 1)])
y1 = (y[np.arange(order), :] - Y)
if self.use_torch_tensor:
P = torch.matmul(torch.matmul(y1, WeightMat), y1.T)
else:
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | def unscented_transformF(self, x, W, WeightMat, L, f, u, ia, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
order = len(ia)
if self.use_torch_tensor:
Y = torch.zeros((order, 1), dtype=x.dtype, device=self.tensor_device)
else:
Y = np.zeros((order, 1))
y = x
for k in range(L):
if len(iqu):
y[ia, k] = f(x[ia, k], u, x[iq, k], x[iqu, k], *additional_args)
else:
y[ia, k] = f(x[ia, k], u, x[iq, k], (torch.zeros(u.shape, dtype=x.dtype, device=self.tensor_device) if self.use_torch_tensor else np.zeros(u.shape)), *additional_args)
if self.use_torch_tensor:
Y += (W[0, k] * y[np.arange(order), k:(k + 1)])
else:
Y += (W.flat[k] * y[np.arange(order), k:(k + 1)])
y1 = (y[np.arange(order), :] - Y)
if self.use_torch_tensor:
P = torch.matmul(torch.matmul(y1, WeightMat), y1.T)
else:
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1)<|docstring|>Function to propagate sigma/cubature points through process model function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal
L (int): number of points
f (function): function handle for the process model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
ia (numpy array [n_s x 1]): row indices of the states in sigma/cubature points
iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points
iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points
additional_args (list): list of additional arguments to be passed to the process model
Returns:
Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f
y (numpy array [n_a x L]): Transformed sigma/cubature points
P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f
y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points<|endoftext|> |
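unscented_transformF consumes each augmented point in slices: rows ia hold the state, rows iq the process noise (and iqu the input noise when present). A toy illustration of that bookkeeping, with hypothetical dimensions and a trivial additive-noise model:

import numpy as np

n_s, n_q = 3, 3                        # state and process-noise dimensions
L = 2 * (n_s + n_q) + 1                # e.g. a 2nd-order point count
ia = np.arange(n_s)                    # rows of each augmented point holding the state
iq = np.arange(n_s, n_s + n_q)         # rows holding the additive process noise
pts = np.random.default_rng(2).standard_normal((n_s + n_q, L))

def f(x, u, noise):
    # Simple decay-plus-noise process model, in the repository's signature style.
    return 0.9 * x + noise

prop = np.empty((n_s, L))
for k in range(L):                     # propagate point-by-point, as the method does
    prop[:, k] = f(pts[ia, k], None, pts[iq, k])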
0c9d931e3464c9c48c00143da1fff0b9f531c192f95dbdcf0be804f155d2ad35 | def sigmas2(self, X, P):
'\n function to generate second order sigma points\n reference: Appendix G.1 of "Compressed Estimation in Coupled High-dimensional Processes"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): second order sigma point\n L (int): number of sigma points\n W (numpy array [1 x L]): 1D Weight array of sigma points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
Params = [(1 - (n / 3.0)), (1.0 / 6.0), math.sqrt(3.0)]
L = ((2 * n) + 1)
W = np.concatenate((np.array([[Params[0]]]), np.matlib.repmat(Params[1], 1, (2 * n))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
temp = np.zeros((n, L))
loc = np.arange(n)
l_index = (((loc * L) + loc) + 1)
temp.flat[l_index] = Params[2]
l_index += n
temp.flat[l_index] = (- Params[2])
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, L))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, L)
x = (Y + np.matmul(sqP, temp))
return (x, L, W, WeightMat) | function to generate second order sigma points
reference: Appendix G.1 of "Compressed Estimation in Coupled High-dimensional Processes"
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): second order sigma point
L (int): number of sigma points
W (numpy array [1 x L]): 1D Weight array of sigma points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal | estimators.py | sigmas2 | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def sigmas2(self, X, P):
'\n function to generate second order sigma points\n reference: Appendix G.1 of "Compressed Estimation in Coupled High-dimensional Processes"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): second order sigma point\n L (int): number of sigma points\n W (numpy array [1 x L]): 1D Weight array of sigma points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
Params = [(1 - (n / 3.0)), (1.0 / 6.0), math.sqrt(3.0)]
L = ((2 * n) + 1)
W = np.concatenate((np.array([[Params[0]]]), np.matlib.repmat(Params[1], 1, (2 * n))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
temp = np.zeros((n, L))
loc = np.arange(n)
l_index = (((loc * L) + loc) + 1)
temp.flat[l_index] = Params[2]
l_index += n
temp.flat[l_index] = (- Params[2])
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, L))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, L)
x = (Y + np.matmul(sqP, temp))
return (x, L, W, WeightMat) | def sigmas2(self, X, P):
'\n function to generate second order sigma points\n reference: Appendix G.1 of "Compressed Estimation in Coupled High-dimensional Processes"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): second order sigma point\n L (int): number of sigma points\n W (numpy array [1 x L]): 1D Weight array of sigma points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
Params = [(1 - (n / 3.0)), (1.0 / 6.0), math.sqrt(3.0)]
L = ((2 * n) + 1)
W = np.concatenate((np.array([[Params[0]]]), np.matlib.repmat(Params[1], 1, (2 * n))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
temp = np.zeros((n, L))
loc = np.arange(n)
l_index = (((loc * L) + loc) + 1)
temp.flat[l_index] = Params[2]
l_index += n
temp.flat[l_index] = (- Params[2])
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, L))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, L)
x = (Y + np.matmul(sqP, temp))
return (x, L, W, WeightMat)<|docstring|>function to generate second order sigma points
reference: Appendix G.1 of "Compressed Estimation in Coupled High-dimensional Processes"
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): second order sigma point
L (int): number of sigma points
W (numpy array [1 x L]): 1D Weight array of sigma points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal<|endoftext|> |
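A minimal standalone sketch of the second-order rule in sigmas2 above, assuming only NumPy; the helper name sigmas2_demo and the 2-state test values are illustrative, not repository code. It rebuilds the weights (1 - n/3 at the centre, 1/6 elsewhere) and the +/- sqrt(3) spread along the SVD square root, then checks that the weighted points reproduce the mean and covariance exactly.

import math
import numpy as np

def sigmas2_demo(X, P):
    # centre weight 1 - n/3, then 1/6 for each of the 2n symmetric points
    n = X.shape[0]
    L = 2 * n + 1
    W = np.full((1, L), 1.0 / 6.0)
    W[0, 0] = 1.0 - n / 3.0
    # matrix square root via SVD, mirroring the method above
    U, D, _ = np.linalg.svd(P)
    sqP = U @ np.diag(D ** 0.5)
    spread = np.zeros((n, L))
    spread[:, 1:n + 1] = math.sqrt(3.0) * np.eye(n)
    spread[:, n + 1:] = -math.sqrt(3.0) * np.eye(n)
    return X + sqP @ spread, W  # broadcasting tiles the mean across all L columns

X = np.array([[1.0], [-2.0]])
P = np.array([[2.0, 0.3], [0.3, 1.0]])
x, W = sigmas2_demo(X, P)
assert np.allclose(x @ W.T, X)                    # weighted mean recovered
assert np.allclose(((x - X) * W) @ (x - X).T, P)  # weighted covariance recovered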
d9dd836d9c11c0cc6b8fbb3ab0ce89caf0f935f6d5d2cc7464e74ce23c39ad7a | def sigmas4(self, X, P):
'\n function to generate fourth order sigma points\n Note: No analytical results exist for generating 4th order sigma points as it requires performing\n non-linear least squares (see Appendix G.2 of "Compressed Estimation in Coupled High-dimensional Processes").\n\n A separate scheme is used here, see equation 5.20 instead.\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): fourth order sigma point\n L (int): number of sigma points\n W (numpy array [1 x L]): 1D Weight array of sigma points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = ((2 * (n ** 2)) + 1)
W = np.concatenate((np.array([[(1 + (((n ** 2) - (7.0 * n)) / 18.0))]]), np.matlib.repmat(((4 - n) / 18.0), 1, (2 * n)), np.matlib.repmat((1.0 / 36.0), 1, ((2 * (n ** 2)) - (2 * n)))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt(3.0)
temp = np.zeros((n, ((2 * n) + 1)))
loc = np.arange(n)
l_index = (((loc * ((2 * n) + 1)) + loc) + 1)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * n) + 1)))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, ((2 * n) + 1))
x = (Y + np.matmul(sqP, temp))
temp1 = np.zeros((n, ((2 * (n ** 2)) - (2 * n))))
count = comb(n, 2, exact=True)
loc = np.fromiter(itertools.chain.from_iterable(itertools.combinations(range(n), 2)), int, count=(count * 2)).reshape((- 1), 2)
l_index = ((loc * ((2 * (n ** 2)) - (2 * n))) + np.matlib.repmat(np.arange(count)[:, np.newaxis], 1, 2))
for i in itertools.product([1, 2], repeat=2):
temp1.flat[l_index[:, 0]] = (((- 1) ** i[0]) * s)
temp1.flat[l_index[:, 1]] = (((- 1) ** i[1]) * s)
l_index += count
if self.use_torch_tensor:
temp1 = torch.from_numpy(temp1).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * (n ** 2)) - (2 * n))))
x = torch.cat((x, (Y + torch.matmul(sqP, temp1))), dim=1)
else:
Y = np.matlib.repmat(X, 1, ((2 * (n ** 2)) - (2 * n)))
x = np.concatenate((x, (Y + np.matmul(sqP, temp1))), axis=1)
'\n if self.use_torch_tensor:\n temp2 = torch.cat((temp, temp1), dim=1)\n else:\n temp2 = np.concatenate((temp, temp1), axis=1)\n self.verifySigma(temp2, W, 5)\n print(self.verifyTransformedSigma(x, WeightMat, X, P))\n '
return (x, L, W, WeightMat) | function to generate fourth order sigma points
Note: No analytical results exist for generating 4th order sigma points as it requires performing
non-linear least squares (see Appendix G.2 of "Compressed Estimation in Coupled High-dimensional Processes").
A separate scheme is used here, see equation 5.20 instead.
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): fourth order sigma point
L (int): number of sigma points
W (numpy array [1 x L]): 1D Weight array of sigma points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal | estimators.py | sigmas4 | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def sigmas4(self, X, P):
'\n function to generate fourth order sigma points\n Note: No analytical results exist for generating 4th order sigma points as it requires performing\n non-linear least squares (see Appendix G.2 of "Compressed Estimation in Coupled High-dimensional Processes").\n\n A separate scheme is used here, see equation 5.20 instead.\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): fourth order sigma point\n L (int): number of sigma points\n W (numpy array [1 x L]): 1D Weight array of sigma points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = ((2 * (n ** 2)) + 1)
W = np.concatenate((np.array([[(1 + (((n ** 2) - (7.0 * n)) / 18.0))]]), np.matlib.repmat(((4 - n) / 18.0), 1, (2 * n)), np.matlib.repmat((1.0 / 36.0), 1, ((2 * (n ** 2)) - (2 * n)))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt(3.0)
temp = np.zeros((n, ((2 * n) + 1)))
loc = np.arange(n)
l_index = (((loc * ((2 * n) + 1)) + loc) + 1)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * n) + 1)))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, ((2 * n) + 1))
x = (Y + np.matmul(sqP, temp))
temp1 = np.zeros((n, ((2 * (n ** 2)) - (2 * n))))
count = comb(n, 2, exact=True)
loc = np.fromiter(itertools.chain.from_iterable(itertools.combinations(range(n), 2)), int, count=(count * 2)).reshape((- 1), 2)
l_index = ((loc * ((2 * (n ** 2)) - (2 * n))) + np.matlib.repmat(np.arange(count)[:, np.newaxis], 1, 2))
for i in itertools.product([1, 2], repeat=2):
temp1.flat[l_index[:, 0]] = (((- 1) ** i[0]) * s)
temp1.flat[l_index[:, 1]] = (((- 1) ** i[1]) * s)
l_index += count
if self.use_torch_tensor:
temp1 = torch.from_numpy(temp1).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * (n ** 2)) - (2 * n))))
x = torch.cat((x, (Y + torch.matmul(sqP, temp1))), dim=1)
else:
Y = np.matlib.repmat(X, 1, ((2 * (n ** 2)) - (2 * n)))
x = np.concatenate((x, (Y + np.matmul(sqP, temp1))), axis=1)
'\n if self.use_torch_tensor:\n temp2 = torch.cat((temp, temp1), dim=1)\n else:\n temp2 = np.concatenate((temp, temp1), axis=1)\n self.verifySigma(temp2, W, 5)\n print(self.verifyTransformedSigma(x, WeightMat, X, P))\n '
return (x, L, W, WeightMat) | def sigmas4(self, X, P):
'\n function to generate fourth order sigma points\n Note: No analytical results exist for generating 4th order sigma points as it requires performing\n non-linear least squares (see Appendix G.2 of "Compressed Estimation in Coupled High-dimensional Processes").\n\n A separate scheme is used here, see equation 5.20 instead.\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): fourth order sigma point\n L (int): number of sigma points\n W (numpy array [1 x L]): 1D Weight array of sigma points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = ((2 * (n ** 2)) + 1)
W = np.concatenate((np.array([[(1 + (((n ** 2) - (7.0 * n)) / 18.0))]]), np.matlib.repmat(((4 - n) / 18.0), 1, (2 * n)), np.matlib.repmat((1.0 / 36.0), 1, ((2 * (n ** 2)) - (2 * n)))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt(3.0)
temp = np.zeros((n, ((2 * n) + 1)))
loc = np.arange(n)
l_index = (((loc * ((2 * n) + 1)) + loc) + 1)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * n) + 1)))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, ((2 * n) + 1))
x = (Y + np.matmul(sqP, temp))
temp1 = np.zeros((n, ((2 * (n ** 2)) - (2 * n))))
count = comb(n, 2, exact=True)
loc = np.fromiter(itertools.chain.from_iterable(itertools.combinations(range(n), 2)), int, count=(count * 2)).reshape((- 1), 2)
l_index = ((loc * ((2 * (n ** 2)) - (2 * n))) + np.matlib.repmat(np.arange(count)[:, np.newaxis], 1, 2))
for i in itertools.product([1, 2], repeat=2):
temp1.flat[l_index[:, 0]] = (((- 1) ** i[0]) * s)
temp1.flat[l_index[:, 1]] = (((- 1) ** i[1]) * s)
l_index += count
if self.use_torch_tensor:
temp1 = torch.from_numpy(temp1).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * (n ** 2)) - (2 * n))))
x = torch.cat((x, (Y + torch.matmul(sqP, temp1))), dim=1)
else:
Y = np.matlib.repmat(X, 1, ((2 * (n ** 2)) - (2 * n)))
x = np.concatenate((x, (Y + np.matmul(sqP, temp1))), axis=1)
'\n if self.use_torch_tensor:\n temp2 = torch.cat((temp, temp1), dim=1)\n else:\n temp2 = np.concatenate((temp, temp1), axis=1)\n self.verifySigma(temp2, W, 5)\n print(self.verifyTransformedSigma(x, WeightMat, X, P))\n '
return (x, L, W, WeightMat)<|docstring|>function to generate fourth order sigma points
Note: No analytical results exist for generating 4th order sigma points as it requires performing
non-linear least squares (see Appendix G.2 of "Compressed Estimation in Coupled High-dimensional Processes").
A separate scheme is used here, see equation 5.20 instead.
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): fourth order sigma point
L (int): number of sigma points
W (numpy array [1 x L]): 1D Weight array of sigma points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal<|endoftext|> |
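A quick self-contained sanity check on the fourth-order weights used in sigmas4 above (a sketch, not repository code): for any dimension n, the centre weight 1 + (n^2 - 7n)/18, the 2n axis weights of (4 - n)/18 and the 2n^2 - 2n cross weights of 1/36 sum to one. Note that the axis weights turn negative for n > 4, which such rules permit.

import numpy as np

for n in range(1, 10):
    w0 = 1.0 + (n ** 2 - 7.0 * n) / 18.0
    w_axis = 2 * n * ((4.0 - n) / 18.0)    # 2n points along the axis directions
    w_cross = (2 * n ** 2 - 2 * n) / 36.0  # pairwise +/- sqrt(3) combinations
    assert np.isclose(w0 + w_axis + w_cross, 1.0)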
75c859d89cab1165e3822cf545207380f863ced94a82d294c2baf9799117c223 | def cubature2(self, X, P):
'\n function to generate second order cubature points\n reference: paper "Cubature Kalman Filters"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): second order cubature point\n L (int): number of cubature points\n W (numpy array [1 x L]): 1D Weight array of cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = (2 * n)
W = np.matlib.repmat((1.0 / L), 1, L)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt(n)
temp = np.zeros((n, L))
loc = np.arange(n)
l_index = ((loc * L) + loc)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, L))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, L)
x = (Y + np.matmul(sqP, temp))
return (x, L, W, WeightMat) | function to generate second order cubature points
reference: paper "Cubature Kalman Fitlers"
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): second order cubature point
L (int): number of cubature points
W (numpy array [1 x L]): 1D Weight array of cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal | estimators.py | cubature2 | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def cubature2(self, X, P):
'\n function to generate second order cubature points\n reference: paper "Cubature Kalman Filters"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): second order cubature point\n L (int): number of cubature points\n W (numpy array [1 x L]): 1D Weight array of cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = (2 * n)
W = np.matlib.repmat((1.0 / L), 1, L)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt(n)
temp = np.zeros((n, L))
loc = np.arange(n)
l_index = ((loc * L) + loc)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, L))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, L)
x = (Y + np.matmul(sqP, temp))
return (x, L, W, WeightMat) | def cubature2(self, X, P):
'\n function to generate second order cubature points\n reference: paper "Cubature Kalman Filters"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): second order cubature point\n L (int): number of cubature points\n W (numpy array [1 x L]): 1D Weight array of cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = (2 * n)
W = np.matlib.repmat((1.0 / L), 1, L)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt(n)
temp = np.zeros((n, L))
loc = np.arange(n)
l_index = ((loc * L) + loc)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, L))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, L)
x = (Y + np.matmul(sqP, temp))
return (x, L, W, WeightMat)<|docstring|>function to generate second order cubature points
reference: paper "Cubature Kalman Fitlers"
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): second order cubature point
L (int): number of cubature points
W (numpy array [1 x L]): 1D Weight array of cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal<|endoftext|> |
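A standalone sketch of the third-degree spherical-radial rule implemented in cubature2 above: 2n equally weighted points at +/- sqrt(n) along the columns of a matrix square root. The 3-state mean and covariance below are made-up illustration values.

import numpy as np

X = np.array([[0.5], [1.5], [-1.0]])
P = np.diag([1.0, 4.0, 0.25])
n = X.shape[0]
U, D, _ = np.linalg.svd(P)
sqP = U @ np.diag(D ** 0.5)
pts = np.hstack([X + np.sqrt(n) * sqP, X - np.sqrt(n) * sqP])  # n x 2n points
w = 1.0 / (2 * n)  # every point carries the same weight
assert np.allclose(pts.mean(axis=1, keepdims=True), X)
assert np.allclose(w * (pts - X) @ (pts - X).T, P)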
f104d71b06cc4a8d95ab56bb845dc435bd75d5b1494d8ca29029e9db900c65d1 | def cubature4(self, X, P):
'\n function to generate fourth order cubature points\n reference: paper "High-degree cubature Kalman filter"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): fourth order cubature point\n L (int): number of cubature points\n W (numpy array [1 x L]): 1D Weight array of cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = ((2 * (n ** 2)) + 1)
W = np.concatenate((np.array([[(2.0 / (n + 2.0))]]), np.matlib.repmat(((4 - n) / (2.0 * ((n + 2) ** 2))), 1, (2 * n)), np.matlib.repmat((1.0 / ((n + 2.0) ** 2)), 1, ((2 * (n ** 2)) - (2 * n)))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt((n + 2.0))
temp = np.zeros((n, ((2 * n) + 1)))
loc = np.arange(n)
l_index = (((loc * ((2 * n) + 1)) + loc) + 1)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * n) + 1)))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, ((2 * n) + 1))
x = (Y + np.matmul(sqP, temp))
s = (math.sqrt((n + 2.0)) / math.sqrt(2.0))
temp1 = np.zeros((n, ((2 * (n ** 2)) - (2 * n))))
count = comb(n, 2, exact=True)
loc = np.fromiter(itertools.chain.from_iterable(itertools.combinations(range(n), 2)), int, count=(count * 2)).reshape((- 1), 2)
l_index = ((loc * ((2 * (n ** 2)) - (2 * n))) + np.matlib.repmat(np.arange(count)[:, np.newaxis], 1, 2))
for i in itertools.product([1, 2], repeat=2):
temp1.flat[l_index[:, 0]] = (((- 1) ** i[0]) * s)
temp1.flat[l_index[:, 1]] = (((- 1) ** i[1]) * s)
l_index += count
if self.use_torch_tensor:
temp1 = torch.from_numpy(temp1).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * (n ** 2)) - (2 * n))))
x = torch.cat((x, (Y + torch.matmul(sqP, temp1))), dim=1)
else:
Y = np.matlib.repmat(X, 1, ((2 * (n ** 2)) - (2 * n)))
x = np.concatenate((x, (Y + np.matmul(sqP, temp1))), axis=1)
'\n if self.use_torch_tensor:\n temp2 = torch.cat((temp, temp1), dim=1)\n else:\n temp2 = np.concatenate((temp, temp1), axis=1)\n self.verifySigma(temp2, W, 5)\n print(self.verifyTransformedSigma(x, WeightMat, X, P))\n '
return (x, L, W, WeightMat) | function to generate fourth order cubature points
reference: paper "High-degree cubature kalman filter"
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): fourth order cubature point
L (int): number of cubature points
W (numpy array [1 x L]): 1D Weight array of cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal | estimators.py | cubature4 | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def cubature4(self, X, P):
'\n function to generate fourth order cubature points\n reference: paper "High-degree cubature Kalman filter"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): fourth order cubature point\n L (int): number of cubature points\n W (numpy array [1 x L]): 1D Weight array of cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = ((2 * (n ** 2)) + 1)
W = np.concatenate((np.array([[(2.0 / (n + 2.0))]]), np.matlib.repmat(((4 - n) / (2.0 * ((n + 2) ** 2))), 1, (2 * n)), np.matlib.repmat((1.0 / ((n + 2.0) ** 2)), 1, ((2 * (n ** 2)) - (2 * n)))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt((n + 2.0))
temp = np.zeros((n, ((2 * n) + 1)))
loc = np.arange(n)
l_index = (((loc * ((2 * n) + 1)) + loc) + 1)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * n) + 1)))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, ((2 * n) + 1))
x = (Y + np.matmul(sqP, temp))
s = (math.sqrt((n + 2.0)) / math.sqrt(2.0))
temp1 = np.zeros((n, ((2 * (n ** 2)) - (2 * n))))
count = comb(n, 2, exact=True)
loc = np.fromiter(itertools.chain.from_iterable(itertools.combinations(range(n), 2)), int, count=(count * 2)).reshape((- 1), 2)
l_index = ((loc * ((2 * (n ** 2)) - (2 * n))) + np.matlib.repmat(np.arange(count)[:, np.newaxis], 1, 2))
for i in itertools.product([1, 2], repeat=2):
temp1.flat[l_index[:, 0]] = (((- 1) ** i[0]) * s)
temp1.flat[l_index[:, 1]] = (((- 1) ** i[1]) * s)
l_index += count
if self.use_torch_tensor:
temp1 = torch.from_numpy(temp1).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * (n ** 2)) - (2 * n))))
x = torch.cat((x, (Y + torch.matmul(sqP, temp1))), dim=1)
else:
Y = np.matlib.repmat(X, 1, ((2 * (n ** 2)) - (2 * n)))
x = np.concatenate((x, (Y + np.matmul(sqP, temp1))), axis=1)
'\n if self.use_torch_tensor:\n temp2 = torch.cat((temp, temp1), dim=1)\n else:\n temp2 = np.concatenate((temp, temp1), axis=1)\n self.verifySigma(temp2, W, 5)\n print(self.verifyTransformedSigma(x, WeightMat, X, P))\n '
return (x, L, W, WeightMat) | def cubature4(self, X, P):
'\n function to generate fourth order cubature points\n reference: paper "High-degree cubature Kalman filter"\n\n Args:\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n x (numpy array [n x L]): fourth order cubature point\n L (int): number of cubature points\n W (numpy array [1 x L]): 1D Weight array of cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n\n '
n = X.shape[0]
L = ((2 * (n ** 2)) + 1)
W = np.concatenate((np.array([[(2.0 / (n + 2.0))]]), np.matlib.repmat(((4 - n) / (2.0 * ((n + 2) ** 2))), 1, (2 * n)), np.matlib.repmat((1.0 / ((n + 2.0) ** 2)), 1, ((2 * (n ** 2)) - (2 * n)))), axis=1)
WeightMat = np.diag(np.squeeze(W))
if self.use_torch_tensor:
W = torch.from_numpy(W).to(self.tensor_device)
WeightMat = torch.from_numpy(WeightMat).to(self.tensor_device)
if self.use_torch_tensor:
(U, D, _) = torch.linalg.svd(P)
sqP = torch.matmul(U, torch.diag((D ** 0.5)))
else:
(U, D, _) = np.linalg.svd(P)
sqP = np.matmul(U, np.diag((D ** 0.5)))
s = math.sqrt((n + 2.0))
temp = np.zeros((n, ((2 * n) + 1)))
loc = np.arange(n)
l_index = (((loc * ((2 * n) + 1)) + loc) + 1)
temp.flat[l_index] = s
l_index += n
temp.flat[l_index] = (- s)
if self.use_torch_tensor:
temp = torch.from_numpy(temp).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * n) + 1)))
x = (Y + torch.matmul(sqP, temp))
else:
Y = np.matlib.repmat(X, 1, ((2 * n) + 1))
x = (Y + np.matmul(sqP, temp))
s = (math.sqrt((n + 2.0)) / math.sqrt(2.0))
temp1 = np.zeros((n, ((2 * (n ** 2)) - (2 * n))))
count = comb(n, 2, exact=True)
loc = np.fromiter(itertools.chain.from_iterable(itertools.combinations(range(n), 2)), int, count=(count * 2)).reshape((- 1), 2)
l_index = ((loc * ((2 * (n ** 2)) - (2 * n))) + np.matlib.repmat(np.arange(count)[:, np.newaxis], 1, 2))
for i in itertools.product([1, 2], repeat=2):
temp1.flat[l_index[:, 0]] = (((- 1) ** i[0]) * s)
temp1.flat[l_index[:, 1]] = (((- 1) ** i[1]) * s)
l_index += count
if self.use_torch_tensor:
temp1 = torch.from_numpy(temp1).to(self.tensor_device)
Y = torch.tile(X, (1, ((2 * (n ** 2)) - (2 * n))))
x = torch.cat((x, (Y + torch.matmul(sqP, temp1))), dim=1)
else:
Y = np.matlib.repmat(X, 1, ((2 * (n ** 2)) - (2 * n)))
x = np.concatenate((x, (Y + np.matmul(sqP, temp1))), axis=1)
'\n if self.use_torch_tensor:\n temp2 = torch.cat((temp, temp1), dim=1)\n else:\n temp2 = np.concatenate((temp, temp1), axis=1)\n self.verifySigma(temp2, W, 5)\n print(self.verifyTransformedSigma(x, WeightMat, X, P))\n '
return (x, L, W, WeightMat)<|docstring|>function to generate fourth order cubature points
reference: paper "High-degree cubature kalman filter"
Args:
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
x (numpy array [n x L]): fourth order cubature point
L (int): number of cubature points
W (numpy array [1 x L]): 1D Weight array of cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal<|endoftext|> |
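For n = 1 the fourth-order rule in cubature4 above collapses to three points: weight 2/(n + 2) = 2/3 at the mean and (4 - n)/(2 (n + 2)^2) = 1/6 at +/- sqrt(n + 2) sigma, with an empty cross-term block. A small sketch (the variance value is illustrative) confirming it reproduces the Gaussian second and fourth moments:

import numpy as np

var = 2.5  # variance of a univariate zero-mean Gaussian
pts = np.array([0.0, np.sqrt(3.0 * var), -np.sqrt(3.0 * var)])
w = np.array([2.0 / 3.0, 1.0 / 6.0, 1.0 / 6.0])
assert np.isclose(w @ pts ** 2, var)             # E[x^2] = sigma^2
assert np.isclose(w @ pts ** 4, 3.0 * var ** 2)  # E[x^4] = 3 sigma^4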
079abdc9d99a5d03c1d105524f53abbcacda9c4280b4e89324f80d0df9b5ec0f | def verifyTransformedSigma(self, x, WeightMat, X, P):
'\n Verify if the transformed sigma/cubature point captures the mean and covariance of the \n target Gaussian distribution\n\n Args:\n x (numpy array [n x L]): sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n mean_close (bool): whether mean of the distribution is captured by the sigma/cubature points\n cov_close (bool): whether covariance of the distribution is captured by the sigma/cubature points\n\n '
sigma_mean = np.zeros(X.shape)
if self.use_torch_tensor:
W = np.diag(WeightMat.detach().numpy())
x_copy = x.detach().numpy()
else:
W = np.diag(WeightMat)
x_copy = x
for i in range(x.shape[1]):
sigma_mean += (W[i] * x_copy[:, i:(i + 1)])
if self.use_torch_tensor:
sigma_cov = np.matmul(np.matmul((x_copy - sigma_mean), WeightMat.detach().numpy()), np.transpose((x_copy - sigma_mean)))
mean_close = np.allclose(X.detach().numpy(), sigma_mean)
cov_close = np.allclose(P.detach().numpy(), sigma_cov)
else:
sigma_cov = np.matmul(np.matmul((x_copy - sigma_mean), WeightMat), np.transpose((x_copy - sigma_mean)))
mean_close = np.allclose(X, sigma_mean)
cov_close = np.allclose(P, sigma_cov)
return (mean_close, cov_close) | Verify if the transformed sigma/cubature point captures the mean and covariance of the
target Gaussian distribution
Args:
x (numpy array [n x L]): sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
mean_close (bool): whether mean of the distribution is captured by the sigma/cubature points
cov_close (bool): whether covariance of the distribution is captured by the sigma/cubature points | estimators.py | verifyTransformedSigma | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def verifyTransformedSigma(self, x, WeightMat, X, P):
'\n Verify if the transformed sigma/cubature point captures the mean and covariance of the \n target Gaussian distribution\n\n Args:\n x (numpy array [n x L]): sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n mean_close (bool): whether mean of the distribution is captured by the sigma/cubature points\n cov_close (bool): whether covariance of the distribution is captured by the sigma/cubature points\n\n '
sigma_mean = np.zeros(X.shape)
if self.use_torch_tensor:
W = np.diag(WeightMat.detach().numpy())
x_copy = x.detach().numpy()
else:
W = np.diag(WeightMat)
x_copy = x
for i in range(x.shape[1]):
sigma_mean += (W[i] * x_copy[:, i:(i + 1)])
if self.use_torch_tensor:
sigma_cov = np.matmul(np.matmul((x_copy - sigma_mean), WeightMat.detach().numpy()), np.transpose((x_copy - sigma_mean)))
mean_close = np.allclose(X.detach().numpy(), sigma_mean)
cov_close = np.allclose(P.detach().numpy(), sigma_cov)
else:
sigma_cov = np.matmul(np.matmul((x_copy - sigma_mean), WeightMat), np.transpose((x_copy - sigma_mean)))
mean_close = np.allclose(X, sigma_mean)
cov_close = np.allclose(P, sigma_cov)
return (mean_close, cov_close) | def verifyTransformedSigma(self, x, WeightMat, X, P):
'\n Verify if the transformed sigma/cubature point captures the mean and covariance of the \n target Gaussian distribution\n\n Args:\n x (numpy array [n x L]): sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal\n X (numpy array [n x 1]): mean of Gaussian distribution\n P (numpy array [n x n]): covariance matrix of Gaussian distribution\n\n Returns:\n mean_close (bool): whether mean of the distribution is captured by the sigma/cubature points\n cov_close (bool): whether covariance of the distribution is captured by the sigma/cubature points\n\n '
sigma_mean = np.zeros(X.shape)
if self.use_torch_tensor:
W = np.diag(WeightMat.detach().numpy())
x_copy = x.detach().numpy()
else:
W = np.diag(WeightMat)
x_copy = x
for i in range(x.shape[1]):
sigma_mean += (W[i] * x_copy[:, i:(i + 1)])
if self.use_torch_tensor:
sigma_cov = np.matmul(np.matmul((x_copy - sigma_mean), WeightMat.detach().numpy()), np.transpose((x_copy - sigma_mean)))
mean_close = np.allclose(X.detach().numpy(), sigma_mean)
cov_close = np.allclose(P.detach().numpy(), sigma_cov)
else:
sigma_cov = np.matmul(np.matmul((x_copy - sigma_mean), WeightMat), np.transpose((x_copy - sigma_mean)))
mean_close = np.allclose(X, sigma_mean)
cov_close = np.allclose(P, sigma_cov)
return (mean_close, cov_close)<|docstring|>Verify if the transformed sigma/cubature point captures the mean and covariance of the
target Gaussian distribution
Args:
x (numpy array [n x L]): sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights of the points on the diagonal
X (numpy array [n x 1]): mean of Gaussian distribution
P (numpy array [n x n]): covariance matrix of Gaussian distribution
Returns:
mean_close (bool): whether mean of the distribution is captured by the sigma/cubature points
cov_close (bool): whether covariance of the distribution is captured by the sigma/cubature points<|endoftext|>
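A hedged standalone rendering of the check verifyTransformedSigma performs, using the same matrix-form reconstruction (diagonal weight matrix) on cubature-style points; the 2-state values below are illustrative.

import numpy as np

X = np.array([[1.0], [2.0]])
P = np.array([[1.0, 0.2], [0.2, 0.5]])
n = X.shape[0]
U, D, _ = np.linalg.svd(P)
sqP = U @ np.diag(D ** 0.5)
x = np.hstack([X + np.sqrt(n) * sqP, X - np.sqrt(n) * sqP])
WeightMat = np.eye(2 * n) / (2 * n)
sigma_mean = x @ np.diag(WeightMat)[:, None]  # sum_i W_i x_i, as in the loop above
sigma_cov = (x - sigma_mean) @ WeightMat @ (x - sigma_mean).T
assert np.allclose(sigma_mean, X) and np.allclose(sigma_cov, P)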
66d4a33434802d1f41d9c42808d590b463baf559930db102ea331f8c3481c2ca | def verifySigma(self, x, W, order=2):
'\n Since originally the points of PBGF are generated from standard Gaussian distribution,\n check if moments up to specified order are being captured. Raises error when mismatch is found.\n\n Args:\n x (numpy array [n x L]): sigma/cubature points\n W (numpy array [1 x L or L x 1]): 1D Weight array of sigma/cubature points\n order (int): moment order up to which the sampled points are verified\n\n '
(n, L) = x.shape
if self.use_torch_tensor:
x_copy = x.detach().numpy()
W_copy = W.detach().numpy()
else:
x_copy = x
W_copy = W
for i in range(1, (order + 1)):
arr = ([0] * n)
outputs = []
findCombinationsUtil(arr, 0, i, i, outputs)
for output in outputs:
theoretical_moment = 1.0
for power in output:
theoretical_moment *= self.stdGaussMoment(power)
if (theoretical_moment == 0):
break
elem_combinations = itertools.permutations(range(n), len(output))
for elem_combination in elem_combinations:
moment = (W_copy * np.prod((x_copy[elem_combination, :] ** np.matlib.repmat(output, L, 1).T), axis=0)).sum()
assert np.isclose(moment, theoretical_moment), 'The {}th moment with element {} and power {} yielded value of {} instead of {}'.format(i, elem_combination, output, moment, theoretical_moment) | Since originally the points of PBGF are generated from standard Gaussian distribution,
check if moments up to specified order are being captured. Raises error when mismatch is found.
Args:
x (numpy array [n x L]): sigma/cubature points
W (numpy array [1 x L or L x 1]): 1D Weight array of sigma/cubature points
order (int): moment order up to which the sampled points are verified | estimators.py | verifySigma | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def verifySigma(self, x, W, order=2):
'\n Since originally the points of PBGF are generated from standard Gaussian distribution,\n check if moments up to specified order are being captured. Raises error when mismatch is found.\n\n Args:\n x (numpy array [n x L]): sigma/cubature points\n W (numpy array [1 x L or L x 1]): 1D Weight array of sigma/cubature points\n order (int): moment order up to which the sampled points are verified\n\n '
(n, L) = x.shape
if self.use_torch_tensor:
x_copy = x.detach().numpy()
W_copy = W.detach().numpy()
else:
x_copy = x
W_copy = W
for i in range(1, (order + 1)):
arr = ([0] * n)
outputs = []
findCombinationsUtil(arr, 0, i, i, outputs)
for output in outputs:
theoretical_moment = 1.0
for power in output:
theoretical_moment *= self.stdGaussMoment(power)
if (theoretical_moment == 0):
break
elem_combinations = itertools.permutations(range(n), len(output))
for elem_combination in elem_combinations:
moment = (W_copy * np.prod((x_copy[elem_combination, :] ** np.matlib.repmat(output, L, 1).T), axis=0)).sum()
assert np.isclose(moment, theoretical_moment), 'The {}th moment with element {} and power {} yielded value of {} instead of {}'.format(i, elem_combination, output, moment, theoretical_moment) | def verifySigma(self, x, W, order=2):
'\n Since originally the points of PBGF are generated from standard Gaussian distribution,\n check if moments up to specified order are being captured. Raises error when mismatch is found.\n\n Args:\n x (numpy array [n x L]): sigma/cubature points\n W (numpy array [1 x L or L x 1]): 1D Weight array of sigma/cubature points\n order (int): moment order up to which the sampled points are verified\n\n '
(n, L) = x.shape
if self.use_torch_tensor:
x_copy = x.detach().numpy()
W_copy = W.detach().numpy()
else:
x_copy = x
W_copy = W
for i in range(1, (order + 1)):
arr = ([0] * n)
outputs = []
findCombinationsUtil(arr, 0, i, i, outputs)
for output in outputs:
theoretical_moment = 1.0
for power in output:
theoretical_moment *= self.stdGaussMoment(power)
if (theoretical_moment == 0):
break
elem_combinations = itertools.permutations(range(n), len(output))
for elem_combination in elem_combinations:
moment = (W_copy * np.prod((x_copy[elem_combination, :] ** np.matlib.repmat(output, L, 1).T), axis=0)).sum()
assert np.isclose(moment, theoretical_moment), 'The {}th moment with element {} and power {} yielded value of {} instead of {}'.format(i, elem_combination, output, moment, theoretical_moment)<|docstring|>Since originally the points of PBGF are generated from standard Gaussian distribution,
check if moments up to specified order are being captured. Raises error when mismatch is found.
Args:
x (numpy array [n x L]): sigma/cubature points
W (numpy array [1 x L or L x 1]): 1D Weight array of sigma/cubature points
order (int): moment order up to which the sampled points are verified<|endoftext|>
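The invariant verifySigma asserts, sketched standalone for the standard Gaussian (X = 0, P = I): every weighted monomial of the points must match the analytic moment. For the 2n-point rule, first moments vanish and second moments give the identity; the permutation loop in the method extends the same test to higher and cross moments.

import numpy as np

n = 3
pts = np.hstack([np.sqrt(n) * np.eye(n), -np.sqrt(n) * np.eye(n)])  # 2n points for N(0, I)
W = np.full(2 * n, 1.0 / (2 * n))
assert np.allclose(pts @ W, 0.0)                  # all first moments are zero
assert np.allclose((W * pts) @ pts.T, np.eye(n))  # E[z z^T] = I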
71450965e5fc49f17e5c0dd365d02957cabf3cf3c75b6defc78625b196c94612 | def stdGaussMoment(self, order):
'\n Calculate order-th moment of univariate standard Gaussian distribution (zero mean, 1 std)\n\n Args:\n order (int): scalar moment order\n\n Returns:\n prod (int): requested order-th moment of standard Gaussian distribution\n\n '
if (order % 2):
return 0.0
else:
prod = 1.0
for i in range(1, order, 2):
prod *= i
return prod | Calculate order-th moment of univariate standard Gaussian distribution (zero mean, 1 std)
Args:
order (int): scalar moment order
Returns:
prod (int): requested order-th moment of standard Gaussian distribution | estimators.py | stdGaussMoment | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def stdGaussMoment(self, order):
'\n Calculate order-th moment of univariate standard Gaussian distribution (zero mean, 1 std)\n\n Args:\n order (int): scalar moment order\n\n Returns:\n prod (int): requested order-th moment of standard Gaussian distribution\n\n '
if (order % 2):
return 0.0
else:
prod = 1.0
for i in range(1, order, 2):
prod *= i
return prod | def stdGaussMoment(self, order):
'\n Calculate order-th moment of univariate standard Gaussian distribution (zero mean, 1 std)\n\n Args:\n order (int): scalar moment order\n\n Returns:\n prod (int): requested order-th moment of standard Gaussian distribution\n\n '
if (order % 2):
return 0.0
else:
prod = 1.0
for i in range(1, order, 2):
prod *= i
return prod<|docstring|>Calculate order-th moment of univariate standard Gaussian distribution (zero mean, 1 std)
Args:
order (int): scalar moment order
Returns:
prod (int): requested order-th moment of standard Gaussian distribution<|endoftext|> |
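The loop in stdGaussMoment computes the double factorial (order - 1)!!, the closed form for even moments of N(0, 1); odd moments are zero by symmetry. A standalone copy with a quick cross-check (the function name here is illustrative):

def std_gauss_moment(order):
    # odd moments of a zero-mean Gaussian vanish
    if order % 2:
        return 0.0
    # even moments: (order - 1)!! = 1 * 3 * 5 * ... * (order - 1)
    prod = 1.0
    for i in range(1, order, 2):
        prod *= i
    return prod

assert [std_gauss_moment(k) for k in (1, 2, 3, 4, 6, 8)] == [0.0, 1.0, 0.0, 3.0, 15.0, 105.0]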
f4504ccf0a9c4e9df1369949c10f25349714974f90f2dc66a1806f32fbee66ba | def set_initial_cond(self, X, P):
'\n Set the initial condition of the smoother, i.e. the distribution at time zero.\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n\n '
self.init_cond_set = True
self.n = len(X)
self.filter_density.append((X.copy(), P.copy()))
self.latest_action = 'update' | Set the initial condition of the smoother, i.e. the distribution at time zero.
Args:
X (numpy array [n x 1]): expected value of the states
P (numpy array [n x n]): covariance of the states | estimators.py | set_initial_cond | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def set_initial_cond(self, X, P):
'\n Set the initial condition of the smoother, i.e. the distribution at time zero.\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n\n '
self.init_cond_set = True
self.n = len(X)
self.filter_density.append((X.copy(), P.copy()))
self.latest_action = 'update' | def set_initial_cond(self, X, P):
'\n Set the initial condition of the smoother, i.e. the distribution at time zero.\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n\n '
self.init_cond_set = True
self.n = len(X)
self.filter_density.append((X.copy(), P.copy()))
self.latest_action = 'update'<|docstring|>Set the initial condition of the smoother, i.e. the distribution at time zero.
Args:
X (numpy array [n x 1]): expected value of the states
P (numpy array [n x n]): covariance of the states<|endoftext|> |
396be7fc702205c44ef00d09c9a92b6d82e60066925dff19d0b65749ffdde6fc | def predict_and_or_update(self, f, h, Q, R, u, y, u_next=None, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.\n algorithm reference: Algorithm 10.6, page 162 of "Bayesian Filtering and Smoothing"\n\n Args:\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n u_next (*): next input required for function h, defaults to None which will take values of u\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update\n P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update\n smoothed_flag (bool): whether estimate returned is filtered or smoothed estimate; filtered estimate is initially\n returned until a lag_length worth of observations have been cumulated.\n\n '
assert self.init_cond_set, 'User must specify the initial condition separately'
X_fi = ([[]] * (self.lag_interval + 1))
P_fi = ([[]] * (self.lag_interval + 1))
n = self.n
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
if (self.latest_action == 'update'):
X1 = np.concatenate((self.filter_density[(- 1)][0], self.filter_density[(- 1)][0], np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.filter_density[(- 1)][1], self.filter_density[(- 1)][1], Q, Qu, R)
P1[0:n, n:(2 * n)] = self.filter_density[(- 1)][1]
P1[n:(2 * n), 0:n] = self.filter_density[(- 1)][1]
else:
X1 = np.concatenate((self.prevX, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.prevP, Q, Qu, R)
if (u_next is None):
u_next = u
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
if predict_flag:
ia = np.arange(n)
ib = np.arange(n, (2 * n))
iq = np.arange((2 * n), ((2 * n) + nq))
iqu = np.arange(((2 * n) + nq), (((2 * n) + nq) + nqu))
(X, x, P, x1) = self.unscented_transformF(x, W, WeightMat, L, f, u, ia, ib, iq, iqu, additional_args_pm)
self.prevX = X.copy()
self.prevP = P.copy()
X_fi[self.lag_interval] = X[ib, :]
P_fi[self.lag_interval] = P[n:(2 * n), n:(2 * n)]
self.latest_action = 'predict'
if len(y):
if (self.latest_action == 'predict'):
if (not self.backward_pass):
self.pred_density.append((X[ib, :].copy(), P[n:(2 * n), n:(2 * n)].copy()))
self.gain.append(np.matmul(P[0:n, n:(2 * n)], np.linalg.inv(P[n:(2 * n), n:(2 * n)])))
if (len(self.gain) >= self.lag_interval):
self.backward_pass = True
else:
self.pred_density[:(- 1)] = self.pred_density[1:]
self.pred_density[(- 1)] = (X[ib, :].copy(), P[n:(2 * n), n:(2 * n)].copy())
self.gain[:(- 1)] = self.gain[1:]
self.gain[(- 1)] = np.matmul(P[0:n, n:(2 * n)], np.linalg.inv(P[n:(2 * n), n:(2 * n)]))
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange((((2 * n) + nq) + nqu), ((((2 * n) + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u_next, ib, ip, len(y), additional_args_om)
Pxy = np.matmul(np.matmul(x1, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[key, :] = innovation_bound_func[key](innovation[key, :])
X += np.matmul(K, innovation)
P -= np.matmul(K, Pxy.T)
X_fi[self.lag_interval] = X[ib, :]
P_fi[self.lag_interval] = P[n:(2 * n), n:(2 * n)]
if self.backward_pass:
for j in range((self.lag_interval - 1), (- 1), (- 1)):
X_fi[j] = (self.filter_density[j][0] + np.matmul(self.gain[j], (X_fi[(j + 1)] - self.pred_density[j][0])))
P_fi[j] = (self.filter_density[j][1] + np.matmul(np.matmul(self.gain[j], (P_fi[(j + 1)] - self.pred_density[j][1])), self.gain[j].T))
if (self.latest_action == 'update'):
self.filter_density[(- 1)] = (X[ib, :], P[n:(2 * n), n:(2 * n)])
elif (len(self.gain) < self.lag_interval):
self.filter_density.append((X[ib, :], P[n:(2 * n), n:(2 * n)]))
else:
self.filter_density[:(- 1)] = self.filter_density[1:]
self.filter_density[(- 1)] = (X[ib, :], P[n:(2 * n), n:(2 * n)])
self.latest_action = 'update'
return (X_fi, P_fi, self.backward_pass) | Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.
algorithm reference: Algorithm 10.6, page 162 of "Bayesian Filtering and Smoothing"
Args:
f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)
h (function): function handle for the observation model; expected signature h(state, input, noise, ...)
Q (numpy array [nq x nq]): process model noise covariance in the prediction step
R (numpy array [nr x nr]): observation model noise covariance in the update step
u (*): current input required for function f & possibly function h
y (numpy array [nu x 1]): current measurement/output of the system
u_next (*): next input required for function h, defaults to None which will take values of u
Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step
additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step
additional_args_om (list): list of additional arguments to be passed to the observation model during the update step
innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound
innovation when needed
predict_flag (bool): perform prediction? defaults to true
Returns:
X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update
P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update
smoothed_flag (bool): whether estimate returned is filtered or smoothed estimate; filtered estimate is initially
returned until a lag_length worth of observations have been cumulated. | estimators.py | predict_and_or_update | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def predict_and_or_update(self, f, h, Q, R, u, y, u_next=None, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.\n algorithm reference: Algorithm 10.6, page 162 of "Bayesian Filtering and Smoothing"\n\n Args:\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n u_next (*): next input required for function h, defaults to None which will take values of u\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update\n P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update\n smoothed_flag (bool): whether estimate returned is filtered or smoothed estimate; filtered estimate is initially\n returned until a lag_length worth of observations have been cumulated.\n\n '
assert self.init_cond_set, 'User must specify the initial condition separately'
X_fi = ([[]] * (self.lag_interval + 1))
P_fi = ([[]] * (self.lag_interval + 1))
n = self.n
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
if (self.latest_action == 'update'):
X1 = np.concatenate((self.filter_density[(- 1)][0], self.filter_density[(- 1)][0], np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.filter_density[(- 1)][1], self.filter_density[(- 1)][1], Q, Qu, R)
P1[0:n, n:(2 * n)] = self.filter_density[(- 1)][1]
P1[n:(2 * n), 0:n] = self.filter_density[(- 1)][1]
else:
X1 = np.concatenate((self.prevX, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.prevP, Q, Qu, R)
if (u_next is None):
u_next = u
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
if predict_flag:
ia = np.arange(n)
ib = np.arange(n, (2 * n))
iq = np.arange((2 * n), ((2 * n) + nq))
iqu = np.arange(((2 * n) + nq), (((2 * n) + nq) + nqu))
(X, x, P, x1) = self.unscented_transformF(x, W, WeightMat, L, f, u, ia, ib, iq, iqu, additional_args_pm)
self.prevX = X.copy()
self.prevP = P.copy()
X_fi[self.lag_interval] = X[ib, :]
P_fi[self.lag_interval] = P[n:(2 * n), n:(2 * n)]
self.latest_action = 'predict'
if len(y):
if (self.latest_action == 'predict'):
if (not self.backward_pass):
self.pred_density.append((X[ib, :].copy(), P[n:(2 * n), n:(2 * n)].copy()))
self.gain.append(np.matmul(P[0:n, n:(2 * n)], np.linalg.inv(P[n:(2 * n), n:(2 * n)])))
if (len(self.gain) >= self.lag_interval):
self.backward_pass = True
else:
self.pred_density[:(- 1)] = self.pred_density[1:]
self.pred_density[(- 1)] = (X[ib, :].copy(), P[n:(2 * n), n:(2 * n)].copy())
self.gain[:(- 1)] = self.gain[1:]
self.gain[(- 1)] = np.matmul(P[0:n, n:(2 * n)], np.linalg.inv(P[n:(2 * n), n:(2 * n)]))
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange((((2 * n) + nq) + nqu), ((((2 * n) + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u_next, ib, ip, len(y), additional_args_om)
Pxy = np.matmul(np.matmul(x1, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[key, :] = innovation_bound_func[key](innovation[key, :])
X += np.matmul(K, innovation)
P -= np.matmul(K, Pxy.T)
X_fi[self.lag_interval] = X[ib, :]
P_fi[self.lag_interval] = P[n:(2 * n), n:(2 * n)]
if self.backward_pass:
for j in range((self.lag_interval - 1), (- 1), (- 1)):
X_fi[j] = (self.filter_density[j][0] + np.matmul(self.gain[j], (X_fi[(j + 1)] - self.pred_density[j][0])))
P_fi[j] = (self.filter_density[j][1] + np.matmul(np.matmul(self.gain[j], (P_fi[(j + 1)] - self.pred_density[j][1])), self.gain[j].T))
if (self.latest_action == 'update'):
self.filter_density[(- 1)] = (X[ib, :], P[n:(2 * n), n:(2 * n)])
elif (len(self.gain) < self.lag_interval):
self.filter_density.append((X[ib, :], P[n:(2 * n), n:(2 * n)]))
else:
self.filter_density[:(- 1)] = self.filter_density[1:]
self.filter_density[(- 1)] = (X[ib, :], P[n:(2 * n), n:(2 * n)])
self.latest_action = 'update'
return (X_fi, P_fi, self.backward_pass) | def predict_and_or_update(self, f, h, Q, R, u, y, u_next=None, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.\n algorithm reference: Algorithm 10.6, page 162 of "Bayesian Filtering and Smoothing"\n\n Args:\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n u_next (*): next input required for function h, defaults to None which will take values of u\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update\n P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update\n smoothed_flag (bool): whether estimate returned is filtered or smoothed estimate; filtered estimate is initially\n returned until a lag_length worth of observations have been cumulated.\n\n '
assert self.init_cond_set, 'User must specify the initial condition separately'
X_fi = ([[]] * (self.lag_interval + 1))
P_fi = ([[]] * (self.lag_interval + 1))
n = self.n
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
if (self.latest_action == 'update'):
X1 = np.concatenate((self.filter_density[(- 1)][0], self.filter_density[(- 1)][0], np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.filter_density[(- 1)][1], self.filter_density[(- 1)][1], Q, Qu, R)
P1[0:n, n:(2 * n)] = self.filter_density[(- 1)][1]
P1[n:(2 * n), 0:n] = self.filter_density[(- 1)][1]
else:
X1 = np.concatenate((self.prevX, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.prevP, Q, Qu, R)
if (u_next is None):
u_next = u
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
if predict_flag:
ia = np.arange(n)
ib = np.arange(n, (2 * n))
iq = np.arange((2 * n), ((2 * n) + nq))
iqu = np.arange(((2 * n) + nq), (((2 * n) + nq) + nqu))
(X, x, P, x1) = self.unscented_transformF(x, W, WeightMat, L, f, u, ia, ib, iq, iqu, additional_args_pm)
self.prevX = X.copy()
self.prevP = P.copy()
X_fi[self.lag_interval] = X[ib, :]
P_fi[self.lag_interval] = P[n:(2 * n), n:(2 * n)]
self.latest_action = 'predict'
if len(y):
if (self.latest_action == 'predict'):
if (not self.backward_pass):
self.pred_density.append((X[ib, :].copy(), P[n:(2 * n), n:(2 * n)].copy()))
self.gain.append(np.matmul(P[0:n, n:(2 * n)], np.linalg.inv(P[n:(2 * n), n:(2 * n)])))
if (len(self.gain) >= self.lag_interval):
self.backward_pass = True
else:
self.pred_density[:(- 1)] = self.pred_density[1:]
self.pred_density[(- 1)] = (X[ib, :].copy(), P[n:(2 * n), n:(2 * n)].copy())
self.gain[:(- 1)] = self.gain[1:]
self.gain[(- 1)] = np.matmul(P[0:n, n:(2 * n)], np.linalg.inv(P[n:(2 * n), n:(2 * n)]))
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange((((2 * n) + nq) + nqu), ((((2 * n) + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u_next, ib, ip, len(y), additional_args_om)
Pxy = np.matmul(np.matmul(x1, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[key, :] = innovation_bound_func[key](innovation[key, :])
X += np.matmul(K, innovation)
P -= np.matmul(K, Pxy.T)
X_fi[self.lag_interval] = X[ib, :]
P_fi[self.lag_interval] = P[n:(2 * n), n:(2 * n)]
if self.backward_pass:
for j in range((self.lag_interval - 1), (- 1), (- 1)):
X_fi[j] = (self.filter_density[j][0] + np.matmul(self.gain[j], (X_fi[(j + 1)] - self.pred_density[j][0])))
P_fi[j] = (self.filter_density[j][1] + np.matmul(np.matmul(self.gain[j], (P_fi[(j + 1)] - self.pred_density[j][1])), self.gain[j].T))
if (self.latest_action == 'update'):
self.filter_density[(- 1)] = (X[(ib, :)], P[(n:(2 * n), n:(2 * n))])
elif (len(self.gain) < self.lag_interval):
self.filter_density.append((X[(ib, :)], P[(n:(2 * n), n:(2 * n))]))
else:
self.filter_density[:(- 1)] = self.filter_density[1:]
self.filter_density[(- 1)] = (X[(ib, :)], P[(n:(2 * n), n:(2 * n))])
self.latest_action = 'update'
return (X_fi, P_fi, self.backward_pass)<|docstring|>Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.
algorithm reference: Algorithm 10.6, page 162 of "Bayesian Filtering and Smoothing"
Args:
f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)
h (function): function handle for the observation model; expected signature h(state, input, noise, ...)
Q (numpy array [nq x nq]): process model noise covariance in the prediction step
R (numpy array [nr x nr]): observation model noise covariance in the update step
u (*): current input required for function f & possibly function h
y (numpy array [nu x 1]): current measurement/output of the system
u_next (*): next input required for function h, defaults to None which will take values of u
Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step
additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step
additional_args_om (list): list of additional arguments to be passed to the observation model during the update step
innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound
innovation when needed
predict_flag (bool): perform prediction? defaults to true
Returns:
X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update
P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update
smoothed_flag (bool): whether the returned estimate is filtered or smoothed; the filtered estimate is initially
returned until a lag_length's worth of observations has been accumulated.<|endoftext|>
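A minimal usage sketch for the fixed-lag smoother API documented above; the smoother object and the f, h, Q, R arguments are assumed to be constructed as in estimators.py, and run_fixed_lag is a hypothetical helper added only to illustrate the calling pattern.

def run_fixed_lag(smoother, f, h, Q, R, inputs, outputs, X0, P0):
    # hypothetical driver around the methods documented above
    smoother.set_initial_cond(X0, P0)
    results = []
    for u, y in zip(inputs, outputs):
        X_fi, P_fi, smoothed = smoother.predict_and_or_update(f, h, Q, R, u, y)
        # until a full lag window of observations has accumulated, smoothed is
        # False and X_fi/P_fi hold filtered rather than smoothed estimates;
        # index 0 is the oldest (most smoothed) entry of the fixed interval
        results.append((X_fi[0], P_fi[0], smoothed))
    return results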
742448c9b7dc6eef5df2445166d8784b6fe17c8e26ee0013d65bce2a08404f66 | def unscented_transformF(self, x, W, WeightMat, L, f, u, ia, ib, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]: 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the frozen states in sima/cubature points\n ib (numpy array [n_s x 1]): row indices of the dynamic states in sima/cubature points\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
order = (len(ia) + len(ib))
Y = np.zeros((order, 1))
y = x
for k in range(L):
if len(iqu):
y[(ib, k)] = f(x[(ib, k)], u, x[(iq, k)], x[(iqu, k)], *additional_args)
else:
y[(ib, k)] = f(x[(ib, k)], u, x[(iq, k)], np.zeros(u.shape), *additional_args)
Y += (W.flat[k] * y[(np.arange(order), k:(k + 1))])
y1 = (y[(np.arange(order), :)] - Y)
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | Function to propagate sigma/cubature points through process model function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal
L (int): number of points
f (function): function handle for the process model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
ia (numpy array [n_s x 1]): row indices of the frozen states in sigma/cubature points
ib (numpy array [n_s x 1]): row indices of the dynamic states in sigma/cubature points
iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points
iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points
additional_args (list): list of additional arguments to be passed to the process model
Returns:
Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f
y (numpy array [n_a x L]): Transformed sigma/cubature points
P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f
y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points | estimators.py | unscented_transformF | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def unscented_transformF(self, x, W, WeightMat, L, f, u, ia, ib, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]: 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the frozen states in sima/cubature points\n ib (numpy array [n_s x 1]): row indices of the dynamic states in sima/cubature points\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
order = (len(ia) + len(ib))
Y = np.zeros((order, 1))
y = x
for k in range(L):
if len(iqu):
y[(ib, k)] = f(x[(ib, k)], u, x[(iq, k)], x[(iqu, k)], *additional_args)
else:
y[(ib, k)] = f(x[(ib, k)], u, x[(iq, k)], np.zeros(u.shape), *additional_args)
Y += (W.flat[k] * y[(np.arange(order), k:(k + 1))])
y1 = (y[(np.arange(order), :)] - Y)
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | def unscented_transformF(self, x, W, WeightMat, L, f, u, ia, ib, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]: 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n ia (numpy array [n_s x 1]): row indices of the frozen states in sima/cubature points\n ib (numpy array [n_s x 1]): row indices of the dynamic states in sima/cubature points\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
order = (len(ia) + len(ib))
Y = np.zeros((order, 1))
y = x
for k in range(L):
if len(iqu):
y[(ib, k)] = f(x[(ib, k)], u, x[(iq, k)], x[(iqu, k)], *additional_args)
else:
y[(ib, k)] = f(x[(ib, k)], u, x[(iq, k)], np.zeros(u.shape), *additional_args)
Y += (W.flat[k] * y[(np.arange(order), k:(k + 1))])
y1 = (y[(np.arange(order), :)] - Y)
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1)<|docstring|>Function to propagate sigma/cubature points through process model function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal
L (int): number of points
f (function): function handle for the process model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
ia (numpy array [n_s x 1]): row indices of the frozen states in sigma/cubature points
ib (numpy array [n_s x 1]): row indices of the dynamic states in sigma/cubature points
iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points
iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points
additional_args (list): list of additional arguments to be passed to the process model
Returns:
Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f
y (numpy array [n_a x L]): Transformed sigma/cubature points
P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f
y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points<|endoftext|> |
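The weighted mean/covariance bookkeeping inside unscented_transformF can be checked in isolation; a small numeric sketch with toy points and weights (values not taken from the repository):

import numpy as np

y = np.array([[1.0, 2.0, 3.0],
              [0.5, 0.0, -0.5]])            # 2 states, L = 3 transformed points
W = np.array([0.5, 0.25, 0.25])             # weights summing to one
WeightMat = np.diag(W)

Y = (y * W).sum(axis=1, keepdims=True)      # weighted mean, as the W.flat[k] loop above
y1 = y - Y                                  # zero-mean points
P = y1 @ WeightMat @ y1.T                   # weighted covariance
print(Y.ravel(), P)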
101e800977e4d7dd3bd420e330d7411080395247fc38e6576262890e35613200 | def set_initial_cond(self, X, P):
'\n Set the initial condition of the smoother, i.e. the distribution at time zero.\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n\n '
self.init_cond_set = True
self.n = len(X)
self.na = ((self.lag_interval + 1) * self.n)
self.ia = np.arange(self.n)
self.ib = np.arange(self.n, self.na)
self.X_aug = np.tile(X, ((self.lag_interval + 1), 1))
self.P_aug = np.tile(P, ((self.lag_interval + 1), (self.lag_interval + 1))) | Set the initial condition of the smoother, i.e. the distribution at time zero.
Args:
X (numpy array [n x 1]): expected value of the states
P (numpy array [n x n]): covariance of the states | estimators.py | set_initial_cond | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def set_initial_cond(self, X, P):
'\n Set the initial condition of the smoother, i.e. the distribution at time zero.\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n\n '
self.init_cond_set = True
self.n = len(X)
self.na = ((self.lag_interval + 1) * self.n)
self.ia = np.arange(self.n)
self.ib = np.arange(self.n, self.na)
self.X_aug = np.tile(X, ((self.lag_interval + 1), 1))
self.P_aug = np.tile(P, ((self.lag_interval + 1), (self.lag_interval + 1))) | def set_initial_cond(self, X, P):
'\n Set the initial condition of the smoother, i.e. the distribution at time zero.\n\n Args:\n X (numpy array [n x 1]): expected value of the states\n P (numpy array [n x n]): covariance of the states\n\n '
self.init_cond_set = True
self.n = len(X)
self.na = ((self.lag_interval + 1) * self.n)
self.ia = np.arange(self.n)
self.ib = np.arange(self.n, self.na)
self.X_aug = np.tile(X, ((self.lag_interval + 1), 1))
self.P_aug = np.tile(P, ((self.lag_interval + 1), (self.lag_interval + 1)))<|docstring|>Set the initial condition of the smoother, i.e. the distribution at time zero.
Args:
X (numpy array [n x 1]): expected value of the states
P (numpy array [n x n]): covariance of the states<|endoftext|> |
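How set_initial_cond lays out the augmented mean and covariance, shown with illustrative sizes (n = 2 states, lag_interval = 3):

import numpy as np

n, lag_interval = 2, 3
X = np.arange(n, dtype=float).reshape(n, 1)
P = np.eye(n)
X_aug = np.tile(X, (lag_interval + 1, 1))                 # stacked means, shape (8, 1)
P_aug = np.tile(P, (lag_interval + 1, lag_interval + 1))  # grid of n x n blocks, shape (8, 8)
print(X_aug.shape, P_aug.shape)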
a033fc1699f484b05eb623c1b88c6f9d272a0d3aac164e3f685b49dee23c32ef | def predict_and_or_update(self, f, h, Q, R, u, y, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.\n\n Args:\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update\n P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update\n smoothed_flag (bool): whether estimate returned is filtered or smoothed estimate; filtered estimate is initially\n returned until a lag_length worth of observations have been cumulated.\n\n '
assert self.init_cond_set, 'User must specify the initial condition separately'
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
X1 = np.concatenate((self.X_aug, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.P_aug, Q, Qu, R)
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
if predict_flag:
iq = np.arange(self.na, (self.na + nq))
iqu = np.arange((self.na + nq), ((self.na + nq) + nqu))
(self.X_aug, x, self.P_aug, x1) = self.unscented_transformF(x, W, WeightMat, L, f, u, iq, iqu, additional_args_pm)
if len(y):
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange(((self.na + nq) + nqu), (((self.na + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u, self.ia, ip, len(y), additional_args_om)
Pxy = np.matmul(np.matmul(x1, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[(key, :)] = innovation_bound_func[key](innovation[(key, :)])
self.X_aug += np.matmul(K, innovation)
self.P_aug -= np.matmul(K, Pxy.T)
return (self.X_aug[((self.n * self.lag_interval):, :)], self.P_aug[((self.n * self.lag_interval):, (self.n * self.lag_interval):)]) | Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.
Args:
f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)
h (function): function handle for the observation model; expected signature h(state, input, noise, ...)
Q (numpy array [nq x nq]): process model noise covariance in the prediction step
R (numpy array [nr x nr]): observation model noise covariance in the update step
u (*): current input required for function f & possibly function h
y (numpy array [nu x 1]): current measurement/output of the system
Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step
additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step
additional_args_om (list): list of additional arguments to be passed to the observation model during the update step
innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound
innovation when needed
predict_flag (bool): perform prediction? defaults to true
Returns:
X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update
P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update
smoothed_flag (bool): whether the returned estimate is filtered or smoothed; the filtered estimate is initially
returned until a lag_length's worth of observations has been accumulated. | estimators.py | predict_and_or_update | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def predict_and_or_update(self, f, h, Q, R, u, y, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.\n\n Args:\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update\n P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update\n smoothed_flag (bool): whether estimate returned is filtered or smoothed estimate; filtered estimate is initially\n returned until a lag_length worth of observations have been cumulated.\n\n '
assert self.init_cond_set, 'User must specify the initial condition separately'
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
X1 = np.concatenate((self.X_aug, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.P_aug, Q, Qu, R)
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
if predict_flag:
iq = np.arange(self.na, (self.na + nq))
iqu = np.arange((self.na + nq), ((self.na + nq) + nqu))
(self.X_aug, x, self.P_aug, x1) = self.unscented_transformF(x, W, WeightMat, L, f, u, iq, iqu, additional_args_pm)
if len(y):
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange(((self.na + nq) + nqu), (((self.na + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u, self.ia, ip, len(y), additional_args_om)
Pxy = np.matmul(np.matmul(x1, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[(key, :)] = innovation_bound_func[key](innovation[(key, :)])
self.X_aug += np.matmul(K, innovation)
self.P_aug -= np.matmul(K, Pxy.T)
return (self.X_aug[((self.n * self.lag_interval):, :)], self.P_aug[((self.n * self.lag_interval):, (self.n * self.lag_interval):)]) | def predict_and_or_update(self, f, h, Q, R, u, y, Qu=None, additional_args_pm=[], additional_args_om=[], innovation_bound_func={}, predict_flag=True):
'\n Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.\n\n Args:\n f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)\n h (function): function handle for the observation model; expected signature h(state, input, noise, ...)\n Q (numpy array [nq x nq]): process model noise covariance in the prediction step\n R (numpy array [nr x nr]): observation model noise covariance in the update step\n u (*): current input required for function f & possibly function h\n y (numpy array [nu x 1]): current measurement/output of the system\n Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step\n additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step\n additional_args_om (list): list of additional arguments to be passed to the observation model during the update step\n innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound\n innovation when needed\n predict_flag (bool): perform prediction? defaults to true\n\n Returns:\n X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update\n P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update\n smoothed_flag (bool): whether estimate returned is filtered or smoothed estimate; filtered estimate is initially\n returned until a lag_length worth of observations have been cumulated.\n\n '
assert self.init_cond_set, 'User must specify the initial condition separately'
nq = Q.shape[0]
if (Qu is not None):
nqu = Qu.shape[0]
else:
nqu = 0
Qu = np.zeros((nqu, nqu))
nr = R.shape[0]
X1 = np.concatenate((self.X_aug, np.zeros((((nq + nqu) + nr), 1))), axis=0)
P1 = block_diag(self.P_aug, Q, Qu, R)
if (self.method == 'UKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.sigmas2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.sigmas4(X1, P1)
elif (self.method == 'CKF'):
if (self.order == 2):
(x, L, W, WeightMat) = self.cubature2(X1, P1)
elif (self.order == 4):
(x, L, W, WeightMat) = self.cubature4(X1, P1)
if predict_flag:
iq = np.arange(self.na, (self.na + nq))
iqu = np.arange((self.na + nq), ((self.na + nq) + nqu))
(self.X_aug, x, self.P_aug, x1) = self.unscented_transformF(x, W, WeightMat, L, f, u, iq, iqu, additional_args_pm)
if len(y):
for key in innovation_bound_func:
assert (key in range(len(y))), 'Key of innovation bound function dictionary should be within the length of the output'
assert callable(innovation_bound_func[key]), 'Innovation bound function is not callable'
ip = np.arange(((self.na + nq) + nqu), (((self.na + nq) + nqu) + nr))
(Z, _, Pz, z2) = self.unscented_transformH(x, W, WeightMat, L, h, u, self.ia, ip, len(y), additional_args_om)
Pxy = np.matmul(np.matmul(x1, WeightMat), z2.T)
K = np.matmul(Pxy, np.linalg.inv(Pz))
innovation = (y - Z)
for key in innovation_bound_func:
innovation[(key, :)] = innovation_bound_func[key](innovation[(key, :)])
self.X_aug += np.matmul(K, innovation)
self.P_aug -= np.matmul(K, Pxy.T)
return (self.X_aug[((self.n * self.lag_interval):, :)], self.P_aug[((self.n * self.lag_interval):, (self.n * self.lag_interval):)])<|docstring|>Perform one iteration of prediction and/or update + backward pass to produce smoothed estimate when applicable.
Args:
f (function): function handle for the process model; expected signature f(state, input, model noise, input noise, ...)
h (function): function handle for the observation model; expected signature h(state, input, noise, ...)
Q (numpy array [nq x nq]): process model noise covariance in the prediction step
R (numpy array [nr x nr]): observation model noise covariance in the update step
u (*): current input required for function f & possibly function h
y (numpy array [nu x 1]): current measurement/output of the system
Qu (numpy array [nqu x nqu]): input noise covariance in the prediction step
additional_args_pm (list): list of additional arguments to be passed to the process model during the prediction step
additional_args_om (list): list of additional arguments to be passed to the observation model during the update step
innovation_bound_func (dict): dictionary with innovation index as keys and callable function as value to bound
innovation when needed
predict_flag (bool): perform prediction? defaults to true
Returns:
X_fi (numpy array [n x 1]): fixed-interval list of smoothed expected values of the states with recent prediction & update
P_fi (numpy array [n x n]): fixed-interval list of smoothed covariance of the states with recent prediction & update
smoothed_flag (bool): whether the returned estimate is filtered or smoothed; the filtered estimate is initially
returned until a lag_length's worth of observations has been accumulated.<|endoftext|>
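The augmented state above acts as a shift register: each prediction writes the new state into the top n rows while older blocks move down one slot, and the method returns the bottom (oldest) block as the fixed-lag smoothed estimate. A toy sketch of that bookkeeping only (new_state stands in for the actual propagated mean):

import numpy as np

n, lag_interval = 2, 3
na = (lag_interval + 1) * n
X_aug = np.arange(na, dtype=float).reshape(na, 1)

new_state = np.full((n, 1), 99.0)               # stand-in for the f(...) prediction
X_aug = np.vstack([new_state, X_aug[:na - n]])  # newest block on top, oldest drops out
oldest = X_aug[n * lag_interval:, :]            # the slice returned above
print(oldest.ravel())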
e9fc1fbf687a1bcdc9f49594de31c87ec92f79a060a5a5651849536f3199788d | def unscented_transformF(self, x, W, WeightMat, L, f, u, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]: 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
ic = (self.ib - self.n)
Y = np.zeros((self.na, 1))
y = x
for k in range(L):
y[(self.ib, k)] = x[(ic, k)]
if len(iqu):
y[(self.ia, k)] = f(x[(self.ia, k)], u, x[(iq, k)], x[(iqu, k)], *additional_args)
else:
y[(self.ia, k)] = f(x[(self.ia, k)], u, x[(iq, k)], np.zeros(u.shape), *additional_args)
Y += (W.flat[k] * y[(np.arange(self.na), k:(k + 1))])
y1 = (y[(np.arange(self.na), :)] - Y)
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | Function to propagate sigma/cubature points through process model function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal
L (int): number of points
f (function): function handle for the process model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points
iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points
additional_args (list): list of additional arguments to be passed to the process model
Returns:
Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f
y (numpy array [n_a x L]): Transformed sigma/cubature points
P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f
y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points | estimators.py | unscented_transformF | karan-narula/System-Identification-Tools-for-Dynamic-System | 5 | python | def unscented_transformF(self, x, W, WeightMat, L, f, u, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]: 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
ic = (self.ib - self.n)
Y = np.zeros((self.na, 1))
y = x
for k in range(L):
y[(self.ib, k)] = x[(ic, k)]
if len(iqu):
y[(self.ia, k)] = f(x[(self.ia, k)], u, x[(iq, k)], x[(iqu, k)], *additional_args)
else:
y[(self.ia, k)] = f(x[(self.ia, k)], u, x[(iq, k)], np.zeros(u.shape), *additional_args)
Y += (W.flat[k] * y[(np.arange(self.na), k:(k + 1))])
y1 = (y[(np.arange(self.na), :)] - Y)
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1) | def unscented_transformF(self, x, W, WeightMat, L, f, u, iq, iqu, additional_args):
'\n Function to propagate sigma/cubature points through process model function.\n\n Args:\n x (numpy array [n_a x L]): sigma/cubature points\n W (numpy array [L x 1 or 1 x L]: 1D Weight array of the sigma/cubature points\n WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal\n L (int): number of points\n f (function): function handle for the process model; expected signature f(state, input, noise, ...)\n u (?): current input required for function f\n iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points\n iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points\n additional_args (list): list of additional arguments to be passed to the process model\n\n Returns:\n Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f\n y (numpy array [n_a x L]): Transformed sigma/cubature points\n P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f\n y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points\n\n '
ic = (self.ib - self.n)
Y = np.zeros((self.na, 1))
y = x
for k in range(L):
y[(self.ib, k)] = x[(ic, k)]
if len(iqu):
y[(self.ia, k)] = f(x[(self.ia, k)], u, x[(iq, k)], x[(iqu, k)], *additional_args)
else:
y[(self.ia, k)] = f(x[(self.ia, k)], u, x[(iq, k)], np.zeros(u.shape), *additional_args)
Y += (W.flat[k] * y[(np.arange(self.na), k:(k + 1))])
y1 = (y[(np.arange(self.na), :)] - Y)
P = np.matmul(np.matmul(y1, WeightMat), y1.T)
return (Y, y, P, y1)<|docstring|>Function to propagate sigma/cubature points through process model function.
Args:
x (numpy array [n_a x L]): sigma/cubature points
W (numpy array [L x 1 or 1 x L]): 1D Weight array of the sigma/cubature points
WeightMat (numpy array [L x L]): weight matrix with weights in W of the points on the diagonal
L (int): number of points
f (function): function handle for the process model; expected signature f(state, input, noise, ...)
u (?): current input required for function f
iq (numpy array [n_q x 1]): row indices of the process noise in sigma/cubature points
iqu (numpy array [n_qu x 1]): row indices of the input noise in sigma/cubature points
additional_args (list): list of additional arguments to be passed to the process model
Returns:
Y (numpy array [n_s x 1]): Expected value vector of the result from transformation function f
y (numpy array [n_a x L]): Transformed sigma/cubature points
P (numpy array [n_s x n_s]): Covariance matrix of the result from transformation function f
y1 (numpy array [n_s x L]): zero-mean Transformed sigma/cubature points<|endoftext|> |
1988b3aec96e789a6374d4030eb1b8c828982129fbb43c3a145fe6b335fa1329 | def from_name_to_parton(name_parton):
'\n from string name, to parton object\n '
for parton in list_partons:
if (name_parton == parton.name):
return parton | from string name, to parton object | EoS_HRG/HRG.py | from_name_to_parton | pierre-moreau/EoS_HRG | 0 | python | def from_name_to_parton(name_parton):
'\n \n '
for parton in list_partons:
if (name_parton == parton.name):
return parton | def from_name_to_parton(name_parton):
'\n \n '
for parton in list_partons:
if (name_parton == parton.name):
return parton<|docstring|>from string name, to parton object<|endoftext|> |
624853655aa591266f716e549997b5d6d89a8fd58b3f0cbf62d600dff9f92e73 | def Bcharge(particle):
'\n Return Baryon charge of the particle object\n '
if is_baryon(particle):
pdg = particle.pdgid
if (pdg > 0):
Bcharge = 1
elif (pdg < 0):
Bcharge = (- 1)
else:
Bcharge = 0
return Bcharge | Return Baryon charge of the particle object | EoS_HRG/HRG.py | Bcharge | pierre-moreau/EoS_HRG | 0 | python | def Bcharge(particle):
'\n \n '
if is_baryon(particle):
pdg = particle.pdgid
if (pdg > 0):
Bcharge = 1
elif (pdg < 0):
Bcharge = (- 1)
else:
Bcharge = 0
return Bcharge | def Bcharge(particle):
'\n \n '
if is_baryon(particle):
pdg = particle.pdgid
if (pdg > 0):
Bcharge = 1
elif (pdg < 0):
Bcharge = (- 1)
else:
Bcharge = 0
return Bcharge<|docstring|>Return Baryon charge of the particle object<|endoftext|> |
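A toy check of the baryon-number rule in Bcharge, with PDG IDs hard-coded instead of particle objects (proton, antiproton, pi+):

for pdg, is_baryon in [(2212, True), (-2212, True), (211, False)]:
    B = (1 if pdg > 0 else -1) if is_baryon else 0
    print(pdg, B)    # 2212 -> 1, -2212 -> -1, 211 -> 0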
c94926b1c48250c12a850d67bed87ae938dc691fb3d0653b0886dfeaa55ba5df | def Qcharge(particle):
'\n Return electric charge of the particle object\n '
Qcharge = particle.charge
return int(Qcharge) | Return electric charge of the particle object | EoS_HRG/HRG.py | Qcharge | pierre-moreau/EoS_HRG | 0 | python | def Qcharge(particle):
'\n \n '
Qcharge = particle.charge
return int(Qcharge) | def Qcharge(particle):
'\n \n '
Qcharge = particle.charge
return int(Qcharge)<|docstring|>Return electric charge of the particle object<|endoftext|>
5a433b05da1bdf27093cc057bd57384165ff5b241e4fcc0c011be2ef02802182 | def Scharge(particle):
'\n Return strangeness of the particle object\n '
pdg = particle.pdgid
if (pdg.has_strange or (not pdg.is_valid)):
if is_meson(particle):
try:
match = re.match('([A-Z,a-z]?)([A-Z,a-z]?)', particle.quarks)
quark1 = from_name_to_parton(match.group(1))
quark2 = from_name_to_parton(match.group(2))
Scharge = (quark1.Scharge + quark2.Scharge)
except:
Scharge = 0
elif is_baryon(particle):
match = re.match('([A-Z,a-z]?)([A-Z,a-z]?)([A-Z,a-z]?)', particle.quarks)
quark1 = from_name_to_parton(match.group(1))
quark2 = from_name_to_parton(match.group(2))
quark3 = from_name_to_parton(match.group(3))
Scharge = ((quark1.Scharge + quark2.Scharge) + quark3.Scharge)
else:
Scharge = 0
else:
Scharge = 0
return int(Scharge) | Return strangeness of the particle object | EoS_HRG/HRG.py | Scharge | pierre-moreau/EoS_HRG | 0 | python | def Scharge(particle):
'\n \n '
pdg = particle.pdgid
if (pdg.has_strange or (not pdg.is_valid)):
if is_meson(particle):
try:
match = re.match('([A-Z,a-z]?)([A-Z,a-z]?)', particle.quarks)
quark1 = from_name_to_parton(match.group(1))
quark2 = from_name_to_parton(match.group(2))
Scharge = (quark1.Scharge + quark2.Scharge)
except:
Scharge = 0
elif is_baryon(particle):
match = re.match('([A-Z,a-z]?)([A-Z,a-z]?)([A-Z,a-z]?)', particle.quarks)
quark1 = from_name_to_parton(match.group(1))
quark2 = from_name_to_parton(match.group(2))
quark3 = from_name_to_parton(match.group(3))
Scharge = ((quark1.Scharge + quark2.Scharge) + quark3.Scharge)
else:
Scharge = 0
else:
Scharge = 0
return int(Scharge) | def Scharge(particle):
'\n \n '
pdg = particle.pdgid
if (pdg.has_strange or (not pdg.is_valid)):
if is_meson(particle):
try:
match = re.match('([A-Z,a-z]?)([A-Z,a-z]?)', particle.quarks)
quark1 = from_name_to_parton(match.group(1))
quark2 = from_name_to_parton(match.group(2))
Scharge = (quark1.Scharge + quark2.Scharge)
except:
Scharge = 0
elif is_baryon(particle):
match = re.match('([A-Z,a-z]?)([A-Z,a-z]?)([A-Z,a-z]?)', particle.quarks)
quark1 = from_name_to_parton(match.group(1))
quark2 = from_name_to_parton(match.group(2))
quark3 = from_name_to_parton(match.group(3))
Scharge = ((quark1.Scharge + quark2.Scharge) + quark3.Scharge)
else:
Scharge = 0
else:
Scharge = 0
return int(Scharge)<|docstring|>Return strangeness of the particle object<|endoftext|> |
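A sketch of the quark-string parsing in Scharge. In the particle package an uppercase letter marks an antiquark, so a K+ (u plus anti-s) is reported as 'uS'; the per-quark strangeness values below are hard-coded stand-ins for list_partons:

import re

S_of = {'s': -1, 'S': +1}                        # strange quark / antiquark
quarks = 'uS'                                    # quark content of a K+
m = re.match('([A-Z,a-z]?)([A-Z,a-z]?)', quarks)
S = sum(S_of.get(q, 0) for q in m.groups())
print(S)                                         # +1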
3deec668f6f0633d55efa8b726d153755fa1f795b4eba68c170204490212f40b | def muk(particle, muB, muQ, muS):
'\n Return the chemical potential of the particle object\n \\mu = B*mu_B + Q*mu_Q + S*mu_S\n '
muk = (((Bcharge(particle) * muB) + (Qcharge(particle) * muQ)) + (Scharge(particle) * muS))
return muk | Return the chemical potential of the particle object
\mu = B*mu_B + Q*mu_Q + S*mu_S | EoS_HRG/HRG.py | muk | pierre-moreau/EoS_HRG | 0 | python | def muk(particle, muB, muQ, muS):
'\n Return the chemical potential of the particle object\n \\mu = B*mu_B + Q*mu_Q + S*mu_S\n '
muk = (((Bcharge(particle) * muB) + (Qcharge(particle) * muQ)) + (Scharge(particle) * muS))
return muk | def muk(particle, muB, muQ, muS):
'\n Return the chemical potential of the particle object\n \\mu = B*mu_B + Q*mu_Q + S*mu_S\n '
muk = (((Bcharge(particle) * muB) + (Qcharge(particle) * muQ)) + (Scharge(particle) * muS))
return muk<|docstring|>Return the chemical potential of the particle object
\mu = B*mu_B + Q*mu_Q + S*mu_S<|endoftext|>
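A worked example of mu = B*mu_B + Q*mu_Q + S*mu_S with hard-coded charges (the chemical potentials in GeV are illustrative; no particle objects are needed):

def chem_pot(B, Q, S, muB, muQ, muS):
    return B * muB + Q * muQ + S * muS

muB, muQ, muS = 0.3, -0.01, 0.07
print(chem_pot(1, 1, 0, muB, muQ, muS))    # proton: 0.3 - 0.01 = 0.29
print(chem_pot(0, 1, 1, muB, muQ, muS))    # K+ (S = +1): -0.01 + 0.07 = 0.06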
2a42544646265e24dd5b595ba047a587091b4756c301ab21d524e4070eb9cb5c | def J(particle):
'\n spin of the particle object\n '
xJ = particle.J
if (xJ == None):
if (('N(22' in particle.name) or ('Lambda(2350)' in particle.name)):
xJ = (9 / 2)
if (('Delta(2420)' in particle.name) or ('N(2600)' in particle.name)):
xJ = (11 / 2)
return xJ | spin of the particle object | EoS_HRG/HRG.py | J | pierre-moreau/EoS_HRG | 0 | python | def J(particle):
'\n \n '
xJ = particle.J
if (xJ == None):
if (('N(22' in particle.name) or ('Lambda(2350)' in particle.name)):
xJ = (9 / 2)
if (('Delta(2420)' in particle.name) or ('N(2600)' in particle.name)):
xJ = (11 / 2)
return xJ | def J(particle):
'\n \n '
xJ = particle.J
if (xJ == None):
if (('N(22' in particle.name) or ('Lambda(2350)' in particle.name)):
xJ = (9 / 2)
if (('Delta(2420)' in particle.name) or ('N(2600)' in particle.name)):
xJ = (11 / 2)
return xJ<|docstring|>spin of the particle object<|endoftext|> |
53f69a6a8f43f5e58ab982752180ac148725b186b2bd956320c07c61e8c9bcf9 | def d_spin(particle):
'\n degeneracy factor of the particle object\n d = 2*J+1\n '
return ((2 * J(particle)) + 1) | degeneracy factor of the particle object
d = 2*J+1 | EoS_HRG/HRG.py | d_spin | pierre-moreau/EoS_HRG | 0 | python | def d_spin(particle):
'\n degeneracy factor of the particle object\n d = 2*J+1\n '
return ((2 * J(particle)) + 1) | def d_spin(particle):
'\n degeneracy factor of the particle object\n d = 2*J+1\n '
return ((2 * J(particle)) + 1)<|docstring|>degeneracy factor of the particle object
d = 2*J+1<|endoftext|> |
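The degeneracy rule d = 2J + 1 on a few hard-coded spins (values for illustration only):

for name, J in [('pi', 0.0), ('rho(770)', 1.0), ('Delta(1232)', 1.5)]:
    print(name, 2 * J + 1)    # 1.0, 3.0, 4.0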
703218607001464aea0a94daf9c8541e3de5dc60d4c819986f3e1660d5620efc | def BW(m, M0, gamma):
'\n Breit-Wigner spectral function\n PHYSICAL REVIEW C 98, 034906 (2018)\n '
BW = (((((2.0 * gamma) * M0) * m) / ((((m ** 2.0) - (M0 ** 2.0)) ** 2.0) + ((M0 * gamma) ** 2.0))) / pi)
return BW | Breit-Wigner spectral function
PHYSICAL REVIEW C 98, 034906 (2018) | EoS_HRG/HRG.py | BW | pierre-moreau/EoS_HRG | 0 | python | def BW(m, M0, gamma):
'\n Breit-Wigner spectral function\n PHYSICAL REVIEW C 98, 034906 (2018)\n '
BW = (((((2.0 * gamma) * M0) * m) / ((((m ** 2.0) - (M0 ** 2.0)) ** 2.0) + ((M0 * gamma) ** 2.0))) / pi)
return BW | def BW(m, M0, gamma):
'\n Breit-Wigner spectral function\n PHYSICAL REVIEW C 98, 034906 (2018)\n '
BW = (((((2.0 * gamma) * M0) * m) / ((((m ** 2.0) - (M0 ** 2.0)) ** 2.0) + ((M0 * gamma) ** 2.0))) / pi)
return BW<|docstring|>Breit-Wigner spectral function
PHYSICAL REVIEW C 98, 034906 (2018)<|endoftext|> |
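A numeric sanity check of the Breit-Wigner weight that norm_BW stores later in this file: over the finite window [M0 - 2*Gamma, M0 + 2*Gamma] the integral comes out below one because the tails are cut (rho(770)-like numbers, for illustration):

from math import pi
from scipy import integrate

def BW(m, M0, gamma):
    return (2.0 * gamma * M0 * m / ((m**2 - M0**2)**2 + (M0 * gamma)**2)) / pi

M0, gamma = 0.775, 0.149    # GeV
norm, _ = integrate.quad(BW, M0 - 2.0 * gamma, M0 + 2.0 * gamma, args=(M0, gamma))
print(norm)                 # slightly below 1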
003fce627e4c6389c9ecbc9d070fd6b861fd513c24b0610353074359e0579ee2 | def print_info(part):
'\n Print info of a particle object\n '
if (not isinstance(part, list)):
print(f'{part} {part.pdgid}; mass {mass(part)} [GeV]; width {width(part)} [GeV]; J = {J(part)}; {part.quarks}; B,Q,S = {Bcharge(part)},{Qcharge(part)},{Scharge(part)}; anti = {(to_antiparticle(part) if has_anti(part) else False)}')
else:
for xpart in part:
print_info(xpart) | Print info of a particle object | EoS_HRG/HRG.py | print_info | pierre-moreau/EoS_HRG | 0 | python | def print_info(part):
'\n \n '
if (not isinstance(part, list)):
print(f'{part} {part.pdgid}; mass {mass(part)} [GeV]; width {width(part)} [GeV]; J = {J(part)}; {part.quarks}; B,Q,S = {Bcharge(part)},{Qcharge(part)},{Scharge(part)}; anti = {(to_antiparticle(part) if has_anti(part) else False)}')
else:
for xpart in part:
print_info(xpart) | def print_info(part):
'\n \n '
if (not isinstance(part, list)):
print(f'{part} {part.pdgid}; mass {mass(part)} [GeV]; width {width(part)} [GeV]; J = {J(part)}; {part.quarks}; B,Q,S = {Bcharge(part)},{Qcharge(part)},{Scharge(part)}; anti = {(to_antiparticle(part) if has_anti(part) else False)}')
else:
for xpart in part:
print_info(xpart)<|docstring|>Print info of a particle object<|endoftext|> |
78aa0d6ef5ef04c79cef69622056dc088fdff55d6e6dfc0bce84e4c056abc0b7 | def threshold(list_part):
'\n Average threshold energy for the particle\n sum of decay product masses weighted by the corresponding branching ratios (branch)\n '
mth_dict = {}
for hadron in list_part:
thres = 0.0
list_decays = part_decay(hadron)
if (list_decays != None):
for decay in list_decays:
br = decay[0]
children = decay[1]
thres += (br * sum([mass(child) for child in children]))
mth_dict.update({hadron.name: thres})
return mth_dict | Average threshold energy for the particle
sum of decay product masses weighted by the corresponding branching ratios (branch) | EoS_HRG/HRG.py | threshold | pierre-moreau/EoS_HRG | 0 | python | def threshold(list_part):
'\n Average threshold energy for the particle\n sum of decay products weighted by the corresponding branching ratios (branch)\n '
mth_dict = {}
for hadron in list_part:
thres = 0.0
list_decays = part_decay(hadron)
if (list_decays != None):
for decay in list_decays:
br = decay[0]
children = decay[1]
thres += (br * sum([mass(child) for child in children]))
mth_dict.update({hadron.name: thres})
return mth_dict | def threshold(list_part):
'\n Average threshold energy for the particle\n sum of decay products weighted by the corresponding branching ratios (branch)\n '
mth_dict = {}
for hadron in list_part:
thres = 0.0
list_decays = part_decay(hadron)
if (list_decays != None):
for decay in list_decays:
br = decay[0]
children = decay[1]
thres += (br * sum([mass(child) for child in children]))
mth_dict.update({hadron.name: thres})
return mth_dict<|docstring|>Average threshold energy for the particle
sum of decay product masses weighted by the corresponding branching ratios (branch)<|endoftext|>
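The branching-ratio weighting in threshold, run on a toy decay table (masses in GeV; part_decay and mass normally supply these):

decays = [(0.6, [0.140, 0.140]),            # 60% -> pi pi
          (0.4, [0.140, 0.140, 0.140])]     # 40% -> pi pi pi
thres = sum(br * sum(masses) for br, masses in decays)
print(thres)                                # 0.6*0.280 + 0.4*0.420 = 0.336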
0426ac0227211bde17880eb58486ffd8152471108d42b92eea2771d2c55b18dc | def norm_BW():
'\n Normalization factor for the spectral function of each particle\n '
norm = np.zeros(len((HRG_mesons + HRG_baryons)))
for (ip, part) in enumerate((HRG_mesons + HRG_baryons)):
xmass = mass(part)
xwidth = width(part)
if ((xwidth / xmass) <= thres_off):
continue
try:
mthres = mth_all[part.name]
except:
mthres = (xmass - (2.0 * xwidth))
mth_all[part.name] = mthres
mmin = max(mthres, (xmass - (2.0 * xwidth)))
mmax = (xmass + (2.0 * xwidth))
norm[ip] = integrate.quad(BW, mmin, mmax, args=(xmass, xwidth))[0]
return dict(zip((HRG_mesons + HRG_baryons), norm)) | Normalization factor for the spectral function of each particle | EoS_HRG/HRG.py | norm_BW | pierre-moreau/EoS_HRG | 0 | python | def norm_BW():
'\n \n '
norm = np.zeros(len((HRG_mesons + HRG_baryons)))
for (ip, part) in enumerate((HRG_mesons + HRG_baryons)):
xmass = mass(part)
xwidth = width(part)
if ((xwidth / xmass) <= thres_off):
continue
try:
mthres = mth_all[part.name]
except:
mthres = (xmass - (2.0 * xwidth))
mth_all[part.name] = mthres
mmin = max(mthres, (xmass - (2.0 * xwidth)))
mmax = (xmass + (2.0 * xwidth))
norm[ip] = integrate.quad(BW, mmin, mmax, args=(xmass, xwidth))[0]
return dict(zip((HRG_mesons + HRG_baryons), norm)) | def norm_BW():
'\n \n '
norm = np.zeros(len((HRG_mesons + HRG_baryons)))
for (ip, part) in enumerate((HRG_mesons + HRG_baryons)):
xmass = mass(part)
xwidth = width(part)
if ((xwidth / xmass) <= thres_off):
continue
try:
mthres = mth_all[part.name]
except:
mthres = (xmass - (2.0 * xwidth))
mth_all[part.name] = mthres
mmin = max(mthres, (xmass - (2.0 * xwidth)))
mmax = (xmass + (2.0 * xwidth))
norm[ip] = integrate.quad(BW, mmin, mmax, args=(xmass, xwidth))[0]
return dict(zip((HRG_mesons + HRG_baryons), norm))<|docstring|>Normalization factor for the spectral function of each particle<|endoftext|> |
46e34d0121ba5392a520472eb35d81d1a714eabc25e3a4cdef397eab533dd632 | def HRG(T, muB, muQ, muS, **kwargs):
'\n Calculation of the HRG EoS as a function of T,muB,muQ,muS\n kwargs:\n species = all, mesons, baryons -> which particles to include?\n offshell = True, False -> integration over mass for unstable particles?\n '
try:
offshell = kwargs['offshell']
except:
offshell = False
try:
eval_chi = kwargs['eval_chi']
except:
eval_chi = False
try:
gammaS = kwargs['gammaS']
except:
gammaS = 1
try:
species = kwargs['species']
except:
species = 'all'
if (isinstance(T, float) or isinstance(T, np.float64)):
p = 0.0
ndens = 0.0
nB = 0.0
nQ = 0.0
nS = 0.0
s = 0.0
e = 0.0
chi = np.zeros(len(list_chi))
flag_1part = False
if (species == 'all'):
list_part = (HRG_mesons + HRG_baryons)
elif (species == 'mesons'):
list_part = HRG_mesons
elif (species == 'baryons'):
list_part = HRG_baryons
else:
list_part = [to_particle(species)]
flag_1part = True
maxk = 100
for part in list_part:
resultp = 0.0
resultn = 0.0
results = 0.0
resultpder = np.zeros(4)
if flag_1part:
antip = 0
else:
antip = float(has_anti(part))
xmass = mass(part)
xwidth = width(part)
dg = d_spin(part)
xmu = (muk(part, muB, muQ, muS) + np.log((gammaS ** abs(Scharge(part)))))
fug = np.exp((xmu / T))
factB = ((- 1.0) ** Bcharge(part))
if (((xwidth / xmass) <= thres_off) or (not offshell)):
factp = (((dg / (2.0 * (pi ** 2.0))) * (xmass ** 2.0)) * (T ** 2.0))
facts = ((dg / (2.0 * (pi ** 2.0))) * (xmass ** 2.0))
for k in range(1, (maxk + 1)):
kn2 = kn(2, ((k * xmass) / T))
resultpk0 = (((factp * (factB ** (k + 1.0))) / (k ** 2.0)) * kn2)
resultpk = (resultpk0 * ((fug ** k) + (antip * (fug ** (- k)))))
if ((not eval_chi) and (abs((resultpk / (resultp + resultpk))) <= 0.005)):
break
elif (eval_chi and (abs(((resultpk * (k ** 4.0)) / ((resultpk * (k ** 4.0)) + resultpder[1]))) <= 0.005) and (abs(((resultpk * (k ** 6.0)) / ((resultpk * (k ** 6.0)) + resultpder[2]))) <= 0.005) and (abs(((resultpk * (k ** 8.0)) / ((resultpk * (k ** 8.0)) + resultpder[3]))) <= 0.005)):
break
resultp += resultpk
resultn += (((resultpk0 * k) / T) * ((fug ** k) - (antip * (fug ** (- k)))))
kn1 = kn(1, ((k * xmass) / T))
results += (((facts * (factB ** (k + 1.0))) / (k ** 2.0)) * (((fug ** k) * (((k * xmass) * kn1) + (((4.0 * T) - (k * xmu)) * kn2))) + ((antip * (fug ** (- k))) * (((k * xmass) * kn1) + (((4.0 * T) + (k * xmu)) * kn2)))))
if eval_chi:
resultpder += (resultpk * np.array([(k ** 2.0), (k ** 4.0), (k ** 6.0), (k ** 8.0)]))
else:
mthres = mth_all[part.name]
mmin = max(mthres, (xmass - (2.0 * xwidth)))
mmax = (xmass + (2.0 * xwidth))
xnorm = norm[part]
def fp(m, k):
return ((BW(m, xmass, xwidth) * (m ** 2.0)) * kn(2, ((k * m) / T)))
def fs(m, k):
return ((BW(m, xmass, xwidth) * (m ** 2.0)) * (((fug ** k) * (((k * m) * kn(1, ((k * m) / T))) + (((4.0 * T) - (k * xmu)) * kn(2, ((k * m) / T))))) + ((antip * (fug ** (- k))) * (((k * m) * kn(1, ((k * m) / T))) + (((4.0 * T) + (k * xmu)) * kn(2, ((k * m) / T)))))))
factp = (((dg / (2.0 * (pi ** 2.0))) * (T ** 2.0)) / xnorm)
facts = ((dg / (2.0 * (pi ** 2.0))) / xnorm)
for k in range(1, (maxk + 1)):
resultpk0 = (((integrate.quad(fp, mmin, mmax, epsrel=0.01, args=k)[0] * factp) * (factB ** (k + 1.0))) / (k ** 2.0))
resultpk = (resultpk0 * ((fug ** k) + (antip * (fug ** (- k)))))
if ((not eval_chi) and (abs((resultpk / (resultp + resultpk))) <= 0.005)):
break
elif (eval_chi and (abs(((resultpk * (k ** 4.0)) / ((resultpk * (k ** 4.0)) + resultpder[1]))) <= 0.005) and (abs(((resultpk * (k ** 6.0)) / ((resultpk * (k ** 6.0)) + resultpder[2]))) <= 0.005) and (abs(((resultpk * (k ** 8.0)) / ((resultpk * (k ** 8.0)) + resultpder[3]))) <= 0.005)):
break
resultp += resultpk
resultn += (((resultpk0 * k) / T) * ((fug ** k) - (antip * (fug ** (- k)))))
results += (((facts * (factB ** (k + 1.0))) / (k ** 2.0)) * integrate.quad(fs, mmin, mmax, epsrel=0.01, args=k)[0])
if eval_chi:
resultpder += (resultpk * np.array([(k ** 2.0), (k ** 4.0), (k ** 6.0), (k ** 8.0)]))
p += (resultp / (T ** 4.0))
ndens += (resultn / (T ** 3.0))
nB += ((Bcharge(part) * resultn) / (T ** 3.0))
nQ += ((Qcharge(part) * resultn) / (T ** 3.0))
nS += ((Scharge(part) * resultn) / (T ** 3.0))
s += (results / (T ** 3.0))
if eval_chi:
for (ichi, xchi) in enumerate(list_chi):
if (ichi == 0):
chi[ichi] += (resultp / (T ** 4.0))
continue
ii = BQS[xchi]['B']
jj = BQS[xchi]['Q']
kk = BQS[xchi]['S']
factBQS = (((Bcharge(part) ** ii) * (Qcharge(part) ** jj)) * (Scharge(part) ** kk))
chi[ichi] += ((factBQS * resultpder[int(((((ii + jj) + kk) / 2) - 1))]) / (T ** 4.0))
e = ((((s - p) + ((muB / T) * nB)) + ((muQ / T) * nQ)) + ((muS / T) * nS))
elif (isinstance(T, np.ndarray) or isinstance(T, list)):
p = np.zeros_like(T)
s = np.zeros_like(T)
ndens = np.zeros_like(T)
nB = np.zeros_like(T)
nQ = np.zeros_like(T)
nS = np.zeros_like(T)
e = np.zeros_like(T)
chi = np.zeros((len(list_chi), len(T)))
for (i, xT) in enumerate(T):
try:
xmuB = muB[i]
except:
xmuB = muB
try:
xmuQ = muQ[i]
except:
xmuQ = muQ
try:
xmuS = muS[i]
except:
xmuS = muS
result = HRG(xT, xmuB, xmuQ, xmuS, **kwargs)
p[i] = result['P']
s[i] = result['s']
ndens[i] = result['n']
nB[i] = result['n_B']
nQ[i] = result['n_Q']
nS[i] = result['n_S']
e[i] = result['e']
chi[(:, i)] = result['chi']
else:
raise Exception('Problem with input')
return {'T': T, 'P': p, 's': s, 'n': ndens, 'n_B': nB, 'n_Q': nQ, 'n_S': nS, 'e': e, 'chi': chi, 'I': (e - (3 * p))} | Calculation of the HRG EoS as a function of T,muB,muQ,muS
kwargs:
species = all, mesons, baryons -> which particles to include?
offshell = True, False -> integration over mass for unstable particles? | EoS_HRG/HRG.py | HRG | pierre-moreau/EoS_HRG | 0 | python | def HRG(T, muB, muQ, muS, **kwargs):
'\n Calculation of the HRG EoS as a function of T,muB,muQ,muS\n kwargs:\n species = all, mesons, baryons -> which particles to include?\n offshell = True, False -> integration over mass for unstable particles?\n '
try:
offshell = kwargs['offshell']
except:
offshell = False
try:
eval_chi = kwargs['eval_chi']
except:
eval_chi = False
try:
gammaS = kwargs['gammaS']
except:
gammaS = 1
try:
species = kwargs['species']
except:
species = 'all'
if (isinstance(T, float) or isinstance(T, np.float64)):
p = 0.0
ndens = 0.0
nB = 0.0
nQ = 0.0
nS = 0.0
s = 0.0
e = 0.0
chi = np.zeros(len(list_chi))
flag_1part = False
if (species == 'all'):
list_part = (HRG_mesons + HRG_baryons)
elif (species == 'mesons'):
list_part = HRG_mesons
elif (species == 'baryons'):
list_part = HRG_baryons
else:
list_part = [to_particle(species)]
flag_1part = True
maxk = 100
for part in list_part:
resultp = 0.0
resultn = 0.0
results = 0.0
resultpder = np.zeros(4)
if flag_1part:
antip = 0
else:
antip = float(has_anti(part))
xmass = mass(part)
xwidth = width(part)
dg = d_spin(part)
xmu = (muk(part, muB, muQ, muS) + np.log((gammaS ** abs(Scharge(part)))))
fug = np.exp((xmu / T))
factB = ((- 1.0) ** Bcharge(part))
if (((xwidth / xmass) <= thres_off) or (not offshell)):
factp = (((dg / (2.0 * (pi ** 2.0))) * (xmass ** 2.0)) * (T ** 2.0))
facts = ((dg / (2.0 * (pi ** 2.0))) * (xmass ** 2.0))
for k in range(1, (maxk + 1)):
kn2 = kn(2, ((k * xmass) / T))
resultpk0 = (((factp * (factB ** (k + 1.0))) / (k ** 2.0)) * kn2)
resultpk = (resultpk0 * ((fug ** k) + (antip * (fug ** (- k)))))
if ((not eval_chi) and (abs((resultpk / (resultp + resultpk))) <= 0.005)):
break
elif (eval_chi and (abs(((resultpk * (k ** 4.0)) / ((resultpk * (k ** 4.0)) + resultpder[1]))) <= 0.005) and (abs(((resultpk * (k ** 6.0)) / ((resultpk * (k ** 6.0)) + resultpder[2]))) <= 0.005) and (abs(((resultpk * (k ** 8.0)) / ((resultpk * (k ** 8.0)) + resultpder[3]))) <= 0.005)):
break
resultp += resultpk
resultn += (((resultpk0 * k) / T) * ((fug ** k) - (antip * (fug ** (- k)))))
kn1 = kn(1, ((k * xmass) / T))
results += (((facts * (factB ** (k + 1.0))) / (k ** 2.0)) * (((fug ** k) * (((k * xmass) * kn1) + (((4.0 * T) - (k * xmu)) * kn2))) + ((antip * (fug ** (- k))) * (((k * xmass) * kn1) + (((4.0 * T) + (k * xmu)) * kn2)))))
if eval_chi:
resultpder += (resultpk * np.array([(k ** 2.0), (k ** 4.0), (k ** 6.0), (k ** 8.0)]))
else:
mthres = mth_all[part.name]
mmin = max(mthres, (xmass - (2.0 * xwidth)))
mmax = (xmass + (2.0 * xwidth))
xnorm = norm[part]
def fp(m, k):
return ((BW(m, xmass, xwidth) * (m ** 2.0)) * kn(2, ((k * m) / T)))
def fs(m, k):
return ((BW(m, xmass, xwidth) * (m ** 2.0)) * (((fug ** k) * (((k * m) * kn(1, ((k * m) / T))) + (((4.0 * T) - (k * xmu)) * kn(2, ((k * m) / T))))) + ((antip * (fug ** (- k))) * (((k * m) * kn(1, ((k * m) / T))) + (((4.0 * T) + (k * xmu)) * kn(2, ((k * m) / T)))))))
factp = (((dg / (2.0 * (pi ** 2.0))) * (T ** 2.0)) / xnorm)
facts = ((dg / (2.0 * (pi ** 2.0))) / xnorm)
for k in range(1, (maxk + 1)):
resultpk0 = (((integrate.quad(fp, mmin, mmax, epsrel=0.01, args=k)[0] * factp) * (factB ** (k + 1.0))) / (k ** 2.0))
resultpk = (resultpk0 * ((fug ** k) + (antip * (fug ** (- k)))))
if ((not eval_chi) and (abs((resultpk / (resultp + resultpk))) <= 0.005)):
break
elif (eval_chi and (abs(((resultpk * (k ** 4.0)) / ((resultpk * (k ** 4.0)) + resultpder[1]))) <= 0.005) and (abs(((resultpk * (k ** 6.0)) / ((resultpk * (k ** 6.0)) + resultpder[2]))) <= 0.005) and (abs(((resultpk * (k ** 8.0)) / ((resultpk * (k ** 8.0)) + resultpder[3]))) <= 0.005)):
break
resultp += resultpk
resultn += (((resultpk0 * k) / T) * ((fug ** k) - (antip * (fug ** (- k)))))
results += (((facts * (factB ** (k + 1.0))) / (k ** 2.0)) * integrate.quad(fs, mmin, mmax, epsrel=0.01, args=k)[0])
if eval_chi:
resultpder += (resultpk * np.array([(k ** 2.0), (k ** 4.0), (k ** 6.0), (k ** 8.0)]))
p += (resultp / (T ** 4.0))
ndens += (resultn / (T ** 3.0))
nB += ((Bcharge(part) * resultn) / (T ** 3.0))
nQ += ((Qcharge(part) * resultn) / (T ** 3.0))
nS += ((Scharge(part) * resultn) / (T ** 3.0))
s += (results / (T ** 3.0))
if eval_chi:
for (ichi, xchi) in enumerate(list_chi):
if (ichi == 0):
chi[ichi] += (resultp / (T ** 4.0))
continue
ii = BQS[xchi]['B']
jj = BQS[xchi]['Q']
kk = BQS[xchi]['S']
factBQS = (((Bcharge(part) ** ii) * (Qcharge(part) ** jj)) * (Scharge(part) ** kk))
chi[ichi] += ((factBQS * resultpder[int(((((ii + jj) + kk) / 2) - 1))]) / (T ** 4.0))
e = ((((s - p) + ((muB / T) * nB)) + ((muQ / T) * nQ)) + ((muS / T) * nS))
elif (isinstance(T, np.ndarray) or isinstance(T, list)):
p = np.zeros_like(T)
s = np.zeros_like(T)
ndens = np.zeros_like(T)
nB = np.zeros_like(T)
nQ = np.zeros_like(T)
nS = np.zeros_like(T)
e = np.zeros_like(T)
chi = np.zeros((len(list_chi), len(T)))
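# array input: evaluate the scalar branch point by point, falling back to a
# common scalar value whenever muB, muQ or muS is not indexable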
for (i, xT) in enumerate(T):
try:
xmuB = muB[i]
except (TypeError, IndexError):
xmuB = muB
try:
xmuQ = muQ[i]
except (TypeError, IndexError):
xmuQ = muQ
try:
xmuS = muS[i]
except (TypeError, IndexError):
xmuS = muS
result = HRG(xT, xmuB, xmuQ, xmuS, **kwargs)
p[i] = result['P']
s[i] = result['s']
ndens[i] = result['n']
nB[i] = result['n_B']
nQ[i] = result['n_Q']
nS[i] = result['n_S']
e[i] = result['e']
chi[:, i] = result['chi']
else:
raise TypeError('Problem with input: T must be a float or a 1D array of floats')
return {'T': T, 'P': p, 's': s, 'n': ndens, 'n_B': nB, 'n_Q': nQ, 'n_S': nS, 'e': e, 'chi': chi, 'I': (e - (3 * p))} | def HRG(T, muB, muQ, muS, **kwargs):
'\n Calculation of the HRG EoS as a function of T,muB,muQ,muS\n kwargs:\n species = all, mesons, baryons -> which particles to include?\n offshell = True, False -> integration over mass for unstable particles?\n '
offshell = kwargs.get('offshell', False)
eval_chi = kwargs.get('eval_chi', False)
gammaS = kwargs.get('gammaS', 1)
species = kwargs.get('species', 'all')
if (isinstance(T, float) or isinstance(T, np.float64)):
p = 0.0
ndens = 0.0
nB = 0.0
nQ = 0.0
nS = 0.0
s = 0.0
e = 0.0
chi = np.zeros(len(list_chi))
flag_1part = False
if (species == 'all'):
list_part = (HRG_mesons + HRG_baryons)
elif (species == 'mesons'):
list_part = HRG_mesons
elif (species == 'baryons'):
list_part = HRG_baryons
else:
list_part = [to_particle(species)]
flag_1part = True
maxk = 100
for part in list_part:
resultp = 0.0
resultn = 0.0
results = 0.0
resultpder = np.zeros(4)
if flag_1part:
antip = 0
else:
antip = float(has_anti(part))
xmass = mass(part)
xwidth = width(part)
dg = d_spin(part)
xmu = (muk(part, muB, muQ, muS) + np.log((gammaS ** abs(Scharge(part)))))
fug = np.exp((xmu / T))
factB = ((- 1.0) ** Bcharge(part))
if (((xwidth / xmass) <= thres_off) or (not offshell)):
factp = (((dg / (2.0 * (pi ** 2.0))) * (xmass ** 2.0)) * (T ** 2.0))
facts = ((dg / (2.0 * (pi ** 2.0))) * (xmass ** 2.0))
for k in range(1, (maxk + 1)):
kn2 = kn(2, ((k * xmass) / T))
resultpk0 = (((factp * (factB ** (k + 1.0))) / (k ** 2.0)) * kn2)
resultpk = (resultpk0 * ((fug ** k) + (antip * (fug ** (- k)))))
if ((not eval_chi) and (abs((resultpk / (resultp + resultpk))) <= 0.005)):
break
elif (eval_chi and (abs(((resultpk * (k ** 4.0)) / ((resultpk * (k ** 4.0)) + resultpder[1]))) <= 0.005) and (abs(((resultpk * (k ** 6.0)) / ((resultpk * (k ** 6.0)) + resultpder[2]))) <= 0.005) and (abs(((resultpk * (k ** 8.0)) / ((resultpk * (k ** 8.0)) + resultpder[3]))) <= 0.005)):
break
resultp += resultpk
resultn += (((resultpk0 * k) / T) * ((fug ** k) - (antip * (fug ** (- k)))))
kn1 = kn(1, ((k * xmass) / T))
results += (((facts * (factB ** (k + 1.0))) / (k ** 2.0)) * (((fug ** k) * (((k * xmass) * kn1) + (((4.0 * T) - (k * xmu)) * kn2))) + ((antip * (fug ** (- k))) * (((k * xmass) * kn1) + (((4.0 * T) + (k * xmu)) * kn2)))))
if eval_chi:
resultpder += (resultpk * np.array([(k ** 2.0), (k ** 4.0), (k ** 6.0), (k ** 8.0)]))
else:
mthres = mth_all[part.name]
mmin = max(mthres, (xmass - (2.0 * xwidth)))
mmax = (xmass + (2.0 * xwidth))
xnorm = norm[part]
def fp(m, k):
return ((BW(m, xmass, xwidth) * (m ** 2.0)) * kn(2, ((k * m) / T)))
def fs(m, k):
return ((BW(m, xmass, xwidth) * (m ** 2.0)) * (((fug ** k) * (((k * m) * kn(1, ((k * m) / T))) + (((4.0 * T) - (k * xmu)) * kn(2, ((k * m) / T))))) + ((antip * (fug ** (- k))) * (((k * m) * kn(1, ((k * m) / T))) + (((4.0 * T) + (k * xmu)) * kn(2, ((k * m) / T)))))))
factp = (((dg / (2.0 * (pi ** 2.0))) * (T ** 2.0)) / xnorm)
facts = ((dg / (2.0 * (pi ** 2.0))) / xnorm)
for k in range(1, (maxk + 1)):
resultpk0 = (((integrate.quad(fp, mmin, mmax, epsrel=0.01, args=k)[0] * factp) * (factB ** (k + 1.0))) / (k ** 2.0))
resultpk = (resultpk0 * ((fug ** k) + (antip * (fug ** (- k)))))
if ((not eval_chi) and (abs((resultpk / (resultp + resultpk))) <= 0.005)):
break
elif (eval_chi and (abs(((resultpk * (k ** 4.0)) / ((resultpk * (k ** 4.0)) + resultpder[1]))) <= 0.005) and (abs(((resultpk * (k ** 6.0)) / ((resultpk * (k ** 6.0)) + resultpder[2]))) <= 0.005) and (abs(((resultpk * (k ** 8.0)) / ((resultpk * (k ** 8.0)) + resultpder[3]))) <= 0.005)):
break
resultp += resultpk
resultn += (((resultpk0 * k) / T) * ((fug ** k) - (antip * (fug ** (- k)))))
results += (((facts * (factB ** (k + 1.0))) / (k ** 2.0)) * integrate.quad(fs, mmin, mmax, epsrel=0.01, args=k)[0])
if eval_chi:
resultpder += (resultpk * np.array([(k ** 2.0), (k ** 4.0), (k ** 6.0), (k ** 8.0)]))
p += (resultp / (T ** 4.0))
ndens += (resultn / (T ** 3.0))
nB += ((Bcharge(part) * resultn) / (T ** 3.0))
nQ += ((Qcharge(part) * resultn) / (T ** 3.0))
nS += ((Scharge(part) * resultn) / (T ** 3.0))
s += (results / (T ** 3.0))
if eval_chi:
for (ichi, xchi) in enumerate(list_chi):
if (ichi == 0):
chi[ichi] += (resultp / (T ** 4.0))
continue
ii = BQS[xchi]['B']
jj = BQS[xchi]['Q']
kk = BQS[xchi]['S']
factBQS = (((Bcharge(part) ** ii) * (Qcharge(part) ** jj)) * (Scharge(part) ** kk))
chi[ichi] += ((factBQS * resultpder[int(((((ii + jj) + kk) / 2) - 1))]) / (T ** 4.0))
e = ((((s - p) + ((muB / T) * nB)) + ((muQ / T) * nQ)) + ((muS / T) * nS))
elif (isinstance(T, np.ndarray) or isinstance(T, list)):
p = np.zeros_like(T)
s = np.zeros_like(T)
ndens = np.zeros_like(T)
nB = np.zeros_like(T)
nQ = np.zeros_like(T)
nS = np.zeros_like(T)
e = np.zeros_like(T)
chi = np.zeros((len(list_chi), len(T)))
for (i, xT) in enumerate(T):
try:
xmuB = muB[i]
except (TypeError, IndexError):
xmuB = muB
try:
xmuQ = muQ[i]
except (TypeError, IndexError):
xmuQ = muQ
try:
xmuS = muS[i]
except (TypeError, IndexError):
xmuS = muS
result = HRG(xT, xmuB, xmuQ, xmuS, **kwargs)
p[i] = result['P']
s[i] = result['s']
ndens[i] = result['n']
nB[i] = result['n_B']
nQ[i] = result['n_Q']
nS[i] = result['n_S']
e[i] = result['e']
chi[:, i] = result['chi']
else:
raise TypeError('Problem with input: T must be a float or a 1D array of floats')
return {'T': T, 'P': p, 's': s, 'n': ndens, 'n_B': nB, 'n_Q': nQ, 'n_S': nS, 'e': e, 'chi': chi, 'I': (e - (3 * p))}<|docstring|>Calculation of the HRG EoS as a function of T,muB,muQ,muS
kwargs:
species = all, mesons, baryons -> which particles to include?
offshell = True, False -> integration over mass for unstable particles?<|endoftext|> |
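# A minimal usage sketch for the HRG function above, assuming the module layout
# from the 'path' field (EoS_HRG/HRG.py in pierre-moreau/EoS_HRG) so that HRG
# imports as below; adjust the import if the module is vendored differently.
import numpy as np
from EoS_HRG.HRG import HRG  # assumed import path

# single temperature point (GeV), small baryon chemical potential
out = HRG(0.150, 0.050, 0.0, 0.0, species='all', offshell=False)
print(out['P'], out['s'], out['n_B'])   # P/T^4, s/T^3, n_B/T^3

# temperature scan: the array branch simply loops the scalar evaluation
T = np.linspace(0.100, 0.180, 9)
scan = HRG(T, 0.050, 0.0, 0.0)
print(scan['I'])                        # trace anomaly (e - 3P)/T^4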
db1cfb66a2dd2478498e6707f8aeee190bd512fb70ad6517fd77d0100fd591cf | def HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs):
'\n Calculate all particle number densities from HRG\n Includes decays as well.\n '
list_particles = (((HRG_mesons + HRG_baryons) + to_antiparticle(HRG_mesons)) + to_antiparticle(HRG_baryons))
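# sort parents by decreasing mass so each resonance is decayed before the
# lighter particles it feeds in the cascade below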
list_particles.sort(reverse=True, key=(lambda part: mass(part)))
offshell = kwargs.get('offshell', False)
no_feeddown = kwargs.get('no_feeddown', ['pi+', 'pi-', 'Lambda', 'Lambda~'])
if (no_feeddown == 'all'):
no_feeddown = [part.name for part in list_particles]
freezeout_decay = kwargs.get('freezeout_decay', True)
stables = []
if (freezeout_decay == 'PHSD'):
stables = ['eta']
freezeout_decay = True
no_feeddown = [part.name for part in list_particles]
init_dens = {}
feeddown_dens = {}
final_dens = {}
if (EoS == 'nS0'):
init_EoS = EoS_nS0(HRG, T, muB, gammaS=gammaS, offshell=offshell)
muQ = init_EoS['muQ']
muS = init_EoS['muS']
for part in list_particles:
part_dens = HRG(T, muB, muQ, muS, gammaS=gammaS, offshell=offshell, species=part.name)['n']
init_dens[part.name] = part_dens
feeddown_dens[part.name] = 0.0
final_dens[part.name] = part_dens
weak_decays = ['K0', 'Lambda', 'Sigma+', 'Sigma-', 'Xi-', 'Xi0', 'Omega-']
weak_decays += [to_antiparticle(to_particle(part)).name for part in weak_decays]
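# decay cascade, heaviest parent first; K0/K~0 are treated as 50% K(S)0
# (fact_br = 0.5), and contributions from weakly decaying parents are
# accumulated in feeddown_dens so they can be subtracted for the species
# listed in no_feeddown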
if freezeout_decay:
for parent in list_particles:
list_decays = part_decay(parent)
if (parent.name in stables):
continue
if ((parent.name == 'K0') or (parent.name == 'K~0')):
list_decays = part_decay(to_particle('K(S)0'))
fact_br = 0.5
else:
fact_br = 1.0
if (list_decays is not None):
for decay in list_decays:
br = (fact_br * decay[0])
children = decay[1]
for child in children:
try:
final_dens[child.name] += (br * final_dens[parent.name])
if (parent.name in weak_decays):
feeddown_dens[child.name] += (br * final_dens[parent.name])
except KeyError:
pass
for part in no_feeddown:
final_dens[part] -= feeddown_dens[part]
return final_dens | Calculate all particle number densities from HRG
Includes decays as well. | EoS_HRG/HRG.py | HRG_freezout | pierre-moreau/EoS_HRG | 0 | python | def HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs):
'\n Calculate all particle number densities from HRG\n Includes decays as well.\n '
list_particles = (((HRG_mesons + HRG_baryons) + to_antiparticle(HRG_mesons)) + to_antiparticle(HRG_baryons))
list_particles.sort(reverse=True, key=(lambda part: mass(part)))
offshell = kwargs.get('offshell', False)
no_feeddown = kwargs.get('no_feeddown', ['pi+', 'pi-', 'Lambda', 'Lambda~'])
if (no_feeddown == 'all'):
no_feeddown = [part.name for part in list_particles]
freezeout_decay = kwargs.get('freezeout_decay', True)
stables = []
if (freezeout_decay == 'PHSD'):
stables = ['eta']
freezeout_decay = True
no_feeddown = [part.name for part in list_particles]
init_dens = {}
feeddown_dens = {}
final_dens = {}
if (EoS == 'nS0'):
init_EoS = EoS_nS0(HRG, T, muB, gammaS=gammaS, offshell=offshell)
muQ = init_EoS['muQ']
muS = init_EoS['muS']
for part in list_particles:
part_dens = HRG(T, muB, muQ, muS, gammaS=gammaS, offshell=offshell, species=part.name)['n']
init_dens[part.name] = part_dens
feeddown_dens[part.name] = 0.0
final_dens[part.name] = part_dens
weak_decays = ['K0', 'Lambda', 'Sigma+', 'Sigma-', 'Xi-', 'Xi0', 'Omega-']
weak_decays += [to_antiparticle(to_particle(part)).name for part in weak_decays]
if freezeout_decay:
for parent in list_particles:
list_decays = part_decay(parent)
if (parent.name in stables):
continue
if ((parent.name == 'K0') or (parent.name == 'K~0')):
list_decays = part_decay(to_particle('K(S)0'))
fact_br = 0.5
else:
fact_br = 1.0
if (list_decays is not None):
for decay in list_decays:
br = (fact_br * decay[0])
children = decay[1]
for child in children:
try:
final_dens[child.name] += (br * final_dens[parent.name])
if (parent.name in weak_decays):
feeddown_dens[child.name] += (br * final_dens[parent.name])
except KeyError:
pass
for part in no_feeddown:
final_dens[part] -= feeddown_dens[part]
return final_dens | def HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs):
'\n Calculate all particle number densities from HRG\n Includes decays as well.\n '
list_particles = (((HRG_mesons + HRG_baryons) + to_antiparticle(HRG_mesons)) + to_antiparticle(HRG_baryons))
list_particles.sort(reverse=True, key=(lambda part: mass(part)))
offshell = kwargs.get('offshell', False)
no_feeddown = kwargs.get('no_feeddown', ['pi+', 'pi-', 'Lambda', 'Lambda~'])
if (no_feeddown == 'all'):
no_feeddown = [part.name for part in list_particles]
freezeout_decay = kwargs.get('freezeout_decay', True)
stables = []
if (freezeout_decay == 'PHSD'):
stables = ['eta']
freezeout_decay = True
no_feeddown = [part.name for part in list_particles]
init_dens = {}
feeddown_dens = {}
final_dens = {}
if (EoS == 'nS0'):
init_EoS = EoS_nS0(HRG, T, muB, gammaS=gammaS, offshell=offshell)
muQ = init_EoS['muQ']
muS = init_EoS['muS']
for part in list_particles:
part_dens = HRG(T, muB, muQ, muS, gammaS=gammaS, offshell=offshell, species=part.name)['n']
init_dens[part.name] = part_dens
feeddown_dens[part.name] = 0.0
final_dens[part.name] = part_dens
weak_decays = ['K0', 'Lambda', 'Sigma+', 'Sigma-', 'Xi-', 'Xi0', 'Omega-']
weak_decays += [to_antiparticle(to_particle(part)).name for part in weak_decays]
if freezeout_decay:
for parent in list_particles:
list_decays = part_decay(parent)
if (parent.name in stables):
continue
if ((parent.name == 'K0') or (parent.name == 'K~0')):
list_decays = part_decay(to_particle('K(S)0'))
fact_br = 0.5
else:
fact_br = 1.0
if (list_decays is not None):
for decay in list_decays:
br = (fact_br * decay[0])
children = decay[1]
for child in children:
try:
final_dens[child.name] += (br * final_dens[parent.name])
if (parent.name in weak_decays):
feeddown_dens[child.name] += (br * final_dens[parent.name])
except KeyError:
pass
for part in no_feeddown:
final_dens[part] -= feeddown_dens[part]
return final_dens<|docstring|>Calculate all particle number densities from HRG
Includes decays as well.<|endoftext|> |
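# A minimal usage sketch for HRG_freezout above, assuming the same module
# layout as the 'path' field (EoS_HRG/HRG.py); the chosen T and mu values are
# illustrative RHIC-like freeze-out parameters, not fitted numbers.
from EoS_HRG.HRG import HRG_freezout  # assumed import path

dens = HRG_freezout(0.155, 0.025, 0.0, 0.005, 1.0, EoS='full',
                    freezeout_decay=True, no_feeddown=['pi+', 'pi-'])
# densities are returned in units of T^3, keyed by particle name
print(dens['pi+'], dens['p'], dens['K+'] / dens['pi+'])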
459fa0e26f826170fa677ee484d466902d2aafd8e7af84415e75eaefe533a95e | def fit_freezeout(dict_yield, **kwargs):
'\n Extract freeze out parameters by fitting final heavy ion data (dN/dy)\n given in dict_yield. Construct ratios of different particles.\n '
chi2_plot = kwargs.get('chi2_plot', False)
freezeout_decay = kwargs.get('freezeout_decay', True)
method = kwargs.get('method', 'all')
offshell = kwargs.get('offshell', False)
EoS = kwargs.get('EoS', 'all')
list_part1 = ['pi-', 'K-', 'p~', 'Lambda~', 'Xi~+', 'K-', 'p~', 'Lambda', 'Xi~+']
list_part2 = ['pi+', 'K+', 'p', 'Lambda', 'Xi-', 'pi-', 'pi-', 'pi-', 'pi-']
list_part = ['pi+', 'pi-', 'K+', 'K-', 'p', 'p~', 'Lambda', 'Lambda~', 'Xi-', 'Xi~+']
data_yields = []
err_yields = []
final_part = []
for part in list_part:
try:
if ((dict_yield[part] is not None) and (dict_yield[part] > 0.0)):
data_yields.append(dict_yield[part])
err_yields.append(dict_yield[(part + '_err')])
final_part.append(part)
except KeyError:
pass
data_ratios = []
err_ratios = []
final_part1 = []
final_part2 = []
for (part1, part2) in zip(list_part1, list_part2):
try:
if ((dict_yield[part1] is not None) and (dict_yield[part1] > 0.0) and (dict_yield[part2] is not None) and (dict_yield[part2] > 0.0)):
ratio = (dict_yield[part1] / dict_yield[part2])
data_ratios.append(ratio)
err_ratios.append((abs(ratio) * np.sqrt((((dict_yield[(part1 + '_err')] / dict_yield[part1]) ** 2.0) + ((dict_yield[(part2 + '_err')] / dict_yield[part2]) ** 2.0)))))
final_part1.append(part1)
final_part2.append(part2)
except KeyError:
pass
def f_yields(x, T, muB, muQ, muS, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result
def f_yields_nS0(x, T, muB, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result
def f_ratios(x, T, muB, muQ, muS, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result
def f_ratios_nS0(x, T, muB, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result
fix_T = False
fix_muB = False
fix_muQ = False
fix_muS = False
fix_gammaS = False
fix_dVdy = False
guess = (0.15, 0.05, 0.0, 0.05, 1.0, 2000.0)
bounds = ((0.1, 0.2), (0, 0.6), ((- 0.2), 0.2), (0, 0.2), (0.0, 1.2), (100.0, 10000.0))
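# initial guesses and limits for the Minuit fit, ordered as
# (T [GeV], muB [GeV], muQ [GeV], muS [GeV], gammaS, dV/dy [fm^3])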
if (((EoS == 'all') or (EoS == 'full')) and ((method == 'all') or (method == 'yields'))):
xyields = np.arange(len(final_part))
least_squares = LeastSquares(xyields, data_yields, err_yields, f_yields)
m = Minuit(least_squares, T=guess[0], muB=guess[1], muQ=guess[2], muS=guess[3], gammaS=guess[4], dVdy=guess[5], limit_T=bounds[0], limit_muB=bounds[1], limit_muQ=bounds[2], limit_muS=bounds[3], limit_gammaS=bounds[4], limit_dVdy=bounds[5], fix_T=fix_T, fix_muB=fix_muB, fix_muQ=fix_muQ, fix_muS=fix_muS, fix_gammaS=fix_gammaS, fix_dVdy=fix_dVdy)
m.migrad()
m.hesse()
popt1 = m.values.values()
perr1 = m.errors.values()
print('\nfit from yields, full EoS:')
fit_string1 = f'''$T_{{ch}}={popt1[0]:.4f} \pm {perr1[0]:.4f}\ GeV$
$\mu_{{B}}={popt1[1]:.4f} \pm {perr1[1]:.4f}\ GeV$
$\mu_{{Q}}={popt1[2]:.4f} \pm {perr1[2]:.4f}\ GeV$
$\mu_{{S}}={popt1[3]:.4f} \pm {perr1[3]:.4f}\ GeV$
$\gamma_{{S}}={popt1[4]:.2f} \pm {perr1[4]:.2f}$
$dV/dy={popt1[5]:.1f} \pm {perr1[5]:.1f} \ fm^3$'''
print(fit_string1)
thermo = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
snB1 = (thermo['s'] / thermo['n_B'])
snB1_err = 0.0
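# propagate the fit errors to s/n_B: shift each fitted parameter by +/- err/2,
# take the resulting change of s/n_B as that parameter's contribution,
# and sum the contributions in quadrature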
thermoT1 = HRG((popt1[0] + (perr1[0] / 2.0)), popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
thermoT2 = HRG((popt1[0] - (perr1[0] / 2.0)), popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB1_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = HRG(popt1[0], (popt1[1] + (perr1[1] / 2.0)), popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
thermomuB2 = HRG(popt1[0], (popt1[1] - (perr1[1] / 2.0)), popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB1_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermomuQ1 = HRG(popt1[0], popt1[1], (popt1[2] + (perr1[2] / 2.0)), popt1[3], gammaS=popt1[4], offshell=offshell)
thermomuQ2 = HRG(popt1[0], popt1[1], (popt1[2] - (perr1[2] / 2.0)), popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermomuQ1['n_B'] != 0.0) and (thermomuQ2['n_B'] != 0.0)):
snB1_err += (((thermomuQ1['s'] / thermomuQ1['n_B']) - (thermomuQ2['s'] / thermomuQ2['n_B'])) ** 2.0)
thermomuS1 = HRG(popt1[0], popt1[1], popt1[2], (popt1[3] + (perr1[3] / 2.0)), gammaS=popt1[4], offshell=offshell)
thermomuS2 = HRG(popt1[0], popt1[1], popt1[2], (popt1[3] - (perr1[3] / 2.0)), gammaS=popt1[4], offshell=offshell)
if ((thermomuS1['n_B'] != 0.0) and (thermomuS2['n_B'] != 0.0)):
snB1_err += (((thermomuS1['s'] / thermomuS1['n_B']) - (thermomuS2['s'] / thermomuS2['n_B'])) ** 2.0)
thermogammaS1 = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=(popt1[4] + (perr1[4] / 2.0)), offshell=offshell)
thermogammaS2 = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=(popt1[4] - (perr1[4] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB1_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB1_err = np.sqrt(snB1_err)
print(f's/n_B = {snB1} \pm {snB1_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dmuQ, fmuQ) = m.profile('muQ')
(dmuS, fmuS) = m.profile('muS')
(dgammaS, fgammaS) = m.profile('gammaS')
(ddVdy, fdVdy) = m.profile('dVdy')
output_chi21 = [[dT, fT], [dmuB, fmuB], [dmuQ, fmuQ], [dmuS, fmuS], [dgammaS, fgammaS], [ddVdy, fdVdy]]
else:
output_chi21 = None
output_yields = {'fit_yields': np.array(list(zip(popt1, perr1))), 'fit_string_yields': fit_string1, 'result_yields': f_yields(xyields, *popt1), 'data_yields': np.array(list(zip(data_yields, err_yields))), 'particle_yields': list(latex(final_part)), 'chi2_yields': output_chi21, 'snB_yields': np.array([snB1, snB1_err])}
else:
output_yields = {}
if (((EoS == 'all') or (EoS == 'nS0')) and ((method == 'all') or (method == 'yields'))):
xyields = np.arange(len(final_part))
least_squares = LeastSquares(xyields, data_yields, err_yields, f_yields_nS0)
m = Minuit(least_squares, T=guess[0], muB=guess[1], gammaS=guess[4], dVdy=guess[5], limit_T=bounds[0], limit_muB=bounds[1], limit_gammaS=bounds[4], limit_dVdy=bounds[5], fix_T=fix_T, fix_muB=fix_muB, fix_gammaS=fix_gammaS, fix_dVdy=fix_dVdy)
m.migrad()
m.hesse()
popt1 = m.values.values()
perr1 = m.errors.values()
thermo = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=popt1[2], offshell=offshell)
print('\nfit from yields, nS0 EoS:')
fit_string1 = f'''$T_{{ch}}={popt1[0]:.4f} \pm {perr1[0]:.4f}\ GeV$
$\mu_{{B}}={popt1[1]:.4f} \pm {perr1[1]:.4f}\ GeV$
$\gamma_{{S}}={popt1[2]:.2f} \pm {perr1[2]:.2f}$
$dV/dy={popt1[3]:.1f} \pm {perr1[3]:.1f} \ fm^3$
$\mu_{{Q}}={thermo['muQ']:.4f}\ GeV$
$\mu_{{S}}={thermo['muS']:.4f}\ GeV$'''
print(fit_string1)
snB1 = (thermo['s'] / thermo['n_B'])
snB1_err = 0.0
thermoT1 = EoS_nS0(HRG, (popt1[0] + (perr1[0] / 2.0)), popt1[1], gammaS=popt1[2], offshell=offshell)
thermoT2 = EoS_nS0(HRG, (popt1[0] - (perr1[0] / 2.0)), popt1[1], gammaS=popt1[2], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB1_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = EoS_nS0(HRG, popt1[0], (popt1[1] + (perr1[1] / 2.0)), gammaS=popt1[2], offshell=offshell)
thermomuB2 = EoS_nS0(HRG, popt1[0], (popt1[1] - (perr1[1] / 2.0)), gammaS=popt1[2], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB1_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermogammaS1 = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=(popt1[2] + (perr1[2] / 2.0)), offshell=offshell)
thermogammaS2 = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=(popt1[2] - (perr1[2] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB1_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB1_err = np.sqrt(snB1_err)
print(f's/n_B = {snB1} \pm {snB1_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dgammaS, fgammaS) = m.profile('gammaS')
(ddVdy, fdVdy) = m.profile('dVdy')
output_chi21 = [[dT, fT], [dmuB, fmuB], [dgammaS, fgammaS], [ddVdy, fdVdy]]
else:
output_chi21 = None
result_yields_nS0 = f_yields_nS0(xyields, *popt1)
(Tch, muB, gammaS, dVdy) = popt1
(Tch_err, muB_err, gammaS_err, dVdy_err) = perr1
popt1 = np.array([Tch, muB, thermo['muQ'], thermo['muS'], gammaS, dVdy])
perr1 = np.array([Tch_err, muB_err, 0.0, 0.0, gammaS_err, dVdy_err])
output_yields_nS0 = {'fit_yields_nS0': np.array(list(zip(popt1, perr1))), 'fit_string_yields_nS0': fit_string1, 'result_yields_nS0': result_yields_nS0, 'data_yields': np.array(list(zip(data_yields, err_yields))), 'particle_yields': list(latex(final_part)), 'chi2_yields_nS0': output_chi21, 'snB_yields_nS0': np.array([snB1, snB1_err])}
else:
output_yields_nS0 = {}
if (((EoS == 'all') or (EoS == 'full')) and ((method == 'all') or (method == 'ratios'))):
xratios = np.arange(len(data_ratios))
least_squares = LeastSquares(xratios, data_ratios, err_ratios, f_ratios)
m = Minuit(least_squares, T=guess[0], muB=guess[1], muQ=guess[2], muS=guess[3], gammaS=guess[4], limit_T=bounds[0], limit_muB=bounds[1], limit_muQ=bounds[2], limit_muS=bounds[3], limit_gammaS=bounds[4], fix_T=fix_T, fix_muB=fix_muB, fix_muQ=fix_muQ, fix_muS=fix_muS, fix_gammaS=fix_gammaS)
m.migrad()
m.hesse()
popt2 = m.values.values()
perr2 = m.errors.values()
print('\nfit from ratios, full EoS:')
fit_string2 = f'''$T_{{ch}}={popt2[0]:.4f} \pm {perr2[0]:.4f}\ GeV$
$\mu_{{B}}={popt2[1]:.4f} \pm {perr2[1]:.4f}\ GeV$
$\mu_{{Q}}={popt2[2]:.4f} \pm {perr2[2]:.4f}\ GeV$
$\mu_{{S}}={popt2[3]:.4f} \pm {perr2[3]:.4f}\ GeV$
$\gamma_{{S}}={popt2[4]:.2f} \pm {perr2[4]:.2f}$'''
print(fit_string2)
thermo = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
snB2 = (thermo['s'] / thermo['n_B'])
snB2_err = 0.0
thermoT1 = HRG((popt2[0] + (perr2[0] / 2.0)), popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
thermoT2 = HRG((popt2[0] - (perr2[0] / 2.0)), popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB2_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = HRG(popt2[0], (popt2[1] + (perr2[1] / 2.0)), popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
thermomuB2 = HRG(popt2[0], (popt2[1] - (perr2[1] / 2.0)), popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB2_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermomuQ1 = HRG(popt2[0], popt2[1], (popt2[2] + (perr2[2] / 2.0)), popt2[3], gammaS=popt2[4], offshell=offshell)
thermomuQ2 = HRG(popt2[0], popt2[1], (popt2[2] - (perr2[2] / 2.0)), popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermomuQ1['n_B'] != 0.0) and (thermomuQ2['n_B'] != 0.0)):
snB2_err += (((thermomuQ1['s'] / thermomuQ1['n_B']) - (thermomuQ2['s'] / thermomuQ2['n_B'])) ** 2.0)
thermomuS1 = HRG(popt2[0], popt2[1], popt2[2], (popt2[3] + (perr2[3] / 2.0)), gammaS=popt2[4], offshell=offshell)
thermomuS2 = HRG(popt2[0], popt2[1], popt2[2], (popt2[3] - (perr2[3] / 2.0)), gammaS=popt2[4], offshell=offshell)
if ((thermomuS1['n_B'] != 0.0) and (thermomuS2['n_B'] != 0.0)):
snB2_err += (((thermomuS1['s'] / thermomuS1['n_B']) - (thermomuS2['s'] / thermomuS2['n_B'])) ** 2.0)
thermogammaS1 = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=(popt2[4] + (perr2[4] / 2.0)), offshell=offshell)
thermogammaS2 = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=(popt2[4] - (perr2[4] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB2_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB2_err = np.sqrt(snB2_err)
print(f's/n_B = {snB2} \pm {snB2_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dmuQ, fmuQ) = m.profile('muQ')
(dmuS, fmuS) = m.profile('muS')
(dgammaS, fgammaS) = m.profile('gammaS')
output_chi22 = [[dT, fT], [dmuB, fmuB], [dmuQ, fmuQ], [dmuS, fmuS], [dgammaS, fgammaS]]
else:
output_chi22 = None
output_ratios = {'fit_ratios': np.array(list(zip(popt2, perr2))), 'fit_string_ratios': fit_string2, 'result_ratios': f_ratios(xratios, *popt2), 'data_ratios': np.array(list(zip(data_ratios, err_ratios))), 'particle_ratios': list(zip(latex(final_part1), latex(final_part2))), 'chi2_ratios': output_chi22, 'snB_ratios': np.array([snB2, snB2_err])}
else:
output_ratios = {}
if (((EoS == 'all') or (EoS == 'nS0')) and ((method == 'all') or (method == 'ratios'))):
xratios = np.arange(len(data_ratios))
least_squares = LeastSquares(xratios, data_ratios, err_ratios, f_ratios_nS0)
m = Minuit(least_squares, T=guess[0], muB=guess[1], gammaS=guess[4], limit_T=bounds[0], limit_muB=bounds[1], limit_gammaS=bounds[4], fix_T=fix_T, fix_muB=fix_muB, fix_gammaS=fix_gammaS)
m.migrad()
m.hesse()
popt2 = m.values.values()
perr2 = m.errors.values()
thermo = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=popt2[2], offshell=offshell)
print('\nfit from ratios, nS0 EoS:')
fit_string2 = f'''$T_{{ch}}={popt2[0]:.4f} \pm {perr2[0]:.4f}\ GeV$
$\mu_{{B}}={popt2[1]:.4f} \pm {perr2[1]:.4f}\ GeV$
$\gamma_{{S}}={popt2[2]:.2f} \pm {perr2[2]:.2f}$
$\mu_{{Q}}={thermo['muQ']:.4f}\ GeV$
$\mu_{{S}}={thermo['muS']:.4f}\ GeV$'''
print(fit_string2)
snB2 = (thermo['s'] / thermo['n_B'])
snB2_err = 0.0
thermoT1 = EoS_nS0(HRG, (popt2[0] + (perr2[0] / 2.0)), popt2[1], gammaS=popt2[2], offshell=offshell)
thermoT2 = EoS_nS0(HRG, (popt2[0] - (perr2[0] / 2.0)), popt2[1], gammaS=popt2[2], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB2_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = EoS_nS0(HRG, popt2[0], (popt2[1] + (perr2[1] / 2.0)), gammaS=popt2[2], offshell=offshell)
thermomuB2 = EoS_nS0(HRG, popt2[0], (popt2[1] - (perr2[1] / 2.0)), gammaS=popt2[2], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB2_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermogammaS1 = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=(popt2[2] + (perr2[2] / 2.0)), offshell=offshell)
thermogammaS2 = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=(popt2[2] - (perr2[2] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB2_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB2_err = np.sqrt(snB2_err)
print(f's/n_B = {snB2} \pm {snB2_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dgammaS, fgammaS) = m.profile('gammaS')
output_chi22 = [[dT, fT], [dmuB, fmuB], [dgammaS, fgammaS]]
else:
output_chi22 = None
result_ratios_nS0 = f_ratios_nS0(xratios, *popt2)
(Tch, muB, gammaS) = popt2
(Tch_err, muB_err, gammaS_err) = perr2
popt2 = np.array([Tch, muB, thermo['muQ'], thermo['muS'], gammaS])
perr2 = np.array([Tch_err, muB_err, 0.0, 0.0, gammaS_err])
output_ratios_nS0 = {'fit_ratios_nS0': np.array(list(zip(popt2, perr2))), 'fit_string_ratios_nS0': fit_string2, 'result_ratios_nS0': result_ratios_nS0, 'data_ratios': np.array(list(zip(data_ratios, err_ratios))), 'particle_ratios': list(zip(latex(final_part1), latex(final_part2))), 'chi2_ratios_nS0': output_chi22, 'snB_ratios_nS0': np.array([snB2, snB2_err])}
else:
output_ratios_nS0 = {}
output = {}
output.update(output_yields)
output.update(output_ratios)
output.update(output_yields_nS0)
output.update(output_ratios_nS0)
return output | Extract freeze out parameters by fitting final heavy ion data (dN/dy)
given in dict_yield. Construct ratios of different particles. | EoS_HRG/HRG.py | fit_freezeout | pierre-moreau/EoS_HRG | 0 | python | def fit_freezeout(dict_yield, **kwargs):
'\n Extract freeze out parameters by fitting final heavy ion data (dN/dy)\n given in dict_yield. Construct ratios of different particles.\n '
chi2_plot = kwargs.get('chi2_plot', False)
freezeout_decay = kwargs.get('freezeout_decay', True)
method = kwargs.get('method', 'all')
offshell = kwargs.get('offshell', False)
EoS = kwargs.get('EoS', 'all')
list_part1 = ['pi-', 'K-', 'p~', 'Lambda~', 'Xi~+', 'K-', 'p~', 'Lambda', 'Xi~+']
list_part2 = ['pi+', 'K+', 'p', 'Lambda', 'Xi-', 'pi-', 'pi-', 'pi-', 'pi-']
list_part = ['pi+', 'pi-', 'K+', 'K-', 'p', 'p~', 'Lambda', 'Lambda~', 'Xi-', 'Xi~+']
data_yields = []
err_yields = []
final_part = []
for part in list_part:
try:
if ((dict_yield[part] is not None) and (dict_yield[part] > 0.0)):
data_yields.append(dict_yield[part])
err_yields.append(dict_yield[(part + '_err')])
final_part.append(part)
except KeyError:
pass
data_ratios = []
err_ratios = []
final_part1 = []
final_part2 = []
for (part1, part2) in zip(list_part1, list_part2):
try:
if ((dict_yield[part1] is not None) and (dict_yield[part1] > 0.0) and (dict_yield[part2] is not None) and (dict_yield[part2] > 0.0)):
ratio = (dict_yield[part1] / dict_yield[part2])
data_ratios.append(ratio)
err_ratios.append((abs(ratio) * np.sqrt((((dict_yield[(part1 + '_err')] / dict_yield[part1]) ** 2.0) + ((dict_yield[(part2 + '_err')] / dict_yield[part2]) ** 2.0)))))
final_part1.append(part1)
final_part2.append(part2)
except KeyError:
pass
def f_yields(x, T, muB, muQ, muS, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result
def f_yields_nS0(x, T, muB, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result
def f_ratios(x, T, muB, muQ, muS, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result
def f_ratios_nS0(x, T, muB, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result
fix_T = False
fix_muB = False
fix_muQ = False
fix_muS = False
fix_gammaS = False
fix_dVdy = False
guess = (0.15, 0.05, 0.0, 0.05, 1.0, 2000.0)
bounds = ((0.1, 0.2), (0, 0.6), ((- 0.2), 0.2), (0, 0.2), (0.0, 1.2), (100.0, 10000.0))
if (((EoS == 'all') or (EoS == 'full')) and ((method == 'all') or (method == 'yields'))):
xyields = np.arange(len(final_part))
least_squares = LeastSquares(xyields, data_yields, err_yields, f_yields)
m = Minuit(least_squares, T=guess[0], muB=guess[1], muQ=guess[2], muS=guess[3], gammaS=guess[4], dVdy=guess[5], limit_T=bounds[0], limit_muB=bounds[1], limit_muQ=bounds[2], limit_muS=bounds[3], limit_gammaS=bounds[4], limit_dVdy=bounds[5], fix_T=fix_T, fix_muB=fix_muB, fix_muQ=fix_muQ, fix_muS=fix_muS, fix_gammaS=fix_gammaS, fix_dVdy=fix_dVdy)
m.migrad()
m.hesse()
popt1 = m.values.values()
perr1 = m.errors.values()
print('\nfit from yields, full EoS:')
fit_string1 = f'''$T_{{ch}}={popt1[0]:.4f} \pm {perr1[0]:.4f}\ GeV$
$\mu_{{B}}={popt1[1]:.4f} \pm {perr1[1]:.4f}\ GeV$
$\mu_{{Q}}={popt1[2]:.4f} \pm {perr1[2]:.4f}\ GeV$
$\mu_{{S}}={popt1[3]:.4f} \pm {perr1[3]:.4f}\ GeV$
$\gamma_{{S}}={popt1[4]:.2f} \pm {perr1[4]:.2f}$
$dV/dy={popt1[5]:.1f} \pm {perr1[5]:.1f} \ fm^3$'''
print(fit_string1)
thermo = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
snB1 = (thermo['s'] / thermo['n_B'])
snB1_err = 0.0
thermoT1 = HRG((popt1[0] + (perr1[0] / 2.0)), popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
thermoT2 = HRG((popt1[0] - (perr1[0] / 2.0)), popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB1_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = HRG(popt1[0], (popt1[1] + (perr1[1] / 2.0)), popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
thermomuB2 = HRG(popt1[0], (popt1[1] - (perr1[1] / 2.0)), popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB1_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermomuQ1 = HRG(popt1[0], popt1[1], (popt1[2] + (perr1[2] / 2.0)), popt1[3], gammaS=popt1[4], offshell=offshell)
thermomuQ2 = HRG(popt1[0], popt1[1], (popt1[2] - (perr1[2] / 2.0)), popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermomuQ1['n_B'] != 0.0) and (thermomuQ2['n_B'] != 0.0)):
snB1_err += (((thermomuQ1['s'] / thermomuQ1['n_B']) - (thermomuQ2['s'] / thermomuQ2['n_B'])) ** 2.0)
thermomuS1 = HRG(popt1[0], popt1[1], popt1[2], (popt1[3] + (perr1[3] / 2.0)), gammaS=popt1[4], offshell=offshell)
thermomuS2 = HRG(popt1[0], popt1[1], popt1[2], (popt1[3] - (perr1[3] / 2.0)), gammaS=popt1[4], offshell=offshell)
if ((thermomuS1['n_B'] != 0.0) and (thermomuS2['n_B'] != 0.0)):
snB1_err += (((thermomuS1['s'] / thermomuS1['n_B']) - (thermomuS2['s'] / thermomuS2['n_B'])) ** 2.0)
thermogammaS1 = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=(popt1[4] + (perr1[4] / 2.0)), offshell=offshell)
thermogammaS2 = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=(popt1[4] - (perr1[4] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB1_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB1_err = np.sqrt(snB1_err)
print(f's/n_B = {snB1} \pm {snB1_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dmuQ, fmuQ) = m.profile('muQ')
(dmuS, fmuS) = m.profile('muS')
(dgammaS, fgammaS) = m.profile('gammaS')
(ddVdy, fdVdy) = m.profile('dVdy')
output_chi21 = [[dT, fT], [dmuB, fmuB], [dmuQ, fmuQ], [dmuS, fmuS], [dgammaS, fgammaS], [ddVdy, fdVdy]]
else:
output_chi21 = None
output_yields = {'fit_yields': np.array(list(zip(popt1, perr1))), 'fit_string_yields': fit_string1, 'result_yields': f_yields(xyields, *popt1), 'data_yields': np.array(list(zip(data_yields, err_yields))), 'particle_yields': list(latex(final_part)), 'chi2_yields': output_chi21, 'snB_yields': np.array([snB1, snB1_err])}
else:
output_yields = {}
if (((EoS == 'all') or (EoS == 'nS0')) and ((method == 'all') or (method == 'yields'))):
xyields = np.arange(len(final_part))
least_squares = LeastSquares(xyields, data_yields, err_yields, f_yields_nS0)
m = Minuit(least_squares, T=guess[0], muB=guess[1], gammaS=guess[4], dVdy=guess[5], limit_T=bounds[0], limit_muB=bounds[1], limit_gammaS=bounds[4], limit_dVdy=bounds[5], fix_T=fix_T, fix_muB=fix_muB, fix_gammaS=fix_gammaS, fix_dVdy=fix_dVdy)
m.migrad()
m.hesse()
popt1 = m.values.values()
perr1 = m.errors.values()
thermo = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=popt1[2], offshell=offshell)
print('\nfit from yields, nS0 EoS:')
fit_string1 = f'''$T_{{ch}}={popt1[0]:.4f} \pm {perr1[0]:.4f}\ GeV$
$\mu_{{B}}={popt1[1]:.4f} \pm {perr1[1]:.4f}\ GeV$
$\gamma_{{S}}={popt1[2]:.2f} \pm {perr1[2]:.2f}$
$dV/dy={popt1[3]:.1f} \pm {perr1[3]:.1f} \ fm^3$
$\mu_{{Q}}={thermo['muQ']:.4f}\ GeV$
$\mu_{{S}}={thermo['muS']:.4f}\ GeV$'''
print(fit_string1)
snB1 = (thermo['s'] / thermo['n_B'])
snB1_err = 0.0
thermoT1 = EoS_nS0(HRG, (popt1[0] + (perr1[0] / 2.0)), popt1[1], gammaS=popt1[2], offshell=offshell)
thermoT2 = EoS_nS0(HRG, (popt1[0] - (perr1[0] / 2.0)), popt1[1], gammaS=popt1[2], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB1_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = EoS_nS0(HRG, popt1[0], (popt1[1] + (perr1[1] / 2.0)), gammaS=popt1[2], offshell=offshell)
thermomuB2 = EoS_nS0(HRG, popt1[0], (popt1[1] - (perr1[1] / 2.0)), gammaS=popt1[2], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB1_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermogammaS1 = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=(popt1[2] + (perr1[2] / 2.0)), offshell=offshell)
thermogammaS2 = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=(popt1[2] - (perr1[2] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB1_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB1_err = np.sqrt(snB1_err)
print(f's/n_B = {snB1} \pm {snB1_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dgammaS, fgammaS) = m.profile('gammaS')
(ddVdy, fdVdy) = m.profile('dVdy')
output_chi21 = [[dT, fT], [dmuB, fmuB], [dgammaS, fgammaS], [ddVdy, fdVdy]]
else:
output_chi21 = None
result_yields_nS0 = f_yields_nS0(xyields, *popt1)
(Tch, muB, gammaS, dVdy) = popt1
(Tch_err, muB_err, gammaS_err, dVdy_err) = perr1
popt1 = np.array([Tch, muB, thermo['muQ'], thermo['muS'], gammaS, dVdy])
perr1 = np.array([Tch_err, muB_err, 0.0, 0.0, gammaS_err, dVdy_err])
output_yields_nS0 = {'fit_yields_nS0': np.array(list(zip(popt1, perr1))), 'fit_string_yields_nS0': fit_string1, 'result_yields_nS0': result_yields_nS0, 'data_yields': np.array(list(zip(data_yields, err_yields))), 'particle_yields': list(latex(final_part)), 'chi2_yields_nS0': output_chi21, 'snB_yields_nS0': np.array([snB1, snB1_err])}
else:
output_yields_nS0 = {}
if (((EoS == 'all') or (EoS == 'full')) and ((method == 'all') or (method == 'ratios'))):
xratios = np.arange(len(data_ratios))
least_squares = LeastSquares(xratios, data_ratios, err_ratios, f_ratios)
m = Minuit(least_squares, T=guess[0], muB=guess[1], muQ=guess[2], muS=guess[3], gammaS=guess[4], limit_T=bounds[0], limit_muB=bounds[1], limit_muQ=bounds[2], limit_muS=bounds[3], limit_gammaS=bounds[4], fix_T=fix_T, fix_muB=fix_muB, fix_muQ=fix_muQ, fix_muS=fix_muS, fix_gammaS=fix_gammaS)
m.migrad()
m.hesse()
popt2 = m.values.values()
perr2 = m.errors.values()
print('\nfit from ratios, full EoS:')
fit_string2 = f'''$T_{{ch}}={popt2[0]:.4f} \pm {perr2[0]:.4f}\ GeV$
$\mu_{{B}}={popt2[1]:.4f} \pm {perr2[1]:.4f}\ GeV$
$\mu_{{Q}}={popt2[2]:.4f} \pm {perr2[2]:.4f}\ GeV$
$\mu_{{S}}={popt2[3]:.4f} \pm {perr2[3]:.4f}\ GeV$
$\gamma_{{S}}={popt2[4]:.2f} \pm {perr2[4]:.2f}$'''
print(fit_string2)
thermo = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
snB2 = (thermo['s'] / thermo['n_B'])
snB2_err = 0.0
thermoT1 = HRG((popt2[0] + (perr2[0] / 2.0)), popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
thermoT2 = HRG((popt2[0] - (perr2[0] / 2.0)), popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB2_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = HRG(popt2[0], (popt2[1] + (perr2[1] / 2.0)), popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
thermomuB2 = HRG(popt2[0], (popt2[1] - (perr2[1] / 2.0)), popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB2_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermomuQ1 = HRG(popt2[0], popt2[1], (popt2[2] + (perr2[2] / 2.0)), popt2[3], gammaS=popt2[4], offshell=offshell)
thermomuQ2 = HRG(popt2[0], popt2[1], (popt2[2] - (perr2[2] / 2.0)), popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermomuQ1['n_B'] != 0.0) and (thermomuQ2['n_B'] != 0.0)):
snB2_err += (((thermomuQ1['s'] / thermomuQ1['n_B']) - (thermomuQ2['s'] / thermomuQ2['n_B'])) ** 2.0)
thermomuS1 = HRG(popt2[0], popt2[1], popt2[2], (popt2[3] + (perr2[3] / 2.0)), gammaS=popt2[4], offshell=offshell)
thermomuS2 = HRG(popt2[0], popt2[1], popt2[2], (popt2[3] - (perr2[3] / 2.0)), gammaS=popt2[4], offshell=offshell)
if ((thermomuS1['n_B'] != 0.0) and (thermomuS2['n_B'] != 0.0)):
snB2_err += (((thermomuS1['s'] / thermomuS1['n_B']) - (thermomuS2['s'] / thermomuS2['n_B'])) ** 2.0)
thermogammaS1 = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=(popt2[4] + (perr2[4] / 2.0)), offshell=offshell)
thermogammaS2 = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=(popt2[4] - (perr2[4] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB2_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB2_err = np.sqrt(snB2_err)
print(f's/n_B = {snB2} \pm {snB2_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dmuQ, fmuQ) = m.profile('muQ')
(dmuS, fmuS) = m.profile('muS')
(dgammaS, fgammaS) = m.profile('gammaS')
output_chi22 = [[dT, fT], [dmuB, fmuB], [dmuQ, fmuQ], [dmuS, fmuS], [dgammaS, fgammaS]]
else:
output_chi22 = None
output_ratios = {'fit_ratios': np.array(list(zip(popt2, perr2))), 'fit_string_ratios': fit_string2, 'result_ratios': f_ratios(xratios, *popt2), 'data_ratios': np.array(list(zip(data_ratios, err_ratios))), 'particle_ratios': list(zip(latex(final_part1), latex(final_part2))), 'chi2_ratios': output_chi22, 'snB_ratios': np.array([snB2, snB2_err])}
else:
output_ratios = {}
if (((EoS == 'all') or (EoS == 'nS0')) and ((method == 'all') or (method == 'ratios'))):
xratios = np.arange(len(data_ratios))
least_squares = LeastSquares(xratios, data_ratios, err_ratios, f_ratios_nS0)
m = Minuit(least_squares, T=guess[0], muB=guess[1], gammaS=guess[4], limit_T=bounds[0], limit_muB=bounds[1], limit_gammaS=bounds[4], fix_T=fix_T, fix_muB=fix_muB, fix_gammaS=fix_gammaS)
m.migrad()
m.hesse()
popt2 = m.values.values()
perr2 = m.errors.values()
thermo = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=popt2[2], offshell=offshell)
print('\nfit from ratios, nS0 EoS:')
fit_string2 = f'''$T_{{ch}}={popt2[0]:.4f} \pm {perr2[0]:.4f}\ GeV$
$\mu_{{B}}={popt2[1]:.4f} \pm {perr2[1]:.4f}\ GeV$
$\gamma_{{S}}={popt2[2]:.2f} \pm {perr2[2]:.2f}$
$\mu_{{Q}}={thermo['muQ']:.4f}\ GeV$
$\mu_{{S}}={thermo['muS']:.4f}\ GeV$'''
print(fit_string2)
snB2 = (thermo['s'] / thermo['n_B'])
snB2_err = 0.0
thermoT1 = EoS_nS0(HRG, (popt2[0] + (perr2[0] / 2.0)), popt2[1], gammaS=popt2[2], offshell=offshell)
thermoT2 = EoS_nS0(HRG, (popt2[0] - (perr2[0] / 2.0)), popt2[1], gammaS=popt2[2], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB2_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = EoS_nS0(HRG, popt2[0], (popt2[1] + (perr2[1] / 2.0)), gammaS=popt2[2], offshell=offshell)
thermomuB2 = EoS_nS0(HRG, popt2[0], (popt2[1] - (perr2[1] / 2.0)), gammaS=popt2[2], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB2_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermogammaS1 = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=(popt2[2] + (perr2[2] / 2.0)), offshell=offshell)
thermogammaS2 = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=(popt2[2] - (perr2[2] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB2_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB2_err = np.sqrt(snB2_err)
print(f's/n_B = {snB2} \pm {snB2_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dgammaS, fgammaS) = m.profile('gammaS')
output_chi22 = [[dT, fT], [dmuB, fmuB], [dgammaS, fgammaS]]
else:
output_chi22 = None
result_ratios_nS0 = f_ratios_nS0(xratios, *popt2)
(Tch, muB, gammaS) = popt2
(Tch_err, muB_err, gammaS_err) = perr2
popt2 = np.array([Tch, muB, thermo['muQ'], thermo['muS'], gammaS])
perr2 = np.array([Tch_err, muB_err, 0.0, 0.0, gammaS_err])
output_ratios_nS0 = {'fit_ratios_nS0': np.array(list(zip(popt2, perr2))), 'fit_string_ratios_nS0': fit_string2, 'result_ratios_nS0': result_ratios_nS0, 'data_ratios': np.array(list(zip(data_ratios, err_ratios))), 'particle_ratios': list(zip(latex(final_part1), latex(final_part2))), 'chi2_ratios_nS0': output_chi22, 'snB_ratios_nS0': np.array([snB2, snB2_err])}
else:
output_ratios_nS0 = {}
output = {}
output.update(output_yields)
output.update(output_ratios)
output.update(output_yields_nS0)
output.update(output_ratios_nS0)
return output | def fit_freezeout(dict_yield, **kwargs):
'\n Extract freeze out parameters by fitting final heavy ion data (dN/dy)\n given in dict_yield. Construct ratios of different particles.\n '
chi2_plot = kwargs.get('chi2_plot', False)
freezeout_decay = kwargs.get('freezeout_decay', True)
method = kwargs.get('method', 'all')
offshell = kwargs.get('offshell', False)
EoS = kwargs.get('EoS', 'all')
list_part1 = ['pi-', 'K-', 'p~', 'Lambda~', 'Xi~+', 'K-', 'p~', 'Lambda', 'Xi~+']
list_part2 = ['pi+', 'K+', 'p', 'Lambda', 'Xi-', 'pi-', 'pi-', 'pi-', 'pi-']
list_part = ['pi+', 'pi-', 'K+', 'K-', 'p', 'p~', 'Lambda', 'Lambda~', 'Xi-', 'Xi~+']
data_yields = []
err_yields = []
final_part = []
for part in list_part:
try:
if ((dict_yield[part] is not None) and (dict_yield[part] > 0.0)):
data_yields.append(dict_yield[part])
err_yields.append(dict_yield[(part + '_err')])
final_part.append(part)
except KeyError:
pass
data_ratios = []
err_ratios = []
final_part1 = []
final_part2 = []
for (part1, part2) in zip(list_part1, list_part2):
try:
if ((dict_yield[part1] is not None) and (dict_yield[part1] > 0.0) and (dict_yield[part2] is not None) and (dict_yield[part2] > 0.0)):
ratio = (dict_yield[part1] / dict_yield[part2])
data_ratios.append(ratio)
err_ratios.append((abs(ratio) * np.sqrt((((dict_yield[(part1 + '_err')] / dict_yield[part1]) ** 2.0) + ((dict_yield[(part2 + '_err')] / dict_yield[part2]) ** 2.0)))))
final_part1.append(part1)
final_part2.append(part2)
except KeyError:
pass
def f_yields(x, T, muB, muQ, muS, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result
def f_yields_nS0(x, T, muB, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result
def f_ratios(x, T, muB, muQ, muS, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result
def f_ratios_nS0(x, T, muB, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result
fix_T = False
fix_muB = False
fix_muQ = False
fix_muS = False
fix_gammaS = False
fix_dVdy = False
guess = (0.15, 0.05, 0.0, 0.05, 1.0, 2000.0)
bounds = ((0.1, 0.2), (0, 0.6), ((- 0.2), 0.2), (0, 0.2), (0.0, 1.2), (100.0, 10000.0))
if (((EoS == 'all') or (EoS == 'full')) and ((method == 'all') or (method == 'yields'))):
xyields = np.arange(len(final_part))
least_squares = LeastSquares(xyields, data_yields, err_yields, f_yields)
m = Minuit(least_squares, T=guess[0], muB=guess[1], muQ=guess[2], muS=guess[3], gammaS=guess[4], dVdy=guess[5], limit_T=bounds[0], limit_muB=bounds[1], limit_muQ=bounds[2], limit_muS=bounds[3], limit_gammaS=bounds[4], limit_dVdy=bounds[5], fix_T=fix_T, fix_muB=fix_muB, fix_muQ=fix_muQ, fix_muS=fix_muS, fix_gammaS=fix_gammaS, fix_dVdy=fix_dVdy)
m.migrad()
m.hesse()
popt1 = m.values.values()
perr1 = m.errors.values()
print('\nfit from yields, full EoS:')
fit_string1 = f'''$T_{{ch}}={popt1[0]:.4f} \pm {perr1[0]:.4f}\ GeV$
$\mu_{{B}}={popt1[1]:.4f} \pm {perr1[1]:.4f}\ GeV$
$\mu_{{Q}}={popt1[2]:.4f} \pm {perr1[2]:.4f}\ GeV$
$\mu_{{S}}={popt1[3]:.4f} \pm {perr1[3]:.4f}\ GeV$
$\gamma_{{S}}={popt1[4]:.2f} \pm {perr1[4]:.2f}$
$dV/dy={popt1[5]:.1f} \pm {perr1[5]:.1f} \ fm^3$'''
print(fit_string1)
thermo = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
snB1 = (thermo['s'] / thermo['n_B'])
snB1_err = 0.0
thermoT1 = HRG((popt1[0] + (perr1[0] / 2.0)), popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
thermoT2 = HRG((popt1[0] - (perr1[0] / 2.0)), popt1[1], popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB1_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = HRG(popt1[0], (popt1[1] + (perr1[1] / 2.0)), popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
thermomuB2 = HRG(popt1[0], (popt1[1] - (perr1[1] / 2.0)), popt1[2], popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB1_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermomuQ1 = HRG(popt1[0], popt1[1], (popt1[2] + (perr1[2] / 2.0)), popt1[3], gammaS=popt1[4], offshell=offshell)
thermomuQ2 = HRG(popt1[0], popt1[1], (popt1[2] - (perr1[2] / 2.0)), popt1[3], gammaS=popt1[4], offshell=offshell)
if ((thermomuQ1['n_B'] != 0.0) and (thermomuQ2['n_B'] != 0.0)):
snB1_err += (((thermomuQ1['s'] / thermomuQ1['n_B']) - (thermomuQ2['s'] / thermomuQ2['n_B'])) ** 2.0)
thermomuS1 = HRG(popt1[0], popt1[1], popt1[2], (popt1[3] + (perr1[3] / 2.0)), gammaS=popt1[4], offshell=offshell)
thermomuS2 = HRG(popt1[0], popt1[1], popt1[2], (popt1[3] - (perr1[3] / 2.0)), gammaS=popt1[4], offshell=offshell)
if ((thermomuS1['n_B'] != 0.0) and (thermomuS2['n_B'] != 0.0)):
snB1_err += (((thermomuS1['s'] / thermomuS1['n_B']) - (thermomuS2['s'] / thermomuS2['n_B'])) ** 2.0)
thermogammaS1 = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=(popt1[4] + (perr1[4] / 2.0)), offshell=offshell)
thermogammaS2 = HRG(popt1[0], popt1[1], popt1[2], popt1[3], gammaS=(popt1[4] - (perr1[4] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB1_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB1_err = np.sqrt(snB1_err)
print(f's/n_B = {snB1} \pm {snB1_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dmuQ, fmuQ) = m.profile('muQ')
(dmuS, fmuS) = m.profile('muS')
(dgammaS, fgammaS) = m.profile('gammaS')
(ddVdy, fdVdy) = m.profile('dVdy')
output_chi21 = [[dT, fT], [dmuB, fmuB], [dmuQ, fmuQ], [dmuS, fmuS], [dgammaS, fgammaS], [ddVdy, fdVdy]]
else:
output_chi21 = None
output_yields = {'fit_yields': np.array(list(zip(popt1, perr1))), 'fit_string_yields': fit_string1, 'result_yields': f_yields(xyields, *popt1), 'data_yields': np.array(list(zip(data_yields, err_yields))), 'particle_yields': list(latex(final_part)), 'chi2_yields': output_chi21, 'snB_yields': np.array([snB1, snB1_err])}
else:
output_yields = {}
if (((EoS == 'all') or (EoS == 'nS0')) and ((method == 'all') or (method == 'yields'))):
xyields = np.arange(len(final_part))
least_squares = LeastSquares(xyields, data_yields, err_yields, f_yields_nS0)
m = Minuit(least_squares, T=guess[0], muB=guess[1], gammaS=guess[4], dVdy=guess[5], limit_T=bounds[0], limit_muB=bounds[1], limit_gammaS=bounds[4], limit_dVdy=bounds[5], fix_T=fix_T, fix_muB=fix_muB, fix_gammaS=fix_gammaS, fix_dVdy=fix_dVdy)
m.migrad()
m.hesse()
popt1 = m.values.values()
perr1 = m.errors.values()
thermo = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=popt1[2], offshell=offshell)
print('\nfit from yields, nS0 EoS:')
fit_string1 = f"$T_{{ch}}={popt1[0]:.4f} \pm {perr1[0]:.4f}\ GeV$\n\
$\mu_{{B}}={popt1[1]:.4f} \pm {perr1[1]:.4f}\ GeV$\n\
$\gamma_{{S}}={popt1[2]:.2f} \pm {perr1[2]:.2f}$\n\
$dV/dy={popt1[3]:.1f} \pm {perr1[3]:.1f} \ fm^3$\n\
$\mu_{{Q}}={thermo['muQ']:.4f}\ GeV$\n\
$\mu_{{S}}={thermo['muS']:.4f}\ GeV$"
print(fit_string1)
snB1 = (thermo['s'] / thermo['n_B'])
snB1_err = 0.0
thermoT1 = EoS_nS0(HRG, (popt1[0] + (perr1[0] / 2.0)), popt1[1], gammaS=popt1[2], offshell=offshell)
thermoT2 = EoS_nS0(HRG, (popt1[0] - (perr1[0] / 2.0)), popt1[1], gammaS=popt1[2], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB1_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = EoS_nS0(HRG, popt1[0], (popt1[1] + (perr1[1] / 2.0)), gammaS=popt1[2], offshell=offshell)
thermomuB2 = EoS_nS0(HRG, popt1[0], (popt1[1] - (perr1[1] / 2.0)), gammaS=popt1[2], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB1_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermogammaS1 = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=(popt1[2] + (perr1[2] / 2.0)), offshell=offshell)
thermogammaS2 = EoS_nS0(HRG, popt1[0], popt1[1], gammaS=(popt1[2] - (perr1[2] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB1_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB1_err = np.sqrt(snB1_err)
print(f's/n_B = {snB1} \pm {snB1_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dgammaS, fgammaS) = m.profile('gammaS')
(ddVdy, fdVdy) = m.profile('dVdy')
output_chi21 = [[dT, fT], [dmuB, fmuB], [dgammaS, fgammaS], [ddVdy, fdVdy]]
else:
output_chi21 = None
result_yields_nS0 = f_yields_nS0(xyields, *popt1)
(Tch, muB, gammaS, dVdy) = popt1
(Tch_err, muB_err, gammaS_err, dVdy_err) = perr1
popt1 = np.array([Tch, muB, thermo['muQ'], thermo['muS'], gammaS, dVdy])
perr1 = np.array([Tch_err, muB_err, 0.0, 0.0, gammaS_err, dVdy_err])
output_yields_nS0 = {'fit_yields_nS0': np.array(list(zip(popt1, perr1))), 'fit_string_yields_nS0': fit_string1, 'result_yields_nS0': result_yields_nS0, 'data_yields': np.array(list(zip(data_yields, err_yields))), 'particle_yields': list(latex(final_part)), 'chi2_yields_nS0': output_chi21, 'snB_yields_nS0': np.array([snB1, snB1_err])}
else:
output_yields_nS0 = {}
if (((EoS == 'all') or (EoS == 'full')) and ((method == 'all') or (method == 'ratios'))):
xratios = np.arange(len(data_ratios))
least_squares = LeastSquares(xratios, data_ratios, err_ratios, f_ratios)
m = Minuit(least_squares, T=guess[0], muB=guess[1], muQ=guess[2], muS=guess[3], gammaS=guess[4], limit_T=bounds[0], limit_muB=bounds[1], limit_muQ=bounds[2], limit_muS=bounds[3], limit_gammaS=bounds[4], fix_T=fix_T, fix_muB=fix_muB, fix_muQ=fix_muQ, fix_muS=fix_muS, fix_gammaS=fix_gammaS)
m.migrad()
m.hesse()
popt2 = m.values.values()
perr2 = m.errors.values()
print('\nfit from ratios, full EoS:')
fit_string2 = f'$T_{{ch}}={popt2[0]:.4f} \pm {perr2[0]:.4f}\ GeV$\n\
$\mu_{{B}}={popt2[1]:.4f} \pm {perr2[1]:.4f}\ GeV$\n\
$\mu_{{Q}}={popt2[2]:.4f} \pm {perr2[2]:.4f}\ GeV$\n\
$\mu_{{S}}={popt2[3]:.4f} \pm {perr2[3]:.4f}\ GeV$\n\
$\gamma_{{S}}={popt2[4]:.2f} \pm {perr2[4]:.2f}$'
print(fit_string2)
thermo = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
snB2 = (thermo['s'] / thermo['n_B'])
snB2_err = 0.0
thermoT1 = HRG((popt2[0] + (perr2[0] / 2.0)), popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
thermoT2 = HRG((popt2[0] - (perr2[0] / 2.0)), popt2[1], popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB2_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = HRG(popt2[0], (popt2[1] + (perr2[1] / 2.0)), popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
thermomuB2 = HRG(popt2[0], (popt2[1] - (perr2[1] / 2.0)), popt2[2], popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB2_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermomuQ1 = HRG(popt2[0], popt2[1], (popt2[2] + (perr2[2] / 2.0)), popt2[3], gammaS=popt2[4], offshell=offshell)
thermomuQ2 = HRG(popt2[0], popt2[1], (popt2[2] - (perr2[2] / 2.0)), popt2[3], gammaS=popt2[4], offshell=offshell)
if ((thermomuQ1['n_B'] != 0.0) and (thermomuQ2['n_B'] != 0.0)):
snB2_err += (((thermomuQ1['s'] / thermomuQ1['n_B']) - (thermomuQ2['s'] / thermomuQ2['n_B'])) ** 2.0)
thermomuS1 = HRG(popt2[0], popt2[1], popt2[2], (popt2[3] + (perr2[3] / 2.0)), gammaS=popt2[4], offshell=offshell)
thermomuS2 = HRG(popt2[0], popt2[1], popt2[2], (popt2[3] - (perr2[3] / 2.0)), gammaS=popt2[4], offshell=offshell)
if ((thermomuS1['n_B'] != 0.0) and (thermomuS2['n_B'] != 0.0)):
snB2_err += (((thermomuS1['s'] / thermomuS1['n_B']) - (thermomuS2['s'] / thermomuS2['n_B'])) ** 2.0)
thermogammaS1 = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=(popt2[4] + (perr2[4] / 2.0)), offshell=offshell)
thermogammaS2 = HRG(popt2[0], popt2[1], popt2[2], popt2[3], gammaS=(popt2[4] - (perr2[4] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB2_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB2_err = np.sqrt(snB2_err)
print(f's/n_B = {snB2} \pm {snB2_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dmuQ, fmuQ) = m.profile('muQ')
(dmuS, fmuS) = m.profile('muS')
(dgammaS, fgammaS) = m.profile('gammaS')
output_chi22 = [[dT, fT], [dmuB, fmuB], [dmuQ, fmuQ], [dmuS, fmuS], [dgammaS, fgammaS]]
else:
output_chi22 = None
output_ratios = {'fit_ratios': np.array(list(zip(popt2, perr2))), 'fit_string_ratios': fit_string2, 'result_ratios': f_ratios(xratios, *popt2), 'data_ratios': np.array(list(zip(data_ratios, err_ratios))), 'particle_ratios': list(zip(latex(final_part1), latex(final_part2))), 'chi2_ratios': output_chi22, 'snB_ratios': np.array([snB2, snB2_err])}
else:
output_ratios = {}
if (((EoS == 'all') or (EoS == 'nS0')) and ((method == 'all') or (method == 'ratios'))):
xratios = np.arange(len(data_ratios))
least_squares = LeastSquares(xratios, data_ratios, err_ratios, f_ratios_nS0)
m = Minuit(least_squares, T=guess[0], muB=guess[1], gammaS=guess[4], limit_T=bounds[0], limit_muB=bounds[1], limit_gammaS=bounds[4], fix_T=fix_T, fix_muB=fix_muB, fix_gammaS=fix_gammaS)
m.migrad()
m.hesse()
popt2 = m.values.values()
perr2 = m.errors.values()
thermo = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=popt2[2], offshell=offshell)
print('\nfit from ratios, nS0 EoS:')
fit_string2 = f"$T_{{ch}}={popt2[0]:.4f} \pm {perr2[0]:.4f}\ GeV$\n\
$\mu_{{B}}={popt2[1]:.4f} \pm {perr2[1]:.4f}\ GeV$\n\
$\gamma_{{S}}={popt2[2]:.2f} \pm {perr2[2]:.2f}$\n\
$\mu_{{Q}}={thermo['muQ']:.4f}\ GeV$\n\
$\mu_{{S}}={thermo['muS']:.4f}\ GeV$"
print(fit_string2)
snB2 = (thermo['s'] / thermo['n_B'])
snB2_err = 0.0
thermoT1 = EoS_nS0(HRG, (popt2[0] + (perr2[0] / 2.0)), popt2[1], gammaS=popt2[2], offshell=offshell)
thermoT2 = EoS_nS0(HRG, (popt2[0] - (perr2[0] / 2.0)), popt2[1], gammaS=popt2[2], offshell=offshell)
if ((thermoT1['n_B'] != 0.0) and (thermoT2['n_B'] != 0.0)):
snB2_err += (((thermoT1['s'] / thermoT1['n_B']) - (thermoT2['s'] / thermoT2['n_B'])) ** 2.0)
thermomuB1 = EoS_nS0(HRG, popt2[0], (popt2[1] + (perr2[1] / 2.0)), gammaS=popt2[2], offshell=offshell)
thermomuB2 = EoS_nS0(HRG, popt2[0], (popt2[1] - (perr2[1] / 2.0)), gammaS=popt2[2], offshell=offshell)
if ((thermomuB1['n_B'] != 0.0) and (thermomuB2['n_B'] != 0.0)):
snB2_err += (((thermomuB1['s'] / thermomuB1['n_B']) - (thermomuB2['s'] / thermomuB2['n_B'])) ** 2.0)
thermogammaS1 = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=(popt2[2] + (perr2[2] / 2.0)), offshell=offshell)
thermogammaS2 = EoS_nS0(HRG, popt2[0], popt2[1], gammaS=(popt2[2] - (perr2[2] / 2.0)), offshell=offshell)
if ((thermogammaS1['n_B'] != 0.0) and (thermogammaS2['n_B'] != 0.0)):
snB2_err += (((thermogammaS1['s'] / thermogammaS1['n_B']) - (thermogammaS2['s'] / thermogammaS2['n_B'])) ** 2.0)
snB2_err = np.sqrt(snB2_err)
print(f's/n_B = {snB2} \pm {snB2_err}')
if chi2_plot:
(dT, fT) = m.profile('T')
(dmuB, fmuB) = m.profile('muB')
(dgammaS, fgammaS) = m.profile('gammaS')
output_chi22 = [[dT, fT], [dmuB, fmuB], [dgammaS, fgammaS]]
else:
output_chi22 = None
result_ratios_nS0 = f_ratios_nS0(xratios, *popt2)
(Tch, muB, gammaS) = popt2
(Tch_err, muB_err, gammaS_err) = perr2
popt2 = np.array([Tch, muB, thermo['muQ'], thermo['muS'], gammaS])
perr2 = np.array([Tch_err, muB_err, 0.0, 0.0, gammaS_err])
output_ratios_nS0 = {'fit_ratios_nS0': np.array(list(zip(popt2, perr2))), 'fit_string_ratios_nS0': fit_string2, 'result_ratios_nS0': result_ratios_nS0, 'data_ratios': np.array(list(zip(data_ratios, err_ratios))), 'particle_ratios': list(zip(latex(final_part1), latex(final_part2))), 'chi2_ratios_nS0': output_chi22, 'snB_ratios_nS0': np.array([snB2, snB2_err])}
else:
output_ratios_nS0 = {}
output = {}
output.update(output_yields)
output.update(output_ratios)
output.update(output_yields_nS0)
output.update(output_ratios_nS0)
return output<|docstring|>Extract freeze out parameters by fitting final heavy ion data (dN/dy)
given in dict_yield. Construct ratios of different particles.<|endoftext|> |
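The four near-identical error blocks in the fit above all implement the same idea: propagate each fit uncertainty into s/n_B with a symmetric finite difference, one parameter at a time, and sum the squared shifts. A generic helper in that spirit — a sketch only; compute_thermo is a hypothetical stand-in for the HRG / EoS_nS0 calls, not part of the repository:
import numpy as np

def propagate_snB_error(compute_thermo, params, errors):
    # compute_thermo: callable taking a parameter vector and returning a
    # dict with entries 's' (entropy density) and 'n_B' (baryon density).
    central = compute_thermo(params)
    snB = central['s'] / central['n_B']
    var = 0.0
    for i, err in enumerate(errors):
        up = np.array(params, dtype=float)
        down = np.array(params, dtype=float)
        up[i] += err / 2.0       # shift one parameter by +err/2 ...
        down[i] -= err / 2.0     # ... and by -err/2
        t_up, t_down = compute_thermo(up), compute_thermo(down)
        if t_up['n_B'] != 0.0 and t_down['n_B'] != 0.0:
            var += (t_up['s'] / t_up['n_B'] - t_down['s'] / t_down['n_B']) ** 2.0
    return snB, np.sqrt(var)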
8b73ee787db6a961b8ba2b7c6069e5b24aeeb91f6a482f7d759132f754856b52 | def f_yields(x, T, muB, muQ, muS, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result | Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume
x is a dummy argument | EoS_HRG/HRG.py | f_yields | pierre-moreau/EoS_HRG | 0 | python | def f_yields(x, T, muB, muQ, muS, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result | def f_yields(x, T, muB, muQ, muS, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result<|docstring|>Calculate the particle yields for fixed T,muB,muQ,muS,gammaS,volume
x is a dummy argument<|endoftext|> |
f284ede6122ce4e15a0cf60f1d54f96e38e5e62003bf1c45d7db317ff405ac29 | def f_yields_nS0(x, T, muB, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result | Calculate the particle yields for fixed T,muB,gammaS,volume
x is a dummy argument | EoS_HRG/HRG.py | f_yields_nS0 | pierre-moreau/EoS_HRG | 0 | python | def f_yields_nS0(x, T, muB, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result | def f_yields_nS0(x, T, muB, gammaS, dVdy):
'\n Calculate the particle yields for fixed T,muB,gammaS,volume\n x is a dummy argument\n '
result = np.zeros(len(final_part))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, part) in enumerate(final_part):
yval = result_HRG[part]
if (not freezeout_decay):
if (part == 'Lambda'):
yval += result_HRG['Sigma0']
elif (part == 'Lambda~'):
yval += result_HRG['Sigma~0']
result[i] = (((yval * (T ** 3.0)) * dVdy) / (0.197 ** 3.0))
return result<|docstring|>Calculate the particle yields for fixed T,muB,gammaS,volume
x is a dummy argument<|endoftext|> |
d6322a5077366b8e4b3127596d2dd5da9389fb1ee722136f74dedaf32252f594 | def f_ratios(x, T, muB, muQ, muS, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result | Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS
x is a dummy argument | EoS_HRG/HRG.py | f_ratios | pierre-moreau/EoS_HRG | 0 | python | def f_ratios(x, T, muB, muQ, muS, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result | def f_ratios(x, T, muB, muQ, muS, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, muQ, muS, gammaS, EoS='full', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result<|docstring|>Calculate the ratios of particle yields for fixed T,muB,muQ,muS,gammaS
x is a dummy argument<|endoftext|> |
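Note the difference from f_yields above: each observable here is a ratio of two densities evaluated at the same thermal parameters, so the T**3 * dVdy / 0.197**3 conversion cancels and dVdy drops out of the signature. A toy check with made-up densities (not repository code):
# Hypothetical HRG densities in units of T^3.
n_pi, n_K, T, dVdy = 0.12, 0.02, 0.156, 900.0
yield_pi = n_pi * T ** 3.0 * dVdy / 0.197 ** 3.0  # conversion used in f_yields
yield_K = n_K * T ** 3.0 * dVdy / 0.197 ** 3.0
assert abs(yield_pi / yield_K - n_pi / n_K) < 1e-12  # volume factor cancels in the ratio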
c449985e82872b80be7c14c873883c114678a139daa1f0c36e8d6973cf776958 | def f_ratios_nS0(x, T, muB, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result | Calculate the ratios of particle yields for fixed T,muB,gammaS
x is a dummy argument | EoS_HRG/HRG.py | f_ratios_nS0 | pierre-moreau/EoS_HRG | 0 | python | def f_ratios_nS0(x, T, muB, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result | def f_ratios_nS0(x, T, muB, gammaS):
'\n Calculate the ratios of particle yields for fixed T,muB,gammaS\n x is a dummy argument\n '
result = np.zeros(len(data_ratios))
result_HRG = HRG_freezout(T, muB, 0.0, 0.0, gammaS, EoS='nS0', **kwargs)
for (i, (part1, part2)) in enumerate(zip(final_part1, final_part2)):
yval1 = result_HRG[part1]
yval2 = result_HRG[part2]
if (not freezeout_decay):
if (part1 == 'Lambda'):
yval1 += result_HRG['Sigma0']
elif (part1 == 'Lambda~'):
yval1 += result_HRG['Sigma~0']
if (part2 == 'Lambda'):
yval2 += result_HRG['Sigma0']
elif (part2 == 'Lambda~'):
yval2 += result_HRG['Sigma~0']
result[i] = (yval1 / yval2)
return result<|docstring|>Calculate the ratios of particle yields for fixed T,muB,gammaS
x is a dummy argument<|endoftext|> |
51562280363fc6efd9789ec179b5b5876f14a0b55fb178f925167327101e810e | def write_output_dirs(labels2_map, seqdict, weightdict, output_dir, output_prefix):
'\n For each partition, create <output_dir>/<output_prefix>_<partition>/in.fa and in.weights\n '
output_dirs = []
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
for (ncut_label, members) in labels2_map.items():
d2 = os.path.join(output_dir, ((output_prefix + '_') + str(ncut_label)))
os.makedirs(d2)
output_dirs.append(d2)
with open(os.path.join(d2, 'in.fa'), 'w') as f:
for seqid in members:
f.write('>{0}\n{1}\n'.format(seqid, seqdict[seqid].seq))
with open(os.path.join(d2, 'in.weights'), 'w') as f:
for seqid in members:
f.write('{0}\t{1}\n'.format(seqid, weightdict[seqid]))
return output_dirs | For each partition, create <output_dir>/<output_prefix>_<partition>/in.fa and in.weights | Cogent/process_kmer_to_graph.py | write_output_dirs | Zuhayr-PacBio/Cogent | 60 | python | def write_output_dirs(labels2_map, seqdict, weightdict, output_dir, output_prefix):
'\n \n '
output_dirs = []
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
for (ncut_label, members) in labels2_map.items():
d2 = os.path.join(output_dir, ((output_prefix + '_') + str(ncut_label)))
os.makedirs(d2)
output_dirs.append(d2)
with open(os.path.join(d2, 'in.fa'), 'w') as f:
for seqid in members:
f.write('>{0}\n{1}\n'.format(seqid, seqdict[seqid].seq))
with open(os.path.join(d2, 'in.weights'), 'w') as f:
for seqid in members:
f.write('{0}\t{1}\n'.format(seqid, weightdict[seqid]))
return output_dirs | def write_output_dirs(labels2_map, seqdict, weightdict, output_dir, output_prefix):
'\n \n '
output_dirs = []
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
for (ncut_label, members) in labels2_map.items():
d2 = os.path.join(output_dir, ((output_prefix + '_') + str(ncut_label)))
os.makedirs(d2)
output_dirs.append(d2)
with open(os.path.join(d2, 'in.fa'), 'w') as f:
for seqid in members:
f.write('>{0}\n{1}\n'.format(seqid, seqdict[seqid].seq))
with open(os.path.join(d2, 'in.weights'), 'w') as f:
for seqid in members:
f.write('{0}\t{1}\n'.format(seqid, weightdict[seqid]))
return output_dirs<|docstring|>For each partition, create <output_dir>/<output_prefix>_<partition>/in.fa and in.weights<|endoftext|> |
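A minimal sketch of the inputs this expects — the file names and IDs below are invented for illustration, and seqdict values are assumed to be Biopython SeqRecord-like objects with a .seq attribute:
from Bio import SeqIO

labels2_map = {0: ['seq1', 'seq2'], 1: ['seq3']}        # partition label -> member IDs
weightdict = {'seq1': 2, 'seq2': 1, 'seq3': 5}          # per-sequence weights
seqdict = SeqIO.to_dict(SeqIO.parse('isoforms.fa', 'fasta'))  # hypothetical input file
dirs = write_output_dirs(labels2_map, seqdict, weightdict, 'out', 'gene')
# -> ['out/gene_0', 'out/gene_1'], each holding in.fa and in.weights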
820a6cb15e44fde1b92362dc5667bc911f556967234768b56eb68d197fe10e89 | def family_finding(dist_filename, seqdict, output_prefix, has_pbid=False, weight_threshold=0.05, ncut_threshold=0.2):
'\n Make a weighted (undirected) graph where each node is a sequence, each edge is k-mer similarity\n Then do normalized cut to find the family partitions\n\n For each partition, make <output_prefix>/<partition_number>/in.fa\n\n If the IDs are in PB id format (has genome answer), like PB.1.3\n then write that out as the "gene" (ground truth) label in graphml output for visualization\n '
nodelist = list(seqdict.keys())
nodelist = dict(((x, i) for (i, x) in enumerate(nodelist)))
print('making weight graph from ', dist_filename, file=sys.stderr)
G = pk.make_weighted_graph_from_mash_dist(nodelist, dist_filename, threshold=weight_threshold)
for n in G:
G.nodes[n]['labels'] = [n]
print('graph contains {0} nodes, {1} edges'.format(G.number_of_nodes(), G.number_of_edges()), file=sys.stderr)
nodelist = dict(((i, x) for (x, i) in nodelist.items()))
print('performing ncut on graph....', file=sys.stderr)
ncut_map = {}
labels2_map = defaultdict((lambda : []))
for tmp_nodes in nx.connected_components(G):
g = nx.Graph(nx.subgraph(G, tmp_nodes))
run_ncut(g, labels2_map, ncut_map, nodelist, ncut_threshold)
seqid_unassigned = set(seqdict.keys())
with open((output_prefix + '.partition.txt'), 'w') as f:
f.write('Partition\tSize\tMembers\n')
for (k, v) in labels2_map.items():
print(k, v, file=sys.stderr)
f.write('{0}_{1}\t{2}\t{3}\n'.format(output_prefix, k, len(v), ','.join(v)))
for seqid in v:
seqid_unassigned.remove(seqid)
f.write('#unassigned:{0}\n'.format(','.join(seqid_unassigned)))
if (not has_pbid):
for n in G:
G.nodes[n]['label'] = str(ncut_map[n])
G.nodes[n]['gene'] = str(nodelist[n])
G.nodes[n]['labels'] = str(G.nodes[n]['labels'])
else:
nodelist = np.array(nodelist)
gene_answer = defaultdict((lambda : []))
for (i, seqid) in enumerate(nodelist):
pbid = seqid.split('.')[1]
gene_answer[pbid].append(i)
if (i in G):
G.nodes[i]['gene'] = str(pbid)
G.nodes[i]['label'] = str(ncut_map[i])
labels = np.array([i for i in G.nodes()])
labels2 = np.array([ncut_map[i] for i in G.nodes()])
pos = nx.random_layout(G)
nx.write_graphml(G, (output_prefix + '.graphml'))
return labels2_map | Make a weighted (undirected) graph where each node is a sequence, each edge is k-mer similarity
Then do normalized cut to find the family partitions
For each partition, make <output_prefix>/<partition_number>/in.fa
If the IDs are in PB id format (has genome answer), like PB.1.3
then write that out as the "gene" (ground truth) label in graphml output for visualization | Cogent/process_kmer_to_graph.py | family_finding | Zuhayr-PacBio/Cogent | 60 | python | def family_finding(dist_filename, seqdict, output_prefix, has_pbid=False, weight_threshold=0.05, ncut_threshold=0.2):
'\n Make a weighted (undirected) graph where each node is a sequence, each edge is k-mer similarity\n Then do normalized cut to find the family partitions\n\n For each partition, make <output_prefix>/<partition_number>/in.fa\n\n If the IDs are in PB id format (has genome answer), like PB.1.3\n then write that out as the "gene" (ground truth) label in graphml output for visualization\n '
nodelist = list(seqdict.keys())
nodelist = dict(((x, i) for (i, x) in enumerate(nodelist)))
print('making weight graph from ', dist_filename, file=sys.stderr)
G = pk.make_weighted_graph_from_mash_dist(nodelist, dist_filename, threshold=weight_threshold)
for n in G:
G.nodes[n]['labels'] = [n]
print('graph contains {0} nodes, {1} edges'.format(G.number_of_nodes(), G.number_of_edges()), file=sys.stderr)
nodelist = dict(((i, x) for (x, i) in nodelist.items()))
print('performing ncut on graph....', file=sys.stderr)
ncut_map = {}
labels2_map = defaultdict((lambda : []))
for tmp_nodes in nx.connected_components(G):
g = nx.Graph(nx.subgraph(G, tmp_nodes))
run_ncut(g, labels2_map, ncut_map, nodelist, ncut_threshold)
seqid_unassigned = set(seqdict.keys())
with open((output_prefix + '.partition.txt'), 'w') as f:
f.write('Partition\tSize\tMembers\n')
for (k, v) in labels2_map.items():
print(k, v, file=sys.stderr)
f.write('{0}_{1}\t{2}\t{3}\n'.format(output_prefix, k, len(v), ','.join(v)))
for seqid in v:
seqid_unassigned.remove(seqid)
f.write('#unassigned:{0}\n'.format(','.join(seqid_unassigned)))
if (not has_pbid):
for n in G:
G.nodes[n]['label'] = str(ncut_map[n])
G.nodes[n]['gene'] = str(nodelist[n])
G.nodes[n]['labels'] = str(G.nodes[n]['labels'])
else:
nodelist = np.array(nodelist)
gene_answer = defaultdict((lambda : []))
for (i, seqid) in enumerate(nodelist):
pbid = seqid.split('.')[1]
gene_answer[pbid].append(i)
if (i in G):
G.nodes[i]['gene'] = str(pbid)
G.nodes[i]['label'] = str(ncut_map[i])
labels = np.array([i for i in G.nodes()])
labels2 = np.array([ncut_map[i] for i in G.nodes()])
pos = nx.random_layout(G)
nx.write_graphml(G, (output_prefix + '.graphml'))
return labels2_map | def family_finding(dist_filename, seqdict, output_prefix, has_pbid=False, weight_threshold=0.05, ncut_threshold=0.2):
'\n Make a weighted (undirected) graph where each node is a sequence, each edge is k-mer similarity\n Then do normalized cut to find the family partitions\n\n For each partition, make <output_prefix>/<partition_number>/in.fa\n\n If the IDs are in PB id format (has genome answer), like PB.1.3\n then write that out as the "gene" (ground truth) label in graphml output for visualization\n '
nodelist = list(seqdict.keys())
nodelist = dict(((x, i) for (i, x) in enumerate(nodelist)))
print('making weight graph from ', dist_filename, file=sys.stderr)
G = pk.make_weighted_graph_from_mash_dist(nodelist, dist_filename, threshold=weight_threshold)
for n in G:
G.nodes[n]['labels'] = [n]
print('graph contains {0} nodes, {1} edges'.format(G.number_of_nodes(), G.number_of_edges()), file=sys.stderr)
nodelist = dict(((i, x) for (x, i) in nodelist.items()))
print('performing ncut on graph....', file=sys.stderr)
ncut_map = {}
labels2_map = defaultdict((lambda : []))
for tmp_nodes in nx.connected_components(G):
g = nx.Graph(nx.subgraph(G, tmp_nodes))
run_ncut(g, labels2_map, ncut_map, nodelist, ncut_threshold)
seqid_unassigned = set(seqdict.keys())
with open((output_prefix + '.partition.txt'), 'w') as f:
f.write('Partition\tSize\tMembers\n')
for (k, v) in labels2_map.items():
print(k, v, file=sys.stderr)
f.write('{0}_{1}\t{2}\t{3}\n'.format(output_prefix, k, len(v), ','.join(v)))
for seqid in v:
seqid_unassigned.remove(seqid)
f.write('#unassigned:{0}\n'.format(','.join(seqid_unassigned)))
if (not has_pbid):
for n in G:
G.nodes[n]['label'] = str(ncut_map[n])
G.nodes[n]['gene'] = str(nodelist[n])
G.nodes[n]['labels'] = str(G.nodes[n]['labels'])
else:
nodelist = np.array(nodelist)
gene_answer = defaultdict((lambda : []))
for (i, seqid) in enumerate(nodelist):
pbid = seqid.split('.')[1]
gene_answer[pbid].append(i)
if (i in G):
G.nodes[i]['gene'] = str(pbid)
G.nodes[i]['label'] = str(ncut_map[i])
labels = np.array([i for i in G.nodes()])
labels2 = np.array([ncut_map[i] for i in G.nodes()])
pos = nx.random_layout(G)
nx.write_graphml(G, (output_prefix + '.graphml'))
return labels2_map<|docstring|>Make a weighted (undirected) graph where each node is a sequence, each edge is k-mer similarity
Then do normalized cut to find the family partitions
For each partition, make <output_prefix>/<partition_number>/in.fa
If the IDs are in PB id format (has genome answer), like PB.1.3
then write that out as the "gene" (ground truth) label in graphml output for visualization<|endoftext|> |
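run_ncut is called above but not shown. A plausible reconstruction — assuming spectral bisection via the Fiedler vector of the normalized Laplacian, recursing while the normalized cut value stays below ncut_threshold; this is a sketch under those assumptions, not the Cogent source:
import numpy as np
import networkx as nx

def run_ncut(g, labels2_map, ncut_map, nodelist, ncut_threshold, _counter=[0]):
    if g.number_of_nodes() > 1:
        nodes = list(g.nodes())
        lap = nx.normalized_laplacian_matrix(g, weight='weight').toarray()
        _, vecs = np.linalg.eigh(lap)
        fiedler = vecs[:, 1]  # eigenvector of the second-smallest eigenvalue
        part_a = [n for n, v in zip(nodes, fiedler) if v < 0]
        part_b = [n for n, v in zip(nodes, fiedler) if v >= 0]
        if part_a and part_b:
            cut = nx.normalized_cut_size(g, part_a, part_b, weight='weight')
            if cut <= ncut_threshold:  # cheap cut: keep splitting
                for part in (part_a, part_b):
                    run_ncut(nx.Graph(g.subgraph(part)), labels2_map, ncut_map,
                             nodelist, ncut_threshold, _counter)
                return
    label = _counter[0]  # expensive cut or singleton: emit one family
    _counter[0] += 1
    for n in g.nodes():
        ncut_map[n] = label
        labels2_map[label].append(nodelist[n])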
a52ce826a55cff7ddd3121b53eaecc3527ca7d621e448b9b4700b65cff642629 | @classmethod
def validate(cls, value, allow_tuple=True):
'\n Valid examples:\n\n all\n instances\n (suites, instances)\n '
def validate_single(v):
if isinstance(v, str):
return SortType(v.lower()).value
if isinstance(v, SortType):
return v.value
raise ValueError('Invalid shuffle type value: {}'.format(v))
if (isinstance(value, (tuple, list)) and allow_tuple):
values = [validate_single(v) for v in value]
if ((SortType.ALL.value in values) and (len(values) > 1)):
raise ValueError('Passing extra shuffle types along with `all` is a redundant operation.'.format(values))
return values
return validate_single(value) | Valid examples:
all
instances
(suites, instances) | testplan/testing/ordering.py | validate | ymn1k/testplan | 0 | python | @classmethod
def validate(cls, value, allow_tuple=True):
'\n Valid examples:\n\n all\n instances\n (suites, instances)\n '
def validate_single(v):
if isinstance(v, str):
return SortType(v.lower()).value
if isinstance(v, SortType):
return v.value
raise ValueError('Invalid shuffle type value: {}'.format(v))
if (isinstance(value, (tuple, list)) and allow_tuple):
values = [validate_single(v) for v in value]
if ((SortType.ALL.value in values) and (len(values) > 1)):
raise ValueError('Passing extra shuffle types along with `all` is a redundant operation.'.format(values))
return values
return validate_single(value) | @classmethod
def validate(cls, value, allow_tuple=True):
'\n Valid examples:\n\n all\n instances\n (suites, instances)\n '
def validate_single(v):
if isinstance(v, str):
return SortType(v.lower()).value
if isinstance(v, SortType):
return v.value
raise ValueError('Invalid shuffle type value: {}'.format(v))
if (isinstance(value, (tuple, list)) and allow_tuple):
values = [validate_single(v) for v in value]
if ((SortType.ALL.value in values) and (len(values) > 1)):
raise ValueError('Passing extra shuffle types along with `all` is a redundant operation.'.format(values))
return values
return validate_single(value)<|docstring|>Valid examples:
all
instances
(suites, instances)<|endoftext|> |
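Assuming SortType is an Enum with members such as ALL = 'all', SUITES = 'suites' and INSTANCES = 'instances' (the enum itself is not shown here), the classmethod normalizes input like this:
SortType.validate('All')                    # -> 'all'  (case-insensitive string)
SortType.validate(SortType.INSTANCES)       # -> 'instances'  (enum member)
SortType.validate(('suites', 'instances'))  # -> ['suites', 'instances']
SortType.validate(('all', 'suites'))        # ValueError: extra types with 'all' are redundant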
637fe6b2f158aa4518e23404c94f4307490a6869fd91b5fab1049706a3a64abb | def apply_mask(image, mask):
'apply mask to image'
redImg = np.zeros(image.shape, image.dtype)
redImg[:, :] = (0, 0, 255)
redMask = cv2.bitwise_and(redImg, redImg, mask=mask)
cv2.addWeighted(redMask, 1, image, 1, 0, image)
return image | apply mask to image | gen_mask.py | apply_mask | mathiasaap/SegCaps | 65 | python | def apply_mask(image, mask):
redImg = np.zeros(image.shape, image.dtype)
redImg[:, :] = (0, 0, 255)
redMask = cv2.bitwise_and(redImg, redImg, mask=mask)
cv2.addWeighted(redMask, 1, image, 1, 0, image)
return image | def apply_mask(image, mask):
redImg = np.zeros(image.shape, image.dtype)
redImg[:, :] = (0, 0, 255)
redMask = cv2.bitwise_and(redImg, redImg, mask=mask)
cv2.addWeighted(redMask, 1, image, 1, 0, image)
return image<|docstring|>apply mask to image<|endoftext|> |
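A usage sketch for apply_mask — the file names are placeholders, and mask is assumed to be a single-channel 8-bit array with the same height and width as image:
import cv2

image = cv2.imread('scan.png')                        # 3-channel BGR input
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)   # 0/255 segmentation mask
overlay = apply_mask(image, mask)                     # tints masked pixels red, in place
cv2.imwrite('overlay.png', overlay)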
592613fe5210a8cf610b234ecb303cf0ada7df0b08541f1010c79f0b35153fd3 | def __init__(self, args, net_input_shape):
'\n Create evaluation model and load the pre-trained weights for inference.\n '
self.net_input_shape = net_input_shape
weights_path = join(args.weights_path)
(_, eval_model, _) = create_model(args, net_input_shape, enable_decoder=False)
eval_model.load_weights(weights_path, by_name=True)
self.model = eval_model | Create evaluation model and load the pre-trained weights for inference. | gen_mask.py | __init__ | mathiasaap/SegCaps | 65 | python | def __init__(self, args, net_input_shape):
'\n \n '
self.net_input_shape = net_input_shape
weights_path = join(args.weights_path)
(_, eval_model, _) = create_model(args, net_input_shape, enable_decoder=False)
eval_model.load_weights(weights_path, by_name=True)
self.model = eval_model | def __init__(self, args, net_input_shape):
'\n \n '
self.net_input_shape = net_input_shape
weights_path = join(args.weights_path)
(_, eval_model, _) = create_model(args, net_input_shape, enable_decoder=False)
eval_model.load_weights(weights_path, by_name=True)
self.model = eval_model<|docstring|>Create evaluation model and load the pre-trained weights for inference.<|endoftext|>
c52eef061696adb1b183c2e6e5c0cc1c7c6600c5bd44b0340cf5e8f6d57a1928 | def createUI(pWindowTitle, pApplyCallback):
'\n This is a function that creates the user interface, where users can input the shape and size of petals,\n number of petals, and petal angle to create various pine cone like shapes\n '
windowID = 'PineCone'
if cmds.window(windowID, exists=True):
cmds.deleteUI(windowID)
cmds.window(windowID, title=pWindowTitle, sizeable=True, resizeToFitChildren=True)
cmds.rowColumnLayout(numberOfColumns=3, columnWidth=[(1, 130), (2, 60), (3, 60)], columnOffset=[(1, 'right', 3)])
cmds.text(label='Petal Shape: ')
petalShape = cmds.optionMenu()
cmds.menuItem(label='Cone')
cmds.menuItem(label='Cylinder')
cmds.separator(h=10, style='none')
cmds.text(label='# Petals per Layer: ')
numPetals = cmds.intField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Angle: ')
petalAngle = cmds.intField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Height: ')
petalHeight = cmds.floatField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Radius: ')
petalRadius = cmds.floatField()
cmds.separator(h=10, style='none')
cmds.button(label='Apply', command=functools.partial(pApplyCallback, petalShape, numPetals, petalHeight, petalRadius, petalAngle))
def cancelCallback(*pArgs):
if cmds.window(windowID, exists=True):
cmds.deleteUI(windowID)
cmds.button(label='Cancel', command=cancelCallback)
cmds.showWindow() | This is a function that creates the user interface, where users can input the shape and size of petals,
number of petals, and petal angle to create various pine cone like shapes | Pine Cone.py | createUI | dannygelman1/Pine-Cone-Generator | 1 | python | def createUI(pWindowTitle, pApplyCallback):
'\n This is a function that creates the user interface, where users can input the shape and size of petals,\n number of petals, and petal angle to create various pine cone like shapes\n '
windowID = 'PineCone'
if cmds.window(windowID, exists=True):
cmds.deleteUI(windowID)
cmds.window(windowID, title=pWindowTitle, sizeable=True, resizeToFitChildren=True)
cmds.rowColumnLayout(numberOfColumns=3, columnWidth=[(1, 130), (2, 60), (3, 60)], columnOffset=[(1, 'right', 3)])
cmds.text(label='Petal Shape: ')
petalShape = cmds.optionMenu()
cmds.menuItem(label='Cone')
cmds.menuItem(label='Cylinder')
cmds.separator(h=10, style='none')
cmds.text(label='# Petals per Layer: ')
numPetals = cmds.intField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Angle: ')
petalAngle = cmds.intField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Height: ')
petalHeight = cmds.floatField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Radius: ')
petalRadius = cmds.floatField()
cmds.separator(h=10, style='none')
cmds.button(label='Apply', command=functools.partial(pApplyCallback, petalShape, numPetals, petalHeight, petalRadius, petalAngle))
def cancelCallback(*pArgs):
if cmds.window(windowID, exists=True):
cmds.deleteUI(windowID)
cmds.button(label='Cancel', command=cancelCallback)
cmds.showWindow() | def createUI(pWindowTitle, pApplyCallback):
'\n This is a function that creates the user interface, where users can input the shape and size of petals,\n number of petals, and petal angle to create various pine cone like shapes\n '
windowID = 'PineCone'
if cmds.window(windowID, exists=True):
cmds.deleteUI(windowID)
cmds.window(windowID, title=pWindowTitle, sizeable=True, resizeToFitChildren=True)
cmds.rowColumnLayout(numberOfColumns=3, columnWidth=[(1, 130), (2, 60), (3, 60)], columnOffset=[(1, 'right', 3)])
cmds.text(label='Petal Shape: ')
petalShape = cmds.optionMenu()
cmds.menuItem(label='Cone')
cmds.menuItem(label='Cylinder')
cmds.separator(h=10, style='none')
cmds.text(label='# Petals per Layer: ')
numPetals = cmds.intField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Angle: ')
petalAngle = cmds.intField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Height: ')
petalHeight = cmds.floatField()
cmds.separator(h=10, style='none')
cmds.text(label='Petal Radius: ')
petalRadius = cmds.floatField()
cmds.separator(h=10, style='none')
cmds.button(label='Apply', command=functools.partial(pApplyCallback, petalShape, numPetals, petalHeight, petalRadius, petalAngle))
def cancelCallback(*pArgs):
if cmds.window(windowID, exists=True):
cmds.deleteUI(windowID)
cmds.button(label='Cancel', command=cancelCallback)
cmds.showWindow()<|docstring|>This is a function that creates the user interface, where users can input the shape and size of petals,
number of petals, and petal angle to create various pine cone like shapes<|endoftext|>
9164d8550281f2430a2e3c1a656384935abd9882fe9c791e1d17260855b1f458 | def applyCallback(pPetalShape, pNumPetals, pPetalHeight, pPetalRadius, pPetalAngle, *pArgs):
'\n This function generates pine cone like shapes from user input\n '
numberPetals = cmds.intField(pNumPetals, query=True, value=True)
startH = cmds.floatField(pPetalHeight, query=True, value=True)
startR = cmds.floatField(pPetalRadius, query=True, value=True)
petalAngle = cmds.intField(pPetalAngle, query=True, value=True)
petalShape = cmds.optionMenu(pPetalShape, query=True, value=True)
if (petalShape == 'Cone'):
result = cmds.polyCone(r=startR, h=startH, name='OG#')
else:
result = cmds.polyCylinder(r=startR, h=startH, name='OG#')
cmds.move(0, (startH / 2), 0, result[0])
cmds.move(0, 0, 0, (result[0] + '.scalePivot'), (result[0] + '.rotatePivot'), absolute=True)
cmds.rotate(petalAngle, 0, 0, result[0])
coneGroup = cmds.group(empty=True, name='Group')
for i in range(1, numberPetals):
resInstance = cmds.instance(result[0], name='instance#')
cmds.rotate(petalAngle, 0, ((360 / numberPetals) * i), resInstance)
cmds.parent(resInstance, coneGroup)
cmds.parent(result, coneGroup)
full = cmds.group(empty=True, name='full')
for x in range(1, 5):
dupInstance = cmds.instance(coneGroup, name='dup#')
cmds.move(0, 0, (0.2 * x), dupInstance)
cmds.scale((1 - (0.1 * x)), (1 - (0.1 * x)), (1 - (0.1 * x)), dupInstance)
cmds.parent(dupInstance, full)
cmds.parent(coneGroup, full) | This function generates pine cone like shapes from user input | Pine Cone.py | applyCallback | dannygelman1/Pine-Cone-Generator | 1 | python | def applyCallback(pPetalShape, pNumPetals, pPetalHeight, pPetalRadius, pPetalAngle, *pArgs):
'\n \n '
numberPetals = cmds.intField(pNumPetals, query=True, value=True)
startH = cmds.floatField(pPetalHeight, query=True, value=True)
startR = cmds.floatField(pPetalRadius, query=True, value=True)
petalAngle = cmds.intField(pPetalAngle, query=True, value=True)
petalShape = cmds.optionMenu(pPetalShape, query=True, value=True)
if (petalShape == 'Cone'):
result = cmds.polyCone(r=startR, h=startH, name='OG#')
else:
result = cmds.polyCylinder(r=startR, h=startH, name='OG#')
cmds.move(0, (startH / 2), 0, result[0])
cmds.move(0, 0, 0, (result[0] + '.scalePivot'), (result[0] + '.rotatePivot'), absolute=True)
cmds.rotate(petalAngle, 0, 0, result[0])
coneGroup = cmds.group(empty=True, name='Group')
for i in range(1, numberPetals):
resInstance = cmds.instance(result[0], name='instance#')
cmds.rotate(petalAngle, 0, ((360 / numberPetals) * i), resInstance)
cmds.parent(resInstance, coneGroup)
cmds.parent(result, coneGroup)
full = cmds.group(empty=True, name='full')
for x in range(1, 5):
dupInstance = cmds.instance(coneGroup, name='dup#')
cmds.move(0, 0, (0.2 * x), dupInstance)
cmds.scale((1 - (0.1 * x)), (1 - (0.1 * x)), (1 - (0.1 * x)), dupInstance)
cmds.parent(dupInstance, full)
cmds.parent(coneGroup, full) | def applyCallback(pPetalShape, pNumPetals, pPetalHeight, pPetalRadius, pPetalAngle, *pArgs):
'\n \n '
numberPetals = cmds.intField(pNumPetals, query=True, value=True)
startH = cmds.floatField(pPetalHeight, query=True, value=True)
startR = cmds.floatField(pPetalRadius, query=True, value=True)
petalAngle = cmds.intField(pPetalAngle, query=True, value=True)
petalShape = cmds.optionMenu(pPetalShape, query=True, value=True)
if (petalShape == 'Cone'):
result = cmds.polyCone(r=startR, h=startH, name='OG#')
else:
result = cmds.polyCylinder(r=startR, h=startH, name='OG#')
cmds.move(0, (startH / 2), 0, result[0])
cmds.move(0, 0, 0, (result[0] + '.scalePivot'), (result[0] + '.rotatePivot'), absolute=True)
cmds.rotate(petalAngle, 0, 0, result[0])
coneGroup = cmds.group(empty=True, name='Group')
for i in range(1, numberPetals):
resInstance = cmds.instance(result[0], name='instance#')
cmds.rotate(petalAngle, 0, ((360 / numberPetals) * i), resInstance)
cmds.parent(resInstance, coneGroup)
cmds.parent(result, coneGroup)
full = cmds.group(empty=True, name='full')
for x in range(1, 5):
dupInstance = cmds.instance(coneGroup, name='dup#')
cmds.move(0, 0, (0.2 * x), dupInstance)
cmds.scale((1 - (0.1 * x)), (1 - (0.1 * x)), (1 - (0.1 * x)), dupInstance)
cmds.parent(dupInstance, full)
cmds.parent(coneGroup, full)<|docstring|>This function generates pine cone like shapes from user input<|endoftext|> |
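Wiring the two functions together inside Maya's script editor would look roughly like this (a sketch; it requires a live Maya session for maya.cmds, and the window title is arbitrary):
createUI('Pine Cone Generator', applyCallback)
# In the dialog, pick Cone or Cylinder, set e.g. 8 petals per layer,
# a 60 degree petal angle, height 1.0 and radius 0.3, then press Apply:
# applyCallback builds one ring of rotated petals and stacks four
# progressively smaller, shifted copies of it into the 'full' group.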
af6227c45a366d109e963652a6f80e3ec5dcdc8b93491d5dce53900fe2cddcab | def nondimensional():
'\n Factory associated with NondimElasticQuasistatic.\n '
return NondimElasticQuasistatic() | Factory associated with NondimElasticQuasistatic. | spatialdata/units/NondimElasticQuasistatic.py | nondimensional | rwalkerlewis/spatialdata | 6 | python | def nondimensional():
'\n \n '
return NondimElasticQuasistatic() | def nondimensional():
'\n \n '
return NondimElasticQuasistatic()<|docstring|>Factory associated with NondimElasticQuasistatic.<|endoftext|> |
2f8ebecb8fa225b4c4aeb137fc092f9dec8d71c8d19d0f1a8eca64b0cd737738 | def __init__(self, name='nondimelasticquasistatic'):
'\n Constructor.\n '
Nondimensional.__init__(self, name) | Constructor. | spatialdata/units/NondimElasticQuasistatic.py | __init__ | rwalkerlewis/spatialdata | 6 | python | def __init__(self, name='nondimelasticquasistatic'):
'\n \n '
Nondimensional.__init__(self, name) | def __init__(self, name='nondimelasticquasistatic'):
'\n \n '
Nondimensional.__init__(self, name)<|docstring|>Constructor.<|endoftext|> |
e66799a58642b5e54956c4fb342920078b9134317970f9c0dfd7e3391a444a09 | def _configure(self):
'\n Setup members using inventory.\n '
Nondimensional._configure(self)
self.setLengthScale(self.inventory.lengthScale)
self.setPressureScale(self.inventory.shearModulus)
self.setTimeScale(self.inventory.relaxationTime)
self.computeDensityScale() | Setup members using inventory. | spatialdata/units/NondimElasticQuasistatic.py | _configure | rwalkerlewis/spatialdata | 6 | python | def _configure(self):
'\n \n '
Nondimensional._configure(self)
self.setLengthScale(self.inventory.lengthScale)
self.setPressureScale(self.inventory.shearModulus)
self.setTimeScale(self.inventory.relaxationTime)
self.computeDensityScale() | def _configure(self):
'\n \n '
Nondimensional._configure(self)
self.setLengthScale(self.inventory.lengthScale)
self.setPressureScale(self.inventory.shearModulus)
self.setTimeScale(self.inventory.relaxationTime)
self.computeDensityScale()<|docstring|>Setup members using inventory.<|endoftext|> |
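computeDensityScale() presumably derives the remaining scale from the three that were just set; dimensionally, density = pressure × time² / length². In plain numbers (illustrative SI values, not the Pyre inventory defaults):
length = 1.0e+5                          # lengthScale: 100 km in m
pressure = 3.0e+10                       # shearModulus -> pressureScale: 30 GPa
time = 100.0 * 365.25 * 86400.0          # relaxationTime: 100 years in s
density_scale = pressure * time ** 2 / length ** 2   # kg/m^3, the derived scale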
525dea784f8279a628629bf0f02b84fb50b8f7b7a5ce9bb246cdc19b3f69df64 | def write_database_integrity_violation(results, headers, reason_message, action_message=None):
'Emit an integrity violation warning and write the violating records to a log file in the current directory\n\n :param results: a list of tuples representing the violating records\n :param headers: a tuple of strings that will be used as a header for the log file. Should have the same length\n as each tuple in the results list.\n :param reason_message: a human readable message detailing the reason of the integrity violation\n :param action_message: an optional human readable message detailing a performed action, if any\n '
from datetime import datetime
from tempfile import NamedTemporaryFile
from tabulate import tabulate
from aiida.cmdline.utils import echo
from aiida.manage import configuration
if configuration.PROFILE.is_test_profile:
return
if (action_message is None):
action_message = 'nothing'
with NamedTemporaryFile(prefix='migration-', suffix='.log', dir='.', delete=False, mode='w+') as handle:
echo.echo('')
echo.echo_warning('\n{}\nFound one or multiple records that violate the integrity of the database\nViolation reason: {}\nPerformed action: {}\nViolators written to: {}\n{}\n'.format(WARNING_BORDER, reason_message, action_message, handle.name, WARNING_BORDER))
handle.write(f'''# {datetime.utcnow().isoformat()}
''')
handle.write(f'''# Violation reason: {reason_message}
''')
handle.write(f'''# Performed action: {action_message}
''')
handle.write('\n')
handle.write(tabulate(results, headers)) | Emit an integrity violation warning and write the violating records to a log file in the current directory
:param results: a list of tuples representing the violating records
:param headers: a tuple of strings that will be used as a header for the log file. Should have the same length
as each tuple in the results list.
:param reason_message: a human readable message detailing the reason of the integrity violation
:param action_message: an optional human readable message detailing a performed action, if any | aiida/manage/database/integrity/utils.py | write_database_integrity_violation | azadoks/aiida-core | 180 | python | def write_database_integrity_violation(results, headers, reason_message, action_message=None):
'Emit an integrity violation warning and write the violating records to a log file in the current directory\n\n :param results: a list of tuples representing the violating records\n :param headers: a tuple of strings that will be used as a header for the log file. Should have the same length\n as each tuple in the results list.\n :param reason_message: a human readable message detailing the reason of the integrity violation\n :param action_message: an optional human readable message detailing a performed action, if any\n '
from datetime import datetime
from tempfile import NamedTemporaryFile
from tabulate import tabulate
from aiida.cmdline.utils import echo
from aiida.manage import configuration
if configuration.PROFILE.is_test_profile:
return
if (action_message is None):
action_message = 'nothing'
with NamedTemporaryFile(prefix='migration-', suffix='.log', dir='.', delete=False, mode='w+') as handle:
echo.echo('')
echo.echo_warning('\n{}\nFound one or multiple records that violate the integrity of the database\nViolation reason: {}\nPerformed action: {}\nViolators written to: {}\n{}\n'.format(WARNING_BORDER, reason_message, action_message, handle.name, WARNING_BORDER))
handle.write(f'''# {datetime.utcnow().isoformat()}
''')
handle.write(f'''# Violation reason: {reason_message}
''')
handle.write(f'''# Performed action: {action_message}
''')
handle.write('\n')
handle.write(tabulate(results, headers)) | def write_database_integrity_violation(results, headers, reason_message, action_message=None):
'Emit an integrity violation warning and write the violating records to a log file in the current directory\n\n :param results: a list of tuples representing the violating records\n :param headers: a tuple of strings that will be used as a header for the log file. Should have the same length\n as each tuple in the results list.\n :param reason_message: a human readable message detailing the reason of the integrity violation\n :param action_message: an optional human readable message detailing a performed action, if any\n '
from datetime import datetime
from tempfile import NamedTemporaryFile
from tabulate import tabulate
from aiida.cmdline.utils import echo
from aiida.manage import configuration
if configuration.PROFILE.is_test_profile:
return
if (action_message is None):
action_message = 'nothing'
with NamedTemporaryFile(prefix='migration-', suffix='.log', dir='.', delete=False, mode='w+') as handle:
echo.echo('')
echo.echo_warning('\n{}\nFound one or multiple records that violate the integrity of the database\nViolation reason: {}\nPerformed action: {}\nViolators written to: {}\n{}\n'.format(WARNING_BORDER, reason_message, action_message, handle.name, WARNING_BORDER))
handle.write(f'''# {datetime.utcnow().isoformat()}
''')
handle.write(f'''# Violation reason: {reason_message}
''')
handle.write(f'''# Performed action: {action_message}
''')
handle.write('\n')
handle.write(tabulate(results, headers))<|docstring|>Emit an integrity violation warning and write the violating records to a log file in the current directory
:param results: a list of tuples representing the violating records
:param headers: a tuple of strings that will be used as a header for the log file. Should have the same length
as each tuple in the results list.
:param reason_message: a human readable message detailing the reason of the integrity violation
:param action_message: an optional human readable message detailing a performed action, if any<|endoftext|> |
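A hypothetical invocation during a schema migration — the violating records below are made up for illustration:
violators = [(101, 'CalcJobNode', None), (102, 'CalcJobNode', None)]
write_database_integrity_violation(
    results=violators,
    headers=('pk', 'node_type', 'process_type'),
    reason_message='node is missing a process type',
    action_message='process_type set to a fallback value',
)
# Emits a bordered echo_warning and leaves a migration-*.log file in the
# working directory containing the tabulated violators.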
65bc734b2a4f7cde654d79059594170d8b90c141417f3205f409ea9c5c89b499 | def test_next_first(self):
' Delete the next patch with only unapplied patches '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty()) | Delete the next patch with only unapplied patches | tests/test_delete.py | test_next_first | jayvdb/python-quilt | 4 | python | def test_next_first(self):
' '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty()) | def test_next_first(self):
' '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty())<|docstring|>Delete the next patch with only unapplied patches<|endoftext|> |
1cea31bcdc818d4eb4fb478d077e8a3dd38d3fa33990c2db7a236c5d5ba863a3 | def test_next_after(self):
' Delete the successor to the topmost patch '
with tmp_series() as [dir, series]:
series.add_patch(Patch('topmost'))
series.add_patch(Patch('unapplied'))
series.save()
db = Db(dir)
db.add_patch(Patch('topmost'))
db.save()
cmd = Delete(dir, db.dirname, series.dirname)
cmd.delete_next()
series.read()
[patch] = series.patches()
self.assertEqual(patch, Patch('topmost')) | Delete the successor to the topmost patch | tests/test_delete.py | test_next_after | jayvdb/python-quilt | 4 | python | def test_next_after(self):
' '
with tmp_series() as [dir, series]:
series.add_patch(Patch('topmost'))
series.add_patch(Patch('unapplied'))
series.save()
db = Db(dir)
db.add_patch(Patch('topmost'))
db.save()
cmd = Delete(dir, db.dirname, series.dirname)
cmd.delete_next()
series.read()
[patch] = series.patches()
self.assertEqual(patch, Patch('topmost')) | def test_next_after(self):
' '
with tmp_series() as [dir, series]:
series.add_patch(Patch('topmost'))
series.add_patch(Patch('unapplied'))
series.save()
db = Db(dir)
db.add_patch(Patch('topmost'))
db.save()
cmd = Delete(dir, db.dirname, series.dirname)
cmd.delete_next()
series.read()
[patch] = series.patches()
self.assertEqual(patch, Patch('topmost'))<|docstring|>Delete the successor to the topmost patch<|endoftext|> |
a4c4c6d2579ac6830bcd12bad189e576ccf4902f15370f36175f2f034ef582aa | def test_no_backup_next(self):
' Remove the next patch without leaving a backup '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
patch = os.path.join(patches.dirname, 'patch')
make_file(b'', patch)
run_cli(DeleteCommand, dict(next=True, patch=None, remove=True, backup=False), patches.dirname, applied=dir)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists((patch + '~'))) | Remove the next patch without leaving a backup | tests/test_delete.py | test_no_backup_next | jayvdb/python-quilt | 4 | python | def test_no_backup_next(self):
' '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
patch = os.path.join(patches.dirname, 'patch')
make_file(b'', patch)
run_cli(DeleteCommand, dict(next=True, patch=None, remove=True, backup=False), patches.dirname, applied=dir)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists((patch + '~'))) | def test_no_backup_next(self):
' '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
patch = os.path.join(patches.dirname, 'patch')
make_file(b'', patch)
run_cli(DeleteCommand, dict(next=True, patch=None, remove=True, backup=False), patches.dirname, applied=dir)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists((patch + '~')))<|docstring|>Remove the next patch without leaving a backup<|endoftext|> |
6026d4dc99da5b45e69b6fc7beb62804736fbe30264ffc852ff424cab3ae2223 | def test_no_backup_named(self):
' Remove a specified patch without leaving a backup '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
patch = os.path.join(patches.dirname, 'patch')
make_file(b'', patch)
run_cli(DeleteCommand, dict(patch='patch', next=False, remove=True, backup=False), patches.dirname, applied=dir)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists((patch + '~'))) | Remove a specified patch without leaving a backup | tests/test_delete.py | test_no_backup_named | jayvdb/python-quilt | 4 | python | def test_no_backup_named(self):
' '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
patch = os.path.join(patches.dirname, 'patch')
make_file(b'', patch)
run_cli(DeleteCommand, dict(patch='patch', next=False, remove=True, backup=False), patches.dirname, applied=dir)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists((patch + '~'))) | def test_no_backup_named(self):
' '
with tmp_series() as [dir, patches]:
patches.add_patch(Patch('patch'))
patches.save()
patch = os.path.join(patches.dirname, 'patch')
make_file(b'', patch)
run_cli(DeleteCommand, dict(patch='patch', next=False, remove=True, backup=False), patches.dirname, applied=dir)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists((patch + '~')))<|docstring|>Remove a specified patch without leaving a backup<|endoftext|> |
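These tests all lean on a tmp_series helper that is not shown. A plausible reading — a reconstruction under assumptions about python-quilt's Series class and import path, not the project's actual fixture — is a temporary directory paired with an empty patch series rooted inside it:
import shutil
import tempfile
from contextlib import contextmanager

from quilt.db import Series  # assumed import path

@contextmanager
def tmp_series():
    dir = tempfile.mkdtemp()
    try:
        yield [dir, Series(dir + '/patches')]  # tests add patches, then save()
    finally:
        shutil.rmtree(dir)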
6b08d6af9354705e967472d1581f026fe81898524527344d536685b9607245dd | async def create(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account'
return (await self.req.post(endpoint=endpoint, json=kwargs)) | :param kwargs:
:return: | paystackapi/dedicated_virtual_account.py | create | Ichinga-Samuel/async-paystackapi | 0 | python | async def create(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account'
return (await self.req.post(endpoint=endpoint, json=kwargs)) | async def create(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account'
return (await self.req.post(endpoint=endpoint, json=kwargs))<|docstring|>:param kwargs:
:return:<|endoftext|> |
cdbd29c43f721b6da1381cfeb7b25e6a60f6f9d215eb45ad5641bbeda4dd3cc3 | async def list(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account'
return ((await self.req.get(endpoint=endpoint, params=kwargs)) if kwargs else (await self.req.get(endpoint=endpoint))) | :param kwargs:
:return: | paystackapi/dedicated_virtual_account.py | list | Ichinga-Samuel/async-paystackapi | 0 | python | async def list(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account'
return ((await self.req.get(endpoint=endpoint, params=kwargs)) if kwargs else (await self.req.get(endpoint=endpoint))) | async def list(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account'
return ((await self.req.get(endpoint=endpoint, params=kwargs)) if kwargs else (await self.req.get(endpoint=endpoint)))<|docstring|>:param kwargs:
:return:<|endoftext|> |
36d0c8c9a9d3af4d69611f9501286e28dac90484a600345e52affe6cfd6f7eaa | async def fetch(self, *, _id):
'\n\n :param _id:\n :return:\n '
endpoint = f'dedicated_account/{_id}'
return (await self.req.get(endpoint=endpoint)) | :param _id:
:return: | paystackapi/dedicated_virtual_account.py | fetch | Ichinga-Samuel/async-paystackapi | 0 | python | async def fetch(self, *, _id):
'\n\n :param _id:\n :return:\n '
endpoint = f'dedicated_account/{_id}'
return (await self.req.get(endpoint=endpoint)) | async def fetch(self, *, _id):
'\n\n :param _id:\n :return:\n '
endpoint = f'dedicated_account/{_id}'
return (await self.req.get(endpoint=endpoint))<|docstring|>:param _id:
:return:<|endoftext|> |
00e36f1638c0ced2ea71634319508641ef9796467dfc73e121d7869220b2dc0a | async def deactivate(self, *, _id):
'\n\n :param _id:\n :return:\n '
endpoint = f'dedicated_account/{_id}'
return (await self.req.delete(endpoint=endpoint)) | :param _id:
:return: | paystackapi/dedicated_virtual_account.py | deactivate | Ichinga-Samuel/async-paystackapi | 0 | python | async def deactivate(self, *, _id):
'\n\n :param _id:\n :return:\n '
endpoint = f'dedicated_account/{_id}'
return (await self.req.delete(endpoint=endpoint)) | async def deactivate(self, *, _id):
'\n\n :param _id:\n :return:\n '
endpoint = f'dedicated_account/{_id}'
return (await self.req.delete(endpoint=endpoint))<|docstring|>:param _id:
:return:<|endoftext|> |
f611d032afa79ea5c6b0f336b368aea706670891c8db60e07bb589bcbc15c776 | async def split(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account/split'
return (await self.req.post(endpoint=endpoint, json=kwargs)) | :param kwargs:
:return: | paystackapi/dedicated_virtual_account.py | split | Ichinga-Samuel/async-paystackapi | 0 | python | async def split(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account/split'
return (await self.req.post(endpoint=endpoint, json=kwargs)) | async def split(self, **kwargs):
'\n\n :param kwargs:\n :return:\n '
endpoint = 'dedicated_account/split'
return (await self.req.post(endpoint=endpoint, json=kwargs))<|docstring|>:param kwargs:
:return:<|endoftext|> |
7124f9df24250bc55a0ee60cb9d2ffc0b33357acb8531cbaeec1f361739b456c | async def remove_split(self, *, account_number):
'\n\n :param account_number:\n :return:\n '
endpoint = 'dedicated_account/split'
return (await self.req.delete(endpoint=endpoint, json={'account_number': account_number})) | :param account_number:
:return: | paystackapi/dedicated_virtual_account.py | remove_split | Ichinga-Samuel/async-paystackapi | 0 | python | async def remove_split(self, *, account_number):
'\n\n :param account_number:\n :return:\n '
endpoint = 'dedicated_account/split'
return (await self.req.delete(endpoint=endpoint, json={'account_number': account_number})) | async def remove_split(self, *, account_number):
'\n\n :param account_number:\n :return:\n '
endpoint = 'dedicated_account/split'
return (await self.req.delete(endpoint=endpoint, json={'account_number': account_number}))<|docstring|>:param account_number:
:return:<|endoftext|> |
948aecf7c1aed9ca251b02a111fa7b45ac19d29ef016d765d72ba6617688d61e | async def providers(self):
'\n\n :return:\n '
endpoint = 'dedicated_account/available_providers'
return (await self.req.get(endpoint=endpoint)) | :return: | paystackapi/dedicated_virtual_account.py | providers | Ichinga-Samuel/async-paystackapi | 0 | python | async def providers(self):
'\n\n \n '
endpoint = 'dedicated_account/available_providers'
return (await self.req.get(endpoint=endpoint)) | async def providers(self):
'\n\n \n '
endpoint = 'dedicated_account/available_providers'
return (await self.req.get(endpoint=endpoint))<|docstring|>:return:<|endoftext|> |
ba7353bb99893b61c0936fb1abb45b842b1c44e1f94c1b5e1ba73f17f6231ef5 | def parse_cfg_file(cfg_file: str):
'Read configuration file and parse it into list of blocks'
lines = read_uncommented_lines(cfg_file)
blocks = parse_cfg_list(lines)
return blocks | Read configuration file and parse it into list of blocks | yolov3.py | parse_cfg_file | TalHadad/yolov3_tf2 | 0 | python | def parse_cfg_file(cfg_file: str):
lines = read_uncommented_lines(cfg_file)
blocks = parse_cfg_list(lines)
return blocks | def parse_cfg_file(cfg_file: str):
lines = read_uncommented_lines(cfg_file)
blocks = parse_cfg_list(lines)
return blocks<|docstring|>Read configuration file and parse it into list of blocks<|endoftext|> |
573849d54724dca4da6dddd6c4e653f065af93b3d2056f3387a2391728e9d8c7 | def read_uncommented_lines(cfg_file: str) -> List:
'Read file lines to list and remove unnecessary characters like ‘\n’ and ‘#’.'
with open(cfg_file, 'r') as file:
lines = [line.rstrip('\n') for line in file if ((line != '\n') and (line[0] != '#'))]
return lines | Read file lines to list and remove unnecessary characters like ‘
’ and ‘#’. | yolov3.py | read_uncommented_lines | TalHadad/yolov3_tf2 | 0 | python | def read_uncommented_lines(cfg_file: str) -> List:
'Read file lines to list and remove unnecessary characters like ‘\n’ and ‘#’.'
with open(cfg_file, 'r') as file:
lines = [line.rstrip('\n') for line in file if ((line != '\n') and (line[0] != '#'))]
return lines | def read_uncommented_lines(cfg_file: str) -> List:
'Read file lines to list and remove unnecessary characters like ‘\n’ and ‘#’.'
with open(cfg_file, 'r') as file:
lines = [line.rstrip('\n') for line in file if ((line != '\n') and (line[0] != '#'))]
return lines<|docstring|>Read file lines to list and remove unnecessary characters like ‘
’ and ‘#’.<|endoftext|> |
41646817b764f06b075b7f094a52ea27f9c48b5d18db950d0f892f6d90ab6240 | def parse_cfg_list(cfg_list: List) -> List:
'Read attributes list and store them as key, value pairs in list blocks'
holder = {}
blocks = []
for cfg_item in cfg_list:
if (cfg_item[0] == '['):
cfg_item = ('type=' + cfg_item[1:(- 1)].rstrip())
if (len(holder) != 0):
blocks.append(holder)
holder = {}
(key, value) = cfg_item.split('=')
holder[key.rstrip()] = value.lstrip()
blocks.append(holder)
return blocks | Read attributes list and store them as key, value pairs in list blocks | yolov3.py | parse_cfg_list | TalHadad/yolov3_tf2 | 0 | python | def parse_cfg_list(cfg_list: List) -> List:
holder = {}
blocks = []
for cfg_item in cfg_list:
if (cfg_item[0] == '['):
cfg_item = ('type=' + cfg_item[1:(- 1)].rstrip())
if (len(holder) != 0):
blocks.append(holder)
holder = {}
(key, value) = cfg_item.split('=')
holder[key.rstrip()] = value.lstrip()
blocks.append(holder)
return blocks | def parse_cfg_list(cfg_list: List) -> List:
holder = {}
blocks = []
for cfg_item in cfg_list:
if (cfg_item[0] == '['):
cfg_item = ('type=' + cfg_item[1:(- 1)].rstrip())
if (len(holder) != 0):
blocks.append(holder)
holder = {}
(key, value) = cfg_item.split('=')
holder[key.rstrip()] = value.lstrip()
blocks.append(holder)
return blocks<|docstring|>Read attributes list and store them as key, value pairs in list blocks<|endoftext|> |
4106f396554b819497ac9c578f59a20c2e3d482c990e339e41ac43e0bde04ecf | def scan(self) -> bool:
'Find a device advertising the environmental sensor service.'
found = None
def callback(_found):
nonlocal found
found = _found
self._addr_type = None
self._addr = None
self._scan_callback = callback
self._ble.gap_scan(5000, 100000, 10000, True)
while (found is None):
time.sleep_ms(10)
self._scan_callback = None
return found | Find a device advertising the environmental sensor service. | tepra.py | scan | nnabeyang/tepra-lite-esp32 | 33 | python | def scan(self) -> bool:
found = None
def callback(_found):
nonlocal found
found = _found
self._addr_type = None
self._addr = None
self._scan_callback = callback
self._ble.gap_scan(5000, 100000, 10000, True)
while (found is None):
time.sleep_ms(10)
self._scan_callback = None
return found | def scan(self) -> bool:
found = None
def callback(_found):
nonlocal found
found = _found
self._addr_type = None
self._addr = None
self._scan_callback = callback
self._ble.gap_scan(5000, 100000, 10000, True)
while (found is None):
time.sleep_ms(10)
self._scan_callback = None
return found<|docstring|>Find a device advertising the environmental sensor service.<|endoftext|> |
44dcf4fd7b6bb86092d0985576641a2c8c55c411f101f7c793f202edddf63343 | def connect(self):
'Connect to the specified device (otherwise use cached address from a scan).'
if ((self._addr_type is None) or (self._addr is None)):
return False
self._ble.gap_connect(self._addr_type, self._addr)
while (self._conn_handle is None):
time.sleep_ms(10)
return True | Connect to the specified device (otherwise use cached address from a scan). | tepra.py | connect | nnabeyang/tepra-lite-esp32 | 33 | python | def connect(self):
if ((self._addr_type is None) or (self._addr is None)):
return False
self._ble.gap_connect(self._addr_type, self._addr)
while (self._conn_handle is None):
time.sleep_ms(10)
return True | def connect(self):
if ((self._addr_type is None) or (self._addr is None)):
return False
self._ble.gap_connect(self._addr_type, self._addr)
while (self._conn_handle is None):
time.sleep_ms(10)
return True<|docstring|>Connect to the specified device (otherwise use cached address from a scan).<|endoftext|> |
11d6f469b1bd1d1f49573e318e9868d0d8d07aa7b1aeb6495f7dd480a3153ee5 | def disconnect(self):
'Disconnect from current device.'
if (not self._conn_handle):
return
self._ble.gap_disconnect(self._conn_handle)
self._reset() | Disconnect from current device. | tepra.py | disconnect | nnabeyang/tepra-lite-esp32 | 33 | python | def disconnect(self):
if (not self._conn_handle):
return
self._ble.gap_disconnect(self._conn_handle)
self._reset() | def disconnect(self):
if (not self._conn_handle):
return
self._ble.gap_disconnect(self._conn_handle)
self._reset()<|docstring|>Disconnect from current device.<|endoftext|> |
f130263b1685090654939737df2e87d139793d9f268c61d157f9a7a68eb4dff1 | def write(self, c: Characteristic, data: bytes):
'Send data without response.'
if (not c.prop_write_without_response()):
return
if (self._conn_handle is None):
return
self._log('Writing without response: {}', hexstr(data))
self._ble.gattc_write(self._conn_handle, c.value_handle, data, 0)
return | Send data without response. | tepra.py | write | nnabeyang/tepra-lite-esp32 | 33 | python | def write(self, c: Characteristic, data: bytes):
if (not c.prop_write_without_response()):
return
if (self._conn_handle is None):
return
self._log('Writing without response: {}', hexstr(data))
self._ble.gattc_write(self._conn_handle, c.value_handle, data, 0)
return | def write(self, c: Characteristic, data: bytes):
if (not c.prop_write_without_response()):
return
if (self._conn_handle is None):
return
self._log('Writing without response: {}', hexstr(data))
self._ble.gattc_write(self._conn_handle, c.value_handle, data, 0)
return<|docstring|>Send data without response.<|endoftext|> |
56c952c25ad486aee48b8aa6482b2b53d7cb0d76d3ed4dece871fc916f932a4a | def write_request(self, c: Characteristic, data: bytes, callback):
'Send data with response.'
done = False
if (not c.prop_write()):
return
if (self._conn_handle is None):
return
def callback_done(handle, status):
nonlocal done
done = True
callback(handle, status)
self._write_done_callback = callback_done
self._log('Writing with response')
self._ble.gattc_write(self._conn_handle, c.value_handle, data, 1)
while (not done):
time.sleep_ms(10)
self._write_done_callback = None
return | Send data with response. | tepra.py | write_request | nnabeyang/tepra-lite-esp32 | 33 | python | def write_request(self, c: Characteristic, data: bytes, callback):
done = False
if (not c.prop_write()):
return
if (self._conn_handle is None):
return
def callback_done(handle, status):
nonlocal done
done = True
callback(handle, status)
self._write_done_callback = callback_done
self._log('Writing with response')
self._ble.gattc_write(self._conn_handle, c.value_handle, data, 1)
while (not done):
time.sleep_ms(10)
self._write_done_callback = None
return | def write_request(self, c: Characteristic, data: bytes, callback):
done = False
if (not c.prop_write()):
return
if (self._conn_handle is None):
return
def callback_done(handle, status):
nonlocal done
done = True
callback(handle, status)
self._write_done_callback = callback_done
self._log('Writing with response')
self._ble.gattc_write(self._conn_handle, c.value_handle, data, 1)
while (not done):
time.sleep_ms(10)
self._write_done_callback = None
return<|docstring|>Send data with response.<|endoftext|> |
5cc1371512180af45aa28756bd19827775ca8a0778ed2cccb0c31995946e2190 | def write_cccd(self, c: Characteristic, indication=False, notification=False):
'Write the Client Characteristic Configuration Descriptor of a characteristic.'
done = False
if ((not c.prop_indicate()) and (not c.prop_notify())):
return
if (self._conn_handle is None):
return
def callback_done(*_):
nonlocal done
done = True
self._write_done_callback = callback_done
value = ((2 if indication else 0) + (1 if notification else 0))
self._ble.gattc_write(self._conn_handle, (c.value_handle + 1), bytes([value]), 1)
while (not done):
time.sleep_ms(10)
self._write_done_callback = None
return | Write the Client Characteristic Configuration Descriptor of a characteristic. | tepra.py | write_cccd | nnabeyang/tepra-lite-esp32 | 33 | python | def write_cccd(self, c: Characteristic, indication=False, notification=False):
done = False
if ((not c.prop_indicate()) and (not c.prop_notify())):
return
if (self._conn_handle is None):
return
def callback_done(*_):
nonlocal done
done = True
self._write_done_callback = callback_done
value = ((2 if indication else 0) + (1 if notification else 0))
self._ble.gattc_write(self._conn_handle, (c.value_handle + 1), bytes([value]), 1)
while (not done):
time.sleep_ms(10)
self._write_done_callback = None
return | def write_cccd(self, c: Characteristic, indication=False, notification=False):
done = False
if ((not c.prop_indicate()) and (not c.prop_notify())):
return
if (self._conn_handle is None):
return
def callback_done(*_):
nonlocal done
done = True
self._write_done_callback = callback_done
value = ((2 if indication else 0) + (1 if notification else 0))
self._ble.gattc_write(self._conn_handle, (c.value_handle + 1), bytes([value]), 1)
while (not done):
time.sleep_ms(10)
self._write_done_callback = None
return<|docstring|>Write the Client Characteristic Configuration Descriptor of a characteristic.<|endoftext|> |
885dd4f837e17eabeb55d0e0ee243f5f5e2b91563fe14704b6a0424b9f438311 | def write_wait_notification(self, tx: Characteristic, tx_data: bytes, rx: Characteristic) -> Optional[bytes]:
'Write without response and wait for a notification'
rx_data = None
if ((not tx.prop_write_without_response()) or (not rx.prop_notify())):
return rx_data
if (self._conn_handle is None):
return rx_data
def callback(handle, d):
nonlocal rx_data
if (handle == rx.value_handle):
rx_data = d
self._notify_callback = callback
self.write(tx, tx_data)
while (rx_data is None):
time.sleep_ms(10)
self._notify_callback = None
return rx_data | Write without response and wait for a notification | tepra.py | write_wait_notification | nnabeyang/tepra-lite-esp32 | 33 | python | def write_wait_notification(self, tx: Characteristic, tx_data: bytes, rx: Characteristic) -> Optional[bytes]:
rx_data = None
if ((not tx.prop_write_without_response()) or (not rx.prop_notify())):
return rx_data
if (self._conn_handle is None):
return rx_data
def callback(handle, d):
nonlocal rx_data
if (handle == rx.value_handle):
rx_data = d
self._notify_callback = callback
self.write(tx, tx_data)
while (rx_data is None):
time.sleep_ms(10)
self._notify_callback = None
return rx_data | def write_wait_notification(self, tx: Characteristic, tx_data: bytes, rx: Characteristic) -> Optional[bytes]:
rx_data = None
if ((not tx.prop_write_without_response()) or (not rx.prop_notify())):
return rx_data
if (self._conn_handle is None):
return rx_data
def callback(handle, d):
nonlocal rx_data
if (handle == rx.value_handle):
rx_data = d
self._notify_callback = callback
self.write(tx, tx_data)
while (rx_data is None):
time.sleep_ms(10)
self._notify_callback = None
return rx_data<|docstring|>Write without response and wait for a notification<|endoftext|> |
8ed09a93fa56ac9fd14abf82a55ccb5473d3adb1a9b724abbb5be30d415b0a91 | def wait_notification(self, rx: Characteristic) -> Optional[bytes]:
'Wait for a notification from the characteristic'
rx_data = None
if (not rx.prop_notify()):
return
if (self._conn_handle is None):
return
def callback(handle, d):
nonlocal rx_data
if (handle == rx.value_handle):
rx_data = d
self._notify_callback = callback
while (rx_data is None):
time.sleep_ms(10)
self._notify_callback = None
return rx_data | Wait for a notification from the characteristic | tepra.py | wait_notification | nnabeyang/tepra-lite-esp32 | 33 | python | def wait_notification(self, rx: Characteristic) -> Optional[bytes]:
rx_data = None
if (not rx.prop_notify()):
return
if (self._conn_handle is None):
return
def callback(handle, d):
nonlocal rx_data
if (handle == rx.value_handle):
rx_data = d
self._notify_callback = callback
while (rx_data is None):
time.sleep_ms(10)
self._notify_callback = None
return rx_data | def wait_notification(self, rx: Characteristic) -> Optional[bytes]:
rx_data = None
if (not rx.prop_notify()):
return
if (self._conn_handle is None):
return
def callback(handle, d):
nonlocal rx_data
if (handle == rx.value_handle):
rx_data = d
self._notify_callback = callback
while (rx_data is None):
time.sleep_ms(10)
self._notify_callback = None
return rx_data<|docstring|>Wait for a notification from the characteristic<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.