body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses, 1 value) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k)
---|---|---|---|---|---|---|---|---|---|
7f67c14795cfff5f8d2c9a94388122a86b5fd9e7e934579cc2f8c39dd9bbe950 | def __init__(self, remainingTimes=1, unlimited=True):
'\n Request Time initialization\n :param int remainingTimes: Remaining times for request to expire\n :param boolean unlimited: Unlimited times for request, overwrite\n remainingTimes\n '
self.remainingTimes = remainingTimes
self.unlimited = unlimited | Request Time initialization
:param int remainingTimes: Remaining times for request to expire
:param boolean unlimited: Unlimited times for request, overwrite
remainingTimes | pymockserver/times.py | __init__ | MXWest/py-mockserver | 3 | python | def __init__(self, remainingTimes=1, unlimited=True):
'\n Request Time initialization\n :param int remainingTimes: Remaining times for request to expire\n :param boolean unlimited: Unlimited times for request, overwrite\n remainingTimes\n '
self.remainingTimes = remainingTimes
self.unlimited = unlimited | def __init__(self, remainingTimes=1, unlimited=True):
'\n Request Time initialization\n :param int remainingTimes: Remaining times for request to expire\n :param boolean unlimited: Unlimited times for request, overwrite\n remainingTimes\n '
self.remainingTimes = remainingTimes
self.unlimited = unlimited<|docstring|>Request Time initialization
:param int remainingTimes: Remaining times for request to expire
:param boolean unlimited: Unlimited times for request, overwrite
remainingTimes<|endoftext|> |
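For context, a minimal usage sketch of the constructor in this row; the enclosing class name is not shown here, so `Times` is assumed from the module path `pymockserver/times.py`:

```python
class Times:
    """Assumed class name (inferred from pymockserver/times.py; not shown in this row)."""
    def __init__(self, remainingTimes=1, unlimited=True):
        self.remainingTimes = remainingTimes
        self.unlimited = unlimited

forever = Times()                                         # unlimited=True by default
three_times = Times(remainingTimes=3, unlimited=False)    # expires after three matching requests
print(three_times.remainingTimes, three_times.unlimited)  # 3 False
```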
4bd7d5490f6539f48f6174c40a2554a7053619c8a49b107c62b53f180b5c8ad3 | def execute_wait(etcd_client, tree, db):
'Execute WAIT.\n\n :param etcd_client: Etcd client.\n :type etcd_client: Client\n :param tree: Parsing tree.\n :type tree: SQLTree\n :param db: Current database.\n :type db: str\n '
result_columns = prepare_columns(tree)
result_set = ResultSet(result_columns)
table_columns = get_table_columns(etcd_client, db, tree.table)
for primary_key in list_table(etcd_client, db, tree.table):
table_row = get_row_by_primary_key(etcd_client, db, tree.table, primary_key)
etcd_index = table_row.etcd_index
if tree.where:
expr = tree.where
try:
wait_index = tree.options['after']
except KeyError:
wait_index = (etcd_index + 1)
if eval_expr((table_columns, table_row), expr)[1]:
start = time.time()
while True:
if (time.time() > (start + WAIT_WAIT_TIMEOUT)):
raise InternalError(('Wait timeout %d seconds expired' % WAIT_WAIT_TIMEOUT))
try:
new_row = get_row_by_primary_key(etcd_client, db, tree.table, primary_key, wait=True, wait_index=wait_index)
break
except KeyError:
wait_index += 1
row = Row(eval_row(table_columns, new_row, tree), etcd_index=new_row.etcd_index, modified_index=new_row.modified_index)
result_set.add_row(row)
else:
row = Row(eval_row(table_columns, table_row, tree), etcd_index=etcd_index, modified_index=etcd_index)
result_set.add_row(row)
return result_set | Execute WAIT.
:param etcd_client: Etcd client.
:type etcd_client: Client
:param tree: Parsing tree.
:type tree: SQLTree
:param db: Current database.
:type db: str | etcdb/execute/dml/wait.py | execute_wait | box/etcdb | 12 | python | def execute_wait(etcd_client, tree, db):
'Execute WAIT.\n\n :param etcd_client: Etcd client.\n :type etcd_client: Client\n :param tree: Parsing tree.\n :type tree: SQLTree\n :param db: Current database.\n :type db: str\n '
result_columns = prepare_columns(tree)
result_set = ResultSet(result_columns)
table_columns = get_table_columns(etcd_client, db, tree.table)
for primary_key in list_table(etcd_client, db, tree.table):
table_row = get_row_by_primary_key(etcd_client, db, tree.table, primary_key)
etcd_index = table_row.etcd_index
if tree.where:
expr = tree.where
try:
wait_index = tree.options['after']
except KeyError:
wait_index = (etcd_index + 1)
if eval_expr((table_columns, table_row), expr)[1]:
start = time.time()
while True:
if (time.time() > (start + WAIT_WAIT_TIMEOUT)):
raise InternalError(('Wait timeout %d seconds expired' % WAIT_WAIT_TIMEOUT))
try:
new_row = get_row_by_primary_key(etcd_client, db, tree.table, primary_key, wait=True, wait_index=wait_index)
break
except KeyError:
wait_index += 1
row = Row(eval_row(table_columns, new_row, tree), etcd_index=new_row.etcd_index, modified_index=new_row.modified_index)
result_set.add_row(row)
else:
row = Row(eval_row(table_columns, table_row, tree), etcd_index=etcd_index, modified_index=etcd_index)
result_set.add_row(row)
return result_set | def execute_wait(etcd_client, tree, db):
'Execute WAIT.\n\n :param etcd_client: Etcd client.\n :type etcd_client: Client\n :param tree: Parsing tree.\n :type tree: SQLTree\n :param db: Current database.\n :type db: str\n '
result_columns = prepare_columns(tree)
result_set = ResultSet(result_columns)
table_columns = get_table_columns(etcd_client, db, tree.table)
for primary_key in list_table(etcd_client, db, tree.table):
table_row = get_row_by_primary_key(etcd_client, db, tree.table, primary_key)
etcd_index = table_row.etcd_index
if tree.where:
expr = tree.where
try:
wait_index = tree.options['after']
except KeyError:
wait_index = (etcd_index + 1)
if eval_expr((table_columns, table_row), expr)[1]:
start = time.time()
while True:
if (time.time() > (start + WAIT_WAIT_TIMEOUT)):
raise InternalError(('Wait timeout %d seconds expired' % WAIT_WAIT_TIMEOUT))
try:
new_row = get_row_by_primary_key(etcd_client, db, tree.table, primary_key, wait=True, wait_index=wait_index)
break
except KeyError:
wait_index += 1
row = Row(eval_row(table_columns, new_row, tree), etcd_index=new_row.etcd_index, modified_index=new_row.modified_index)
result_set.add_row(row)
else:
row = Row(eval_row(table_columns, table_row, tree), etcd_index=etcd_index, modified_index=etcd_index)
result_set.add_row(row)
return result_set<|docstring|>Execute WAIT.
:param etcd_client: Etcd client.
:type etcd_client: Client
:param tree: Parsing tree.
:type tree: SQLTree
:param db: Current database.
:type db: str<|endoftext|> |
173acc0ddbe2ca2b17eaf8cfec385e4d398a911a5a69602aa8f7def7b19d2c33 | def save_important_parameters(optimization_results, result_index, result_filename, comp_dict, external_components=None):
"Saves the most important parameters from the optimization results in a csv file, and\n automatically generates pie plots containing the results of financial annuity shares,\n emission shares and electricity usage shares between components in the energy system.\n\n :param optimization_results: The file containing all information about the optimization results\n :type optimization_results: ?\n :param result_index: The index number that relates to the specific optimization result\n :type result_index: int\n :param result_filename: The result filename e.g. 'my_optimization_results.pickle'\n :type result_filename: pickle\n :param comp_dict: The dictionary containing names of all components\n :type comp_dict: dict\n "
if result_filename.endswith('.pickle'):
result_filename = result_filename[:(- 7)]
with open(str((result_filename + '_important_params')), 'w', newline='') as file:
writer = csv.writer(file)
headers = ['Component', 'Parameter', 'Value']
writer.writerow(headers)
component_names = []
component_annuities = []
component_emissions = []
component_elec_use = []
component_elec_use_names = []
sum_flows = []
for component in optimization_results[result_index].smooth_result:
name = component.name
if hasattr(component, 'power_max'):
entry = [name, 'maximum power', component.power_max]
writer.writerow(entry)
elif (hasattr(component, 'nominal_value') and hasattr(component, 'reference_value')):
power_max = (component.nominal_value * component.reference_value)
entry = [name, 'maximum power', power_max]
writer.writerow(entry)
elif hasattr(component, 'storage_capacity'):
entry = [name, 'storage capacity', component.storage_capacity]
writer.writerow(entry)
if (component.flows.get(tuple('from_grid, bel')) is not None):
total_from_grid = sum(component.flows[tuple('from_grid, bel')])
entry = [name, 'annual grid supply', total_from_grid]
writer.writerow(entry)
elif (component.flows.get(tuple('bh2_hp, h2_demand')) is not None):
total_h2_demand = sum(component.flows[tuple('bh2_hp, h2_demand')])
entry = [name, 'total demand (hydrogen)', total_h2_demand]
writer.writerow(entry)
maximum_flow = max(component.flows[tuple('bh2_hp, h2_demand')])
entry = [name, 'maximum hourly demand', maximum_flow]
writer.writerow(entry)
elif (component.flows.get(tuple('bth, th_demand')) is not None):
total_h2_demand = sum(component.flows[tuple('bth, th_demand')])
entry = [name, 'total demand (thermal)', total_h2_demand]
writer.writerow(entry)
this_annuity = component.results['annuity_total']
this_emission = component.results['annual_total_emissions']
if (name in comp_dict.keys()):
name = comp_dict[name]
for this_tuple in component.flows:
if ('bel' in this_tuple[0]):
total_elec_use = sum(component.flows[tuple(this_tuple)])
if (name not in component_elec_use_names):
component_elec_use.append(total_elec_use)
component_elec_use_names.append(name)
this_tuple_flow_sum = [this_tuple, sum(component.flows[tuple(this_tuple)])]
sum_flows.append(this_tuple_flow_sum)
if ((component.component != 'gate') and (component.component != 'energy_demand_from_csv') and (component.component != 'sink')):
component_names.append(name)
component_annuities.append(this_annuity)
component_emissions.append(this_emission)
flow_sums_dataframe = pd.DataFrame(sum_flows, columns=['Flow name', 'Flow sum'])
if (external_components is not None):
for ext_component in external_components:
name = ext_component.name
this_annuity = ext_component.results['annuity_total']
this_emission = ext_component.results['annual_total_emissions']
if (name in comp_dict.keys()):
name = comp_dict[name]
component_names.append(name)
component_annuities.append(this_annuity)
component_emissions.append(this_emission)
palette = sns.hls_palette(15, l=0.3, s=0.8)
component_names = np.char.array(component_names)
component_annuities = np.array(component_annuities)
annuity_shares = ((100.0 * component_annuities) / component_annuities.sum())
(patches_1, texts_1) = plt.pie(component_annuities, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_names, annuity_shares)]
plt.legend(patches_1, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an der gesamten Annuität')
plt.tight_layout()
plt.savefig((str(result_filename) + '_annuity_breakdown.png'), bbox_inches='tight')
plt.show()
component_emissions = np.array(component_emissions)
emission_shares = ((100.0 * component_emissions) / component_emissions.sum())
(patches_2, texts_2) = plt.pie(component_emissions, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_names, emission_shares)]
plt.legend(patches_2, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an den Gesamtemissionen')
plt.tight_layout()
plt.savefig((str(result_filename) + '_emissions_breakdown.png'), bbox_inches='tight')
plt.show()
component_elec_use_names = np.char.array(component_elec_use_names)
component_elec_use = np.array(component_elec_use)
elec_use_shares = ((100.0 * component_elec_use) / component_elec_use.sum())
(patches_3, texts_3) = plt.pie(component_elec_use, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_elec_use_names, elec_use_shares)]
plt.legend(patches_3, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an dem gesamten Stromverbrauch')
plt.tight_layout()
plt.savefig((str(result_filename) + '_electricity_use_breakdown.png'), bbox_inches='tight')
plt.show()
return flow_sums_dataframe | Saves the most important parameters from the optimization results in a csv file, and
automatically generates pie plots containing the results of financial annuity shares,
emission shares and electricity usage shares between components in the energy system.
:param optimization_results: The file containing all information about the optimization results
:type optimization_results: ?
:param result_index: The index number that relates to the specific optimization result
:type result_index: int
:param result_filename: The result filename e.g. 'my_optimization_results.pickle'
:type result_filename: pickle
:param comp_dict: The dictionary containing names of all components
:type comp_dict: dict | smooth/framework/functions/save_important_parameters.py | save_important_parameters | morrme/smooth | 0 | python | def save_important_parameters(optimization_results, result_index, result_filename, comp_dict, external_components=None):
"Saves the most important parameters from the optimization results in a csv file, and\n automatically generates pie plots containing the results of financial annuity shares,\n emission shares and electricity usage shares between components in the energy system.\n\n :param optimization_results: The file containing all information about the optimization results\n :type optimization_results: ?\n :param result_index: The index number that relates to the specific optimization result\n :type result_index: int\n :param result_filename: The result filename e.g. 'my_optimization_results.pickle'\n :type result_filename: pickle\n :param comp_dict: The dictionary containing names of all components\n :type comp_dict: dict\n "
if result_filename.endswith('.pickle'):
result_filename = result_filename[:(- 7)]
    with open(str((result_filename + '_important_params')), 'w', newline='') as file:
writer = csv.writer(file)
headers = ['Component', 'Parameter', 'Value']
writer.writerow(headers)
component_names = []
component_annuities = []
component_emissions = []
component_elec_use = []
component_elec_use_names = []
sum_flows = []
for component in optimization_results[result_index].smooth_result:
name = component.name
if hasattr(component, 'power_max'):
entry = [name, 'maximum power', component.power_max]
writer.writerow(entry)
elif (hasattr(component, 'nominal_value') and hasattr(component, 'reference_value')):
power_max = (component.nominal_value * component.reference_value)
entry = [name, 'maximum power', power_max]
writer.writerow(entry)
elif hasattr(component, 'storage_capacity'):
entry = [name, 'storage capacity', component.storage_capacity]
writer.writerow(entry)
if (component.flows.get(tuple('from_grid, bel')) is not None):
total_from_grid = sum(component.flows[tuple('from_grid, bel')])
entry = [name, 'annual grid supply', total_from_grid]
writer.writerow(entry)
elif (component.flows.get(tuple('bh2_hp, h2_demand')) is not None):
total_h2_demand = sum(component.flows[tuple('bh2_hp, h2_demand')])
entry = [name, 'total demand (hydrogen)', total_h2_demand]
writer.writerow(entry)
maximum_flow = max(component.flows[tuple('bh2_hp, h2_demand')])
entry = [name, 'maximum hourly demand', maximum_flow]
writer.writerow(entry)
elif (component.flows.get(tuple('bth, th_demand')) is not None):
total_h2_demand = sum(component.flows[tuple('bth, th_demand')])
entry = [name, 'total demand (thermal)', total_h2_demand]
writer.writerow(entry)
this_annuity = component.results['annuity_total']
this_emission = component.results['annual_total_emissions']
if (name in comp_dict.keys()):
name = comp_dict[name]
for this_tuple in component.flows:
if ('bel' in this_tuple[0]):
total_elec_use = sum(component.flows[tuple(this_tuple)])
if (name not in component_elec_use_names):
component_elec_use.append(total_elec_use)
component_elec_use_names.append(name)
this_tuple_flow_sum = [this_tuple, sum(component.flows[tuple(this_tuple)])]
sum_flows.append(this_tuple_flow_sum)
if ((component.component != 'gate') and (component.component != 'energy_demand_from_csv') and (component.component != 'sink')):
component_names.append(name)
component_annuities.append(this_annuity)
component_emissions.append(this_emission)
flow_sums_dataframe = pd.DataFrame(sum_flows, columns=['Flow name', 'Flow sum'])
if (external_components is not None):
for ext_component in external_components:
name = ext_component.name
this_annuity = ext_component.results['annuity_total']
this_emission = ext_component.results['annual_total_emissions']
if (name in comp_dict.keys()):
name = comp_dict[name]
component_names.append(name)
component_annuities.append(this_annuity)
component_emissions.append(this_emission)
palette = sns.hls_palette(15, l=0.3, s=0.8)
component_names = np.char.array(component_names)
component_annuities = np.array(component_annuities)
annuity_shares = ((100.0 * component_annuities) / component_annuities.sum())
(patches_1, texts_1) = plt.pie(component_annuities, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_names, annuity_shares)]
plt.legend(patches_1, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an der gesamten Annuität')
plt.tight_layout()
plt.savefig((str(result_filename) + '_annuity_breakdown.png'), bbox_inches='tight')
plt.show()
component_emissions = np.array(component_emissions)
emission_shares = ((100.0 * component_emissions) / component_emissions.sum())
(patches_2, texts_2) = plt.pie(component_emissions, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_names, emission_shares)]
plt.legend(patches_2, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an den Gesamtemissionen')
plt.tight_layout()
plt.savefig((str(result_filename) + '_emissions_breakdown.png'), bbox_inches='tight')
plt.show()
component_elec_use_names = np.char.array(component_elec_use_names)
component_elec_use = np.array(component_elec_use)
elec_use_shares = ((100.0 * component_elec_use) / component_elec_use.sum())
(patches_3, texts_3) = plt.pie(component_elec_use, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_elec_use_names, elec_use_shares)]
plt.legend(patches_3, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an dem gesamten Stromverbrauch')
plt.tight_layout()
plt.savefig((str(result_filename) + '_electricity_use_breakdown.png'), bbox_inches='tight')
plt.show()
return flow_sums_dataframe | def save_important_parameters(optimization_results, result_index, result_filename, comp_dict, external_components=None):
"Saves the most important parameters from the optimization results in a csv file, and\n automatically generates pie plots containing the results of financial annuity shares,\n emission shares and electricity usage shares between components in the energy system.\n\n :param optimization_results: The file containing all information about the optimization results\n :type optimization_results: ?\n :param result_index: The index number that relates to the specific optimization result\n :type result_index: int\n :param result_filename: The result filename e.g. 'my_optimization_results.pickle'\n :type result_filename: pickle\n :param comp_dict: The dictionary containing names of all components\n :type comp_dict: dict\n "
if result_filename.endswith('.pickle'):
result_filename = result_filename[:(- 7)]
    with open(str((result_filename + '_important_params')), 'w', newline='') as file:
writer = csv.writer(file)
headers = ['Component', 'Parameter', 'Value']
writer.writerow(headers)
component_names = []
component_annuities = []
component_emissions = []
component_elec_use = []
component_elec_use_names = []
sum_flows = []
for component in optimization_results[result_index].smooth_result:
name = component.name
if hasattr(component, 'power_max'):
entry = [name, 'maximum power', component.power_max]
writer.writerow(entry)
elif (hasattr(component, 'nominal_value') and hasattr(component, 'reference_value')):
power_max = (component.nominal_value * component.reference_value)
entry = [name, 'maximum power', power_max]
writer.writerow(entry)
elif hasattr(component, 'storage_capacity'):
entry = [name, 'storage capacity', component.storage_capacity]
writer.writerow(entry)
if (component.flows.get(tuple('from_grid, bel')) is not None):
total_from_grid = sum(component.flows[tuple('from_grid, bel')])
entry = [name, 'annual grid supply', total_from_grid]
writer.writerow(entry)
elif (component.flows.get(tuple('bh2_hp, h2_demand')) is not None):
total_h2_demand = sum(component.flows[tuple('bh2_hp, h2_demand')])
entry = [name, 'total demand (hydrogen)', total_h2_demand]
writer.writerow(entry)
maximum_flow = max(component.flows[tuple('bh2_hp, h2_demand')])
entry = [name, 'maximum hourly demand', maximum_flow]
writer.writerow(entry)
elif (component.flows.get(tuple('bth, th_demand')) is not None):
total_h2_demand = sum(component.flows[tuple('bth, th_demand')])
entry = [name, 'total demand (thermal)', total_h2_demand]
writer.writerow(entry)
this_annuity = component.results['annuity_total']
this_emission = component.results['annual_total_emissions']
if (name in comp_dict.keys()):
name = comp_dict[name]
for this_tuple in component.flows:
if ('bel' in this_tuple[0]):
total_elec_use = sum(component.flows[tuple(this_tuple)])
if (name not in component_elec_use_names):
component_elec_use.append(total_elec_use)
component_elec_use_names.append(name)
this_tuple_flow_sum = [this_tuple, sum(component.flows[tuple(this_tuple)])]
sum_flows.append(this_tuple_flow_sum)
if ((component.component != 'gate') and (component.component != 'energy_demand_from_csv') and (component.component != 'sink')):
component_names.append(name)
component_annuities.append(this_annuity)
component_emissions.append(this_emission)
flow_sums_dataframe = pd.DataFrame(sum_flows, columns=['Flow name', 'Flow sum'])
if (external_components is not None):
for ext_component in external_components:
name = ext_component.name
this_annuity = ext_component.results['annuity_total']
this_emission = ext_component.results['annual_total_emissions']
if (name in comp_dict.keys()):
name = comp_dict[name]
component_names.append(name)
component_annuities.append(this_annuity)
component_emissions.append(this_emission)
palette = sns.hls_palette(15, l=0.3, s=0.8)
component_names = np.char.array(component_names)
component_annuities = np.array(component_annuities)
annuity_shares = ((100.0 * component_annuities) / component_annuities.sum())
(patches_1, texts_1) = plt.pie(component_annuities, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_names, annuity_shares)]
plt.legend(patches_1, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an der gesamten Annuität')
plt.tight_layout()
plt.savefig((str(result_filename) + '_annuity_breakdown.png'), bbox_inches='tight')
plt.show()
component_emissions = np.array(component_emissions)
emission_shares = ((100.0 * component_emissions) / component_emissions.sum())
(patches_2, texts_2) = plt.pie(component_emissions, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_names, emission_shares)]
plt.legend(patches_2, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an den Gesamtemissionen')
plt.tight_layout()
plt.savefig((str(result_filename) + '_emissions_breakdown.png'), bbox_inches='tight')
plt.show()
component_elec_use_names = np.char.array(component_elec_use_names)
component_elec_use = np.array(component_elec_use)
elec_use_shares = ((100.0 * component_elec_use) / component_elec_use.sum())
(patches_3, texts_3) = plt.pie(component_elec_use, startangle=90, colors=palette)
labels = ['{0}: {1:1.2f} %'.format(i, j) for (i, j) in zip(component_elec_use_names, elec_use_shares)]
plt.legend(patches_3, labels, loc='best', bbox_to_anchor=((- 0.1), 1.0), fontsize=8)
plt.title('Prozentualer Anteil an dem gesamten Stromverbrauch')
plt.tight_layout()
plt.savefig((str(result_filename) + '_electricity_use_breakdown.png'), bbox_inches='tight')
plt.show()
return flow_sums_dataframe<|docstring|>Saves the most important parameters from the optimization results in a csv file, and
automatically generates pie plots containing the results of financial annuity shares,
emission shares and electricity usage shares between components in the energy system.
:param optimization_results: The file containing all information about the optimization results
:type optimization_results: ?
:param result_index: The index number that relates to the specific optimization result
:type result_index: int
:param result_filename: The result filename e.g. 'my_optimization_results.pickle'
:type result_filename: pickle
:param comp_dict: The dictionary containing names of all components
:type comp_dict: dict<|endoftext|> |
87d35a4b94755618bddd41c1d32735a68474c8b484ece8f70d275989b3339a04 | def __init__(self, remotes, error_buffer, encoding=None, compress_level=None, fine_quality_level=None, subsample_level=None):
'compress_level: 0-9 [9 is highest compression]\n fine_quality_level: 0-100 [100 is best quality]\n subsample_level: 0-3 [0 is best quality]\n\n Lots of references for this, but\n https://github.com/TurboVNC/turbovnc/blob/master/doc/performance.txt\n is decent.\n '
load_pygame()
import libvncdriver
if (encoding is None):
encoding = os.environ.get('LIBVNC_ENCODING', 'tight')
if (compress_level is None):
compress_level = int(os.environ.get('LIBVNC_COMPRESS_LEVEL', '0'))
if (fine_quality_level is None):
fine_quality_level = int(os.environ.get('LIBVNC_FINE_QUALITY_LEVEL', '100'))
if (subsample_level is None):
subsample_level = int(os.environ.get('LIBVNC_SUBSAMPLE_LEVEL', '0'))
if (not hasattr(libvncdriver, 'VNCSession')):
raise error.Error('\n *=================================================*\n|| libvncdriver is not installed ||\n|| Try installing with "pip install libvncdriver" ||\n|| or use the go or python driver by setting ||\n|| UNIVERSE_VNCDRIVER=go ||\n|| UNIVERSE_VNCDRIVER=py ||\n *=================================================*')
logger.info(("Using libvncdriver's %s encoding" % encoding))
self.driver = libvncdriver.VNCSession(remotes=remotes, error_buffer=error_buffer, encoding=encoding, compress_level=compress_level, fine_quality_level=fine_quality_level, subsample_level=subsample_level)
self.screen = None
self.render_called_once = False
if PYGAME_INSTALLED:
pygame.init() | compress_level: 0-9 [9 is highest compression]
fine_quality_level: 0-100 [100 is best quality]
subsample_level: 0-3 [0 is best quality]
Lots of references for this, but
https://github.com/TurboVNC/turbovnc/blob/master/doc/performance.txt
is decent. | universe/vncdriver/libvnc_session.py | __init__ | albertghionea/hackthevalley_openai | 8,120 | python | def __init__(self, remotes, error_buffer, encoding=None, compress_level=None, fine_quality_level=None, subsample_level=None):
'compress_level: 0-9 [9 is highest compression]\n fine_quality_level: 0-100 [100 is best quality]\n subsample_level: 0-3 [0 is best quality]\n\n Lots of references for this, but\n https://github.com/TurboVNC/turbovnc/blob/master/doc/performance.txt\n is decent.\n '
load_pygame()
import libvncdriver
if (encoding is None):
encoding = os.environ.get('LIBVNC_ENCODING', 'tight')
if (compress_level is None):
compress_level = int(os.environ.get('LIBVNC_COMPRESS_LEVEL', '0'))
if (fine_quality_level is None):
fine_quality_level = int(os.environ.get('LIBVNC_FINE_QUALITY_LEVEL', '100'))
if (subsample_level is None):
subsample_level = int(os.environ.get('LIBVNC_SUBSAMPLE_LEVEL', '0'))
if (not hasattr(libvncdriver, 'VNCSession')):
raise error.Error('\n *=================================================*\n|| libvncdriver is not installed ||\n|| Try installing with "pip install libvncdriver" ||\n|| or use the go or python driver by setting ||\n|| UNIVERSE_VNCDRIVER=go ||\n|| UNIVERSE_VNCDRIVER=py ||\n *=================================================*')
logger.info(("Using libvncdriver's %s encoding" % encoding))
self.driver = libvncdriver.VNCSession(remotes=remotes, error_buffer=error_buffer, encoding=encoding, compress_level=compress_level, fine_quality_level=fine_quality_level, subsample_level=subsample_level)
self.screen = None
self.render_called_once = False
if PYGAME_INSTALLED:
pygame.init() | def __init__(self, remotes, error_buffer, encoding=None, compress_level=None, fine_quality_level=None, subsample_level=None):
'compress_level: 0-9 [9 is highest compression]\n fine_quality_level: 0-100 [100 is best quality]\n subsample_level: 0-3 [0 is best quality]\n\n Lots of references for this, but\n https://github.com/TurboVNC/turbovnc/blob/master/doc/performance.txt\n is decent.\n '
load_pygame()
import libvncdriver
if (encoding is None):
encoding = os.environ.get('LIBVNC_ENCODING', 'tight')
if (compress_level is None):
compress_level = int(os.environ.get('LIBVNC_COMPRESS_LEVEL', '0'))
if (fine_quality_level is None):
fine_quality_level = int(os.environ.get('LIBVNC_FINE_QUALITY_LEVEL', '100'))
if (subsample_level is None):
subsample_level = int(os.environ.get('LIBVNC_SUBSAMPLE_LEVEL', '0'))
if (not hasattr(libvncdriver, 'VNCSession')):
raise error.Error('\n *=================================================*\n|| libvncdriver is not installed ||\n|| Try installing with "pip install libvncdriver" ||\n|| or use the go or python driver by setting ||\n|| UNIVERSE_VNCDRIVER=go ||\n|| UNIVERSE_VNCDRIVER=py ||\n *=================================================*')
logger.info(("Using libvncdriver's %s encoding" % encoding))
self.driver = libvncdriver.VNCSession(remotes=remotes, error_buffer=error_buffer, encoding=encoding, compress_level=compress_level, fine_quality_level=fine_quality_level, subsample_level=subsample_level)
self.screen = None
self.render_called_once = False
if PYGAME_INSTALLED:
pygame.init()<|docstring|>compress_level: 0-9 [9 is highest compression]
fine_quality_level: 0-100 [100 is best quality]
subsample_level: 0-3 [0 is best quality]
Lots of references for this, but
https://github.com/TurboVNC/turbovnc/blob/master/doc/performance.txt
is decent.<|endoftext|> |
6d7b8fb507eda3edea5dca868efbb376b3629e71631384a43ed78f38db2f311b | def node_distribution_similarity(p_s: np.ndarray, p_t: np.ndarray, values: list=None) -> np.ndarray:
'\n Calculate the node distribution similarity matrix\n Args:\n p_s: (n_s, 1) array representing the distribution of source node\n p_t: (n_t, 1) array representing the distribution of target node\n\n Returns:\n cost_st: (n_s, n_t) the cost matrix between node probability\n '
if (values is None):
cost_st = ((np.repeat(p_s, p_t.shape[0], axis=1) - np.repeat(p_t, p_s.shape[0], axis=1).T) ** 2)
else:
cost_st = ((np.repeat((values[0] * p_s), p_t.shape[0], axis=1) - np.repeat((values[1] * p_t), p_s.shape[0], axis=1).T) ** 2)
return cost_st | Calculate the node distribution similarity matrix
Args:
p_s: (n_s, 1) array representing the distribution of source node
p_t: (n_t, 1) array representing the distribution of target node
Returns:
cost_st: (n_s, n_t) the cost matrix between node probability | GromovWassersteinFramework.py | node_distribution_similarity | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def node_distribution_similarity(p_s: np.ndarray, p_t: np.ndarray, values: list=None) -> np.ndarray:
'\n Calculate the node distribution similarity matrix\n Args:\n p_s: (n_s, 1) array representing the distribution of source node\n p_t: (n_t, 1) array representing the distribution of target node\n\n Returns:\n cost_st: (n_s, n_t) the cost matrix between node probability\n '
if (values is None):
cost_st = ((np.repeat(p_s, p_t.shape[0], axis=1) - np.repeat(p_t, p_s.shape[0], axis=1).T) ** 2)
else:
cost_st = ((np.repeat((values[0] * p_s), p_t.shape[0], axis=1) - np.repeat((values[1] * p_t), p_s.shape[0], axis=1).T) ** 2)
return cost_st | def node_distribution_similarity(p_s: np.ndarray, p_t: np.ndarray, values: list=None) -> np.ndarray:
'\n Calculate the node distribution similarity matrix\n Args:\n p_s: (n_s, 1) array representing the distribution of source node\n p_t: (n_t, 1) array representing the distribution of target node\n\n Returns:\n cost_st: (n_s, n_t) the cost matrix between node probability\n '
if (values is None):
cost_st = ((np.repeat(p_s, p_t.shape[0], axis=1) - np.repeat(p_t, p_s.shape[0], axis=1).T) ** 2)
else:
cost_st = ((np.repeat((values[0] * p_s), p_t.shape[0], axis=1) - np.repeat((values[1] * p_t), p_s.shape[0], axis=1).T) ** 2)
return cost_st<|docstring|>Calculate the node distribution similarity matrix
Args:
p_s: (n_s, 1) array representing the distribution of source node
p_t: (n_t, 1) array representing the distribution of target node
Returns:
cost_st: (n_s, n_t) the cost matrix between node probability<|endoftext|> |
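As a quick numeric illustration of the formula above (the `values is None` branch), a minimal sketch with made-up uniform node distributions:

```python
import numpy as np

p_s = np.ones((3, 1)) / 3   # toy source graph: 3 nodes, uniform distribution
p_t = np.ones((2, 1)) / 2   # toy target graph: 2 nodes, uniform distribution

# Same computation as the `values is None` branch above: cost_st[i, j] = (p_s[i] - p_t[j]) ** 2
cost_st = (np.repeat(p_s, p_t.shape[0], axis=1)
           - np.repeat(p_t, p_s.shape[0], axis=1).T) ** 2
print(cost_st.shape)   # (3, 2)
print(cost_st[0, 0])   # (1/3 - 1/2) ** 2 ≈ 0.02778
```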
9848a6e20c86acf103661dd13ab3d99c24a5e3ce7b83277f29c6a06ceae4f938 | def softmax_grad(x: np.ndarray) -> np.ndarray:
'\n The gradient of softmax function\n Args:\n x: (N, 1) or (N, ) array representing a distribution generated by softmax function\n\n Returns:\n grad_x: (N, N) array, the Jacobian matrix representing the gradient of softmax\n\n '
s = x.reshape((- 1), 1)
return (np.diagflat(s) - np.dot(s, s.T)) | The gradient of softmax function
Args:
x: (N, 1) or (N, ) array representing a distribution generated by softmax function
Returns:
grad_x: (N, N) array, the Jacobian matrix representing the gradient of softmax | GromovWassersteinFramework.py | softmax_grad | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def softmax_grad(x: np.ndarray) -> np.ndarray:
'\n The gradient of softmax function\n Args:\n x: (N, 1) or (N, ) array representing a distribution generated by softmax function\n\n Returns:\n grad_x: (N, N) array, the Jacobian matrix representing the gradient of softmax\n\n '
s = x.reshape((- 1), 1)
return (np.diagflat(s) - np.dot(s, s.T)) | def softmax_grad(x: np.ndarray) -> np.ndarray:
'\n The gradient of softmax function\n Args:\n x: (N, 1) or (N, ) array representing a distribution generated by softmax function\n\n Returns:\n grad_x: (N, N) array, the Jacobian matrix representing the gradient of softmax\n\n '
s = x.reshape((- 1), 1)
return (np.diagflat(s) - np.dot(s, s.T))<|docstring|>The gradient of softmax function
Args:
x: (N, 1) or (N, ) array representing a distribution generated by softmax function
Returns:
grad_x: (N, N) array, the Jacobian matrix representing the gradient of softmax<|endoftext|> |
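A small numeric check of the Jacobian above (a sketch only; the input is assumed to already be a softmax output, as the docstring requires):

```python
import numpy as np

def softmax_grad(x):
    # Same formula as above: J = diag(s) - s s^T
    s = x.reshape(-1, 1)
    return np.diagflat(s) - np.dot(s, s.T)

p = np.array([0.2, 0.3, 0.5])          # a distribution produced by softmax
J = softmax_grad(p)
print(J.shape)                          # (3, 3)
print(np.allclose(J.sum(axis=0), 0.0))  # True: columns sum to 0, so updates preserve sum(p) = 1
```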
afd6a258bb3125fa791b12d4da3f59f1d41ec2ee279933e620f67872beb6a13d | def update_distribution(a: np.ndarray, p_s0: np.ndarray, theta0: np.ndarray, beta: float, lr: float, weight: float) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Update distribution via gradient descent\n Args:\n a: (n_s, 1) dual vector\n p_s0: (n_s, 1) current distribution\n theta0: (n_s, 1) current parameters of the distribution\n beta: the weight of first term\n lr: the learning rate\n weight: the weight of second term (regularizer)\n\n Returns:\n p_s: (n_s, 1) array of updated distribution\n theta: (n_s, 1) array of updated parameters\n '
grad_ps = (beta * np.log(a))
if (weight > 0):
grad_ps -= (weight * (np.log(p_s0) + 1))
grad_theta = np.matmul(softmax_grad(p_s0), grad_ps)
grad_theta -= np.mean(grad_theta)
grad_theta /= (1e-10 + (np.sum((grad_theta ** 2)) ** 0.5))
theta = (theta0 - (lr * grad_theta))
p_s = softmax(theta)
return (p_s, theta) | Update distribution via gradient descent
Args:
a: (n_s, 1) dual vector
p_s0: (n_s, 1) current distribution
theta0: (n_s, 1) current parameters of the distribution
beta: the weight of first term
lr: the learning rate
weight: the weight of second term (regularizer)
Returns:
p_s: (n_s, 1) array of updated distribution
theta: (n_s, 1) array of updated parameters | GromovWassersteinFramework.py | update_distribution | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def update_distribution(a: np.ndarray, p_s0: np.ndarray, theta0: np.ndarray, beta: float, lr: float, weight: float) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Update distribution via gradient descent\n Args:\n a: (n_s, 1) dual vector\n p_s0: (n_s, 1) current distribution\n theta0: (n_s, 1) current parameters of the distribution\n beta: the weight of first term\n lr: the learning rate\n weight: the weight of second term (regularizer)\n\n Returns:\n p_s: (n_s, 1) array of updated distribution\n theta: (n_s, 1) array of updated parameters\n '
grad_ps = (beta * np.log(a))
if (weight > 0):
grad_ps -= (weight * (np.log(p_s0) + 1))
grad_theta = np.matmul(softmax_grad(p_s0), grad_ps)
grad_theta -= np.mean(grad_theta)
grad_theta /= (1e-10 + (np.sum((grad_theta ** 2)) ** 0.5))
theta = (theta0 - (lr * grad_theta))
p_s = softmax(theta)
return (p_s, theta) | def update_distribution(a: np.ndarray, p_s0: np.ndarray, theta0: np.ndarray, beta: float, lr: float, weight: float) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Update distribution via gradient descent\n Args:\n a: (n_s, 1) dual vector\n p_s0: (n_s, 1) current distribution\n theta0: (n_s, 1) current parameters of the distribution\n beta: the weight of first term\n lr: the learning rate\n weight: the weight of second term (regularizer)\n\n Returns:\n p_s: (n_s, 1) array of updated distribution\n theta: (n_s, 1) array of updated parameters\n '
grad_ps = (beta * np.log(a))
if (weight > 0):
grad_ps -= (weight * (np.log(p_s0) + 1))
grad_theta = np.matmul(softmax_grad(p_s0), grad_ps)
grad_theta -= np.mean(grad_theta)
grad_theta /= (1e-10 + (np.sum((grad_theta ** 2)) ** 0.5))
theta = (theta0 - (lr * grad_theta))
p_s = softmax(theta)
return (p_s, theta)<|docstring|>Update distribution via gradient descent
Args:
a: (n_s, 1) dual vector
p_s0: (n_s, 1) current distribution
theta0: (n_s, 1) current parameters of the distribution
beta: the weight of first term
lr: the learning rate
weight: the weight of second term (regularizer)
Returns:
p_s: (n_s, 1) array of updated distribution
theta: (n_s, 1) array of updated parameters<|endoftext|> |
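A one-step usage sketch of the update above; softmax is taken from scipy.special here, and the dual vector `a` is just a made-up positive vector (both are assumptions for illustration):

```python
import numpy as np
from scipy.special import softmax

def softmax_grad(x):                      # as defined earlier in this file
    s = x.reshape(-1, 1)
    return np.diagflat(s) - np.dot(s, s.T)

def update_distribution(a, p_s0, theta0, beta, lr, weight):
    # Same update as above: chain rule through softmax, normalized gradient, one descent step
    grad_ps = beta * np.log(a)
    if weight > 0:
        grad_ps -= weight * (np.log(p_s0) + 1)
    grad_theta = softmax_grad(p_s0) @ grad_ps
    grad_theta -= np.mean(grad_theta)
    grad_theta /= 1e-10 + np.sum(grad_theta ** 2) ** 0.5
    theta = theta0 - lr * grad_theta
    return softmax(theta), theta

theta0 = np.zeros((4, 1))
p_s0 = softmax(theta0)                         # uniform start: [0.25, 0.25, 0.25, 0.25]
a = np.array([[0.4], [0.3], [0.2], [0.1]])     # toy dual variable from a Sinkhorn step
p_s, theta = update_distribution(a, p_s0, theta0, beta=0.1, lr=0.5, weight=0.0)
print(p_s.ravel(), p_s.sum())                  # still a valid distribution (sums to 1)
```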
e120494d8450c5c52c6767c7baf2d2f20ddd9cc184e7f7bfc50db025cfeed29d | def sinkhorn_knopp_iteration(cost: np.ndarray, p_s: np.ndarray=None, p_t: np.ndarray=None, a: np.ndarray=None, trans0: np.ndarray=None, beta: float=0.1, error_bound: float=0.001, max_iter: int=50) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Sinkhorn-Knopp iteration algorithm\n\n When initial optimal transport "trans0" is not available, the function solves\n min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * <log(trans), trans>\n\n When initial optimal transport "trans0" is given, the function solves:\n min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * KL(trans || trans0)\n\n Args:\n cost: (n_s, n_t) array representing distance between nodes\n p_s: (n_s, 1) array representing the distribution of source nodes\n p_t: (n_t, 1) array representing the distribution of target nodes\n a: (n_s, 1) array representing the dual variable\n trans0: (n_s, n_t) initial array of optimal transport\n beta: the weight of entropic regularizer\n error_bound: the error bound to check convergence\n max_iter: the maximum number of iterations\n\n Returns:\n trans: optimal transport\n a: updated dual variable\n\n '
if (p_s is None):
p_s = (np.ones((cost.shape[0], 1)) / cost.shape[0])
if (p_t is None):
p_t = (np.ones((cost.shape[1], 1)) / cost.shape[1])
if (a is None):
a = (np.ones((cost.shape[0], 1)) / cost.shape[0])
if (trans0 is not None):
kernel = (np.exp(((- cost) / beta)) * trans0)
else:
kernel = np.exp(((- cost) / beta))
relative_error = np.inf
b = []
i = 0
while ((relative_error > error_bound) and (i < max_iter)):
b = (p_t / np.matmul(kernel.T, a))
a_new = (p_s / np.matmul(kernel, b))
relative_error = (np.sum(np.abs((a_new - a))) / np.sum(np.abs(a)))
a = a_new
i += 1
trans = (np.matmul(a, b.T) * kernel)
return (trans, a) | Sinkhorn-Knopp iteration algorithm
When initial optimal transport "trans0" is not available, the function solves
min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * <log(trans), trans>
When initial optimal transport "trans0" is given, the function solves:
min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * KL(trans || trans0)
Args:
cost: (n_s, n_t) array representing distance between nodes
p_s: (n_s, 1) array representing the distribution of source nodes
p_t: (n_t, 1) array representing the distribution of target nodes
a: (n_s, 1) array representing the dual variable
trans0: (n_s, n_t) initial array of optimal transport
beta: the weight of entropic regularizer
error_bound: the error bound to check convergence
max_iter: the maximum number of iterations
Returns:
trans: optimal transport
a: updated dual variable | GromovWassersteinFramework.py | sinkhorn_knopp_iteration | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def sinkhorn_knopp_iteration(cost: np.ndarray, p_s: np.ndarray=None, p_t: np.ndarray=None, a: np.ndarray=None, trans0: np.ndarray=None, beta: float=0.1, error_bound: float=0.001, max_iter: int=50) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Sinkhorn-Knopp iteration algorithm\n\n When initial optimal transport "trans0" is not available, the function solves\n min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * <log(trans), trans>\n\n When initial optimal transport "trans0" is given, the function solves:\n min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * KL(trans || trans0)\n\n Args:\n cost: (n_s, n_t) array representing distance between nodes\n p_s: (n_s, 1) array representing the distribution of source nodes\n p_t: (n_t, 1) array representing the distribution of target nodes\n a: (n_s, 1) array representing the dual variable\n trans0: (n_s, n_t) initial array of optimal transport\n beta: the weight of entropic regularizer\n error_bound: the error bound to check convergence\n max_iter: the maximum number of iterations\n\n Returns:\n trans: optimal transport\n a: updated dual variable\n\n '
if (p_s is None):
p_s = (np.ones((cost.shape[0], 1)) / cost.shape[0])
if (p_t is None):
p_t = (np.ones((cost.shape[1], 1)) / cost.shape[1])
if (a is None):
a = (np.ones((cost.shape[0], 1)) / cost.shape[0])
if (trans0 is not None):
kernel = (np.exp(((- cost) / beta)) * trans0)
else:
kernel = np.exp(((- cost) / beta))
relative_error = np.inf
b = []
i = 0
while ((relative_error > error_bound) and (i < max_iter)):
b = (p_t / np.matmul(kernel.T, a))
a_new = (p_s / np.matmul(kernel, b))
relative_error = (np.sum(np.abs((a_new - a))) / np.sum(np.abs(a)))
a = a_new
i += 1
trans = (np.matmul(a, b.T) * kernel)
return (trans, a) | def sinkhorn_knopp_iteration(cost: np.ndarray, p_s: np.ndarray=None, p_t: np.ndarray=None, a: np.ndarray=None, trans0: np.ndarray=None, beta: float=0.1, error_bound: float=0.001, max_iter: int=50) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Sinkhorn-Knopp iteration algorithm\n\n When initial optimal transport "trans0" is not available, the function solves\n min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * <log(trans), trans>\n\n When initial optimal transport "trans0" is given, the function solves:\n min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * KL(trans || trans0)\n\n Args:\n cost: (n_s, n_t) array representing distance between nodes\n p_s: (n_s, 1) array representing the distribution of source nodes\n p_t: (n_t, 1) array representing the distribution of target nodes\n a: (n_s, 1) array representing the dual variable\n trans0: (n_s, n_t) initial array of optimal transport\n beta: the weight of entropic regularizer\n error_bound: the error bound to check convergence\n max_iter: the maximum number of iterations\n\n Returns:\n trans: optimal transport\n a: updated dual variable\n\n '
if (p_s is None):
p_s = (np.ones((cost.shape[0], 1)) / cost.shape[0])
if (p_t is None):
p_t = (np.ones((cost.shape[1], 1)) / cost.shape[1])
if (a is None):
a = (np.ones((cost.shape[0], 1)) / cost.shape[0])
if (trans0 is not None):
kernel = (np.exp(((- cost) / beta)) * trans0)
else:
kernel = np.exp(((- cost) / beta))
relative_error = np.inf
b = []
i = 0
while ((relative_error > error_bound) and (i < max_iter)):
b = (p_t / np.matmul(kernel.T, a))
a_new = (p_s / np.matmul(kernel, b))
relative_error = (np.sum(np.abs((a_new - a))) / np.sum(np.abs(a)))
a = a_new
i += 1
trans = (np.matmul(a, b.T) * kernel)
return (trans, a)<|docstring|>Sinkhorn-Knopp iteration algorithm
When initial optimal transport "trans0" is not available, the function solves
min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * <log(trans), trans>
When initial optimal transport "trans0" is given, the function solves:
min_{trans in Pi(p_s, p_t)} <cost, trans> + beta * KL(trans || trans0)
Args:
cost: (n_s, n_t) array representing distance between nodes
p_s: (n_s, 1) array representing the distribution of source nodes
p_t: (n_t, 1) array representing the distribution of target nodes
a: (n_s, 1) array representing the dual variable
trans0: (n_s, n_t) initial array of optimal transport
beta: the weight of entropic regularizer
error_bound: the error bound to check convergence
max_iter: the maximum number of iterations
Returns:
trans: optimal transport
a: updated dual variable<|endoftext|> |
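A usage sketch of the routine above on a tiny dense problem; the cost matrix and uniform marginals are made up, the warm-start arguments (`a`, `trans0`) are left out, and a fairly large `beta` keeps the toy problem well conditioned:

```python
import numpy as np

def sinkhorn_knopp_iteration(cost, p_s, p_t, beta=0.1, error_bound=1e-3, max_iter=50):
    # Condensed form of the function above (no dual warm start, no prior transport trans0)
    a = np.ones((cost.shape[0], 1)) / cost.shape[0]
    kernel = np.exp(-cost / beta)
    for _ in range(max_iter):
        b = p_t / (kernel.T @ a)
        a_new = p_s / (kernel @ b)
        err = np.sum(np.abs(a_new - a)) / np.sum(np.abs(a))
        a = a_new
        if err < error_bound:
            break
    return (a @ b.T) * kernel, a              # trans = diag(a) K diag(b)

rng = np.random.default_rng(0)
cost = rng.random((4, 3))                     # toy cost between 4 source and 3 target nodes
p_s = np.ones((4, 1)) / 4
p_t = np.ones((3, 1)) / 3
trans, a = sinkhorn_knopp_iteration(cost, p_s, p_t, beta=1.0, error_bound=1e-9, max_iter=200)
print(np.allclose(trans.sum(axis=1, keepdims=True), p_s))               # rows match the source marginal
print(np.allclose(trans.sum(axis=0, keepdims=True), p_t.T, atol=1e-6))  # columns ≈ target marginal
```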
e39743e188e726a6552d34fc3dfe29af5af59db22f7a6df5e592f25ed5fa4145 | def node_cost_st(cost_s: csr_matrix, cost_t: csr_matrix, p_s: np.ndarray, p_t: np.ndarray, loss_type: str='L2', prior: float=None) -> np.ndarray:
"\n Calculate invariant cost between the nodes in different graphs based on learned optimal transport\n Args:\n cost_s: (n_s, n_s) array, the cost matrix of source graph\n cost_t: (n_t, n_t) array, the cost matrix of target graph\n p_s: (n_s, 1) array, the distribution of source nodes\n p_t: (n_t, 1) array, the distribution of target nodes\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n prior: whether use node distribution similarity matrix as a prior\n\n Returns:\n cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs\n "
n_s = cost_s.shape[0]
n_t = cost_t.shape[0]
if (loss_type == 'L2'):
f1_st = np.repeat(((cost_s ** 2) @ p_s), n_t, axis=1)
f2_st = np.repeat(((cost_t ** 2) @ p_t).T, n_s, axis=0)
else:
f1_st = np.repeat(np.matmul(((cost_s * np.log((cost_s + 1e-15))) - cost_s), p_s), n_t, axis=1)
f2_st = np.repeat((cost_t @ p_t).T, n_s, axis=0)
cost_st = (f1_st + f2_st)
if (prior is not None):
cost_st += (prior * node_distribution_similarity(p_s, p_t))
return cost_st | Calculate invariant cost between the nodes in different graphs based on learned optimal transport
Args:
cost_s: (n_s, n_s) array, the cost matrix of source graph
cost_t: (n_t, n_t) array, the cost matrix of target graph
p_s: (n_s, 1) array, the distribution of source nodes
p_t: (n_t, 1) array, the distribution of target nodes
loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy
'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy
prior: whether use node distribution similarity matrix as a prior
Returns:
cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs | GromovWassersteinFramework.py | node_cost_st | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def node_cost_st(cost_s: csr_matrix, cost_t: csr_matrix, p_s: np.ndarray, p_t: np.ndarray, loss_type: str='L2', prior: float=None) -> np.ndarray:
"\n Calculate invariant cost between the nodes in different graphs based on learned optimal transport\n Args:\n cost_s: (n_s, n_s) array, the cost matrix of source graph\n cost_t: (n_t, n_t) array, the cost matrix of target graph\n p_s: (n_s, 1) array, the distribution of source nodes\n p_t: (n_t, 1) array, the distribution of target nodes\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n prior: whether use node distribution similarity matrix as a prior\n\n Returns:\n cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs\n "
n_s = cost_s.shape[0]
n_t = cost_t.shape[0]
if (loss_type == 'L2'):
f1_st = np.repeat(((cost_s ** 2) @ p_s), n_t, axis=1)
f2_st = np.repeat(((cost_t ** 2) @ p_t).T, n_s, axis=0)
else:
f1_st = np.repeat(np.matmul(((cost_s * np.log((cost_s + 1e-15))) - cost_s), p_s), n_t, axis=1)
f2_st = np.repeat((cost_t @ p_t).T, n_s, axis=0)
cost_st = (f1_st + f2_st)
if (prior is not None):
cost_st += (prior * node_distribution_similarity(p_s, p_t))
return cost_st | def node_cost_st(cost_s: csr_matrix, cost_t: csr_matrix, p_s: np.ndarray, p_t: np.ndarray, loss_type: str='L2', prior: float=None) -> np.ndarray:
"\n Calculate invariant cost between the nodes in different graphs based on learned optimal transport\n Args:\n cost_s: (n_s, n_s) array, the cost matrix of source graph\n cost_t: (n_t, n_t) array, the cost matrix of target graph\n p_s: (n_s, 1) array, the distribution of source nodes\n p_t: (n_t, 1) array, the distribution of target nodes\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n prior: whether use node distribution similarity matrix as a prior\n\n Returns:\n cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs\n "
n_s = cost_s.shape[0]
n_t = cost_t.shape[0]
if (loss_type == 'L2'):
f1_st = np.repeat(((cost_s ** 2) @ p_s), n_t, axis=1)
f2_st = np.repeat(((cost_t ** 2) @ p_t).T, n_s, axis=0)
else:
f1_st = np.repeat(np.matmul(((cost_s * np.log((cost_s + 1e-15))) - cost_s), p_s), n_t, axis=1)
f2_st = np.repeat((cost_t @ p_t).T, n_s, axis=0)
cost_st = (f1_st + f2_st)
if (prior is not None):
cost_st += (prior * node_distribution_similarity(p_s, p_t))
return cost_st<|docstring|>Calculate invariant cost between the nodes in different graphs based on learned optimal transport
Args:
cost_s: (n_s, n_s) array, the cost matrix of source graph
cost_t: (n_t, n_t) array, the cost matrix of target graph
p_s: (n_s, 1) array, the distribution of source nodes
p_t: (n_t, 1) array, the distribution of target nodes
loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy
'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy
prior: whether use node distribution similarity matrix as a prior
Returns:
cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs<|endoftext|> |
fb974a5c91b98dd4e8fe7ebfe48283ff2ce37d9fbfa4c5d9a3a686c471702c06 | def node_cost(cost_s: csr_matrix, cost_t: csr_matrix, trans: np.ndarray, cost_st: np.ndarray, loss_type: str='L2') -> np.ndarray:
"\n Calculate the cost between the nodes in different graphs based on learned optimal transport\n Args:\n cost_s: (n_s, n_s) array, the cost matrix of source graph\n cost_t: (n_t, n_t) array, the cost matrix of target graph\n trans: (n_s, n_t) array, the learned optimal transport between two graphs\n cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n\n Returns:\n cost: (n_s, n_t) array, the estimated cost between the nodes in two graphs\n "
if (loss_type == 'L2'):
cost = (cost_st - (2 * ((cost_s @ trans) @ cost_t.T)))
else:
cost = (cost_st - np.matmul((cost_s @ trans), np.log((cost_t + 1e-15)).T))
return cost | Calculate the cost between the nodes in different graphs based on learned optimal transport
Args:
cost_s: (n_s, n_s) array, the cost matrix of source graph
cost_t: (n_t, n_t) array, the cost matrix of target graph
trans: (n_s, n_t) array, the learned optimal transport between two graphs
cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs
loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy
'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy
Returns:
cost: (n_s, n_t) array, the estimated cost between the nodes in two graphs | GromovWassersteinFramework.py | node_cost | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def node_cost(cost_s: csr_matrix, cost_t: csr_matrix, trans: np.ndarray, cost_st: np.ndarray, loss_type: str='L2') -> np.ndarray:
"\n Calculate the cost between the nodes in different graphs based on learned optimal transport\n Args:\n cost_s: (n_s, n_s) array, the cost matrix of source graph\n cost_t: (n_t, n_t) array, the cost matrix of target graph\n trans: (n_s, n_t) array, the learned optimal transport between two graphs\n cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n\n Returns:\n cost: (n_s, n_t) array, the estimated cost between the nodes in two graphs\n "
if (loss_type == 'L2'):
cost = (cost_st - (2 * ((cost_s @ trans) @ cost_t.T)))
else:
cost = (cost_st - np.matmul((cost_s @ trans), np.log((cost_t + 1e-15)).T))
return cost | def node_cost(cost_s: csr_matrix, cost_t: csr_matrix, trans: np.ndarray, cost_st: np.ndarray, loss_type: str='L2') -> np.ndarray:
"\n Calculate the cost between the nodes in different graphs based on learned optimal transport\n Args:\n cost_s: (n_s, n_s) array, the cost matrix of source graph\n cost_t: (n_t, n_t) array, the cost matrix of target graph\n trans: (n_s, n_t) array, the learned optimal transport between two graphs\n cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n\n Returns:\n cost: (n_s, n_t) array, the estimated cost between the nodes in two graphs\n "
if (loss_type == 'L2'):
cost = (cost_st - (2 * ((cost_s @ trans) @ cost_t.T)))
else:
cost = (cost_st - np.matmul((cost_s @ trans), np.log((cost_t + 1e-15)).T))
return cost<|docstring|>Calculate the cost between the nodes in different graphs based on learned optimal transport
Args:
cost_s: (n_s, n_s) array, the cost matrix of source graph
cost_t: (n_t, n_t) array, the cost matrix of target graph
trans: (n_s, n_t) array, the learned optimal transport between two graphs
cost_st: (n_s, n_t) array, the estimated invariant cost between the nodes in two graphs
loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy
'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy
Returns:
cost: (n_s, n_t) array, the estimated cost between the nodes in two graphs<|endoftext|> |
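To make the L2 branch concrete, a small sketch that evaluates `cost_st` (as in node_cost_st above) and `cost` on dense toy matrices, then checks the resulting inner product against the brute-force Gromov-Wasserstein sum. The graphs and the product coupling are made up, and plain numpy arrays stand in for the csr_matrix inputs, so `**` is an elementwise square here (the standard L2-loss decomposition):

```python
import numpy as np

rng = np.random.default_rng(1)
n_s, n_t = 4, 3
cost_s = rng.random((n_s, n_s))                 # toy source "distance" matrix
cost_t = rng.random((n_t, n_t))                 # toy target "distance" matrix
p_s = np.ones((n_s, 1)) / n_s
p_t = np.ones((n_t, 1)) / n_t
trans = p_s @ p_t.T                             # independent (product) coupling

# node_cost_st, L2 branch: (C_s**2 p_s) 1^T + 1 (C_t**2 p_t)^T
cost_st = (np.repeat((cost_s ** 2) @ p_s, n_t, axis=1)
           + np.repeat(((cost_t ** 2) @ p_t).T, n_s, axis=0))
# node_cost, L2 branch: subtract 2 * C_s T C_t^T
cost = cost_st - 2 * (cost_s @ trans) @ cost_t.T

# <cost, trans> equals the Gromov-Wasserstein objective
# sum_{i,j,k,l} (C_s[i,k] - C_t[j,l])**2 * T[i,j] * T[k,l]
d_gw = float((cost * trans).sum())
brute = sum((cost_s[i, k] - cost_t[j, l]) ** 2 * trans[i, j] * trans[k, l]
            for i in range(n_s) for j in range(n_t)
            for k in range(n_s) for l in range(n_t))
print(np.isclose(d_gw, brute))  # True
```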
c1035d018d1bd108b8025b26c3019e69381dfc5d4559de1ad5f1bd4e17685ad3 | def gromov_wasserstein_average(transports: Dict, costs: Dict, p_center: np.ndarray, weights: Dict, loss_type: str) -> np.ndarray:
"\n Averaging of cost matrix\n\n Args:\n transports: a dictionary, whose keys are graph ids and values are (n_s, n_c) np.ndarray of optimal transports\n costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) np.ndarray of cost matrices\n p_center: (n_c, 1) np.ndarray of barycenter's distribution\n weights: a dictionary, whose keys are graph ids and values are float number of weight\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n\n Returns:\n barycenter: (N, N) np.ndarray, the barycenter of cost matrix\n "
barycenter = 0
if (loss_type == 'L2'):
for n in costs.keys():
cost = costs[n]
trans = transports[n]
barycenter += (weights[n] * (trans.T @ (cost @ trans)))
barycenter /= np.matmul(p_center, p_center.T)
else:
for n in costs.keys():
cost = costs[n]
trans = transports[n]
barycenter += (weights[n] * np.matmul(np.matmul(trans.T, np.log((cost + 1e-15))), trans))
barycenter /= np.matmul(p_center, p_center.T)
barycenter = np.exp(barycenter)
return barycenter | Averaging of cost matrix
Args:
transports: a dictionary, whose keys are graph ids and values are (n_s, n_c) np.ndarray of optimal transports
costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) np.ndarray of cost matrices
p_center: (n_c, 1) np.ndarray of barycenter's distribution
weights: a dictionary, whose keys are graph ids and values are float number of weight
loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy
'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy
Returns:
barycenter: (N, N) np.ndarray, the barycenter of cost matrix | GromovWassersteinFramework.py | gromov_wasserstein_average | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def gromov_wasserstein_average(transports: Dict, costs: Dict, p_center: np.ndarray, weights: Dict, loss_type: str) -> np.ndarray:
"\n Averaging of cost matrix\n\n Args:\n transports: a dictionary, whose keys are graph ids and values are (n_s, n_c) np.ndarray of optimal transports\n costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) np.ndarray of cost matrices\n p_center: (n_c, 1) np.ndarray of barycenter's distribution\n weights: a dictionary, whose keys are graph ids and values are float number of weight\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n\n Returns:\n barycenter: (N, N) np.ndarray, the barycenter of cost matrix\n "
barycenter = 0
if (loss_type == 'L2'):
for n in costs.keys():
cost = costs[n]
trans = transports[n]
barycenter += (weights[n] * (trans.T @ (cost @ trans)))
barycenter /= np.matmul(p_center, p_center.T)
else:
for n in costs.keys():
cost = costs[n]
trans = transports[n]
barycenter += (weights[n] * np.matmul(np.matmul(trans.T, np.log((cost + 1e-15))), trans))
barycenter /= np.matmul(p_center, p_center.T)
barycenter = np.exp(barycenter)
return barycenter | def gromov_wasserstein_average(transports: Dict, costs: Dict, p_center: np.ndarray, weights: Dict, loss_type: str) -> np.ndarray:
"\n Averaging of cost matrix\n\n Args:\n transports: a dictionary, whose keys are graph ids and values are (n_s, n_c) np.ndarray of optimal transports\n costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) np.ndarray of cost matrices\n p_center: (n_c, 1) np.ndarray of barycenter's distribution\n weights: a dictionary, whose keys are graph ids and values are float number of weight\n loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy\n 'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy\n\n Returns:\n barycenter: (N, N) np.ndarray, the barycenter of cost matrix\n "
barycenter = 0
if (loss_type == 'L2'):
for n in costs.keys():
cost = costs[n]
trans = transports[n]
barycenter += (weights[n] * (trans.T @ (cost @ trans)))
barycenter /= np.matmul(p_center, p_center.T)
else:
for n in costs.keys():
cost = costs[n]
trans = transports[n]
barycenter += (weights[n] * np.matmul(np.matmul(trans.T, np.log((cost + 1e-15))), trans))
barycenter /= np.matmul(p_center, p_center.T)
barycenter = np.exp(barycenter)
return barycenter<|docstring|>Averaging of cost matrix
Args:
transports: a dictionary, whose keys are graph ids and values are (n_s, n_c) np.ndarray of optimal transports
costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) np.ndarray of cost matrices
p_center: (n_c, 1) np.ndarray of barycenter's distribution
weights: a dictionary, whose keys are graph ids and values are the (float) weights of the graphs
loss_type: 'L2' the Euclidean loss type for Gromov-Wasserstein discrepancy
'KL' the KL-divergence loss type for Gromov-Wasserstein discrepancy
Returns:
barycenter: (N, N) np.ndarray, the barycenter of cost matrix<|endoftext|> |
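A rough single-graph illustration of the 'L2' averaging step above; trans, cost and p_center are arbitrary stand-ins with the shapes named in the docstring.

import numpy as np

n_s, n_c = 5, 3
trans = np.full((n_s, n_c), 1.0 / (n_s * n_c))    # transport for one graph
cost = np.random.rand(n_s, n_s)                   # that graph's cost matrix
p_center = np.full((n_c, 1), 1.0 / n_c)           # barycenter distribution

# One-graph version of the 'L2' update: trans.T @ cost @ trans, normalised by
# the outer product of the barycenter distribution.
barycenter = (trans.T @ (cost @ trans)) / (p_center @ p_center.T)
print(barycenter.shape)  # (3, 3)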
f3fd2dcff668d5589e1edc9a114e98aeae2a9fd9c152826f70c912f10fde71b2 | def gromov_wasserstein_discrepancy(cost_s: csr_matrix, cost_t: csr_matrix, p_s: np.ndarray, p_t: np.ndarray, ot_hyperpara: Dict, trans0=None) -> Tuple[(np.ndarray, float, np.ndarray)]:
'\n Calculate Gromov-Wasserstein discrepancy with optionally-updated source probability\n\n Args:\n cost_s: (n_s, n_s) np.ndarray of source cost matrix\n cost_t: (n_t, n_t) np.ndarray of target cost matrix\n p_s: (n_s, 1) np.ndarray, the predefined source distribution\n p_t: (n_t, 1) np.ndarray, the predefined target distribution\n ot_hyperpara: dictionary of hyperparameter\n trans0: optional (n_s, n_t) array, the initial transport\n\n Returns:\n trans0: (n_s, n_t) array, the optimal transport\n d_gw: a float representing Gromov-Wasserstein discrepancy\n p_s: (n_s, 1) array, the optimal source distribution\n '
n_s = cost_s.shape[0]
if ot_hyperpara['update_p']:
theta = np.zeros((n_s, 1))
p_s = softmax(theta)
else:
theta = np.zeros((n_s, 1))
if (trans0 is None):
trans0 = np.matmul(p_s, p_t.T)
a = (np.ones((n_s, 1)) / n_s)
t = 0
relative_error = np.inf
cost_st = node_cost_st(cost_s, cost_t, p_s, p_t, loss_type=ot_hyperpara['loss_type'], prior=ot_hyperpara['node_prior'])
while ((relative_error > ot_hyperpara['iter_bound']) and (t < ot_hyperpara['outer_iteration'])):
cost = node_cost(cost_s, cost_t, trans0, cost_st, ot_hyperpara['loss_type'])
if (ot_hyperpara['ot_method'] == 'proximal'):
(trans, a) = sinkhorn_knopp_iteration(cost=cost, p_s=p_s, p_t=p_t, a=a, trans0=trans0, beta=ot_hyperpara['beta'], error_bound=ot_hyperpara['sk_bound'], max_iter=ot_hyperpara['inner_iteration'])
else:
(trans, a) = sinkhorn_knopp_iteration(cost=cost, p_s=p_s, p_t=p_t, a=a, trans0=None, beta=ot_hyperpara['beta'], error_bound=ot_hyperpara['sk_bound'], max_iter=ot_hyperpara['inner_iteration'])
relative_error = (np.sum(np.abs((trans - trans0))) / np.sum(np.abs(trans0)))
trans0 = trans
t += 1
if ot_hyperpara['update_p']:
(p_s, theta) = update_distribution(a, p_s, theta, ot_hyperpara['beta'], ot_hyperpara['lr'], ot_hyperpara['alpha'])
cost = node_cost(cost_s, cost_t, trans0, cost_st, ot_hyperpara['loss_type'])
d_gw = (cost * trans0).sum()
return (trans0, d_gw, p_s) | Calculate Gromov-Wasserstein discrepancy with optionally-updated source probability
Args:
cost_s: (n_s, n_s) np.ndarray of source cost matrix
cost_t: (n_t, n_t) np.ndarray of target cost matrix
p_s: (n_s, 1) np.ndarray, the predefined source distribution
p_t: (n_t, 1) np.ndarray, the predefined target distribution
ot_hyperpara: dictionary of hyperparameters
trans0: optional (n_s, n_t) array, the initial transport
Returns:
trans0: (n_s, n_t) array, the optimal transport
d_gw: a float representing Gromov-Wasserstein discrepancy
p_s: (n_s, 1) array, the optimal source distribution | GromovWassersteinFramework.py | gromov_wasserstein_discrepancy | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def gromov_wasserstein_discrepancy(cost_s: csr_matrix, cost_t: csr_matrix, p_s: np.ndarray, p_t: np.ndarray, ot_hyperpara: Dict, trans0=None) -> Tuple[(np.ndarray, float, np.ndarray)]:
'\n Calculate Gromov-Wasserstein discrepancy with optionally-updated source probability\n\n Args:\n cost_s: (n_s, n_s) np.ndarray of source cost matrix\n cost_t: (n_t, n_t) np.ndarray of target cost matrix\n p_s: (n_s, 1) np.ndarray, the predefined source distribution\n p_t: (n_t, 1) np.ndarray, the predefined target distribution\n ot_hyperpara: dictionary of hyperparameter\n trans0: optional (n_s, n_t) array, the initial transport\n\n Returns:\n trans0: (n_s, n_t) array, the optimal transport\n d_gw: a float representing Gromov-Wasserstein discrepancy\n p_s: (n_s, 1) array, the optimal source distribution\n '
n_s = cost_s.shape[0]
if ot_hyperpara['update_p']:
theta = np.zeros((n_s, 1))
p_s = softmax(theta)
else:
theta = np.zeros((n_s, 1))
if (trans0 is None):
trans0 = np.matmul(p_s, p_t.T)
a = (np.ones((n_s, 1)) / n_s)
t = 0
relative_error = np.inf
cost_st = node_cost_st(cost_s, cost_t, p_s, p_t, loss_type=ot_hyperpara['loss_type'], prior=ot_hyperpara['node_prior'])
while ((relative_error > ot_hyperpara['iter_bound']) and (t < ot_hyperpara['outer_iteration'])):
cost = node_cost(cost_s, cost_t, trans0, cost_st, ot_hyperpara['loss_type'])
if (ot_hyperpara['ot_method'] == 'proximal'):
(trans, a) = sinkhorn_knopp_iteration(cost=cost, p_s=p_s, p_t=p_t, a=a, trans0=trans0, beta=ot_hyperpara['beta'], error_bound=ot_hyperpara['sk_bound'], max_iter=ot_hyperpara['inner_iteration'])
else:
(trans, a) = sinkhorn_knopp_iteration(cost=cost, p_s=p_s, p_t=p_t, a=a, trans0=None, beta=ot_hyperpara['beta'], error_bound=ot_hyperpara['sk_bound'], max_iter=ot_hyperpara['inner_iteration'])
relative_error = (np.sum(np.abs((trans - trans0))) / np.sum(np.abs(trans0)))
trans0 = trans
t += 1
if ot_hyperpara['update_p']:
(p_s, theta) = update_distribution(a, p_s, theta, ot_hyperpara['beta'], ot_hyperpara['lr'], ot_hyperpara['alpha'])
cost = node_cost(cost_s, cost_t, trans0, cost_st, ot_hyperpara['loss_type'])
d_gw = (cost * trans0).sum()
return (trans0, d_gw, p_s) | def gromov_wasserstein_discrepancy(cost_s: csr_matrix, cost_t: csr_matrix, p_s: np.ndarray, p_t: np.ndarray, ot_hyperpara: Dict, trans0=None) -> Tuple[(np.ndarray, float, np.ndarray)]:
'\n Calculate Gromov-Wasserstein discrepancy with optionally-updated source probability\n\n Args:\n cost_s: (n_s, n_s) np.ndarray of source cost matrix\n cost_t: (n_t, n_t) np.ndarray of target cost matrix\n p_s: (n_s, 1) np.ndarray, the predefined source distribution\n p_t: (n_t, 1) np.ndarray, the predefined target distribution\n ot_hyperpara: dictionary of hyperparameter\n trans0: optional (n_s, n_t) array, the initial transport\n\n Returns:\n trans0: (n_s, n_t) array, the optimal transport\n d_gw: a float representing Gromov-Wasserstein discrepancy\n p_s: (n_s, 1) array, the optimal source distribution\n '
n_s = cost_s.shape[0]
if ot_hyperpara['update_p']:
theta = np.zeros((n_s, 1))
p_s = softmax(theta)
else:
theta = np.zeros((n_s, 1))
if (trans0 is None):
trans0 = np.matmul(p_s, p_t.T)
a = (np.ones((n_s, 1)) / n_s)
t = 0
relative_error = np.inf
cost_st = node_cost_st(cost_s, cost_t, p_s, p_t, loss_type=ot_hyperpara['loss_type'], prior=ot_hyperpara['node_prior'])
while ((relative_error > ot_hyperpara['iter_bound']) and (t < ot_hyperpara['outer_iteration'])):
cost = node_cost(cost_s, cost_t, trans0, cost_st, ot_hyperpara['loss_type'])
if (ot_hyperpara['ot_method'] == 'proximal'):
(trans, a) = sinkhorn_knopp_iteration(cost=cost, p_s=p_s, p_t=p_t, a=a, trans0=trans0, beta=ot_hyperpara['beta'], error_bound=ot_hyperpara['sk_bound'], max_iter=ot_hyperpara['inner_iteration'])
else:
(trans, a) = sinkhorn_knopp_iteration(cost=cost, p_s=p_s, p_t=p_t, a=a, trans0=None, beta=ot_hyperpara['beta'], error_bound=ot_hyperpara['sk_bound'], max_iter=ot_hyperpara['inner_iteration'])
relative_error = (np.sum(np.abs((trans - trans0))) / np.sum(np.abs(trans0)))
trans0 = trans
t += 1
if ot_hyperpara['update_p']:
(p_s, theta) = update_distribution(a, p_s, theta, ot_hyperpara['beta'], ot_hyperpara['lr'], ot_hyperpara['alpha'])
cost = node_cost(cost_s, cost_t, trans0, cost_st, ot_hyperpara['loss_type'])
d_gw = (cost * trans0).sum()
return (trans0, d_gw, p_s)<|docstring|>Calculate Gromov-Wasserstein discrepancy with optionally-updated source probability
Args:
cost_s: (n_s, n_s) np.ndarray of source cost matrix
cost_t: (n_t, n_t) np.ndarray of target cost matrix
p_s: (n_s, 1) np.ndarray, the predefined source distribution
p_t: (n_t, 1) np.ndarray, the predefined target distribution
ot_hyperpara: dictionary of hyperparameters
trans0: optional (n_s, n_t) array, the initial transport
Returns:
trans0: (n_s, n_t) array, the optimal transport
d_gw: a float representing Gromov-Wasserstein discrepancy
p_s: (n_s, 1) array, the optimal source distribution<|endoftext|> |
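A hedged usage sketch for gromov_wasserstein_discrepancy; it assumes GromovWassersteinFramework.py is importable under the name used below, and the hyperparameter values are illustrative guesses (only the keys are taken from the body above).

import numpy as np
from scipy.sparse import csr_matrix
import GromovWassersteinFramework as gw   # hypothetical import name

n_s, n_t = 6, 5
cost_s = csr_matrix(np.random.rand(n_s, n_s))
cost_t = csr_matrix(np.random.rand(n_t, n_t))
p_s = np.full((n_s, 1), 1.0 / n_s)
p_t = np.full((n_t, 1), 1.0 / n_t)

ot_hyperpara = {'loss_type': 'L2', 'ot_method': 'proximal', 'beta': 0.1,
                'outer_iteration': 200, 'iter_bound': 1e-10,
                'inner_iteration': 2, 'sk_bound': 1e-10,
                'node_prior': None, 'update_p': False, 'lr': 0.0, 'alpha': 0.0}

trans, d_gw, p_s_new = gw.gromov_wasserstein_discrepancy(cost_s, cost_t, p_s, p_t, ot_hyperpara)
print(trans.shape, d_gw)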
d95c2d423aaf092c96e321a014ae535f2101584fc335fe86e85b852f46b27e58 | def gromov_wasserstein_barycenter(costs: Dict, p_s: Dict, p_center: np.ndarray, ot_hyperpara: Dict, weights: Dict=None) -> Tuple[(np.ndarray, Dict, List)]:
"\n Multi-graph matching based on one-step Gromov-Wasserstein barycenter learning.\n\n Args:\n costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) cost matrices of different graphs\n p_s: a dictionary, whose keys are graph ids and values ara (n_s, 1) distributions of nodes of different graphs\n p_center: (n_c, 1) array, the distribution of barycenter's nodes\n ot_hyperpara: the dictionary of hyperparameters to train the Gromov-Wasserstein barycenter.\n weights: a dictionary, whose keys are graph ids and values are the weights of the graphs\n\n Returns:\n barycenter: (n_c, n_c) the cost matrix corresponding to the barycenter graph\n transports: a dictionary whose keys are graph ids and values are (n_s, n_c) optimal transports\n d_gw_sum: the sum of Gromov-Wasserstein discrepancy over iterations\n "
num = len(costs)
transports = {}
for n in costs.keys():
transports[n] = np.matmul(p_s[n], p_center.T)
if (weights is None):
weights = {}
for n in costs.keys():
weights[n] = (1 / num)
barycenter0 = csr_matrix(np.diag(p_center[:, 0]))
d_gw_sum = []
i = 0
relative_error = np.inf
while ((relative_error > ot_hyperpara['cost_bound']) and (i < ot_hyperpara['max_iter'])):
d_gw = {}
for n in costs.keys():
(transports[n], d_gw[n], p_s[n]) = gromov_wasserstein_discrepancy(costs[n], barycenter0, p_s[n], p_center, ot_hyperpara, transports[n])
barycenter = gromov_wasserstein_average(transports, costs, p_center, weights, ot_hyperpara['loss_type'])
relative_error = (np.sum(np.abs((barycenter - barycenter0))) / np.sum(np.abs(barycenter0)))
i += 1
barycenter0 = barycenter
d_gw_sum.append(d_gw)
return (barycenter0, transports, d_gw_sum) | Multi-graph matching based on one-step Gromov-Wasserstein barycenter learning.
Args:
costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) cost matrices of different graphs
p_s: a dictionary, whose keys are graph ids and values are (n_s, 1) distributions of nodes of different graphs
p_center: (n_c, 1) array, the distribution of barycenter's nodes
ot_hyperpara: the dictionary of hyperparameters to train the Gromov-Wasserstein barycenter.
weights: a dictionary, whose keys are graph ids and values are the weights of the graphs
Returns:
barycenter: (n_c, n_c) the cost matrix corresponding to the barycenter graph
transports: a dictionary whose keys are graph ids and values are (n_s, n_c) optimal transports
d_gw_sum: the sum of Gromov-Wasserstein discrepancy over iterations | GromovWassersteinFramework.py | gromov_wasserstein_barycenter | trneedham/Spectral-Gromov-Wasserstein | 13 | python | def gromov_wasserstein_barycenter(costs: Dict, p_s: Dict, p_center: np.ndarray, ot_hyperpara: Dict, weights: Dict=None) -> Tuple[(np.ndarray, Dict, List)]:
"\n Multi-graph matching based on one-step Gromov-Wasserstein barycenter learning.\n\n Args:\n costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) cost matrices of different graphs\n p_s: a dictionary, whose keys are graph ids and values ara (n_s, 1) distributions of nodes of different graphs\n p_center: (n_c, 1) array, the distribution of barycenter's nodes\n ot_hyperpara: the dictionary of hyperparameters to train the Gromov-Wasserstein barycenter.\n weights: a dictionary, whose keys are graph ids and values are the weights of the graphs\n\n Returns:\n barycenter: (n_c, n_c) the cost matrix corresponding to the barycenter graph\n transports: a dictionary whose keys are graph ids and values are (n_s, n_c) optimal transports\n d_gw_sum: the sum of Gromov-Wasserstein discrepancy over iterations\n "
num = len(costs)
transports = {}
for n in costs.keys():
transports[n] = np.matmul(p_s[n], p_center.T)
if (weights is None):
weights = {}
for n in costs.keys():
weights[n] = (1 / num)
barycenter0 = csr_matrix(np.diag(p_center[:, 0]))
d_gw_sum = []
i = 0
relative_error = np.inf
while ((relative_error > ot_hyperpara['cost_bound']) and (i < ot_hyperpara['max_iter'])):
d_gw = {}
for n in costs.keys():
(transports[n], d_gw[n], p_s[n]) = gromov_wasserstein_discrepancy(costs[n], barycenter0, p_s[n], p_center, ot_hyperpara, transports[n])
barycenter = gromov_wasserstein_average(transports, costs, p_center, weights, ot_hyperpara['loss_type'])
relative_error = (np.sum(np.abs((barycenter - barycenter0))) / np.sum(np.abs(barycenter0)))
i += 1
barycenter0 = barycenter
d_gw_sum.append(d_gw)
return (barycenter0, transports, d_gw_sum) | def gromov_wasserstein_barycenter(costs: Dict, p_s: Dict, p_center: np.ndarray, ot_hyperpara: Dict, weights: Dict=None) -> Tuple[(np.ndarray, Dict, List)]:
"\n Multi-graph matching based on one-step Gromov-Wasserstein barycenter learning.\n\n Args:\n costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) cost matrices of different graphs\n p_s: a dictionary, whose keys are graph ids and values ara (n_s, 1) distributions of nodes of different graphs\n p_center: (n_c, 1) array, the distribution of barycenter's nodes\n ot_hyperpara: the dictionary of hyperparameters to train the Gromov-Wasserstein barycenter.\n weights: a dictionary, whose keys are graph ids and values are the weights of the graphs\n\n Returns:\n barycenter: (n_c, n_c) the cost matrix corresponding to the barycenter graph\n transports: a dictionary whose keys are graph ids and values are (n_s, n_c) optimal transports\n d_gw_sum: the sum of Gromov-Wasserstein discrepancy over iterations\n "
num = len(costs)
transports = {}
for n in costs.keys():
transports[n] = np.matmul(p_s[n], p_center.T)
if (weights is None):
weights = {}
for n in costs.keys():
weights[n] = (1 / num)
barycenter0 = csr_matrix(np.diag(p_center[:, 0]))
d_gw_sum = []
i = 0
relative_error = np.inf
while ((relative_error > ot_hyperpara['cost_bound']) and (i < ot_hyperpara['max_iter'])):
d_gw = {}
for n in costs.keys():
(transports[n], d_gw[n], p_s[n]) = gromov_wasserstein_discrepancy(costs[n], barycenter0, p_s[n], p_center, ot_hyperpara, transports[n])
barycenter = gromov_wasserstein_average(transports, costs, p_center, weights, ot_hyperpara['loss_type'])
relative_error = (np.sum(np.abs((barycenter - barycenter0))) / np.sum(np.abs(barycenter0)))
i += 1
barycenter0 = barycenter
d_gw_sum.append(d_gw)
return (barycenter0, transports, d_gw_sum)<|docstring|>Multi-graph matching based on one-step Gromov-Wasserstein barycenter learning.
Args:
costs: a dictionary, whose keys are graph ids and values are (n_s, n_s) cost matrices of different graphs
p_s: a dictionary, whose keys are graph ids and values are (n_s, 1) distributions of nodes of different graphs
p_center: (n_c, 1) array, the distribution of barycenter's nodes
ot_hyperpara: the dictionary of hyperparameters to train the Gromov-Wasserstein barycenter.
weights: a dictionary, whose keys are graph ids and values are the weights of the graphs
Returns:
barycenter: (n_c, n_c) the cost matrix corresponding to the barycenter graph
transports: a dictionary whose keys are graph ids and values are (n_s, n_c) optimal transports
d_gw_sum: the sum of Gromov-Wasserstein discrepancy over iterations<|endoftext|> |
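A similar hedged sketch for the barycenter routine; two toy graphs are matched to a 4-node barycenter, and the extra keys 'cost_bound' and 'max_iter' control the outer loop (all values are illustrative guesses).

import numpy as np
from scipy.sparse import csr_matrix
import GromovWassersteinFramework as gw   # hypothetical import name

sizes = {0: 6, 1: 5}
costs = {k: csr_matrix(np.random.rand(n, n)) for k, n in sizes.items()}
p_s = {k: np.full((n, 1), 1.0 / n) for k, n in sizes.items()}
n_c = 4
p_center = np.full((n_c, 1), 1.0 / n_c)

ot_hyperpara = {'loss_type': 'L2', 'ot_method': 'proximal', 'beta': 0.1,
                'outer_iteration': 200, 'iter_bound': 1e-10,
                'inner_iteration': 2, 'sk_bound': 1e-10, 'node_prior': None,
                'update_p': False, 'lr': 0.0, 'alpha': 0.0,
                'cost_bound': 1e-3, 'max_iter': 5}

barycenter, transports, d_gw_sum = gw.gromov_wasserstein_barycenter(costs, p_s, p_center, ot_hyperpara)
print(len(transports), len(d_gw_sum))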
05d7350d9a3714014f55a873e4aeb13c9b5a495736dcf4b5ad5ab1731b85b68e | def comparar_media_hora(hora):
'\n Metodo que filtra las horas de los registros para que concuerden\n con las horas de busqueda deseada\n :param hora: Timestamp completo\n :return: True si las horas del timestamp estan entre las deseadas\n False si lo contrario\n '
if ((hora.time() <= HORA_FIN.time()) and (hora.time() >= HORA_30.time())):
return True
return False | Method that filters the record times so that they match
the desired search times
:param hora: Full timestamp
:return: True if the times in the timestamp are within the desired range
False otherwise | src/ProfitableAreasDay.py | comparar_media_hora | adrigrillo/NYCSparkTaxi | 4 | python | def comparar_media_hora(hora):
'\n Metodo que filtra las horas de los registros para que concuerden\n con las horas de busqueda deseada\n :param hora: Timestamp completo\n :return: True si las horas del timestamp estan entre las deseadas\n False si lo contrario\n '
if ((hora.time() <= HORA_FIN.time()) and (hora.time() >= HORA_30.time())):
return True
return False | def comparar_media_hora(hora):
'\n Metodo que filtra las horas de los registros para que concuerden\n con las horas de busqueda deseada\n :param hora: Timestamp completo\n :return: True si las horas del timestamp estan entre las deseadas\n False si lo contrario\n '
if ((hora.time() <= HORA_FIN.time()) and (hora.time() >= HORA_30.time())):
return True
return False<|docstring|>Method that filters the record times so that they match
the desired search times
:param hora: Full timestamp
:return: True if the times in the timestamp are within the desired range
False otherwise<|endoftext|>
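A small stand-alone illustration of the window test used above; HORA_30 and HORA_FIN are module-level bounds in the original script, so the values below are placeholders.

from datetime import datetime

HORA_30 = datetime(2013, 1, 7, 10, 0, 0)    # placeholder lower bound
HORA_FIN = datetime(2013, 1, 7, 10, 30, 0)  # placeholder upper bound

def dentro_de_ventana(hora):
    # Same test as comparar_media_hora: keep timestamps whose time of day
    # falls inside [HORA_30, HORA_FIN].
    return HORA_30.time() <= hora.time() <= HORA_FIN.time()

print(dentro_de_ventana(datetime(2013, 2, 1, 10, 15, 0)))  # True
print(dentro_de_ventana(datetime(2013, 2, 1, 11, 15, 0)))  # False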
9103b3e2d9b28021a956283e74992f20ac81b8c5efa2889c16a546a810b868cc | def comparar_cuarto_hora(hora):
'\n Metodo que filtra las horas de los registros para que concuerden\n con las horas de busqueda deseada\n :param hora: Timestamp completo\n :return: True si las horas del timestamp estan entre las deseadas\n False si lo contrario\n '
if ((hora.time() <= HORA_FIN.time()) and (hora.time() >= HORA_15.time())):
return True
return False | Method that filters the record times so that they match
the desired search times
:param hora: Full timestamp
:return: True if the times in the timestamp are within the desired range
False otherwise | src/ProfitableAreasDay.py | comparar_cuarto_hora | adrigrillo/NYCSparkTaxi | 4 | python | def comparar_cuarto_hora(hora):
'\n Metodo que filtra las horas de los registros para que concuerden\n con las horas de busqueda deseada\n :param hora: Timestamp completo\n :return: True si las horas del timestamp estan entre las deseadas\n False si lo contrario\n '
if ((hora.time() <= HORA_FIN.time()) and (hora.time() >= HORA_15.time())):
return True
return False | def comparar_cuarto_hora(hora):
'\n Metodo que filtra las horas de los registros para que concuerden\n con las horas de busqueda deseada\n :param hora: Timestamp completo\n :return: True si las horas del timestamp estan entre las deseadas\n False si lo contrario\n '
if ((hora.time() <= HORA_FIN.time()) and (hora.time() >= HORA_15.time())):
return True
return False<|docstring|>Method that filters the record times so that they match
the desired search times
:param hora: Full timestamp
:return: True if the times in the timestamp are within the desired range
False otherwise<|endoftext|>
6301dd0f89a07ecafa7e8c69b60a6de58c889990fa287ea2fe43e4935f9c3e7c | def relevancia(fecha):
'\n Metodo que da mas relevancia a los viajes mas cercanos a la\n fecha de busqueda deseada.\n Si la diferencia es menor a un mes de la fecha\n dada los registros tienen más relevancia\n :param fecha: Timestamp completo\n :return: 2 si el viaje esta cerca de la fecha deseada, 1 si no\n '
diferencia = (fecha - HORA_FIN)
if ((diferencia < timedelta(days=7)) and (diferencia > timedelta(days=(- 7)))):
return 1.0
elif ((diferencia < timedelta(days=14)) and (diferencia > timedelta(days=(- 14)))):
return 0.75
elif ((diferencia < timedelta(days=21)) and (diferencia > timedelta(days=(- 21)))):
return 0.5
elif ((diferencia < timedelta(days=(- 28))) and (diferencia > timedelta(days=(- 28)))):
return 0.25
else:
return 0 | Method that gives more relevance to the trips closest to the
desired search date.
If the difference from the given date is less than one month
the records have more relevance
:param fecha: Full timestamp
:return: 2 if the trip is close to the desired date, 1 if not | src/ProfitableAreasDay.py | relevancia | adrigrillo/NYCSparkTaxi | 4 | python | def relevancia(fecha):
'\n Metodo que da mas relevancia a los viajes mas cercanos a la\n fecha de busqueda deseada.\n Si la diferencia es menor a un mes de la fecha\n dada los registros tienen más relevancia\n :param fecha: Timestamp completo\n :return: 2 si el viaje esta cerca de la fecha deseada, 1 si no\n '
diferencia = (fecha - HORA_FIN)
if ((diferencia < timedelta(days=7)) and (diferencia > timedelta(days=(- 7)))):
return 1.0
elif ((diferencia < timedelta(days=14)) and (diferencia > timedelta(days=(- 14)))):
return 0.75
elif ((diferencia < timedelta(days=21)) and (diferencia > timedelta(days=(- 21)))):
return 0.5
elif ((diferencia < timedelta(days=(- 28))) and (diferencia > timedelta(days=(- 28)))):
return 0.25
else:
return 0 | def relevancia(fecha):
'\n Metodo que da mas relevancia a los viajes mas cercanos a la\n fecha de busqueda deseada.\n Si la diferencia es menor a un mes de la fecha\n dada los registros tienen más relevancia\n :param fecha: Timestamp completo\n :return: 2 si el viaje esta cerca de la fecha deseada, 1 si no\n '
diferencia = (fecha - HORA_FIN)
if ((diferencia < timedelta(days=7)) and (diferencia > timedelta(days=(- 7)))):
return 1.0
elif ((diferencia < timedelta(days=14)) and (diferencia > timedelta(days=(- 14)))):
return 0.75
elif ((diferencia < timedelta(days=21)) and (diferencia > timedelta(days=(- 21)))):
return 0.5
elif ((diferencia < timedelta(days=(- 28))) and (diferencia > timedelta(days=(- 28)))):
return 0.25
else:
return 0<|docstring|>Method that gives more relevance to the trips closest to the
desired search date.
If the difference from the given date is less than one month
the records have more relevance
:param fecha: Full timestamp
:return: 2 if the trip is close to the desired date, 1 if not<|endoftext|>
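A simplified restatement of the weekly decay apparently intended above (note that the original's final branch compares against timedelta(days=-28) on both sides); the reference date is a placeholder.

from datetime import datetime, timedelta

REFERENCIA = datetime(2013, 2, 1, 10, 30, 0)  # stand-in for HORA_FIN

def peso(fecha, referencia=REFERENCIA):
    # Weight decays in 0.25 steps per extra week of distance from the
    # reference date and reaches 0 after four weeks.
    dias = abs((fecha - referencia).days)
    if dias < 7:
        return 1.0
    if dias < 14:
        return 0.75
    if dias < 21:
        return 0.5
    if dias < 28:
        return 0.25
    return 0.0

print(peso(REFERENCIA + timedelta(days=10)))  # 0.75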
165254f9e869a1f7d531c0b4bf0c13f0bf7d9b9e4b17e5ac06efad40e740c4ba | def main(spark, fichero):
'\n Calculo de las zonas mas propensas a crear beneficio dado un mes,\n un dia de la semana y una hora dentro de todo el conjunto de datos.\n Los viajes mas cercanos al mes introducido tendran mas relevancia\n :param spark: Instancia de spark\n :param fichero: Fichero de datos\n :return: Diez rutas mas frecuentes\n '
inicio = timeit.default_timer()
data = spark.read.format('parquet').load(('./../data/processed/' + fichero))
dia_elegido = obtener_dia_semana(DIA_SEMANA)
trips_down = data.filter((data.dia_semana == dia_elegido)).filter(comprobar_media_hora(data.hora_bajada))
trips_up = data.filter((data.dia_semana == dia_elegido)).filter(comprobar_media_hora(data.hora_subida))
down_30_min = trips_down.select('medallon', 'hora_bajada', 'cuad_latitud_bajada', 'cuad_longitud_bajada').orderBy('hora_bajada', ascending=False).dropDuplicates(subset=['medallon'])
up_30_min = trips_up.select('medallon', 'hora_subida').orderBy('hora_subida', ascending=False).dropDuplicates(subset=['medallon']).withColumnRenamed('medallon', 'taxi')
joined = down_30_min.join(up_30_min, (down_30_min.medallon == up_30_min.taxi), 'leftouter').select('medallon', 'hora_bajada', 'hora_subida', 'cuad_latitud_bajada', 'cuad_longitud_bajada')
estado_taxis = joined.select(joined.medallon, joined.cuad_latitud_bajada, joined.cuad_longitud_bajada, joined.hora_bajada, when((joined.hora_subida > joined.hora_bajada), 1).otherwise(0).alias('taxi_ocupado'))
taxis_filtrados = estado_taxis.filter((estado_taxis.taxi_ocupado == 0)).withColumn('influencia', calcular_relevancia(estado_taxis.hora_bajada))
taxis_libres = taxis_filtrados.groupBy('cuad_latitud_bajada', 'cuad_longitud_bajada').count().select(col('cuad_latitud_bajada'), col('cuad_longitud_bajada'), col('count').alias('taxis_libres'))
influencia_taxis_libres = taxis_filtrados.groupBy('cuad_latitud_bajada', 'cuad_longitud_bajada').avg('influencia').select(col('cuad_latitud_bajada').alias('latitud'), col('cuad_longitud_bajada').alias('longitud'), col('avg(influencia)').alias('influencia'))
' Calculamos la proporcion de taxis libres por zona, esta proporcion\n es el numero de taxis libres en la zona por la influencia de estos taxis.\n Siendo menos influyentes cuanto mas alejados en el tiempo\n '
condition = [(taxis_libres.cuad_latitud_bajada == influencia_taxis_libres.latitud), (taxis_libres.cuad_longitud_bajada == influencia_taxis_libres.longitud)]
taxis_libres_prop = taxis_libres.join(influencia_taxis_libres, condition).select(col('cuad_latitud_bajada'), col('cuad_longitud_bajada'), round((col('taxis_libres') * col('influencia'))).alias('proporcion_taxis_libres'))
'\n PASAMOS A LOS BENEFICIOS\n '
trips_15 = trips_down.filter(comprobar_cuarto_hora(data.hora_bajada)).withColumn('influencia', calcular_relevancia(estado_taxis.hora_bajada))
beneficios = trips_15.groupBy('cuad_latitud_subida', 'cuad_longitud_subida').avg('tarifa', 'propina', 'influencia').select(col('cuad_latitud_subida'), col('cuad_longitud_subida'), ((col('avg(tarifa)') + col('avg(propina)')) * col('avg(influencia)')).alias('beneficios'))
condicion = [(beneficios.cuad_latitud_subida == taxis_libres.cuad_latitud_bajada), (beneficios.cuad_longitud_subida == taxis_libres.cuad_longitud_bajada)]
profitable = beneficios.join(taxis_libres_prop, condicion, 'leftouter').select(col('cuad_latitud_subida').alias('cuad_latitud'), col('cuad_longitud_subida').alias('cuad_longitud'), (col('beneficios') / col('proporcion_taxis_libres')).alias('beneficio')).orderBy('beneficio', ascending=False)
profitable = profitable.take(10)
fin = timeit.default_timer()
file = open(('./../data/results/' + 'resultadosProfitableDay.txt'), 'a')
file.write((((str(HORA_30) + ', ') + str(HORA_FIN)) + ', '))
for i in range(len(profitable)):
file.write((str(i) + ': '))
file.write((((('(' + str(profitable[i][0])) + ', ') + str(profitable[i][1])) + ') '))
file.write((str((fin - inicio)) + '\n'))
file.close() | Computation of the areas most likely to generate profit given a month,
a day of the week and a time, within the whole data set.
Trips closest to the supplied month will have more relevance
:param spark: Spark instance
:param fichero: Data file
:return: Ten most frequent routes | src/ProfitableAreasDay.py | main | adrigrillo/NYCSparkTaxi | 4 | python | def main(spark, fichero):
'\n Calculo de las zonas mas propensas a crear beneficio dado un mes,\n un dia de la semana y una hora dentro de todo el conjunto de datos.\n Los viajes mas cercanos al mes introducido tendran mas relevancia\n :param spark: Instancia de spark\n :param fichero: Fichero de datos\n :return: Diez rutas mas frecuentes\n '
inicio = timeit.default_timer()
data = spark.read.format('parquet').load(('./../data/processed/' + fichero))
dia_elegido = obtener_dia_semana(DIA_SEMANA)
trips_down = data.filter((data.dia_semana == dia_elegido)).filter(comprobar_media_hora(data.hora_bajada))
trips_up = data.filter((data.dia_semana == dia_elegido)).filter(comprobar_media_hora(data.hora_subida))
down_30_min = trips_down.select('medallon', 'hora_bajada', 'cuad_latitud_bajada', 'cuad_longitud_bajada').orderBy('hora_bajada', ascending=False).dropDuplicates(subset=['medallon'])
up_30_min = trips_up.select('medallon', 'hora_subida').orderBy('hora_subida', ascending=False).dropDuplicates(subset=['medallon']).withColumnRenamed('medallon', 'taxi')
joined = down_30_min.join(up_30_min, (down_30_min.medallon == up_30_min.taxi), 'leftouter').select('medallon', 'hora_bajada', 'hora_subida', 'cuad_latitud_bajada', 'cuad_longitud_bajada')
estado_taxis = joined.select(joined.medallon, joined.cuad_latitud_bajada, joined.cuad_longitud_bajada, joined.hora_bajada, when((joined.hora_subida > joined.hora_bajada), 1).otherwise(0).alias('taxi_ocupado'))
taxis_filtrados = estado_taxis.filter((estado_taxis.taxi_ocupado == 0)).withColumn('influencia', calcular_relevancia(estado_taxis.hora_bajada))
taxis_libres = taxis_filtrados.groupBy('cuad_latitud_bajada', 'cuad_longitud_bajada').count().select(col('cuad_latitud_bajada'), col('cuad_longitud_bajada'), col('count').alias('taxis_libres'))
influencia_taxis_libres = taxis_filtrados.groupBy('cuad_latitud_bajada', 'cuad_longitud_bajada').avg('influencia').select(col('cuad_latitud_bajada').alias('latitud'), col('cuad_longitud_bajada').alias('longitud'), col('avg(influencia)').alias('influencia'))
' Calculamos la proporcion de taxis libres por zona, esta proporcion\n es el numero de taxis libres en la zona por la influencia de estos taxis.\n Siendo menos influyentes cuanto mas alejados en el tiempo\n '
condition = [(taxis_libres.cuad_latitud_bajada == influencia_taxis_libres.latitud), (taxis_libres.cuad_longitud_bajada == influencia_taxis_libres.longitud)]
taxis_libres_prop = taxis_libres.join(influencia_taxis_libres, condition).select(col('cuad_latitud_bajada'), col('cuad_longitud_bajada'), round((col('taxis_libres') * col('influencia'))).alias('proporcion_taxis_libres'))
'\n PASAMOS A LOS BENEFICIOS\n '
trips_15 = trips_down.filter(comprobar_cuarto_hora(data.hora_bajada)).withColumn('influencia', calcular_relevancia(estado_taxis.hora_bajada))
beneficios = trips_15.groupBy('cuad_latitud_subida', 'cuad_longitud_subida').avg('tarifa', 'propina', 'influencia').select(col('cuad_latitud_subida'), col('cuad_longitud_subida'), ((col('avg(tarifa)') + col('avg(propina)')) * col('avg(influencia)')).alias('beneficios'))
condicion = [(beneficios.cuad_latitud_subida == taxis_libres.cuad_latitud_bajada), (beneficios.cuad_longitud_subida == taxis_libres.cuad_longitud_bajada)]
profitable = beneficios.join(taxis_libres_prop, condicion, 'leftouter').select(col('cuad_latitud_subida').alias('cuad_latitud'), col('cuad_longitud_subida').alias('cuad_longitud'), (col('beneficios') / col('proporcion_taxis_libres')).alias('beneficio')).orderBy('beneficio', ascending=False)
profitable = profitable.take(10)
fin = timeit.default_timer()
file = open(('./../data/results/' + 'resultadosProfitableDay.txt'), 'a')
file.write((((str(HORA_30) + ', ') + str(HORA_FIN)) + ', '))
for i in range(len(profitable)):
file.write((str(i) + ': '))
file.write((((('(' + str(profitable[i][0])) + ', ') + str(profitable[i][1])) + ') '))
file.write((str((fin - inicio)) + '\n'))
file.close() | def main(spark, fichero):
'\n Calculo de las zonas mas propensas a crear beneficio dado un mes,\n un dia de la semana y una hora dentro de todo el conjunto de datos.\n Los viajes mas cercanos al mes introducido tendran mas relevancia\n :param spark: Instancia de spark\n :param fichero: Fichero de datos\n :return: Diez rutas mas frecuentes\n '
inicio = timeit.default_timer()
data = spark.read.format('parquet').load(('./../data/processed/' + fichero))
dia_elegido = obtener_dia_semana(DIA_SEMANA)
trips_down = data.filter((data.dia_semana == dia_elegido)).filter(comprobar_media_hora(data.hora_bajada))
trips_up = data.filter((data.dia_semana == dia_elegido)).filter(comprobar_media_hora(data.hora_subida))
down_30_min = trips_down.select('medallon', 'hora_bajada', 'cuad_latitud_bajada', 'cuad_longitud_bajada').orderBy('hora_bajada', ascending=False).dropDuplicates(subset=['medallon'])
up_30_min = trips_up.select('medallon', 'hora_subida').orderBy('hora_subida', ascending=False).dropDuplicates(subset=['medallon']).withColumnRenamed('medallon', 'taxi')
joined = down_30_min.join(up_30_min, (down_30_min.medallon == up_30_min.taxi), 'leftouter').select('medallon', 'hora_bajada', 'hora_subida', 'cuad_latitud_bajada', 'cuad_longitud_bajada')
estado_taxis = joined.select(joined.medallon, joined.cuad_latitud_bajada, joined.cuad_longitud_bajada, joined.hora_bajada, when((joined.hora_subida > joined.hora_bajada), 1).otherwise(0).alias('taxi_ocupado'))
taxis_filtrados = estado_taxis.filter((estado_taxis.taxi_ocupado == 0)).withColumn('influencia', calcular_relevancia(estado_taxis.hora_bajada))
taxis_libres = taxis_filtrados.groupBy('cuad_latitud_bajada', 'cuad_longitud_bajada').count().select(col('cuad_latitud_bajada'), col('cuad_longitud_bajada'), col('count').alias('taxis_libres'))
influencia_taxis_libres = taxis_filtrados.groupBy('cuad_latitud_bajada', 'cuad_longitud_bajada').avg('influencia').select(col('cuad_latitud_bajada').alias('latitud'), col('cuad_longitud_bajada').alias('longitud'), col('avg(influencia)').alias('influencia'))
' Calculamos la proporcion de taxis libres por zona, esta proporcion\n es el numero de taxis libres en la zona por la influencia de estos taxis.\n Siendo menos influyentes cuanto mas alejados en el tiempo\n '
condition = [(taxis_libres.cuad_latitud_bajada == influencia_taxis_libres.latitud), (taxis_libres.cuad_longitud_bajada == influencia_taxis_libres.longitud)]
taxis_libres_prop = taxis_libres.join(influencia_taxis_libres, condition).select(col('cuad_latitud_bajada'), col('cuad_longitud_bajada'), round((col('taxis_libres') * col('influencia'))).alias('proporcion_taxis_libres'))
'\n PASAMOS A LOS BENEFICIOS\n '
trips_15 = trips_down.filter(comprobar_cuarto_hora(data.hora_bajada)).withColumn('influencia', calcular_relevancia(estado_taxis.hora_bajada))
beneficios = trips_15.groupBy('cuad_latitud_subida', 'cuad_longitud_subida').avg('tarifa', 'propina', 'influencia').select(col('cuad_latitud_subida'), col('cuad_longitud_subida'), ((col('avg(tarifa)') + col('avg(propina)')) * col('avg(influencia)')).alias('beneficios'))
condicion = [(beneficios.cuad_latitud_subida == taxis_libres.cuad_latitud_bajada), (beneficios.cuad_longitud_subida == taxis_libres.cuad_longitud_bajada)]
profitable = beneficios.join(taxis_libres_prop, condicion, 'leftouter').select(col('cuad_latitud_subida').alias('cuad_latitud'), col('cuad_longitud_subida').alias('cuad_longitud'), (col('beneficios') / col('proporcion_taxis_libres')).alias('beneficio')).orderBy('beneficio', ascending=False)
profitable = profitable.take(10)
fin = timeit.default_timer()
file = open(('./../data/results/' + 'resultadosProfitableDay.txt'), 'a')
file.write((((str(HORA_30) + ', ') + str(HORA_FIN)) + ', '))
for i in range(len(profitable)):
file.write((str(i) + ': '))
file.write((((('(' + str(profitable[i][0])) + ', ') + str(profitable[i][1])) + ') '))
file.write((str((fin - inicio)) + '\n'))
file.close()<|docstring|>Computation of the areas most likely to generate profit given a month,
a day of the week and a time, within the whole data set.
Trips closest to the supplied month will have more relevance
:param spark: Spark instance
:param fichero: Data file
:return: Ten most frequent routes<|endoftext|>
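A possible driver for the job above, assuming main can be imported from src/ProfitableAreasDay.py; the parquet file name is illustrative.

from pyspark.sql import SparkSession
from ProfitableAreasDay import main   # hypothetical import

if __name__ == '__main__':
    spark = SparkSession.builder.appName('ProfitableAreasDay').getOrCreate()
    main(spark, 'trips.parquet')      # file expected under ./../data/processed/
    spark.stop()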
701921d1ce69cc01053ab60c6ce2113aa20b1af1a9865e4bb845a54e36f489fb | def plot_kps(ax, row, show_text):
'\n Plot the key points on the image\n '
dne_kps = []
for kp in opt.kp_dict[row['image_category']]:
(x, y, visibility_type) = map(int, row[kp].split('_'))
if (visibility_type == 1):
draw_circles(ax, (x, y), 'white')
annotate_kp(ax, x, y, kp, 'white', show_text)
elif (visibility_type == 0):
draw_circles(ax, (x, y), 'red')
annotate_kp(ax, x, y, kp, 'red', show_text)
else:
dne_kps.append(kp)
if dne_kps:
annotate_dne_kps(ax, dne_kps, show_text) | Plot the key points on the image | utils/plot.py | plot_kps | alwc/fashionAI-keypoints-detection-pytorch | 7 | python | def plot_kps(ax, row, show_text):
'\n \n '
dne_kps = []
for kp in opt.kp_dict[row['image_category']]:
(x, y, visibility_type) = map(int, row[kp].split('_'))
if (visibility_type == 1):
draw_circles(ax, (x, y), 'white')
annotate_kp(ax, x, y, kp, 'white', show_text)
elif (visibility_type == 0):
draw_circles(ax, (x, y), 'red')
annotate_kp(ax, x, y, kp, 'red', show_text)
else:
dne_kps.append(kp)
if dne_kps:
annotate_dne_kps(ax, dne_kps, show_text) | def plot_kps(ax, row, show_text):
'\n \n '
dne_kps = []
for kp in opt.kp_dict[row['image_category']]:
(x, y, visibility_type) = map(int, row[kp].split('_'))
if (visibility_type == 1):
draw_circles(ax, (x, y), 'white')
annotate_kp(ax, x, y, kp, 'white', show_text)
elif (visibility_type == 0):
draw_circles(ax, (x, y), 'red')
annotate_kp(ax, x, y, kp, 'red', show_text)
else:
dne_kps.append(kp)
if dne_kps:
annotate_dne_kps(ax, dne_kps, show_text)<|docstring|>Plot the key points on the image<|endoftext|> |
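The annotation convention parsed above, shown on a stand-alone example string.

# Each keypoint column holds "x_y_visibility": 1 = visible (white marker),
# 0 = occluded (red marker), anything else = not annotated.
kp_value = '256_340_1'   # illustrative annotation string
x, y, visibility = map(int, kp_value.split('_'))
print(x, y, visibility)  # 256 340 1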
020d90ce6ae45943af61bddbd5af02f12d95b8c73215a5eb05dcaec1fc3961f8 | def plot_img(dir_path, ax, row, show_text):
'\n Plot the image\n '
img_category = row['image_category']
img_id = row['image_id'].split('/')[(- 1)][:(- 4)]
ax.set_title('{}\n{}'.format(img_category, img_id))
im = cv2.imread(str((dir_path / row['image_id'])))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
ax.imshow(im)
plot_kps(ax, row, show_text) | Plot the image | utils/plot.py | plot_img | alwc/fashionAI-keypoints-detection-pytorch | 7 | python | def plot_img(dir_path, ax, row, show_text):
'\n \n '
img_category = row['image_category']
img_id = row['image_id'].split('/')[(- 1)][:(- 4)]
ax.set_title('{}\n{}'.format(img_category, img_id))
im = cv2.imread(str((dir_path / row['image_id'])))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
ax.imshow(im)
plot_kps(ax, row, show_text) | def plot_img(dir_path, ax, row, show_text):
'\n \n '
img_category = row['image_category']
img_id = row['image_id'].split('/')[(- 1)][:(- 4)]
ax.set_title('{}\n{}'.format(img_category, img_id))
im = cv2.imread(str((dir_path / row['image_id'])))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
ax.imshow(im)
plot_kps(ax, row, show_text)<|docstring|>Plot the image<|endoftext|> |
92851949aa0669508ba85e82fca212900ac8c1efedde621d0791ad5d788c4616 | @staticmethod
def data_scaling(data):
'\n ``data_scaling`` performs column-wise minimax scaling on the input dataset.\n\n Args:\n data : The input data set to be scaled. Must be a numpy array or dataframe.\n\n Returns:\n (tuple): tuple containing:\n - **scaled_data** : A 2-D Numpy Array containing the scaled data. All array values will be between [0, 1].\n - **data_minimum** : A 2-D row vector containing the column-wise minimums of the input data.\n - **data_maximum** : A 2-D row vector containing the column-wise maximums of the input data.\n\n Raises:\n TypeError:\n Raised when the input data is not a numpy array or dataframe\n\n '
if isinstance(data, np.ndarray):
input_data = data
data_headers = []
elif isinstance(data, pd.DataFrame):
input_data = data.values
data_headers = data.columns.values.tolist()
else:
raise TypeError('original_data_input: Pandas dataframe or numpy array required.')
if (input_data.ndim == 1):
input_data = input_data.reshape(len(input_data), 1)
data_minimum = np.min(input_data, axis=0)
data_maximum = np.max(input_data, axis=0)
scale = (data_maximum - data_minimum)
scale[(scale == 0.0)] = 1.0
scaled_data = ((input_data - data_minimum) / scale)
data_minimum = data_minimum.reshape(1, data_minimum.shape[0])
data_maximum = data_maximum.reshape(1, data_maximum.shape[0])
if (len(data_headers) > 0):
scaled_data = pd.DataFrame(scaled_data, columns=data_headers)
return (scaled_data, data_minimum, data_maximum) | ``data_scaling`` performs column-wise minimax scaling on the input dataset.
Args:
data : The input data set to be scaled. Must be a numpy array or dataframe.
Returns:
(tuple): tuple containing:
- **scaled_data** : A 2-D Numpy Array containing the scaled data. All array values will be between [0, 1].
- **data_minimum** : A 2-D row vector containing the column-wise minimums of the input data.
- **data_maximum** : A 2-D row vector containing the column-wise maximums of the input data.
Raises:
TypeError:
Raised when the input data is not a numpy array or dataframe | idaes/surrogate/pysmo/polynomial_regression.py | data_scaling | adowling2/idaes-pse | 112 | python | @staticmethod
def data_scaling(data):
'\n ``data_scaling`` performs column-wise minimax scaling on the input dataset.\n\n Args:\n data : The input data set to be scaled. Must be a numpy array or dataframe.\n\n Returns:\n (tuple): tuple containing:\n - **scaled_data** : A 2-D Numpy Array containing the scaled data. All array values will be between [0, 1].\n - **data_minimum** : A 2-D row vector containing the column-wise minimums of the input data.\n - **data_maximum** : A 2-D row vector containing the column-wise maximums of the input data.\n\n Raises:\n TypeError:\n Raised when the input data is not a numpy array or dataframe\n\n '
if isinstance(data, np.ndarray):
input_data = data
data_headers = []
elif isinstance(data, pd.DataFrame):
input_data = data.values
data_headers = data.columns.values.tolist()
else:
raise TypeError('original_data_input: Pandas dataframe or numpy array required.')
if (input_data.ndim == 1):
input_data = input_data.reshape(len(input_data), 1)
data_minimum = np.min(input_data, axis=0)
data_maximum = np.max(input_data, axis=0)
scale = (data_maximum - data_minimum)
scale[(scale == 0.0)] = 1.0
scaled_data = ((input_data - data_minimum) / scale)
data_minimum = data_minimum.reshape(1, data_minimum.shape[0])
data_maximum = data_maximum.reshape(1, data_maximum.shape[0])
if (len(data_headers) > 0):
scaled_data = pd.DataFrame(scaled_data, columns=data_headers)
return (scaled_data, data_minimum, data_maximum) | @staticmethod
def data_scaling(data):
'\n ``data_scaling`` performs column-wise minimax scaling on the input dataset.\n\n Args:\n data : The input data set to be scaled. Must be a numpy array or dataframe.\n\n Returns:\n (tuple): tuple containing:\n - **scaled_data** : A 2-D Numpy Array containing the scaled data. All array values will be between [0, 1].\n - **data_minimum** : A 2-D row vector containing the column-wise minimums of the input data.\n - **data_maximum** : A 2-D row vector containing the column-wise maximums of the input data.\n\n Raises:\n TypeError:\n Raised when the input data is not a numpy array or dataframe\n\n '
if isinstance(data, np.ndarray):
input_data = data
data_headers = []
elif isinstance(data, pd.DataFrame):
input_data = data.values
data_headers = data.columns.values.tolist()
else:
raise TypeError('original_data_input: Pandas dataframe or numpy array required.')
if (input_data.ndim == 1):
input_data = input_data.reshape(len(input_data), 1)
data_minimum = np.min(input_data, axis=0)
data_maximum = np.max(input_data, axis=0)
scale = (data_maximum - data_minimum)
scale[(scale == 0.0)] = 1.0
scaled_data = ((input_data - data_minimum) / scale)
data_minimum = data_minimum.reshape(1, data_minimum.shape[0])
data_maximum = data_maximum.reshape(1, data_maximum.shape[0])
if (len(data_headers) > 0):
scaled_data = pd.DataFrame(scaled_data, columns=data_headers)
return (scaled_data, data_minimum, data_maximum)<|docstring|>``data_scaling`` performs column-wise minimax scaling on the input dataset.
Args:
data : The input data set to be scaled. Must be a numpy array or dataframe.
Returns:
(tuple): tuple containing:
- **scaled_data** : A 2-D Numpy Array containing the scaled data. All array values will be between [0, 1].
- **data_minimum** : A 2-D row vector containing the column-wise minimums of the input data.
- **data_maximum** : A 2-D row vector containing the column-wise maximums of the input data.
Raises:
TypeError:
Raised when the input data is not a numpy array or dataframe<|endoftext|> |
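A stand-alone NumPy equivalent of the column-wise minmax scaling performed above, on a tiny example array.

import numpy as np

data = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
data_min = data.min(axis=0)
data_max = data.max(axis=0)
scale = np.where(data_max - data_min == 0.0, 1.0, data_max - data_min)
scaled = (data - data_min) / scale
print(scaled)        # column-wise values in [0, 1]
print(data_min, data_max)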
32cda8f2398a49d8b3210135dd682ab9283240573466df5f62f3dfc5d3b851a2 | @staticmethod
def data_unscaling(x_scaled, x_min, x_max):
'\n\n ``data_unscaling`` performs column-wise un-scaling on the a minmax-scaled input dataset.\n\n Args:\n x_scaled (NumPy Array) : Data to be un-scaled. Data values should be between 0 and 1.\n x_min (NumPy vector) : :math:`n \\times 1` vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.\n x_max (NumPy vector) : :math:`n \\times 1` vector vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.\n\n Returns:\n NumPy Array : A 2-D numpy array containing the scaled data, :math:`x_{min} + x_{scaled} * (x_{max} - x_{min})`\n\n Raises:\n IndexError: Raised when the dimensions of the arrays are inconsistent.\n\n '
if (x_scaled.ndim == 1):
x_scaled = x_scaled.reshape(len(x_scaled), 1)
if ((x_scaled.shape[1] != x_min.size) or (x_scaled.shape[1] != x_max.size)):
raise IndexError('Dimensionality problems with data for un-scaling.')
unscaled_data = (x_min + (x_scaled * (x_max - x_min)))
return unscaled_data | ``data_unscaling`` performs column-wise un-scaling on a minmax-scaled input dataset.
Args:
x_scaled (NumPy Array) : Data to be un-scaled. Data values should be between 0 and 1.
x_min (NumPy vector) : :math:`n \times 1` vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.
x_max (NumPy vector) : :math:`n \times 1` vector containing the actual maximum value for each column. Must contain same number of elements as the number of columns in x_scaled.
Returns:
NumPy Array : A 2-D numpy array containing the scaled data, :math:`x_{min} + x_{scaled} * (x_{max} - x_{min})`
Raises:
IndexError: Raised when the dimensions of the arrays are inconsistent. | idaes/surrogate/pysmo/polynomial_regression.py | data_unscaling | adowling2/idaes-pse | 112 | python | @staticmethod
def data_unscaling(x_scaled, x_min, x_max):
'\n\n ``data_unscaling`` performs column-wise un-scaling on the a minmax-scaled input dataset.\n\n Args:\n x_scaled (NumPy Array) : Data to be un-scaled. Data values should be between 0 and 1.\n x_min (NumPy vector) : :math:`n \\times 1` vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.\n x_max (NumPy vector) : :math:`n \\times 1` vector vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.\n\n Returns:\n NumPy Array : A 2-D numpy array containing the scaled data, :math:`x_{min} + x_{scaled} * (x_{max} - x_{min})`\n\n Raises:\n IndexError: Raised when the dimensions of the arrays are inconsistent.\n\n '
if (x_scaled.ndim == 1):
x_scaled = x_scaled.reshape(len(x_scaled), 1)
if ((x_scaled.shape[1] != x_min.size) or (x_scaled.shape[1] != x_max.size)):
raise IndexError('Dimensionality problems with data for un-scaling.')
unscaled_data = (x_min + (x_scaled * (x_max - x_min)))
return unscaled_data | @staticmethod
def data_unscaling(x_scaled, x_min, x_max):
'\n\n ``data_unscaling`` performs column-wise un-scaling on the a minmax-scaled input dataset.\n\n Args:\n x_scaled (NumPy Array) : Data to be un-scaled. Data values should be between 0 and 1.\n x_min (NumPy vector) : :math:`n \\times 1` vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.\n x_max (NumPy vector) : :math:`n \\times 1` vector vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.\n\n Returns:\n NumPy Array : A 2-D numpy array containing the scaled data, :math:`x_{min} + x_{scaled} * (x_{max} - x_{min})`\n\n Raises:\n IndexError: Raised when the dimensions of the arrays are inconsistent.\n\n '
if (x_scaled.ndim == 1):
x_scaled = x_scaled.reshape(len(x_scaled), 1)
if ((x_scaled.shape[1] != x_min.size) or (x_scaled.shape[1] != x_max.size)):
raise IndexError('Dimensionality problems with data for un-scaling.')
unscaled_data = (x_min + (x_scaled * (x_max - x_min)))
return unscaled_data<|docstring|>``data_unscaling`` performs column-wise un-scaling on a minmax-scaled input dataset.
Args:
x_scaled (NumPy Array) : Data to be un-scaled. Data values should be between 0 and 1.
x_min (NumPy vector) : :math:`n \times 1` vector containing the actual minimum value for each column. Must contain same number of elements as the number of columns in x_scaled.
x_max (NumPy vector) : :math:`n \times 1` vector containing the actual maximum value for each column. Must contain same number of elements as the number of columns in x_scaled.
Returns:
NumPy Array : A 2-D numpy array containing the scaled data, :math:`x_{min} + x_{scaled} * (x_{max} - x_{min})`
Raises:
IndexError: Raised when the dimensions of the arrays are inconsistent.<|endoftext|> |
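The matching un-scaling step on a tiny example, mirroring x_min + x_scaled * (x_max - x_min).

import numpy as np

x_scaled = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]])
x_min = np.array([1.0, 10.0])
x_max = np.array([3.0, 30.0])
unscaled = x_min + x_scaled * (x_max - x_min)
print(unscaled)      # [[ 1. 10.] [ 2. 20.] [ 3. 30.]]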
19e4574e2dafbcafec0b4304f17de35f8596cb3b8cea8527a2fa375620ea134b | def __init__(self, original_data_input, regression_data_input, maximum_polynomial_order, number_of_crossvalidations=None, no_adaptive_samples=None, training_split=None, max_fraction_training_samples=None, max_iter=None, solution_method=None, multinomials=None, fname=None, overwrite=False):
'\n Initialization of PolynomialRegression class.\n\n Args:\n regression_data_input(NumPy Array of Pandas Dataframe) : The dataset for regression training. It is expected to contain features and output data, with the output values (Y) in the last column.\n original_data_input(NumPy Array of Pandas Dataframe) : If **regression_data_input** was drawn from a larger dataset by some sampling approach, the larger dataset may be provided here.\n maximum_polynomial_order(int) : The maximum polynomial order to be considered.\n\n Keyword Args:\n number_of_crossvalidations(int) : The number of polynomial fittings and cross-validations to be carried out for each polynomial function/expression. Must be a positive, non-zero integer. Default=3.\n\n training_split(float): The training/test split to be used for regression_data_input. Must be between 0 and 1. Default = 0.75\n\n solution_method(str): The method to be used for solving the least squares optimization problem for polynomial regression. Three options are available:\n\n (a) "MLE" : The mle (maximum likelihood estimate) method solves the least squares problem using linear algebra. Details of the method may be found in Forrester et al.\n (b) "BFGS" : This approach solves the least squares problem using scipy\'s BFGS algorithm.\n (c) "pyomo": This option solves the optimization problem in pyomo with IPOPT as solver. This is the default option.\n\n multinomials(bool): This option determines whether or not multinomial terms are considered during polynomial fitting. Takes 0 for No and 1 for Yes. Default = 1.\n\n Returns:\n **self** object containing all the input information.\n\n Raises:\n ValueError:\n - The input datasets (**original_data_input** or **regression_data_input**) are of the wrong type (not Numpy arrays or Pandas Dataframes)\n\n Exception:\n * **maximum_polynomial_order** is not a positive, non-zero integer or **maximum_polynomial_order** is higher than the number of training samples available\n Exception:\n * **solution_method** is not \'mle\', \'pyomo\' or \'bfgs\n Exception:\n - **multinomials** is not binary (0 or 1)\n Exception:\n - **training_split** is not between 0 and 1\n Exception:\n - **number_of_crossvalidations** is not a positive, non-zero integer\n Exception:\n - **max_fraction_training_samples** is not between 0 and 1\n Exception:\n - **no_adaptive_samples** is not a positive, non-zero integer\n Exception:\n - **max_iter** is not a positive, non-zero integer\n\n warnings.warn:\n - When the number of cross-validations is too high, i.e. number_of_crossvalidations > 10\n '
print('\n===========================Polynomial Regression===============================================\n')
if (not isinstance(overwrite, bool)):
raise Exception('overwrite must be boolean.')
self.overwrite = overwrite
if (fname is None):
fname = 'solution.pickle'
self.filename = 'solution.pickle'
elif ((not isinstance(fname, str)) or (os.path.splitext(fname)[(- 1)].lower() != '.pickle')):
raise Exception('fname must be a string with extension ".pickle". Please correct.')
if (os.path.exists(fname) and (overwrite is True)):
print('Warning:', fname, 'already exists; previous file will be overwritten.\n')
self.filename = fname
elif (os.path.exists(fname) and (overwrite is False)):
self.filename = (((os.path.splitext(fname)[0] + '_v_') + pd.Timestamp.today().strftime('%m-%d-%y_%H%M%S')) + '.pickle')
print('Warning:', fname, 'already exists; results will be saved to "', self.filename, '".\n')
elif (os.path.exists(fname) is False):
self.filename = fname
if isinstance(original_data_input, pd.DataFrame):
original_data = original_data_input.values
self.regression_data_columns = list(original_data_input.columns)[:(- 1)]
elif isinstance(original_data_input, np.ndarray):
original_data = original_data_input
self.regression_data_columns = list(range((original_data_input.shape[1] - 1)))
else:
raise ValueError('original_data_input: Pandas dataframe or numpy array required.')
if isinstance(regression_data_input, pd.DataFrame):
regression_data = regression_data_input.values
elif isinstance(regression_data_input, np.ndarray):
regression_data = regression_data_input
else:
raise ValueError('regression_data_input: Pandas dataframe or numpy array required.')
if (regression_data.shape[0] > original_data.shape[0]):
raise Exception('The sampled data has more entries than the original dataset.')
elif (regression_data.shape[1] != original_data.shape[1]):
raise Exception('Dimensional discrepancies in the dimensions of the original and regression datasets.')
elif ((regression_data.shape[1] == 1) or (original_data.shape[1] == 1)):
raise Exception('Input data requires at least two dimensions (X and Y data).')
self.original_data = original_data
self.regression_data = regression_data
if (number_of_crossvalidations is None):
print('The number of cross-validation cases (3) is used.')
number_of_crossvalidations = 3
elif (number_of_crossvalidations > 10):
warnings.warn('The number of cross-validations entered is large. The simulation may take a while to run')
self.number_of_crossvalidations = number_of_crossvalidations
if (not isinstance(maximum_polynomial_order, int)):
raise Exception('Maximum polynomial order must be an integer')
elif (maximum_polynomial_order > 10):
warnings.warn('The maximum allowed polynomial order is 10. Value has been adjusted to 10.')
maximum_polynomial_order = 10
self.max_polynomial_order = maximum_polynomial_order
self.number_of_x_vars = (regression_data.shape[1] - 1)
if (training_split is None):
print('The default training/cross-validation split of 0.75 is used.')
training_split = 0.75
elif ((training_split >= 1) or (training_split <= 0)):
raise Exception('Fraction of samples used for training must be between 0 and 1')
self.fraction_training = training_split
if (no_adaptive_samples is None):
no_adaptive_samples = 4
self.no_adaptive_samples = no_adaptive_samples
self.number_of_samples = regression_data.shape[0]
if (max_fraction_training_samples is None):
max_fraction_training_samples = 0.5
elif ((max_fraction_training_samples > 1) or (max_fraction_training_samples < 0)):
raise Exception('The fraction for the maximum number of training samples must be between 0 and 1')
self.max_fraction_training_samples = max_fraction_training_samples
if ((regression_data.shape[0] < original_data.shape[0]) and (max_iter is None)):
max_iter = 10
if ((regression_data.shape[0] == original_data.shape[0]) or (no_adaptive_samples == 0)):
print('No iterations will be run.')
max_iter = 0
self.max_iter = max_iter
if (not isinstance(self.number_of_crossvalidations, int)):
raise Exception('Number of cross-validations must be an integer')
elif (not isinstance(self.no_adaptive_samples, int)):
raise Exception('Number of adaptive samples must be an integer')
elif (not isinstance(self.max_iter, int)):
raise Exception('Maximum number of iterations must be an integer')
elif (self.max_polynomial_order >= regression_data.shape[0]):
raise Exception('max_polynomial_order too high for the number of samples supplied')
if ((self.max_polynomial_order <= 0) or (self.number_of_crossvalidations <= 0)):
raise Exception('maximum_polynomial_order and number_of_crossvalidations must be positive, non-zero integers')
elif ((self.no_adaptive_samples < 0) or (self.max_iter < 0)):
raise Exception('no_adaptive_samples and max_iter must be non-negative')
if (solution_method is None):
solution_method = 'pyomo'
self.solution_method = solution_method
print('Default parameter estimation method is used.')
elif (not isinstance(solution_method, string_types)):
raise Exception('Invalid solution method. Must be of type <str>.')
elif ((solution_method.lower() == 'mle') or (solution_method.lower() == 'pyomo') or (solution_method.lower() == 'bfgs')):
solution_method = solution_method.lower()
self.solution_method = solution_method
else:
raise Exception('Invalid parameter estimation method entered. Select one of maximum likelihood (solution_method="mle"), Pyomo optimization (solution_method="pyomo") or BFGS (solution_method="bfgs") methods. ')
print('Parameter estimation method: ', self.solution_method, '\n')
if (multinomials is None):
self.multinomials = 1
elif (multinomials == 1):
self.multinomials = 1
elif (multinomials == 0):
self.multinomials = 0
else:
raise Exception('Multinomial must be binary: input "1" for "Yes" and "0" for "No". ')
self.feature_list = []
self.additional_term_expressions = []
self.optimal_weights_array = None
self.final_polynomial_order = None
self.errors = None
self.number_of_iterations = None
self.iteration_summary = None
self.additional_features_data = None
self.final_training_data = None
self.dataframe_of_optimal_weights_polynomial = None
self.dataframe_of_optimal_weights_extra_terms = None
self.extra_terms_feature_vector = None
self.fit_status = None | Initialization of PolynomialRegression class.
Args:
regression_data_input(NumPy Array or Pandas Dataframe) : The dataset for regression training. It is expected to contain features and output data, with the output values (Y) in the last column.
original_data_input(NumPy Array or Pandas Dataframe) : If **regression_data_input** was drawn from a larger dataset by some sampling approach, the larger dataset may be provided here.
maximum_polynomial_order(int) : The maximum polynomial order to be considered.
Keyword Args:
number_of_crossvalidations(int) : The number of polynomial fittings and cross-validations to be carried out for each polynomial function/expression. Must be a positive, non-zero integer. Default=3.
training_split(float): The training/test split to be used for regression_data_input. Must be between 0 and 1. Default = 0.75
solution_method(str): The method to be used for solving the least squares optimization problem for polynomial regression. Three options are available:
(a) "MLE" : The mle (maximum likelihood estimate) method solves the least squares problem using linear algebra. Details of the method may be found in Forrester et al.
(b) "BFGS" : This approach solves the least squares problem using scipy's BFGS algorithm.
(c) "pyomo": This option solves the optimization problem in pyomo with IPOPT as solver. This is the default option.
multinomials(bool): This option determines whether or not multinomial terms are considered during polynomial fitting. Takes 0 for No and 1 for Yes. Default = 1.
Returns:
**self** object containing all the input information.
Raises:
ValueError:
- The input datasets (**original_data_input** or **regression_data_input**) are of the wrong type (not Numpy arrays or Pandas Dataframes)
Exception:
* **maximum_polynomial_order** is not a positive, non-zero integer or **maximum_polynomial_order** is higher than the number of training samples available
Exception:
* **solution_method** is not 'mle', 'pyomo' or 'bfgs'
Exception:
- **multinomials** is not binary (0 or 1)
Exception:
- **training_split** is not between 0 and 1
Exception:
- **number_of_crossvalidations** is not a positive, non-zero integer
Exception:
- **max_fraction_training_samples** is not between 0 and 1
Exception:
- **no_adaptive_samples** is not a positive, non-zero integer
Exception:
- **max_iter** is not a positive, non-zero integer
warnings.warn:
- When the number of cross-validations is too high, i.e. number_of_crossvalidations > 10 | idaes/surrogate/pysmo/polynomial_regression.py | __init__ | adowling2/idaes-pse | 112 | python | def __init__(self, original_data_input, regression_data_input, maximum_polynomial_order, number_of_crossvalidations=None, no_adaptive_samples=None, training_split=None, max_fraction_training_samples=None, max_iter=None, solution_method=None, multinomials=None, fname=None, overwrite=False):
'\n Initialization of PolynomialRegression class.\n\n Args:\n regression_data_input(NumPy Array of Pandas Dataframe) : The dataset for regression training. It is expected to contain features and output data, with the output values (Y) in the last column.\n original_data_input(NumPy Array of Pandas Dataframe) : If **regression_data_input** was drawn from a larger dataset by some sampling approach, the larger dataset may be provided here.\n maximum_polynomial_order(int) : The maximum polynomial order to be considered.\n\n Keyword Args:\n number_of_crossvalidations(int) : The number of polynomial fittings and cross-validations to be carried out for each polynomial function/expression. Must be a positive, non-zero integer. Default=3.\n\n training_split(float): The training/test split to be used for regression_data_input. Must be between 0 and 1. Default = 0.75\n\n solution_method(str): The method to be used for solving the least squares optimization problem for polynomial regression. Three options are available:\n\n (a) "MLE" : The mle (maximum likelihood estimate) method solves the least squares problem using linear algebra. Details of the method may be found in Forrester et al.\n (b) "BFGS" : This approach solves the least squares problem using scipy\'s BFGS algorithm.\n (c) "pyomo": This option solves the optimization problem in pyomo with IPOPT as solver. This is the default option.\n\n multinomials(bool): This option determines whether or not multinomial terms are considered during polynomial fitting. Takes 0 for No and 1 for Yes. Default = 1.\n\n Returns:\n **self** object containing all the input information.\n\n Raises:\n ValueError:\n - The input datasets (**original_data_input** or **regression_data_input**) are of the wrong type (not Numpy arrays or Pandas Dataframes)\n\n Exception:\n * **maximum_polynomial_order** is not a positive, non-zero integer or **maximum_polynomial_order** is higher than the number of training samples available\n Exception:\n * **solution_method** is not \'mle\', \'pyomo\' or \'bfgs\n Exception:\n - **multinomials** is not binary (0 or 1)\n Exception:\n - **training_split** is not between 0 and 1\n Exception:\n - **number_of_crossvalidations** is not a positive, non-zero integer\n Exception:\n - **max_fraction_training_samples** is not between 0 and 1\n Exception:\n - **no_adaptive_samples** is not a positive, non-zero integer\n Exception:\n - **max_iter** is not a positive, non-zero integer\n\n warnings.warn:\n - When the number of cross-validations is too high, i.e. number_of_crossvalidations > 10\n '
print('\n===========================Polynomial Regression===============================================\n')
if (not isinstance(overwrite, bool)):
raise Exception('overwrite must be boolean.')
self.overwrite = overwrite
if (fname is None):
fname = 'solution.pickle'
self.filename = 'solution.pickle'
elif ((not isinstance(fname, str)) or (os.path.splitext(fname)[(- 1)].lower() != '.pickle')):
raise Exception('fname must be a string with extension ".pickle". Please correct.')
if (os.path.exists(fname) and (overwrite is True)):
print('Warning:', fname, 'already exists; previous file will be overwritten.\n')
self.filename = fname
elif (os.path.exists(fname) and (overwrite is False)):
self.filename = (((os.path.splitext(fname)[0] + '_v_') + pd.Timestamp.today().strftime('%m-%d-%y_%H%M%S')) + '.pickle')
print('Warning:', fname, 'already exists; results will be saved to "', self.filename, '".\n')
elif (os.path.exists(fname) is False):
self.filename = fname
if isinstance(original_data_input, pd.DataFrame):
original_data = original_data_input.values
self.regression_data_columns = list(original_data_input.columns)[:(- 1)]
elif isinstance(original_data_input, np.ndarray):
original_data = original_data_input
self.regression_data_columns = list(range((original_data_input.shape[1] - 1)))
else:
raise ValueError('original_data_input: Pandas dataframe or numpy array required.')
if isinstance(regression_data_input, pd.DataFrame):
regression_data = regression_data_input.values
elif isinstance(regression_data_input, np.ndarray):
regression_data = regression_data_input
else:
raise ValueError('regression_data_input: Pandas dataframe or numpy array required.')
if (regression_data.shape[0] > original_data.shape[0]):
raise Exception('The sampled data has more entries than the original dataset.')
elif (regression_data.shape[1] != original_data.shape[1]):
raise Exception('The dimensions of the original and regression datasets do not match.')
elif ((regression_data.shape[1] == 1) or (original_data.shape[1] == 1)):
raise Exception('Input data requires at least two dimensions (X and Y data).')
self.original_data = original_data
self.regression_data = regression_data
if (number_of_crossvalidations is None):
print('The default number of cross-validation cases (3) is used.')
number_of_crossvalidations = 3
elif (number_of_crossvalidations > 10):
warnings.warn('The number of cross-validations entered is large. The simulation may take a while to run')
self.number_of_crossvalidations = number_of_crossvalidations
if (not isinstance(maximum_polynomial_order, int)):
raise Exception('Maximum polynomial order must be an integer')
elif (maximum_polynomial_order > 10):
warnings.warn('The maximum allowed polynomial order is 10. Value has been adjusted to 10.')
maximum_polynomial_order = 10
self.max_polynomial_order = maximum_polynomial_order
self.number_of_x_vars = (regression_data.shape[1] - 1)
if (training_split is None):
print('The default training/cross-validation split of 0.75 is used.')
training_split = 0.75
elif ((training_split >= 1) or (training_split <= 0)):
raise Exception('Fraction of samples used for training must be between 0 and 1')
self.fraction_training = training_split
if (no_adaptive_samples is None):
no_adaptive_samples = 4
self.no_adaptive_samples = no_adaptive_samples
self.number_of_samples = regression_data.shape[0]
if (max_fraction_training_samples is None):
max_fraction_training_samples = 0.5
elif ((max_fraction_training_samples > 1) or (max_fraction_training_samples < 0)):
raise Exception('The fraction for the maximum number of training samples must be between 0 and 1')
self.max_fraction_training_samples = max_fraction_training_samples
if ((regression_data.shape[0] < original_data.shape[0]) and (max_iter is None)):
max_iter = 10
if ((regression_data.shape[0] == original_data.shape[0]) or (no_adaptive_samples == 0)):
print('No iterations will be run.')
max_iter = 0
self.max_iter = max_iter
if (not isinstance(self.number_of_crossvalidations, int)):
raise Exception('Number of cross-validations must be an integer')
elif (not isinstance(self.no_adaptive_samples, int)):
raise Exception('Number of adaptive samples must be an integer')
elif (not isinstance(self.max_iter, int)):
raise Exception('Maximum number of iterations must be an integer')
elif (self.max_polynomial_order >= regression_data.shape[0]):
raise Exception('max_polynomial_order too high for the number of samples supplied')
if ((self.max_polynomial_order <= 0) or (self.number_of_crossvalidations <= 0)):
raise Exception('maximum_polynomial_order and number_of_crossvalidations must be positive, non-zero integers')
elif ((self.no_adaptive_samples < 0) or (self.max_iter < 0)):
raise Exception('no_adaptive_samples and max_iter must be non-negative')
if (solution_method is None):
solution_method = 'pyomo'
self.solution_method = solution_method
print('Default parameter estimation method is used.')
elif (not isinstance(solution_method, string_types)):
raise Exception('Invalid solution method. Must be of type <str>.')
elif ((solution_method.lower() == 'mle') or (solution_method.lower() == 'pyomo') or (solution_method.lower() == 'bfgs')):
solution_method = solution_method.lower()
self.solution_method = solution_method
else:
raise Exception('Invalid parameter estimation method entered. Select one of maximum likelihood (solution_method="mle"), Pyomo optimization (solution_method="pyomo") or BFGS (solution_method="bfgs") methods. ')
print('Parameter estimation method: ', self.solution_method, '\n')
if (multinomials is None):
self.multinomials = 1
elif (multinomials == 1):
self.multinomials = 1
elif (multinomials == 0):
self.multinomials = 0
else:
raise Exception('Multinomial must be binary: input "1" for "Yes" and "0" for "No". ')
self.feature_list = []
self.additional_term_expressions = []
self.optimal_weights_array = None
self.final_polynomial_order = None
self.errors = None
self.number_of_iterations = None
self.iteration_summary = None
self.additional_features_data = None
self.final_training_data = None
self.dataframe_of_optimal_weights_polynomial = None
self.dataframe_of_optimal_weights_extra_terms = None
self.extra_terms_feature_vector = None
self.fit_status = None | def __init__(self, original_data_input, regression_data_input, maximum_polynomial_order, number_of_crossvalidations=None, no_adaptive_samples=None, training_split=None, max_fraction_training_samples=None, max_iter=None, solution_method=None, multinomials=None, fname=None, overwrite=False):
'\n Initialization of PolynomialRegression class.\n\n Args:\n regression_data_input(NumPy Array of Pandas Dataframe) : The dataset for regression training. It is expected to contain features and output data, with the output values (Y) in the last column.\n original_data_input(NumPy Array of Pandas Dataframe) : If **regression_data_input** was drawn from a larger dataset by some sampling approach, the larger dataset may be provided here.\n maximum_polynomial_order(int) : The maximum polynomial order to be considered.\n\n Keyword Args:\n number_of_crossvalidations(int) : The number of polynomial fittings and cross-validations to be carried out for each polynomial function/expression. Must be a positive, non-zero integer. Default=3.\n\n training_split(float): The training/test split to be used for regression_data_input. Must be between 0 and 1. Default = 0.75\n\n solution_method(str): The method to be used for solving the least squares optimization problem for polynomial regression. Three options are available:\n\n (a) "MLE" : The mle (maximum likelihood estimate) method solves the least squares problem using linear algebra. Details of the method may be found in Forrester et al.\n (b) "BFGS" : This approach solves the least squares problem using scipy\'s BFGS algorithm.\n (c) "pyomo": This option solves the optimization problem in pyomo with IPOPT as solver. This is the default option.\n\n multinomials(bool): This option determines whether or not multinomial terms are considered during polynomial fitting. Takes 0 for No and 1 for Yes. Default = 1.\n\n Returns:\n **self** object containing all the input information.\n\n Raises:\n ValueError:\n - The input datasets (**original_data_input** or **regression_data_input**) are of the wrong type (not Numpy arrays or Pandas Dataframes)\n\n Exception:\n * **maximum_polynomial_order** is not a positive, non-zero integer or **maximum_polynomial_order** is higher than the number of training samples available\n Exception:\n * **solution_method** is not \'mle\', \'pyomo\' or \'bfgs\n Exception:\n - **multinomials** is not binary (0 or 1)\n Exception:\n - **training_split** is not between 0 and 1\n Exception:\n - **number_of_crossvalidations** is not a positive, non-zero integer\n Exception:\n - **max_fraction_training_samples** is not between 0 and 1\n Exception:\n - **no_adaptive_samples** is not a positive, non-zero integer\n Exception:\n - **max_iter** is not a positive, non-zero integer\n\n warnings.warn:\n - When the number of cross-validations is too high, i.e. number_of_crossvalidations > 10\n '
print('\n===========================Polynomial Regression===============================================\n')
if (not isinstance(overwrite, bool)):
raise Exception('overwrite must be boolean.')
self.overwrite = overwrite
if (fname is None):
fname = 'solution.pickle'
self.filename = 'solution.pickle'
elif ((not isinstance(fname, str)) or (os.path.splitext(fname)[(- 1)].lower() != '.pickle')):
raise Exception('fname must be a string with extension ".pickle". Please correct.')
if (os.path.exists(fname) and (overwrite is True)):
print('Warning:', fname, 'already exists; previous file will be overwritten.\n')
self.filename = fname
elif (os.path.exists(fname) and (overwrite is False)):
self.filename = (((os.path.splitext(fname)[0] + '_v_') + pd.Timestamp.today().strftime('%m-%d-%y_%H%M%S')) + '.pickle')
print('Warning:', fname, 'already exists; results will be saved to "', self.filename, '".\n')
elif (os.path.exists(fname) is False):
self.filename = fname
if isinstance(original_data_input, pd.DataFrame):
original_data = original_data_input.values
self.regression_data_columns = list(original_data_input.columns)[:(- 1)]
elif isinstance(original_data_input, np.ndarray):
original_data = original_data_input
self.regression_data_columns = list(range((original_data_input.shape[1] - 1)))
else:
raise ValueError('original_data_input: Pandas dataframe or numpy array required.')
if isinstance(regression_data_input, pd.DataFrame):
regression_data = regression_data_input.values
elif isinstance(regression_data_input, np.ndarray):
regression_data = regression_data_input
else:
raise ValueError('regression_data_input: Pandas dataframe or numpy array required.')
if (regression_data.shape[0] > original_data.shape[0]):
raise Exception('The sampled data has more entries than the original dataset.')
elif (regression_data.shape[1] != original_data.shape[1]):
raise Exception('The dimensions of the original and regression datasets do not match.')
elif ((regression_data.shape[1] == 1) or (original_data.shape[1] == 1)):
raise Exception('Input data requires at least two dimensions (X and Y data).')
self.original_data = original_data
self.regression_data = regression_data
if (number_of_crossvalidations is None):
print('The default number of cross-validation cases (3) is used.')
number_of_crossvalidations = 3
elif (number_of_crossvalidations > 10):
warnings.warn('The number of cross-validations entered is large. The simulation may take a while to run')
self.number_of_crossvalidations = number_of_crossvalidations
if (not isinstance(maximum_polynomial_order, int)):
raise Exception('Maximum polynomial order must be an integer')
elif (maximum_polynomial_order > 10):
warnings.warn('The maximum allowed polynomial order is 10. Value has been adjusted to 10.')
maximum_polynomial_order = 10
self.max_polynomial_order = maximum_polynomial_order
self.number_of_x_vars = (regression_data.shape[1] - 1)
if (training_split is None):
print('The default training/cross-validation split of 0.75 is used.')
training_split = 0.75
elif ((training_split >= 1) or (training_split <= 0)):
raise Exception('Fraction of samples used for training must be between 0 and 1')
self.fraction_training = training_split
if (no_adaptive_samples is None):
no_adaptive_samples = 4
self.no_adaptive_samples = no_adaptive_samples
self.number_of_samples = regression_data.shape[0]
if (max_fraction_training_samples is None):
max_fraction_training_samples = 0.5
elif ((max_fraction_training_samples > 1) or (max_fraction_training_samples < 0)):
raise Exception('The fraction for the maximum number of training samples must be between 0 and 1')
self.max_fraction_training_samples = max_fraction_training_samples
if ((regression_data.shape[0] < original_data.shape[0]) and (max_iter is None)):
max_iter = 10
if ((regression_data.shape[0] == original_data.shape[0]) or (no_adaptive_samples == 0)):
print('No iterations will be run.')
max_iter = 0
self.max_iter = max_iter
if (not isinstance(self.number_of_crossvalidations, int)):
raise Exception('Number of cross-validations must be an integer')
elif (not isinstance(self.no_adaptive_samples, int)):
raise Exception('Number of adaptive samples must be an integer')
elif (not isinstance(self.max_iter, int)):
raise Exception('Maximum number of iterations must be an integer')
elif (self.max_polynomial_order >= regression_data.shape[0]):
raise Exception('max_polynomial_order too high for the number of samples supplied')
if ((self.max_polynomial_order <= 0) or (self.number_of_crossvalidations <= 0)):
raise Exception('maximum_polynomial_order and number_of_crossvalidations must be positive, non-zero integers')
elif ((self.no_adaptive_samples < 0) or (self.max_iter < 0)):
raise Exception('no_adaptive_samples and max_iter must be non-negative')
if (solution_method is None):
solution_method = 'pyomo'
self.solution_method = solution_method
print('Default parameter estimation method is used.')
elif (not isinstance(solution_method, string_types)):
raise Exception('Invalid solution method. Must be of type <str>.')
elif ((solution_method.lower() == 'mle') or (solution_method.lower() == 'pyomo') or (solution_method.lower() == 'bfgs')):
solution_method = solution_method.lower()
self.solution_method = solution_method
else:
raise Exception('Invalid parameter estimation method entered. Select one of maximum likelihood (solution_method="mle"), Pyomo optimization (solution_method="pyomo") or BFGS (solution_method="bfgs") methods. ')
print('Parameter estimation method: ', self.solution_method, '\n')
if (multinomials is None):
self.multinomials = 1
elif (multinomials == 1):
self.multinomials = 1
elif (multinomials == 0):
self.multinomials = 0
else:
raise Exception('Multinomial must be binary: input "1" for "Yes" and "0" for "No". ')
self.feature_list = []
self.additional_term_expressions = []
self.optimal_weights_array = None
self.final_polynomial_order = None
self.errors = None
self.number_of_iterations = None
self.iteration_summary = None
self.additional_features_data = None
self.final_training_data = None
self.dataframe_of_optimal_weights_polynomial = None
self.dataframe_of_optimal_weights_extra_terms = None
self.extra_terms_feature_vector = None
self.fit_status = None<|docstring|>Initialization of PolynomialRegression class.
Args:
regression_data_input(NumPy Array or Pandas Dataframe) : The dataset for regression training. It is expected to contain features and output data, with the output values (Y) in the last column.
original_data_input(NumPy Array or Pandas Dataframe) : If **regression_data_input** was drawn from a larger dataset by some sampling approach, the larger dataset may be provided here.
maximum_polynomial_order(int) : The maximum polynomial order to be considered.
Keyword Args:
number_of_crossvalidations(int) : The number of polynomial fittings and cross-validations to be carried out for each polynomial function/expression. Must be a positive, non-zero integer. Default=3.
training_split(float): The training/test split to be used for regression_data_input. Must be between 0 and 1. Default = 0.75
solution_method(str): The method to be used for solving the least squares optimization problem for polynomial regression. Three options are available:
(a) "MLE" : The mle (maximum likelihood estimate) method solves the least squares problem using linear algebra. Details of the method may be found in Forrester et al.
(b) "BFGS" : This approach solves the least squares problem using scipy's BFGS algorithm.
(c) "pyomo": This option solves the optimization problem in pyomo with IPOPT as solver. This is the default option.
multinomials(bool): This option determines whether or not multinomial terms are considered during polynomial fitting. Takes 0 for No and 1 for Yes. Default = 1.
Returns:
**self** object containing all the input information.
Raises:
ValueError:
- The input datasets (**original_data_input** or **regression_data_input**) are of the wrong type (not Numpy arrays or Pandas Dataframes)
Exception:
* **maximum_polynomial_order** is not a positive, non-zero integer or **maximum_polynomial_order** is higher than the number of training samples available
Exception:
* **solution_method** is not 'mle', 'pyomo' or 'bfgs'
Exception:
- **multinomials** is not binary (0 or 1)
Exception:
- **training_split** is not between 0 and 1
Exception:
- **number_of_crossvalidations** is not a positive, non-zero integer
Exception:
- **max_fraction_training_samples** is not between 0 and 1
Exception:
- **no_adaptive_samples** is not a positive, non-zero integer
Exception:
- **max_iter** is not a positive, non-zero integer
warnings.warn:
- When the number of cross-validations is too high, i.e. number_of_crossvalidations > 10<|endoftext|> |
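A minimal usage sketch for the constructor documented in the record above; the import path follows the file path given in the record's metadata, while the toy dataset, variable names, and keyword values are illustrative assumptions rather than anything taken from the source.
# Illustrative sketch (assumed values): building the inputs described in the
# __init__ docstring above and constructing the class.
import numpy as np
from idaes.surrogate.pysmo.polynomial_regression import PolynomialRegression

# Toy data: two features in the first columns, output y in the last column.
x = np.random.default_rng(0).uniform(size=(50, 2))
y = (x[:, 0] ** 2 + 0.5 * x[:, 1]).reshape(-1, 1)
data = np.concatenate((x, y), axis=1)

# original_data_input and regression_data_input may be the same array when no
# prior sampling step was applied.
poly = PolynomialRegression(
    original_data_input=data,
    regression_data_input=data,
    maximum_polynomial_order=3,
    number_of_crossvalidations=3,
    training_split=0.75,
    solution_method='pyomo',
    multinomials=1,
)
A subsequent call to the class's fitting routine (not shown in this record) would then carry out the regression.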
391c5973abd5bf7ffbe16a2121162536d9dee571a163e02e1a6ddad2a1f46559 | def training_test_data_creation(self, additional_features=None):
'\n\n The training_test_data_creation splits data into training and test data sets.\n\n Given the number of cross-validations and the required training/test split, it:\n - calculates the number of training samples as num_training = int(training_split x total number of samples),\n - shuffles regression_data number_of_crossvalidations times,\n - splits off the top num_training samples in each shuffle as individual training sets, and\n - takes the bottom (total number of samples - num_training) samples in each shuffle to create its corresponding test dataset.\n\n Args:\n self: containing the number of training samples (self.number_of_samples), training/test split (self.fraction_training) and the required number of cross-validations (self.number_of_crossvalidations).\n\n Keyword Args:\n additional_features(NumPy Array): A numpy array containing additional features provided by the user. When supplied, additional_features is column-appended to self.regression data before the training and tests sets are created.\n\n Returns:\n Tuple(training_data, cross_val_data)\n\n - training_data: Dictionary containing all the training datasets created.\n\n * When no additional features have been specified, the dictionary has a length of number_of_crossvalidations.\n * When additional features have been specified, the dictionary has a length of 2 * number_of_crossvalidations, with the training data for additional_features stored separately.\n\n - cross_val_data: Dictionary containing all the test datasets created. Dictionary will have the same length as training_data.\n\n '
training_data = {}
cross_val_data = {}
num_training = int(np.around((self.number_of_samples * self.fraction_training)))
if (num_training == 0):
raise Exception('The inputted fraction_training is too low.')
elif (num_training == self.number_of_samples):
raise Exception('The inputted fraction_training is too high.')
for i in range(1, (self.number_of_crossvalidations + 1)):
np.random.seed(i)
if (additional_features is None):
A = np.zeros((self.regression_data.shape[0], self.regression_data.shape[1]))
A[(:, :)] = self.regression_data
np.random.shuffle(A)
training_data[('training_set_' + str(i))] = A[(0:num_training, :)]
cross_val_data[('test_set_' + str(i))] = A[(num_training:, :)]
elif (additional_features is not None):
A = np.zeros((self.regression_data.shape[0], (self.regression_data.shape[1] + additional_features.shape[1])))
A[(:, 0:self.regression_data.shape[1])] = self.regression_data
A[(:, self.regression_data.shape[1]:)] = additional_features
np.random.shuffle(A)
training_data[('training_set_' + str(i))] = A[(0:num_training, :self.regression_data.shape[1])]
training_data[('training_extras_' + str(i))] = A[(0:num_training, self.regression_data.shape[1]:)]
cross_val_data[('test_set_' + str(i))] = A[(num_training:, :self.regression_data.shape[1])]
cross_val_data[('test_extras_' + str(i))] = A[(num_training:, self.regression_data.shape[1]:)]
return (training_data, cross_val_data) | The training_test_data_creation splits data into training and test data sets.
Given the number of cross-validations and the required training/test split, it:
- calculates the number of training samples as num_training = int(training_split x total number of samples),
- shuffles regression_data number_of_crossvalidations times,
- splits off the top num_training samples in each shuffle as individual training sets, and
- takes the bottom (total number of samples - num_training) samples in each shuffle to create its corresponding test dataset.
Args:
self: containing the number of training samples (self.number_of_samples), training/test split (self.fraction_training) and the required number of cross-validations (self.number_of_crossvalidations).
Keyword Args:
additional_features(NumPy Array): A numpy array containing additional features provided by the user. When supplied, additional_features is column-appended to self.regression_data before the training and test sets are created.
Returns:
Tuple(training_data, cross_val_data)
- training_data: Dictionary containing all the training datasets created.
* When no additional features have been specified, the dictionary has a length of number_of_crossvalidations.
* When additional features have been specified, the dictionary has a length of 2 * number_of_crossvalidations, with the training data for additional_features stored separately.
- cross_val_data: Dictionary containing all the test datasets created. Dictionary will have the same length as training_data. | idaes/surrogate/pysmo/polynomial_regression.py | training_test_data_creation | adowling2/idaes-pse | 112 | python | def training_test_data_creation(self, additional_features=None):
'\n\n The training_test_data_creation splits data into training and test data sets.\n\n Given the number of cross-validations and the required training/test split, it:\n - calculates the number of training samples as num_training = int(training_split x total number of samples),\n - shuffles regression_data number_of_crossvalidations times,\n - splits off the top num_training samples in each shuffle as individual training sets, and\n - takes the bottom (total number of samples - num_training) samples in each shuffle to create its corresponding test dataset.\n\n Args:\n self: containing the number of training samples (self.number_of_samples), training/test split (self.fraction_training) and the required number of cross-validations (self.number_of_crossvalidations).\n\n Keyword Args:\n additional_features(NumPy Array): A numpy array containing additional features provided by the user. When supplied, additional_features is column-appended to self.regression data before the training and tests sets are created.\n\n Returns:\n Tuple(training_data, cross_val_data)\n\n - training_data: Dictionary containing all the training datasets created.\n\n * When no additional features have been specified, the dictionary has a length of number_of_crossvalidations.\n * When additional features have been specified, the dictionary has a length of 2 * number_of_crossvalidations, with the training data for additional_features stored separately.\n\n - cross_val_data: Dictionary containing all the test datasets created. Dictionary will have the same length as training_data.\n\n '
training_data = {}
cross_val_data = {}
num_training = int(np.around((self.number_of_samples * self.fraction_training)))
if (num_training == 0):
raise Exception('The inputted fraction_training is too low.')
elif (num_training == self.number_of_samples):
raise Exception('The inputted fraction_training is too high.')
for i in range(1, (self.number_of_crossvalidations + 1)):
np.random.seed(i)
if (additional_features is None):
A = np.zeros((self.regression_data.shape[0], self.regression_data.shape[1]))
A[(:, :)] = self.regression_data
np.random.shuffle(A)
training_data[('training_set_' + str(i))] = A[(0:num_training, :)]
cross_val_data[('test_set_' + str(i))] = A[(num_training:, :)]
elif (additional_features is not None):
A = np.zeros((self.regression_data.shape[0], (self.regression_data.shape[1] + additional_features.shape[1])))
A[(:, 0:self.regression_data.shape[1])] = self.regression_data
A[(:, self.regression_data.shape[1]:)] = additional_features
np.random.shuffle(A)
training_data[('training_set_' + str(i))] = A[(0:num_training, :self.regression_data.shape[1])]
training_data[('training_extras_' + str(i))] = A[(0:num_training, self.regression_data.shape[1]:)]
cross_val_data[('test_set_' + str(i))] = A[(num_training:, :self.regression_data.shape[1])]
cross_val_data[('test_extras_' + str(i))] = A[(num_training:, self.regression_data.shape[1]:)]
return (training_data, cross_val_data) | def training_test_data_creation(self, additional_features=None):
'\n\n The training_test_data_creation splits data into training and test data sets.\n\n Given the number of cross-validations and the required training/test split, it:\n - calculates the number of training samples as num_training = int(training_split x total number of samples),\n - shuffles regression_data number_of_crossvalidations times,\n - splits off the top num_training samples in each shuffle as individual training sets, and\n - takes the bottom (total number of samples - num_training) samples in each shuffle to create its corresponding test dataset.\n\n Args:\n self: containing the number of training samples (self.number_of_samples), training/test split (self.fraction_training) and the required number of cross-validations (self.number_of_crossvalidations).\n\n Keyword Args:\n additional_features(NumPy Array): A numpy array containing additional features provided by the user. When supplied, additional_features is column-appended to self.regression data before the training and tests sets are created.\n\n Returns:\n Tuple(training_data, cross_val_data)\n\n - training_data: Dictionary containing all the training datasets created.\n\n * When no additional features have been specified, the dictionary has a length of number_of_crossvalidations.\n * When additional features have been specified, the dictionary has a length of 2 * number_of_crossvalidations, with the training data for additional_features stored separately.\n\n - cross_val_data: Dictionary containing all the test datasets created. Dictionary will have the same length as training_data.\n\n '
training_data = {}
cross_val_data = {}
num_training = int(np.around((self.number_of_samples * self.fraction_training)))
if (num_training == 0):
raise Exception('The inputted fraction_training is too low.')
elif (num_training == self.number_of_samples):
raise Exception('The inputted fraction_training is too high.')
for i in range(1, (self.number_of_crossvalidations + 1)):
np.random.seed(i)
if (additional_features is None):
A = np.zeros((self.regression_data.shape[0], self.regression_data.shape[1]))
A[(:, :)] = self.regression_data
np.random.shuffle(A)
training_data[('training_set_' + str(i))] = A[(0:num_training, :)]
cross_val_data[('test_set_' + str(i))] = A[(num_training:, :)]
elif (additional_features is not None):
A = np.zeros((self.regression_data.shape[0], (self.regression_data.shape[1] + additional_features.shape[1])))
A[(:, 0:self.regression_data.shape[1])] = self.regression_data
A[(:, self.regression_data.shape[1]:)] = additional_features
np.random.shuffle(A)
training_data[('training_set_' + str(i))] = A[(0:num_training, :self.regression_data.shape[1])]
training_data[('training_extras_' + str(i))] = A[(0:num_training, self.regression_data.shape[1]:)]
cross_val_data[('test_set_' + str(i))] = A[(num_training:, :self.regression_data.shape[1])]
cross_val_data[('test_extras_' + str(i))] = A[(num_training:, self.regression_data.shape[1]:)]
return (training_data, cross_val_data)<|docstring|>The training_test_data_creation splits data into training and test data sets.
Given the number of cross-validations and the required training/test split, it:
- calculates the number of training samples as num_training = int(training_split x total number of samples),
- shuffles regression_data number_of_crossvalidations times,
- splits off the top num_training samples in each shuffle as individual training sets, and
- takes the bottom (total number of samples - num_training) samples in each shuffle to create its corresponding test dataset.
Args:
self: containing the number of training samples (self.number_of_samples), training/test split (self.fraction_training) and the required number of cross-validations (self.number_of_crossvalidations).
Keyword Args:
additional_features(NumPy Array): A numpy array containing additional features provided by the user. When supplied, additional_features is column-appended to self.regression_data before the training and test sets are created.
Returns:
Tuple(training_data, cross_val_data)
- training_data: Dictionary containing all the training datasets created.
* When no additional features have been specified, the dictionary has a length of number_of_crossvalidations.
* When additional features have been specified, the dictionary has a length of 2 * number_of_crossvalidations, with the training data for additional_features stored separately.
- cross_val_data: Dictionary containing all the test datasets created. Dictionary will have the same length as training_data.<|endoftext|> |
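The docstring above walks through the shuffle-and-split scheme step by step; the stand-alone sketch below reproduces that idea with NumPy on assumed toy data, so the helper name and values are illustrative and not taken from the source.
# Illustrative sketch of the split described above: seed with the fold index,
# shuffle a copy of the data, then take the top fraction_training rows as the
# training set and the remainder as the test set.
import numpy as np

def split_once(regression_data, fraction_training, fold_index):
    num_training = int(np.around(regression_data.shape[0] * fraction_training))
    np.random.seed(fold_index)          # one seed per cross-validation fold
    shuffled = regression_data.copy()
    np.random.shuffle(shuffled)         # shuffles rows in place
    return shuffled[:num_training, :], shuffled[num_training:, :]

data = np.arange(40, dtype=float).reshape(10, 4)
train_1, test_1 = split_once(data, 0.75, fold_index=1)
print(train_1.shape, test_1.shape)      # (8, 4) (2, 4) with a 0.75 split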
06fe12d875eb77fb7ea14cffa885f666e936342e40428b61c7896e6e7028adae | @classmethod
def polygeneration(self, polynomial_order, multinomials, x_input_train_data, additional_x_training_data=None):
'\n\n This function generates a x-variable vector for the required polynomial order. This is done in four stages:\n\n - First, generates the pure mononomials are generated by increasing the polynomial degree by 1 until polynomial_order is reached.\n - Next, the first-order multinomials are generated if self.multinomials = 1. This is implemented in suci a way that each multinomial appears only once, i.e. x_i.x_j = x_j.x_i. The multinomial columns are appended to the enx of the array.\n - Next, a column of ones is inserted in the first column space to represent the constant term.\n - Finally, the columns containing the extra terms supplied by the user are added to the end of the array (when available).\n\n Thus, the format of the output array is [constant, nmononomials, multinomials, extra terms]\n\n Args:\n polynomial_order(int): The polynomial order currently under consideration\n multinomials(bool): Boolean variable that determines whether or not multinomial terms are considered during polynomial fitting.\n x_input_train_data(NumPy Array): Input data containing features supplied by the user\n\n Keyword Args:\n additional_x_training_data(NumPy Array): Array containing additional features supplied by the user\n\n Returns:\n x_train_data(NumPy Array): Array containing all polynomial features to be considered during regression\n\n Example:\n if polynomial_order=2, numtinomials=1, x_input_train_data = [x1, x2, x3], additional_x_training_data = [sin(x1), tanh(x3)], then x_train_data will contain the regression features\n x_train_data = [1, x1, x2, x3, x1^2, x2^2, x3^2, x1.x2, x1.x3, x2.x3, sin(x1), tanh(x3)]\n\n '
N = x_input_train_data.shape[0]
x_train_data = x_input_train_data
for i in range(2, (polynomial_order + 1)):
x_train_data = np.concatenate((x_train_data, (x_input_train_data ** i)), axis=1)
if (multinomials == 1):
for i in range(0, x_input_train_data.shape[1]):
for j in range(0, i):
x_train_data = np.concatenate((x_train_data, (x_input_train_data[(:, i)] * x_input_train_data[(:, j)]).reshape(N, 1)), axis=1)
x_train_data = np.concatenate((np.ones((N, 1)), x_train_data), axis=1)
if (additional_x_training_data is not None):
x_train_data = np.concatenate((x_train_data, additional_x_training_data), axis=1)
return x_train_data | This function generates a x-variable vector for the required polynomial order. This is done in four stages:
- First, the pure monomials are generated by increasing the polynomial degree by 1 until polynomial_order is reached.
- Next, the first-order multinomials are generated if self.multinomials = 1. This is implemented in such a way that each multinomial appears only once, i.e. x_i.x_j = x_j.x_i. The multinomial columns are appended to the end of the array.
- Next, a column of ones is inserted in the first column space to represent the constant term.
- Finally, the columns containing the extra terms supplied by the user are added to the end of the array (when available).
Thus, the format of the output array is [constant, monomials, multinomials, extra terms]
Args:
polynomial_order(int): The polynomial order currently under consideration
multinomials(bool): Boolean variable that determines whether or not multinomial terms are considered during polynomial fitting.
x_input_train_data(NumPy Array): Input data containing features supplied by the user
Keyword Args:
additional_x_training_data(NumPy Array): Array containing additional features supplied by the user
Returns:
x_train_data(NumPy Array): Array containing all polynomial features to be considered during regression
Example:
if polynomial_order=2, multinomials=1, x_input_train_data = [x1, x2, x3], additional_x_training_data = [sin(x1), tanh(x3)], then x_train_data will contain the regression features
x_train_data = [1, x1, x2, x3, x1^2, x2^2, x3^2, x1.x2, x1.x3, x2.x3, sin(x1), tanh(x3)] | idaes/surrogate/pysmo/polynomial_regression.py | polygeneration | adowling2/idaes-pse | 112 | python | @classmethod
def polygeneration(self, polynomial_order, multinomials, x_input_train_data, additional_x_training_data=None):
'\n\n This function generates a x-variable vector for the required polynomial order. This is done in four stages:\n\n - First, generates the pure mononomials are generated by increasing the polynomial degree by 1 until polynomial_order is reached.\n - Next, the first-order multinomials are generated if self.multinomials = 1. This is implemented in suci a way that each multinomial appears only once, i.e. x_i.x_j = x_j.x_i. The multinomial columns are appended to the enx of the array.\n - Next, a column of ones is inserted in the first column space to represent the constant term.\n - Finally, the columns containing the extra terms supplied by the user are added to the end of the array (when available).\n\n Thus, the format of the output array is [constant, nmononomials, multinomials, extra terms]\n\n Args:\n polynomial_order(int): The polynomial order currently under consideration\n multinomials(bool): Boolean variable that determines whether or not multinomial terms are considered during polynomial fitting.\n x_input_train_data(NumPy Array): Input data containing features supplied by the user\n\n Keyword Args:\n additional_x_training_data(NumPy Array): Array containing additional features supplied by the user\n\n Returns:\n x_train_data(NumPy Array): Array containing all polynomial features to be considered during regression\n\n Example:\n if polynomial_order=2, numtinomials=1, x_input_train_data = [x1, x2, x3], additional_x_training_data = [sin(x1), tanh(x3)], then x_train_data will contain the regression features\n x_train_data = [1, x1, x2, x3, x1^2, x2^2, x3^2, x1.x2, x1.x3, x2.x3, sin(x1), tanh(x3)]\n\n '
N = x_input_train_data.shape[0]
x_train_data = x_input_train_data
for i in range(2, (polynomial_order + 1)):
x_train_data = np.concatenate((x_train_data, (x_input_train_data ** i)), axis=1)
if (multinomials == 1):
for i in range(0, x_input_train_data.shape[1]):
for j in range(0, i):
x_train_data = np.concatenate((x_train_data, (x_input_train_data[(:, i)] * x_input_train_data[(:, j)]).reshape(N, 1)), axis=1)
x_train_data = np.concatenate((np.ones((N, 1)), x_train_data), axis=1)
if (additional_x_training_data is not None):
x_train_data = np.concatenate((x_train_data, additional_x_training_data), axis=1)
return x_train_data | @classmethod
def polygeneration(self, polynomial_order, multinomials, x_input_train_data, additional_x_training_data=None):
'\n\n This function generates a x-variable vector for the required polynomial order. This is done in four stages:\n\n - First, generates the pure mononomials are generated by increasing the polynomial degree by 1 until polynomial_order is reached.\n - Next, the first-order multinomials are generated if self.multinomials = 1. This is implemented in suci a way that each multinomial appears only once, i.e. x_i.x_j = x_j.x_i. The multinomial columns are appended to the enx of the array.\n - Next, a column of ones is inserted in the first column space to represent the constant term.\n - Finally, the columns containing the extra terms supplied by the user are added to the end of the array (when available).\n\n Thus, the format of the output array is [constant, nmononomials, multinomials, extra terms]\n\n Args:\n polynomial_order(int): The polynomial order currently under consideration\n multinomials(bool): Boolean variable that determines whether or not multinomial terms are considered during polynomial fitting.\n x_input_train_data(NumPy Array): Input data containing features supplied by the user\n\n Keyword Args:\n additional_x_training_data(NumPy Array): Array containing additional features supplied by the user\n\n Returns:\n x_train_data(NumPy Array): Array containing all polynomial features to be considered during regression\n\n Example:\n if polynomial_order=2, numtinomials=1, x_input_train_data = [x1, x2, x3], additional_x_training_data = [sin(x1), tanh(x3)], then x_train_data will contain the regression features\n x_train_data = [1, x1, x2, x3, x1^2, x2^2, x3^2, x1.x2, x1.x3, x2.x3, sin(x1), tanh(x3)]\n\n '
N = x_input_train_data.shape[0]
x_train_data = x_input_train_data
for i in range(2, (polynomial_order + 1)):
x_train_data = np.concatenate((x_train_data, (x_input_train_data ** i)), axis=1)
if (multinomials == 1):
for i in range(0, x_input_train_data.shape[1]):
for j in range(0, i):
x_train_data = np.concatenate((x_train_data, (x_input_train_data[(:, i)] * x_input_train_data[(:, j)]).reshape(N, 1)), axis=1)
x_train_data = np.concatenate((np.ones((N, 1)), x_train_data), axis=1)
if (additional_x_training_data is not None):
x_train_data = np.concatenate((x_train_data, additional_x_training_data), axis=1)
return x_train_data<|docstring|>This function generates a x-variable vector for the required polynomial order. This is done in four stages:
- First, the pure monomials are generated by increasing the polynomial degree by 1 until polynomial_order is reached.
- Next, the first-order multinomials are generated if self.multinomials = 1. This is implemented in such a way that each multinomial appears only once, i.e. x_i.x_j = x_j.x_i. The multinomial columns are appended to the end of the array.
- Next, a column of ones is inserted in the first column space to represent the constant term.
- Finally, the columns containing the extra terms supplied by the user are added to the end of the array (when available).
Thus, the format of the output array is [constant, monomials, multinomials, extra terms]
Args:
polynomial_order(int): The polynomial order currently under consideration
multinomials(bool): Boolean variable that determines whether or not multinomial terms are considered during polynomial fitting.
x_input_train_data(NumPy Array): Input data containing features supplied by the user
Keyword Args:
additional_x_training_data(NumPy Array): Array containing additional features supplied by the user
Returns:
x_train_data(NumPy Array): Array containing all polynomial features to be considered during regression
Example:
if polynomial_order=2, multinomials=1, x_input_train_data = [x1, x2, x3], additional_x_training_data = [sin(x1), tanh(x3)], then x_train_data will contain the regression features
x_train_data = [1, x1, x2, x3, x1^2, x2^2, x3^2, x1.x2, x1.x3, x2.x3, sin(x1), tanh(x3)]<|endoftext|> |
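As a companion to the worked example in the docstring above, here is an illustrative sketch of the feature layout [constant, monomials, multinomials, extra terms]; the helper name and sample values are assumptions and this is not the library routine itself.
# Illustrative sketch of the feature construction described above.
import numpy as np

def build_features(x, order, include_multinomials=True, extra=None):
    n = x.shape[0]
    feats = x.copy()
    for p in range(2, order + 1):                 # pure monomials x_i**p
        feats = np.concatenate((feats, x ** p), axis=1)
    if include_multinomials:
        for i in range(x.shape[1]):
            for j in range(i):                    # each pair x_i * x_j appears once
                feats = np.concatenate((feats, (x[:, i] * x[:, j]).reshape(n, 1)), axis=1)
    feats = np.concatenate((np.ones((n, 1)), feats), axis=1)   # constant column first
    if extra is not None:
        feats = np.concatenate((feats, extra), axis=1)
    return feats

x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(build_features(x, order=2).shape)           # (2, 10): 1 + 3 + 3 + 3 columns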
b680fa797d67dbf1d08697bd31b981863a965300b2d7997b94cba4f45db0c54f | @staticmethod
def cost_function(theta, x, y, reg_parameter):
'\n\n This function is an implementation of the cost function for linear regression:\n cost = [sum of square errors over m samples / (2 * m)] + [reg_parameter * theta*2 / (2 * m)]\n\n This is the objective function for the BFGS optimization problem.\n\n Args:\n theta : polynomial coefficients/weights, (n x 1) in size\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n reg_parameter: reqularization parameter, set to\n\n Returns:\n cost_value : the cost value for the fit, the objective value of the optimization problem\n\n '
y = y.reshape(y.shape[0], 1)
y_prediction = np.matmul(x, theta)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
cost_value = ((0.5 / x.shape[0]) * np.sum(((y - y_prediction) ** 2)))
cost_penalty = (((reg_parameter * 0.5) / x.shape[0]) * np.sum((theta ** 2)))
cost_value = (cost_value + cost_penalty)
return cost_value | This function is an implementation of the cost function for linear regression:
cost = [sum of squared errors over m samples / (2 * m)] + [reg_parameter * theta^2 / (2 * m)]
This is the objective function for the BFGS optimization problem.
Args:
theta : polynomial coefficients/weights, (n x 1) in size
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
reg_parameter: regularization parameter, set to zero by the calling method in this module
Returns:
cost_value : the cost value for the fit, the objective value of the optimization problem | idaes/surrogate/pysmo/polynomial_regression.py | cost_function | adowling2/idaes-pse | 112 | python | @staticmethod
def cost_function(theta, x, y, reg_parameter):
'\n\n This function is an implementation of the cost function for linear regression:\n cost = [sum of square errors over m samples / (2 * m)] + [reg_parameter * theta*2 / (2 * m)]\n\n This is the objective function for the BFGS optimization problem.\n\n Args:\n theta : polynomial coefficients/weights, (n x 1) in size\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n reg_parameter: reqularization parameter, set to\n\n Returns:\n cost_value : the cost value for the fit, the objective value of the optimization problem\n\n '
y = y.reshape(y.shape[0], 1)
y_prediction = np.matmul(x, theta)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
cost_value = ((0.5 / x.shape[0]) * np.sum(((y - y_prediction) ** 2)))
cost_penalty = (((reg_parameter * 0.5) / x.shape[0]) * np.sum((theta ** 2)))
cost_value = (cost_value + cost_penalty)
return cost_value | @staticmethod
def cost_function(theta, x, y, reg_parameter):
'\n\n This function is an implementation of the cost function for linear regression:\n cost = [sum of square errors over m samples / (2 * m)] + [reg_parameter * theta*2 / (2 * m)]\n\n This is the objective function for the BFGS optimization problem.\n\n Args:\n theta : polynomial coefficients/weights, (n x 1) in size\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n reg_parameter: reqularization parameter, set to\n\n Returns:\n cost_value : the cost value for the fit, the objective value of the optimization problem\n\n '
y = y.reshape(y.shape[0], 1)
y_prediction = np.matmul(x, theta)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
cost_value = ((0.5 / x.shape[0]) * np.sum(((y - y_prediction) ** 2)))
cost_penalty = (((reg_parameter * 0.5) / x.shape[0]) * np.sum((theta ** 2)))
cost_value = (cost_value + cost_penalty)
return cost_value<|docstring|>This function is an implementation of the cost function for linear regression:
cost = [sum of squared errors over m samples / (2 * m)] + [reg_parameter * theta^2 / (2 * m)]
This is the objective function for the BFGS optimization problem.
Args:
theta : polynomial coefficients/weights, (n x 1) in size
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
reg_parameter: regularization parameter, set to zero by the calling method in this module
Returns:
cost_value : the cost value for the fit, the objective value of the optimization problem<|endoftext|> |
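A small numeric illustration of the cost expression above, using assumed toy values rather than anything from the source record.
# Illustrative check: 0.5/m * sum((X.theta - y)^2) plus the ridge penalty term.
import numpy as np

X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])   # m = 3 samples, n = 2 features
y = np.array([[1.0], [2.0], [3.0]])
theta = np.array([[0.5], [1.0]])
reg_parameter = 0.0

residual = X @ theta - y
cost = 0.5 / X.shape[0] * np.sum(residual ** 2) + 0.5 * reg_parameter / X.shape[0] * np.sum(theta ** 2)
print(cost)   # 0.125 for these values: the residuals are [-0.5, -0.5, -0.5]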
f7359be1b2f20d766a5bec96c381b6bb178c5666d19c498d73d12444bdcf6eeb | @staticmethod
def gradient_function(theta, x, y, reg_parameter):
'\n\n This function is an implementation of the gradient function for linear regression:\n if\n cost = [(A.x - y)^2 / 2m] + [reg_parameter * theta*2/ (2 * m)],\n then\n gradient = [((A.x - y)* A) / m] + [reg_parameter * theta/ m]\n\n This is the gradient function supplied to the BFGS optimization algorithm.\n\n Args:\n theta : polynomial coefficients/weights, (n x 1) in size\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n reg_parameter: reqularization parameter\n\n Returns:\n grad_value : the cost gradients for the fit, size (n x 1)\n\n '
y = y.reshape(y.shape[0], 1)
y_prediction = np.matmul(x, theta)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
t1 = ((y_prediction - y) * x)
grad_values = ((1 / x.shape[0]) * np.sum(t1, axis=0))
gradient_penalty = ((reg_parameter / x.shape[0]) * theta)
grad_values = (grad_values + gradient_penalty)
grad_values = grad_values.reshape(theta.size)
return grad_values | This function is an implementation of the gradient function for linear regression:
if
cost = [(A.x - y)^2 / (2 * m)] + [reg_parameter * theta^2 / (2 * m)],
then
gradient = [((A.x - y) * A) / m] + [reg_parameter * theta / m]
This is the gradient function supplied to the BFGS optimization algorithm.
Args:
theta : polynomial coefficients/weights, (n x 1) in size
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
reg_parameter: regularization parameter
Returns:
grad_value : the cost gradients for the fit, size (n x 1) | idaes/surrogate/pysmo/polynomial_regression.py | gradient_function | adowling2/idaes-pse | 112 | python | @staticmethod
def gradient_function(theta, x, y, reg_parameter):
'\n\n This function is an implementation of the gradient function for linear regression:\n if\n cost = [(A.x - y)^2 / 2m] + [reg_parameter * theta*2/ (2 * m)],\n then\n gradient = [((A.x - y)* A) / m] + [reg_parameter * theta/ m]\n\n This is the gradient function supplied to the BFGS optimization algorithm.\n\n Args:\n theta : polynomial coefficients/weights, (n x 1) in size\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n reg_parameter: reqularization parameter\n\n Returns:\n grad_value : the cost gradients for the fit, size (n x 1)\n\n '
y = y.reshape(y.shape[0], 1)
y_prediction = np.matmul(x, theta)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
t1 = ((y_prediction - y) * x)
grad_values = ((1 / x.shape[0]) * np.sum(t1, axis=0))
gradient_penalty = ((reg_parameter / x.shape[0]) * theta)
grad_values = (grad_values + gradient_penalty)
grad_values = grad_values.reshape(theta.size)
return grad_values | @staticmethod
def gradient_function(theta, x, y, reg_parameter):
'\n\n This function is an implementation of the gradient function for linear regression:\n if\n cost = [(A.x - y)^2 / 2m] + [reg_parameter * theta*2/ (2 * m)],\n then\n gradient = [((A.x - y)* A) / m] + [reg_parameter * theta/ m]\n\n This is the gradient function supplied to the BFGS optimization algorithm.\n\n Args:\n theta : polynomial coefficients/weights, (n x 1) in size\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n reg_parameter: reqularization parameter\n\n Returns:\n grad_value : the cost gradients for the fit, size (n x 1)\n\n '
y = y.reshape(y.shape[0], 1)
y_prediction = np.matmul(x, theta)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
t1 = ((y_prediction - y) * x)
grad_values = ((1 / x.shape[0]) * np.sum(t1, axis=0))
gradient_penalty = ((reg_parameter / x.shape[0]) * theta)
grad_values = (grad_values + gradient_penalty)
grad_values = grad_values.reshape(theta.size)
return grad_values<|docstring|>This function is an implementation of the gradient function for linear regression:
if
cost = [(A.x - y)^2 / (2 * m)] + [reg_parameter * theta^2 / (2 * m)],
then
gradient = [((A.x - y) * A) / m] + [reg_parameter * theta / m]
This is the gradient function supplied to the BFGS optimization algorithm.
Args:
theta : polynomial coefficients/weights, (n x 1) in size
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
reg_parameter: regularization parameter
Returns:
grad_value : the cost gradients for the fit, size (n x 1)<|endoftext|> |
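One way to sanity-check the analytic gradient above is a central finite-difference comparison; the sketch below does that on assumed toy data and is not part of the source.
# Illustrative finite-difference check of the analytic gradient:
# grad_k is approximately (cost(theta + h*e_k) - cost(theta - h*e_k)) / (2h).
import numpy as np

def cost(theta, X, y, reg):
    r = X @ theta.reshape(-1, 1) - y
    return 0.5 / X.shape[0] * np.sum(r ** 2) + 0.5 * reg / X.shape[0] * np.sum(theta ** 2)

def grad(theta, X, y, reg):
    r = X @ theta.reshape(-1, 1) - y
    return (X.T @ r).ravel() / X.shape[0] + reg * theta / X.shape[0]

X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
y = np.array([[1.0], [2.0], [3.0]])
theta = np.array([0.5, 1.0])
h = 1e-6
numeric = np.array([
    (cost(theta + h * e, X, y, 0.0) - cost(theta - h * e, X, y, 0.0)) / (2 * h)
    for e in np.eye(2)
])
print(np.allclose(numeric, grad(theta, X, y, 0.0)))   # True up to rounding error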
788bf5afbedaeb25210b7699bb3ef127a3261ca1cbeea8024722579f382e4e4e | def bfgs_parameter_optimization(self, x, y):
"\n This function performs parameter optimization using scipy's BFGS algorithm.\n It takes in the functions pre-defined functions cost_function and gradient_function as the cost and gradient functions.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Initialization:\n The regularization parameter and initial weights are set to zero,\n reg_parameter = 0\n init_theta = 0\n\n Returns:\n theta: The optimal linear regression weights found\n\n "
init_theta = np.zeros((x.shape[1], 1))
reg_parameter = 0.0
other_args = (x, y, reg_parameter)
theta = opt.fmin_bfgs(self.cost_function, init_theta, fprime=self.gradient_function, args=other_args)
return theta | This function performs parameter optimization using scipy's BFGS algorithm.
It takes in the pre-defined functions cost_function and gradient_function as the cost and gradient functions.
Args:
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
Initialization:
The regularization parameter and initial weights are set to zero,
reg_parameter = 0
init_theta = 0
Returns:
theta: The optimal linear regression weights found | idaes/surrogate/pysmo/polynomial_regression.py | bfgs_parameter_optimization | adowling2/idaes-pse | 112 | python | def bfgs_parameter_optimization(self, x, y):
"\n This function performs parameter optimization using scipy's BFGS algorithm.\n It takes in the functions pre-defined functions cost_function and gradient_function as the cost and gradient functions.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Initialization:\n The regularization parameter and initial weights are set to zero,\n reg_parameter = 0\n init_theta = 0\n\n Returns:\n theta: The optimal linear regression weights found\n\n "
init_theta = np.zeros((x.shape[1], 1))
reg_parameter = 0.0
other_args = (x, y, reg_parameter)
theta = opt.fmin_bfgs(self.cost_function, init_theta, fprime=self.gradient_function, args=other_args)
return theta | def bfgs_parameter_optimization(self, x, y):
"\n This function performs parameter optimization using scipy's BFGS algorithm.\n It takes in the functions pre-defined functions cost_function and gradient_function as the cost and gradient functions.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Initialization:\n The regularization parameter and initial weights are set to zero,\n reg_parameter = 0\n init_theta = 0\n\n Returns:\n theta: The optimal linear regression weights found\n\n "
init_theta = np.zeros((x.shape[1], 1))
reg_parameter = 0.0
other_args = (x, y, reg_parameter)
theta = opt.fmin_bfgs(self.cost_function, init_theta, fprime=self.gradient_function, args=other_args)
return theta<|docstring|>This function performs parameter optimization using scipy's BFGS algorithm.
It takes in the pre-defined functions cost_function and gradient_function as the cost and gradient functions.
Args:
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
Initialization:
The regularization parameter and initial weights are set to zero,
reg_parameter = 0
init_theta = 0
Returns:
theta: The optimal linear regression weights found<|endoftext|> |
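A hedged usage sketch of the same call pattern with scipy's fmin_bfgs, on synthetic data rather than the class's own cost_function/gradient_function; the toy cost and grad below are assumptions, not the package's implementations.

import numpy as np
import scipy.optimize as opt

def cost(theta, x, y, reg):
    r = np.matmul(x, theta.reshape(-1, 1)) - y
    m = x.shape[0]
    return float(np.sum(r ** 2) / (2 * m) + reg * np.sum(theta ** 2) / (2 * m))

def grad(theta, x, y, reg):
    r = np.matmul(x, theta.reshape(-1, 1)) - y
    m = x.shape[0]
    g = np.matmul(x.T, r) / m + reg * theta.reshape(-1, 1) / m
    return g.ravel()                                  # fmin_bfgs expects a flat gradient

rng = np.random.default_rng(1)
x = rng.normal(size=(100, 4))
true_theta = np.array([[1.0], [-2.0], [0.5], [3.0]])
y = np.matmul(x, true_theta)                          # noise-free, so the fit should recover true_theta
theta_opt = opt.fmin_bfgs(cost, np.zeros(4), fprime=grad, args=(x, y, 0.0), disp=False)
print(np.round(theta_opt, 3))                         # close to [1, -2, 0.5, 3]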
e43d346cb57ab437a1f4cffb5121a4cdd0557e6c92931fb22d5adf1e670f17fe | @staticmethod
def MLE_estimate(x, y):
"\n\n Maximum likelihood estimate method for solving polynomial regression problems:\n\n If\n Ax = B,\n then\n x = inv_A * B\n\n where the inv_A is called the Moore-Penrose inverse.\n\n Numpy's pseudoinverse function has been used to calculate the inverse here.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Returns:\n phi: The optimal linear regression weights found\n\n For more details about the maximum likelihood estimate methos, see to Forrester et al.\n\n "
moore_penrose_inverse = np.linalg.pinv(x)
phi = np.matmul(moore_penrose_inverse, y)
return phi | Maximum likelihood estimate method for solving polynomial regression problems:
If
Ax = B,
then
x = inv_A * B
where inv_A is called the Moore-Penrose inverse.
Numpy's pseudoinverse function has been used to calculate the inverse here.
Args:
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
Returns:
phi: The optimal linear regression weights found
For more details about the maximum likelihood estimate method, refer to Forrester et al. | idaes/surrogate/pysmo/polynomial_regression.py | MLE_estimate | adowling2/idaes-pse | 112 | python | @staticmethod
def MLE_estimate(x, y):
"\n\n Maximum likelihood estimate method for solving polynomial regression problems:\n\n If\n Ax = B,\n then\n x = inv_A * B\n\n where the inv_A is called the Moore-Penrose inverse.\n\n Numpy's pseudoinverse function has been used to calculate the inverse here.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Returns:\n phi: The optimal linear regression weights found\n\n For more details about the maximum likelihood estimate methos, see to Forrester et al.\n\n "
moore_penrose_inverse = np.linalg.pinv(x)
phi = np.matmul(moore_penrose_inverse, y)
return phi | @staticmethod
def MLE_estimate(x, y):
"\n\n Maximum likelihood estimate method for solving polynomial regression problems:\n\n If\n Ax = B,\n then\n x = inv_A * B\n\n where the inv_A is called the Moore-Penrose inverse.\n\n Numpy's pseudoinverse function has been used to calculate the inverse here.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Returns:\n phi: The optimal linear regression weights found\n\n For more details about the maximum likelihood estimate methos, see to Forrester et al.\n\n "
moore_penrose_inverse = np.linalg.pinv(x)
phi = np.matmul(moore_penrose_inverse, y)
return phi<|docstring|>Maximum likelihood estimate method for solving polynomial regression problems:
If
Ax = B,
then
x = inv_A * B
where inv_A is called the Moore-Penrose inverse.
Numpy's pseudoinverse function has been used to calculate the inverse here.
Args:
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
Returns:
phi: The optimal linear regression weights found
For more details about the maximum likelihood estimate method, refer to Forrester et al.<|endoftext|>
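A small illustrative check (not from the source) of the Moore-Penrose route described above: phi = pinv(A).B is compared against numpy's reference least-squares solver on synthetic data.

import numpy as np

rng = np.random.default_rng(2)
x = rng.normal(size=(30, 3))
y = np.matmul(x, np.array([[2.0], [0.0], [-1.5]])) + 0.01 * rng.normal(size=(30, 1))

phi_pinv = np.matmul(np.linalg.pinv(x), y)            # phi = pinv(A) . B
phi_lstsq, *_ = np.linalg.lstsq(x, y, rcond=None)     # reference least-squares solution
print(np.allclose(phi_pinv, phi_lstsq, atol=1e-8))    # expect True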
d793088e9fa1a5741260dbb633e9ef84f097c5d555e3f0028b648f1a8b8306e8 | @staticmethod
def pyomo_optimization(x, y):
"\n Pyomo implementation of least squares optimization problem:\n\n Minimize cost = (y' - y) ^ 2\n subject to: y' = Ax\n\n The problem is solved within Pyomo's framework using IPOPT as solver.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Returns:\n phi: The optimal linear regression weights found\n "
model = ConcreteModel()
x_data = pd.DataFrame(x)
y_data = pd.DataFrame(y)
model.M = Set(initialize=x_data.index.values)
model.N = Set(initialize=x_data.columns.values)
model.P = Set(initialize=y_data.columns.values)
model.x = Param(model.M, model.N, initialize=x_data.stack().to_dict())
model.y_real = Param(model.M, model.P, initialize=y_data.stack().to_dict())
model.theta = Var(model.N, initialize=0.1, domain=Reals)
model.y_predictions = Var(model.M, model.P, initialize=y_data.stack().to_dict(), domain=Reals)
def xy_product(model, i, k):
return (model.y_predictions[(i, k)] == sum(((model.theta[j] * model.x[(i, j)]) for j in model.N for k in model.P)))
model.x_theta_product = Constraint(model.M, model.P, rule=xy_product, doc='Predicted value calc: y = hx')
def model_rms_error(model):
cost_value = ((1 / len(model.M)) * sum((((model.y_real[(i, k)] - model.y_predictions[(i, k)]) ** 2) for i in model.M for k in model.P)))
return cost_value
model.prediction_error = Objective(rule=model_rms_error, sense=minimize, doc='Minimum RMSE error')
instance = model
opt = SolverFactory('ipopt')
opt.options['max_iter'] = 10000000
result = opt.solve(instance)
phi = np.zeros((len(instance.theta), 1))
iterator = 0
for s in instance.N:
phi[(iterator, 0)] = instance.theta[s].value
iterator += 1
return phi | Pyomo implementation of least squares optimization problem:
Minimize cost = (y' - y) ^ 2
subject to: y' = Ax
The problem is solved within Pyomo's framework using IPOPT as solver.
Args:
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
Returns:
phi: The optimal linear regression weights found | idaes/surrogate/pysmo/polynomial_regression.py | pyomo_optimization | adowling2/idaes-pse | 112 | python | @staticmethod
def pyomo_optimization(x, y):
"\n Pyomo implementation of least squares optimization problem:\n\n Minimize cost = (y' - y) ^ 2\n subject to: y' = Ax\n\n The problem is solved within Pyomo's framework using IPOPT as solver.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Returns:\n phi: The optimal linear regression weights found\n "
model = ConcreteModel()
x_data = pd.DataFrame(x)
y_data = pd.DataFrame(y)
model.M = Set(initialize=x_data.index.values)
model.N = Set(initialize=x_data.columns.values)
model.P = Set(initialize=y_data.columns.values)
model.x = Param(model.M, model.N, initialize=x_data.stack().to_dict())
model.y_real = Param(model.M, model.P, initialize=y_data.stack().to_dict())
model.theta = Var(model.N, initialize=0.1, domain=Reals)
model.y_predictions = Var(model.M, model.P, initialize=y_data.stack().to_dict(), domain=Reals)
def xy_product(model, i, k):
return (model.y_predictions[(i, k)] == sum(((model.theta[j] * model.x[(i, j)]) for j in model.N for k in model.P)))
model.x_theta_product = Constraint(model.M, model.P, rule=xy_product, doc='Predicted value calc: y = hx')
def model_rms_error(model):
cost_value = ((1 / len(model.M)) * sum((((model.y_real[(i, k)] - model.y_predictions[(i, k)]) ** 2) for i in model.M for k in model.P)))
return cost_value
model.prediction_error = Objective(rule=model_rms_error, sense=minimize, doc='Minimum RMSE error')
instance = model
opt = SolverFactory('ipopt')
opt.options['max_iter'] = 10000000
result = opt.solve(instance)
phi = np.zeros((len(instance.theta), 1))
iterator = 0
for s in instance.N:
phi[(iterator, 0)] = instance.theta[s].value
iterator += 1
return phi | @staticmethod
def pyomo_optimization(x, y):
"\n Pyomo implementation of least squares optimization problem:\n\n Minimize cost = (y' - y) ^ 2\n subject to: y' = Ax\n\n The problem is solved within Pyomo's framework using IPOPT as solver.\n\n Args:\n x : array of features, (m x n) in size\n y : actual output vector, size (m x 1)\n\n Returns:\n phi: The optimal linear regression weights found\n "
model = ConcreteModel()
x_data = pd.DataFrame(x)
y_data = pd.DataFrame(y)
model.M = Set(initialize=x_data.index.values)
model.N = Set(initialize=x_data.columns.values)
model.P = Set(initialize=y_data.columns.values)
model.x = Param(model.M, model.N, initialize=x_data.stack().to_dict())
model.y_real = Param(model.M, model.P, initialize=y_data.stack().to_dict())
model.theta = Var(model.N, initialize=0.1, domain=Reals)
model.y_predictions = Var(model.M, model.P, initialize=y_data.stack().to_dict(), domain=Reals)
def xy_product(model, i, k):
return (model.y_predictions[(i, k)] == sum(((model.theta[j] * model.x[(i, j)]) for j in model.N for k in model.P)))
model.x_theta_product = Constraint(model.M, model.P, rule=xy_product, doc='Predicted value calc: y = hx')
def model_rms_error(model):
cost_value = ((1 / len(model.M)) * sum((((model.y_real[(i, k)] - model.y_predictions[(i, k)]) ** 2) for i in model.M for k in model.P)))
return cost_value
model.prediction_error = Objective(rule=model_rms_error, sense=minimize, doc='Minimum RMSE error')
instance = model
opt = SolverFactory('ipopt')
opt.options['max_iter'] = 10000000
result = opt.solve(instance)
phi = np.zeros((len(instance.theta), 1))
iterator = 0
for s in instance.N:
phi[(iterator, 0)] = instance.theta[s].value
iterator += 1
return phi<|docstring|>Pyomo implementation of least squares optimization problem:
Minimize cost = (y' - y) ^ 2
subject to: y' = Ax
The problem is solved within Pyomo's framework using IPOPT as solver.
Args:
x : array of features, (m x n) in size
y : actual output vector, size (m x 1)
Returns:
phi: The optimal linear regression weights found<|endoftext|> |
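A pared-down sketch of an analogous least-squares Pyomo model, solved only if IPOPT happens to be installed; the data and the expected coefficients are illustrative assumptions, not the package's test cases.

import numpy as np
from pyomo.environ import (ConcreteModel, Var, Objective, SolverFactory,
                           minimize, RangeSet)

x = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])   # intercept column + one feature
y = np.array([0.9, 3.1, 4.9, 7.2])                               # roughly y = 1 + 2*x

m = ConcreteModel()
m.N = RangeSet(0, x.shape[1] - 1)
m.theta = Var(m.N, initialize=0.1)
m.obj = Objective(
    expr=sum((y[i] - sum(m.theta[j] * x[i, j] for j in m.N)) ** 2
             for i in range(x.shape[0])) / x.shape[0],
    sense=minimize)

solver = SolverFactory('ipopt')
if solver.available(exception_flag=False):
    solver.solve(m)
    print([round(m.theta[j].value, 3) for j in m.N])             # roughly [0.92, 2.07]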
9d2869aabcedcfcb30d2d461b42ff3a81756d85a6974dfe2fe0fcc9292fbdd04 | @staticmethod
def cross_validation_error_calculation(phi, x_test_data, y_test_data):
'\n\n This function calculates the average sum of square errors between the actual and predicted output values,\n ss_error = sum of squared errors / number of samples\n\n Args:\n phi : optimal weight vector obtained by optimization\n x_test_data : vector of features x_test_data\n y_test_data : actual output values associated with\n\n Returns:\n ss_error : The average sum of squared errors\n\n '
y_test_prediction = np.matmul(x_test_data, phi)
ss_error = ((1 / y_test_data.shape[0]) * np.sum(((y_test_data - y_test_prediction) ** 2)))
return ss_error | This function calculates the average sum of square errors between the actual and predicted output values,
ss_error = sum of squared errors / number of samples
Args:
phi : optimal weight vector obtained by optimization
x_test_data : vector of features x_test_data
y_test_data : actual output values associated with x_test_data
Returns:
ss_error : The average sum of squared errors | idaes/surrogate/pysmo/polynomial_regression.py | cross_validation_error_calculation | adowling2/idaes-pse | 112 | python | @staticmethod
def cross_validation_error_calculation(phi, x_test_data, y_test_data):
'\n\n This function calculates the average sum of square errors between the actual and predicted output values,\n ss_error = sum of squared errors / number of samples\n\n Args:\n phi : optimal weight vector obtained by optimization\n x_test_data : vector of features x_test_data\n y_test_data : actual output values associated with\n\n Returns:\n ss_error : The average sum of squared errors\n\n '
y_test_prediction = np.matmul(x_test_data, phi)
ss_error = ((1 / y_test_data.shape[0]) * np.sum(((y_test_data - y_test_prediction) ** 2)))
return ss_error | @staticmethod
def cross_validation_error_calculation(phi, x_test_data, y_test_data):
'\n\n This function calculates the average sum of square errors between the actual and predicted output values,\n ss_error = sum of squared errors / number of samples\n\n Args:\n phi : optimal weight vector obtained by optimization\n x_test_data : vector of features x_test_data\n y_test_data : actual output values associated with\n\n Returns:\n ss_error : The average sum of squared errors\n\n '
y_test_prediction = np.matmul(x_test_data, phi)
ss_error = ((1 / y_test_data.shape[0]) * np.sum(((y_test_data - y_test_prediction) ** 2)))
return ss_error<|docstring|>This function calculates the average sum of square errors between the actual and predicted output values,
ss_error = sum of squared errors / number of samples
Args:
phi : optimal weight vector obtained by optimization
x_test_data : vector of features x_test_data
y_test_data : actual output values associated with x_test_data
Returns:
ss_error : The average sum of squared errors<|endoftext|> |
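For concreteness, a toy evaluation (illustrative values only) of the average sum-of-squares test error defined above.

import numpy as np

phi = np.array([[1.0], [2.0]])
x_test = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
y_test = np.array([[1.1], [2.9], [5.2]])
pred = np.matmul(x_test, phi)                          # [1, 3, 5]
ss_error = np.sum((y_test - pred) ** 2) / y_test.shape[0]
print(round(float(ss_error), 4))                       # 0.02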
ad97377f4e8d23e83bca994e5286861944aca474c926b337c4ce5237895d7804 | def polyregression(self, poly_order, training_data, test_data, additional_x_training_data=None, additional_x_test_data=None):
'\n\n Function that performs polynomial regression on a given dataset. It returns the estimated parameters and the fitting errors. It\n\n - calls the method self.polygeneration to generate the required polynomial/feature array based on the current polynomial order poly_order,\n - calls the pre-selected solution algorithm to solve the least squares problem, and\n - calls the cross_validation_error_calculation method to calculate the training and cross-validation errors.\n\n\n Args:\n poly_order(int) : The polynomial order currently being considered - between 1 and max_polynomial_order\n training_data(NumPy Array) : The training data to be regressed\n test_data(NumPy Array) : The test data to be used to cross-validate the polynomial fit\n\n Keyword Args:\n additional_x_training_data : Array containing additional training features based on additional_features list supplied by the user. Will have same number of rows as training_data.\n additional_x_test_data : Array of additional cross-validation features based on additional_features list supplied by the user. Will have same number of rows as test_data.\n\n Returns:\n phi_vector : the optimal weight vector for the polynomial considered here, returns zeros when problem is underspecified, i.e number of features > number of training samples.\n training_error : the average SSE estimate in the training dataset, returns Inf when number of features > number of training samples (DoF < 0).\n crossval_error : the average SSE estimate on the cross-validation dataset, returns Inf when number of features > number of training samples (DoF < 0).\n\n '
x_training_data = training_data[(:, :(- 1))]
y_training_data = training_data[(:, (- 1))]
x_test_data = test_data[(:, :(- 1))]
y_test_data = test_data[(:, (- 1))]
x_polynomial_data = self.polygeneration(poly_order, self.multinomials, x_training_data, additional_x_training_data)
if (x_polynomial_data.shape[0] >= x_polynomial_data.shape[1]):
if (self.solution_method == 'mle'):
phi_vector = self.MLE_estimate(x_polynomial_data, y_training_data.reshape(y_training_data.shape[0], 1))
elif (self.solution_method == 'bfgs'):
phi_vector = self.bfgs_parameter_optimization(x_polynomial_data, y_training_data)
elif (self.solution_method == 'pyomo'):
phi_vector = self.pyomo_optimization(x_polynomial_data, y_training_data)
phi_vector = phi_vector.reshape(phi_vector.shape[0], 1)
x_polynomial_data_test = self.polygeneration(poly_order, self.multinomials, x_test_data, additional_x_test_data)
training_error = self.cross_validation_error_calculation(phi_vector, x_polynomial_data, y_training_data.reshape(y_training_data.shape[0], 1))
crossval_error = self.cross_validation_error_calculation(phi_vector, x_polynomial_data_test, y_test_data.reshape(y_test_data.shape[0], 1))
else:
phi_vector = np.zeros((x_polynomial_data.shape[1], 1))
phi_vector[(:, 0)] = np.Inf
training_error = np.Inf
crossval_error = np.Inf
return (phi_vector, training_error, crossval_error) | Function that performs polynomial regression on a given dataset. It returns the estimated parameters and the fitting errors. It
- calls the method self.polygeneration to generate the required polynomial/feature array based on the current polynomial order poly_order,
- calls the pre-selected solution algorithm to solve the least squares problem, and
- calls the cross_validation_error_calculation method to calculate the training and cross-validation errors.
Args:
poly_order(int) : The polynomial order currently being considered - between 1 and max_polynomial_order
training_data(NumPy Array) : The training data to be regressed
test_data(NumPy Array) : The test data to be used to cross-validate the polynomial fit
Keyword Args:
additional_x_training_data : Array containing additional training features based on additional_features list supplied by the user. Will have same number of rows as training_data.
additional_x_test_data : Array of additional cross-validation features based on additional_features list supplied by the user. Will have same number of rows as test_data.
Returns:
phi_vector : the optimal weight vector for the polynomial considered here, returns zeros when problem is underspecified, i.e. number of features > number of training samples.
training_error : the average SSE estimate in the training dataset, returns Inf when number of features > number of training samples (DoF < 0).
crossval_error : the average SSE estimate on the cross-validation dataset, returns Inf when number of features > number of training samples (DoF < 0). | idaes/surrogate/pysmo/polynomial_regression.py | polyregression | adowling2/idaes-pse | 112 | python | def polyregression(self, poly_order, training_data, test_data, additional_x_training_data=None, additional_x_test_data=None):
'\n\n Function that performs polynomial regression on a given dataset. It returns the estimated parameters and the fitting errors. It\n\n - calls the method self.polygeneration to generate the required polynomial/feature array based on the current polynomial order poly_order,\n - calls the pre-selected solution algorithm to solve the least squares problem, and\n - calls the cross_validation_error_calculation method to calculate the training and cross-validation errors.\n\n\n Args:\n poly_order(int) : The polynomial order currently being considered - between 1 and max_polynomial_order\n training_data(NumPy Array) : The training data to be regressed\n test_data(NumPy Array) : The test data to be used to cross-validate the polynomial fit\n\n Keyword Args:\n additional_x_training_data : Array containing additional training features based on additional_features list supplied by the user. Will have same number of rows as training_data.\n additional_x_test_data : Array of additional cross-validation features based on additional_features list supplied by the user. Will have same number of rows as test_data.\n\n Returns:\n phi_vector : the optimal weight vector for the polynomial considered here, returns zeros when problem is underspecified, i.e number of features > number of training samples.\n training_error : the average SSE estimate in the training dataset, returns Inf when number of features > number of training samples (DoF < 0).\n crossval_error : the average SSE estimate on the cross-validation dataset, returns Inf when number of features > number of training samples (DoF < 0).\n\n '
x_training_data = training_data[(:, :(- 1))]
y_training_data = training_data[(:, (- 1))]
x_test_data = test_data[(:, :(- 1))]
y_test_data = test_data[(:, (- 1))]
x_polynomial_data = self.polygeneration(poly_order, self.multinomials, x_training_data, additional_x_training_data)
if (x_polynomial_data.shape[0] >= x_polynomial_data.shape[1]):
if (self.solution_method == 'mle'):
phi_vector = self.MLE_estimate(x_polynomial_data, y_training_data.reshape(y_training_data.shape[0], 1))
elif (self.solution_method == 'bfgs'):
phi_vector = self.bfgs_parameter_optimization(x_polynomial_data, y_training_data)
elif (self.solution_method == 'pyomo'):
phi_vector = self.pyomo_optimization(x_polynomial_data, y_training_data)
phi_vector = phi_vector.reshape(phi_vector.shape[0], 1)
x_polynomial_data_test = self.polygeneration(poly_order, self.multinomials, x_test_data, additional_x_test_data)
training_error = self.cross_validation_error_calculation(phi_vector, x_polynomial_data, y_training_data.reshape(y_training_data.shape[0], 1))
crossval_error = self.cross_validation_error_calculation(phi_vector, x_polynomial_data_test, y_test_data.reshape(y_test_data.shape[0], 1))
else:
phi_vector = np.zeros((x_polynomial_data.shape[1], 1))
phi_vector[(:, 0)] = np.Inf
training_error = np.Inf
crossval_error = np.Inf
return (phi_vector, training_error, crossval_error) | def polyregression(self, poly_order, training_data, test_data, additional_x_training_data=None, additional_x_test_data=None):
'\n\n Function that performs polynomial regression on a given dataset. It returns the estimated parameters and the fitting errors. It\n\n - calls the method self.polygeneration to generate the required polynomial/feature array based on the current polynomial order poly_order,\n - calls the pre-selected solution algorithm to solve the least squares problem, and\n - calls the cross_validation_error_calculation method to calculate the training and cross-validation errors.\n\n\n Args:\n poly_order(int) : The polynomial order currently being considered - between 1 and max_polynomial_order\n training_data(NumPy Array) : The training data to be regressed\n test_data(NumPy Array) : The test data to be used to cross-validate the polynomial fit\n\n Keyword Args:\n additional_x_training_data : Array containing additional training features based on additional_features list supplied by the user. Will have same number of rows as training_data.\n additional_x_test_data : Array of additional cross-validation features based on additional_features list supplied by the user. Will have same number of rows as test_data.\n\n Returns:\n phi_vector : the optimal weight vector for the polynomial considered here, returns zeros when problem is underspecified, i.e number of features > number of training samples.\n training_error : the average SSE estimate in the training dataset, returns Inf when number of features > number of training samples (DoF < 0).\n crossval_error : the average SSE estimate on the cross-validation dataset, returns Inf when number of features > number of training samples (DoF < 0).\n\n '
x_training_data = training_data[(:, :(- 1))]
y_training_data = training_data[(:, (- 1))]
x_test_data = test_data[(:, :(- 1))]
y_test_data = test_data[(:, (- 1))]
x_polynomial_data = self.polygeneration(poly_order, self.multinomials, x_training_data, additional_x_training_data)
if (x_polynomial_data.shape[0] >= x_polynomial_data.shape[1]):
if (self.solution_method == 'mle'):
phi_vector = self.MLE_estimate(x_polynomial_data, y_training_data.reshape(y_training_data.shape[0], 1))
elif (self.solution_method == 'bfgs'):
phi_vector = self.bfgs_parameter_optimization(x_polynomial_data, y_training_data)
elif (self.solution_method == 'pyomo'):
phi_vector = self.pyomo_optimization(x_polynomial_data, y_training_data)
phi_vector = phi_vector.reshape(phi_vector.shape[0], 1)
x_polynomial_data_test = self.polygeneration(poly_order, self.multinomials, x_test_data, additional_x_test_data)
training_error = self.cross_validation_error_calculation(phi_vector, x_polynomial_data, y_training_data.reshape(y_training_data.shape[0], 1))
crossval_error = self.cross_validation_error_calculation(phi_vector, x_polynomial_data_test, y_test_data.reshape(y_test_data.shape[0], 1))
else:
phi_vector = np.zeros((x_polynomial_data.shape[1], 1))
phi_vector[(:, 0)] = np.Inf
training_error = np.Inf
crossval_error = np.Inf
return (phi_vector, training_error, crossval_error)<|docstring|>Function that performs polynomial regression on a given dataset. It returns the estimated parameters and the fitting errors. It
- calls the method self.polygeneration to generate the required polynomial/feature array based on the current polynomial order poly_order,
- calls the pre-selected solution algorithm to solve the least squares problem, and
- calls the cross_validation_error_calculation method to calculate the training and cross-validation errors.
Args:
poly_order(int) : The polynomial order currently being considered - between 1 and max_polynomial_order
training_data(NumPy Array) : The training data to be regressed
test_data(NumPy Array) : The test data to be used to cross-validate the polynomial fit
Keyword Args:
additional_x_training_data : Array containing additional training features based on additional_features list supplied by the user. Will have same number of rows as training_data.
additional_x_test_data : Array of additional cross-validation features based on additional_features list supplied by the user. Will have same number of rows as test_data.
Returns:
phi_vector : the optimal weight vector for the polynomial considered here, returns zeros when problem is underspecified, i.e. number of features > number of training samples.
training_error : the average SSE estimate in the training dataset, returns Inf when number of features > number of training samples (DoF < 0).
crossval_error : the average SSE estimate on the cross-validation dataset, returns Inf when number of features > number of training samples (DoF < 0).<|endoftext|> |
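A compressed, assumption-laden sketch of the train/fit/cross-validate flow described above, reduced to a single 1-D input, a quadratic feature set and a plain pseudoinverse fit; it is not the package's polyregression method.

import numpy as np

rng = np.random.default_rng(3)
x = rng.uniform(-2, 2, size=(40, 1))
y = 1.0 + 0.5 * x - 2.0 * x ** 2 + 0.05 * rng.normal(size=(40, 1))

def features(x, order):
    # column of ones, then x, x^2, ..., x^order
    return np.hstack([x ** p for p in range(order + 1)])

idx = rng.permutation(40)
train, test = idx[:30], idx[30:]                       # simple train / cross-validation split
phi = np.matmul(np.linalg.pinv(features(x[train], 2)), y[train])
train_err = np.mean((np.matmul(features(x[train], 2), phi) - y[train]) ** 2)
cv_err = np.mean((np.matmul(features(x[test], 2), phi) - y[test]) ** 2)
print(np.round(phi.ravel(), 2), round(float(train_err), 4), round(float(cv_err), 4))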
60a4a9abf13cef7f07891e0b6fe5cc3ef1cab5e534019d0eb2e7faa0d348d2d9 | def surrogate_performance(self, phi_best, order_best, additional_features_array=None):
'\n\n This function evaluates the performance of the surrogate model on the entire dataset.\n 1. A vector is created to hold the original input data and the predicted y values from the surrogate model is created - comparison_vector\n 2. The predicted values from the surrogate model are then evaluated.\n 3. The errors on each datapoint(individual error), the mean absolute error and the mean square errors are calculated.\n 4. The R-square coefficient is then calculated.\n 5. The adjusted R2 is calculated next, taking into account the number of terms in the equation\n\n The comparison vector is sorted based on the performance of the surrogate model in its prediction - best to worst.\n Note that the error on each data point is based on the error maximization function in ALAMO (Cozad et al., Eq. 7)\n\n '
comparison_vector = np.zeros((self.original_data.shape[0], (self.original_data.shape[1] + 1)))
comparison_vector[(:, :self.original_data.shape[1])] = self.original_data[(:, :)]
x_evaluation_data = self.polygeneration(order_best, self.multinomials, self.original_data[(:, 0:(self.original_data.shape[1] - 1))], additional_features_array)
y_prediction = np.matmul(x_evaluation_data, phi_best)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
comparison_vector[(:, self.original_data.shape[1])] = y_prediction[(:, 0)]
den = (np.max(comparison_vector[(:, (- 2))]) - np.min(comparison_vector[(:, (- 2))]))
individual_error = (((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) / den) ** 2)
mae_error = ((1 / comparison_vector.shape[0]) * np.sum(np.abs((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]))))
mse_error = ((1 / comparison_vector.shape[0]) * np.sum(((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) ** 2)))
input_y_mean = np.mean(comparison_vector[(:, (- 2))], axis=0)
ss_total = np.sum(((comparison_vector[(:, (- 2))] - input_y_mean) ** 2))
ss_residual = np.sum(((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) ** 2))
r_square = (1 - (ss_residual / ss_total))
individual_error = individual_error.reshape(individual_error.shape[0], 1)
comparison_vector = np.append(comparison_vector, individual_error, 1)
sorted_comparison_vector = comparison_vector[comparison_vector[(:, (- 1))].argsort()]
samp_size = self.original_data.shape[0]
no_nonzero_terms = np.count_nonzero(phi_best[(1:, 0)])
if (r_square > 0):
r2_adjusted = (1 - ((1 - r_square) * ((samp_size - 1) / ((samp_size - no_nonzero_terms) - 1))))
else:
r2_adjusted = 0
return (sorted_comparison_vector, mae_error, mse_error, r_square, r2_adjusted) | This function evaluates the performance of the surrogate model on the entire dataset.
1. A vector is created to hold the original input data and the predicted y values from the surrogate model - comparison_vector
2. The predicted values from the surrogate model are then evaluated.
3. The errors on each datapoint (individual error), the mean absolute error and the mean square errors are calculated.
4. The R-square coefficient is then calculated.
5. The adjusted R2 is calculated next, taking into account the number of terms in the equation
The comparison vector is sorted based on the performance of the surrogate model in its prediction - best to worst.
Note that the error on each data point is based on the error maximization function in ALAMO (Cozad et al., Eq. 7) | idaes/surrogate/pysmo/polynomial_regression.py | surrogate_performance | adowling2/idaes-pse | 112 | python | def surrogate_performance(self, phi_best, order_best, additional_features_array=None):
'\n\n This function evaluates the performance of the surrogate model on the entire dataset.\n 1. A vector is created to hold the original input data and the predicted y values from the surrogate model is created - comparison_vector\n 2. The predicted values from the surrogate model are then evaluated.\n 3. The errors on each datapoint(individual error), the mean absolute error and the mean square errors are calculated.\n 4. The R-square coefficient is then calculated.\n 5. The adjusted R2 is calculated next, taking into account the number of terms in the equation\n\n The comparison vector is sorted based on the performance of the surrogate model in its prediction - best to worst.\n Note that the error on each data point is based on the error maximization function in ALAMO (Cozad et al., Eq. 7)\n\n '
comparison_vector = np.zeros((self.original_data.shape[0], (self.original_data.shape[1] + 1)))
comparison_vector[(:, :self.original_data.shape[1])] = self.original_data[(:, :)]
x_evaluation_data = self.polygeneration(order_best, self.multinomials, self.original_data[(:, 0:(self.original_data.shape[1] - 1))], additional_features_array)
y_prediction = np.matmul(x_evaluation_data, phi_best)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
comparison_vector[(:, self.original_data.shape[1])] = y_prediction[(:, 0)]
den = (np.max(comparison_vector[(:, (- 2))]) - np.min(comparison_vector[(:, (- 2))]))
individual_error = (((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) / den) ** 2)
mae_error = ((1 / comparison_vector.shape[0]) * np.sum(np.abs((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]))))
mse_error = ((1 / comparison_vector.shape[0]) * np.sum(((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) ** 2)))
input_y_mean = np.mean(comparison_vector[(:, (- 2))], axis=0)
ss_total = np.sum(((comparison_vector[(:, (- 2))] - input_y_mean) ** 2))
ss_residual = np.sum(((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) ** 2))
r_square = (1 - (ss_residual / ss_total))
individual_error = individual_error.reshape(individual_error.shape[0], 1)
comparison_vector = np.append(comparison_vector, individual_error, 1)
sorted_comparison_vector = comparison_vector[comparison_vector[(:, (- 1))].argsort()]
samp_size = self.original_data.shape[0]
no_nonzero_terms = np.count_nonzero(phi_best[(1:, 0)])
if (r_square > 0):
r2_adjusted = (1 - ((1 - r_square) * ((samp_size - 1) / ((samp_size - no_nonzero_terms) - 1))))
else:
r2_adjusted = 0
return (sorted_comparison_vector, mae_error, mse_error, r_square, r2_adjusted) | def surrogate_performance(self, phi_best, order_best, additional_features_array=None):
'\n\n This function evaluates the performance of the surrogate model on the entire dataset.\n 1. A vector is created to hold the original input data and the predicted y values from the surrogate model is created - comparison_vector\n 2. The predicted values from the surrogate model are then evaluated.\n 3. The errors on each datapoint(individual error), the mean absolute error and the mean square errors are calculated.\n 4. The R-square coefficient is then calculated.\n 5. The adjusted R2 is calculated next, taking into account the number of terms in the equation\n\n The comparison vector is sorted based on the performance of the surrogate model in its prediction - best to worst.\n Note that the error on each data point is based on the error maximization function in ALAMO (Cozad et al., Eq. 7)\n\n '
comparison_vector = np.zeros((self.original_data.shape[0], (self.original_data.shape[1] + 1)))
comparison_vector[(:, :self.original_data.shape[1])] = self.original_data[(:, :)]
x_evaluation_data = self.polygeneration(order_best, self.multinomials, self.original_data[(:, 0:(self.original_data.shape[1] - 1))], additional_features_array)
y_prediction = np.matmul(x_evaluation_data, phi_best)
y_prediction = y_prediction.reshape(y_prediction.shape[0], 1)
comparison_vector[(:, self.original_data.shape[1])] = y_prediction[(:, 0)]
den = (np.max(comparison_vector[(:, (- 2))]) - np.min(comparison_vector[(:, (- 2))]))
individual_error = (((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) / den) ** 2)
mae_error = ((1 / comparison_vector.shape[0]) * np.sum(np.abs((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]))))
mse_error = ((1 / comparison_vector.shape[0]) * np.sum(((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) ** 2)))
input_y_mean = np.mean(comparison_vector[(:, (- 2))], axis=0)
ss_total = np.sum(((comparison_vector[(:, (- 2))] - input_y_mean) ** 2))
ss_residual = np.sum(((comparison_vector[(:, (- 1))] - comparison_vector[(:, (- 2))]) ** 2))
r_square = (1 - (ss_residual / ss_total))
individual_error = individual_error.reshape(individual_error.shape[0], 1)
comparison_vector = np.append(comparison_vector, individual_error, 1)
sorted_comparison_vector = comparison_vector[comparison_vector[(:, (- 1))].argsort()]
samp_size = self.original_data.shape[0]
no_nonzero_terms = np.count_nonzero(phi_best[(1:, 0)])
if (r_square > 0):
r2_adjusted = (1 - ((1 - r_square) * ((samp_size - 1) / ((samp_size - no_nonzero_terms) - 1))))
else:
r2_adjusted = 0
return (sorted_comparison_vector, mae_error, mse_error, r_square, r2_adjusted)<|docstring|>This function evaluates the performance of the surrogate model on the entire dataset.
1. A vector is created to hold the original input data and the predicted y values from the surrogate model - comparison_vector
2. The predicted values from the surrogate model are then evaluated.
3. The errors on each datapoint (individual error), the mean absolute error and the mean square errors are calculated.
4. The R-square coefficient is then calculated.
5. The adjusted R2 is calculated next, taking into account the number of terms in the equation
The comparison vector is sorted based on the performance of the surrogate model in its prediction - best to worst.
Note that the error on each data point is based on the error maximization function in ALAMO (Cozad et al., Eq. 7)<|endoftext|> |
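The error metrics listed above (MAE, MSE, R^2 and adjusted R^2) written out directly for a toy prediction vector; n_terms stands in for the number of non-zero regression coefficients and all values are illustrative.

import numpy as np

y_true = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8, 5.1, 6.1])
n, n_terms = y_true.size, 2

mae = np.mean(np.abs(y_pred - y_true))
mse = np.mean((y_pred - y_true) ** 2)
ss_res = np.sum((y_pred - y_true) ** 2)
ss_tot = np.sum((y_true - y_true.mean()) ** 2)
r2 = 1 - ss_res / ss_tot
r2_adj = 1 - (1 - r2) * (n - 1) / (n - n_terms - 1)    # penalises extra regression terms
print(round(mae, 3), round(mse, 3), round(r2, 4), round(r2_adj, 4))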
d18cba2a8dbc25db716cebcba73d4f8dbdeaf0fee395fd1d4a764b403a4adec1 | def results_generation(self, beta, order):
'\n This function prints the results of the fitting to the screen.\n '
results_df = pd.Series()
counter = 1
print('\n------------------------------------------------------------')
print('The final coefficients of the regression terms are: \n')
print('k |', beta[(0, 0)])
results_df = results_df.append(pd.Series({'k': beta[(0, 0)]}))
if (self.multinomials == 1):
for i in range(1, (order + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
print('(x_', j, ')^', i, ' |', beta[(counter, 0)])
col_name = ((('(x_' + str(j)) + ')^') + str(i))
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
for i in range(1, (self.number_of_x_vars + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
if (i > j):
print('x_', j, '.x_', i, ' |', beta[(counter, 0)])
col_name = ((((('(x_' + str(j)) + ')') + '.(x_') + str(i)) + ')')
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
else:
for i in range(1, (order + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
print('(x_', j, ')^', i, ' |', beta[(counter, 0)])
col_name = ((('(x_' + str(j)) + ')^') + str(i))
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
return results_df | This function prints the results of the fitting to the screen. | idaes/surrogate/pysmo/polynomial_regression.py | results_generation | adowling2/idaes-pse | 112 | python | def results_generation(self, beta, order):
'\n \n '
results_df = pd.Series()
counter = 1
print('\n------------------------------------------------------------')
print('The final coefficients of the regression terms are: \n')
print('k |', beta[(0, 0)])
results_df = results_df.append(pd.Series({'k': beta[(0, 0)]}))
if (self.multinomials == 1):
for i in range(1, (order + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
print('(x_', j, ')^', i, ' |', beta[(counter, 0)])
col_name = ((('(x_' + str(j)) + ')^') + str(i))
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
for i in range(1, (self.number_of_x_vars + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
if (i > j):
print('x_', j, '.x_', i, ' |', beta[(counter, 0)])
col_name = ((((('(x_' + str(j)) + ')') + '.(x_') + str(i)) + ')')
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
else:
for i in range(1, (order + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
print('(x_', j, ')^', i, ' |', beta[(counter, 0)])
col_name = ((('(x_' + str(j)) + ')^') + str(i))
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
return results_df | def results_generation(self, beta, order):
'\n \n '
results_df = pd.Series()
counter = 1
print('\n------------------------------------------------------------')
print('The final coefficients of the regression terms are: \n')
print('k |', beta[(0, 0)])
results_df = results_df.append(pd.Series({'k': beta[(0, 0)]}))
if (self.multinomials == 1):
for i in range(1, (order + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
print('(x_', j, ')^', i, ' |', beta[(counter, 0)])
col_name = ((('(x_' + str(j)) + ')^') + str(i))
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
for i in range(1, (self.number_of_x_vars + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
if (i > j):
print('x_', j, '.x_', i, ' |', beta[(counter, 0)])
col_name = ((((('(x_' + str(j)) + ')') + '.(x_') + str(i)) + ')')
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
else:
for i in range(1, (order + 1)):
for j in range(1, (self.number_of_x_vars + 1)):
print('(x_', j, ')^', i, ' |', beta[(counter, 0)])
col_name = ((('(x_' + str(j)) + ')^') + str(i))
results_df = results_df.append(pd.Series({col_name: beta[(counter, 0)]}))
counter += 1
return results_df<|docstring|>This function prints the results of the fitting to the screen.<|endoftext|> |
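An illustrative sketch (not the package's results_generation output) of how a flat coefficient vector can be labelled the way the printout above does: constant first, then per-order terms for each input variable.

import numpy as np
import pandas as pd

beta = np.array([[0.5], [1.2], [-0.3], [0.07], [2.1]])   # k, x1, x2, x1^2, x2^2
n_vars, order = 2, 2
labels = ['k'] + ['(x_%d)^%d' % (j, i) for i in range(1, order + 1)
                  for j in range(1, n_vars + 1)]
coefficients = pd.Series(beta.ravel(), index=labels)
print(coefficients)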
b8db661118bca7d977c34ee72826ec96ed9413de5b1bd5bf2acc8a9241e36155 | @staticmethod
def error_plotting(vector_of_results):
'\n This function generates and displays a plot of the different errors\n '
ax1 = plt.subplot(2, 2, 1)
ax1.plot(vector_of_results[(:, 0)], vector_of_results[(:, 2)], 'green', vector_of_results[(:, 0)], vector_of_results[(:, 3)], 'red')
ax1.set_title('Training (green) vs Cross-validation error (red)')
ax2 = plt.subplot(2, 2, 2)
ax2.plot(vector_of_results[(:, 0)], vector_of_results[(:, 4)], 'green')
ax2.set_title('MAE')
ax3 = plt.subplot(2, 2, 3)
ax3.plot(vector_of_results[(:, 0)], vector_of_results[(:, 5)], 'blue')
ax3.set_title('MSE')
ax4 = plt.subplot(2, 2, 4)
ax4.plot(vector_of_results[(:, 0)], vector_of_results[(:, 6)], 'blue', vector_of_results[(:, 0)], vector_of_results[(:, 7)], 'red')
ax4.set_title('R-squared (blue) and Adjusted R-squared (red)')
plt.show()
return (ax1, ax2, ax3, ax4) | This function generates and displays a plot of the different errors | idaes/surrogate/pysmo/polynomial_regression.py | error_plotting | adowling2/idaes-pse | 112 | python | @staticmethod
def error_plotting(vector_of_results):
'\n \n '
ax1 = plt.subplot(2, 2, 1)
ax1.plot(vector_of_results[(:, 0)], vector_of_results[(:, 2)], 'green', vector_of_results[(:, 0)], vector_of_results[(:, 3)], 'red')
ax1.set_title('Training (green) vs Cross-validation error (red)')
ax2 = plt.subplot(2, 2, 2)
ax2.plot(vector_of_results[(:, 0)], vector_of_results[(:, 4)], 'green')
ax2.set_title('MAE')
ax3 = plt.subplot(2, 2, 3)
ax3.plot(vector_of_results[(:, 0)], vector_of_results[(:, 5)], 'blue')
ax3.set_title('MSE')
ax4 = plt.subplot(2, 2, 4)
ax4.plot(vector_of_results[(:, 0)], vector_of_results[(:, 6)], 'blue', vector_of_results[(:, 0)], vector_of_results[(:, 7)], 'red')
ax4.set_title('R-squared (blue) and Adjusted R-squared (red)')
plt.show()
return (ax1, ax2, ax3, ax4) | @staticmethod
def error_plotting(vector_of_results):
'\n \n '
ax1 = plt.subplot(2, 2, 1)
ax1.plot(vector_of_results[(:, 0)], vector_of_results[(:, 2)], 'green', vector_of_results[(:, 0)], vector_of_results[(:, 3)], 'red')
ax1.set_title('Training (green) vs Cross-validation error (red)')
ax2 = plt.subplot(2, 2, 2)
ax2.plot(vector_of_results[(:, 0)], vector_of_results[(:, 4)], 'green')
ax2.set_title('MAE')
ax3 = plt.subplot(2, 2, 3)
ax3.plot(vector_of_results[(:, 0)], vector_of_results[(:, 5)], 'blue')
ax3.set_title('MSE')
ax4 = plt.subplot(2, 2, 4)
ax4.plot(vector_of_results[(:, 0)], vector_of_results[(:, 6)], 'blue', vector_of_results[(:, 0)], vector_of_results[(:, 7)], 'red')
ax4.set_title('R-squared (blue) and Adjusted R-squared (red)')
plt.show()
return (ax1, ax2, ax3, ax4)<|docstring|>This function generates and displays a plot of the different errors<|endoftext|>
ebb9a1b18ea624f4bcad611ec7cb286c451e0e97a184bd9c1c85f5bbae1196d2 | def user_defined_terms(self, additional_regression_features):
'\n\n This function generates a 2D array of the additional features from the list supplied by the user.\n Note: It assumes that each list element is 1D\n\n Args:\n additional_regression_features(list): a list of features to be added to the regression problem. Each element of the list must have the same number of entries as self.number_of_samples\n\n Returns:\n additional_features_array(NumPy Array): an array of additional training features with len(additional_regression_features) columns to be considered during regression.\n\n Raises:\n Exception:\n * when additional_regression_features is not a list\n Exception:\n * when the entries in additional_regression_features are not of type 1-D NumPy Array or Pandas Series\n Exception:\n * when the length of the entries in additional_regression_features do not match the number of rows in self.regression_data\n\n '
if (not isinstance(additional_regression_features, list)):
raise ValueError('additional_regression_features: list required.')
number_additional_features = len(additional_regression_features)
additional_features_array = np.zeros((self.regression_data.shape[0], number_additional_features))
for i in range(0, number_additional_features):
if (isinstance(additional_regression_features[i], np.ndarray) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i]
elif (isinstance(additional_regression_features[i], pd.DataFrame) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i].values
elif (isinstance(additional_regression_features[i], pd.Series) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i].values
else:
raise Exception('Wrong data dimensions or type - additional_regression_features contain 1-D vectors, have same number of entries as regression_data and be of type pd.Series, pd.Dataframe or np.ndarray.')
return additional_features_array | This function generates a 2D array of the additional features from the list supplied by the user.
Note: It assumes that each list element is 1D
Args:
additional_regression_features(list): a list of features to be added to the regression problem. Each element of the list must have the same number of entries as self.number_of_samples
Returns:
additional_features_array(NumPy Array): an array of additional training features with len(additional_regression_features) columns to be considered during regression.
Raises:
Exception:
* when additional_regression_features is not a list
Exception:
* when the entries in additional_regression_features are not of type 1-D NumPy Array or Pandas Series
Exception:
* when the lengths of the entries in additional_regression_features do not match the number of rows in self.regression_data | idaes/surrogate/pysmo/polynomial_regression.py | user_defined_terms | adowling2/idaes-pse | 112 | python | def user_defined_terms(self, additional_regression_features):
'\n\n This function generates a 2D array of the additional features from the list supplied by the user.\n Note: It assumes that each list element is 1D\n\n Args:\n additional_regression_features(list): a list of features to be added to the regression problem. Each element of the list must have the same number of entries as self.number_of_samples\n\n Returns:\n additional_features_array(NumPy Array): an array of additional training features with len(additional_regression_features) columns to be considered during regression.\n\n Raises:\n Exception:\n * when additional_regression_features is not a list\n Exception:\n * when the entries in additional_regression_features are not of type 1-D NumPy Array or Pandas Series\n Exception:\n * when the length of the entries in additional_regression_features do not match the number of rows in self.regression_data\n\n '
if (not isinstance(additional_regression_features, list)):
raise ValueError('additional_regression_features: list required.')
number_additional_features = len(additional_regression_features)
additional_features_array = np.zeros((self.regression_data.shape[0], number_additional_features))
for i in range(0, number_additional_features):
if (isinstance(additional_regression_features[i], np.ndarray) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i]
elif (isinstance(additional_regression_features[i], pd.DataFrame) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i].values
elif (isinstance(additional_regression_features[i], pd.Series) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i].values
else:
raise Exception('Wrong data dimensions or type - additional_regression_features contain 1-D vectors, have same number of entries as regression_data and be of type pd.Series, pd.Dataframe or np.ndarray.')
return additional_features_array | def user_defined_terms(self, additional_regression_features):
'\n\n This function generates a 2D array of the additional features from the list supplied by the user.\n Note: It assumes that each list element is 1D\n\n Args:\n additional_regression_features(list): a list of features to be added to the regression problem. Each element of the list must have the same number of entries as self.number_of_samples\n\n Returns:\n additional_features_array(NumPy Array): an array of additional training features with len(additional_regression_features) columns to be considered during regression.\n\n Raises:\n Exception:\n * when additional_regression_features is not a list\n Exception:\n * when the entries in additional_regression_features are not of type 1-D NumPy Array or Pandas Series\n Exception:\n * when the length of the entries in additional_regression_features do not match the number of rows in self.regression_data\n\n '
if (not isinstance(additional_regression_features, list)):
raise ValueError('additional_regression_features: list required.')
number_additional_features = len(additional_regression_features)
additional_features_array = np.zeros((self.regression_data.shape[0], number_additional_features))
for i in range(0, number_additional_features):
if (isinstance(additional_regression_features[i], np.ndarray) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i]
elif (isinstance(additional_regression_features[i], pd.DataFrame) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i].values
elif (isinstance(additional_regression_features[i], pd.Series) and (len(additional_regression_features[i]) == self.regression_data.shape[0]) and (additional_regression_features[i].ndim == 1)):
additional_features_array[(:, i)] = additional_regression_features[i].values
else:
raise Exception('Wrong data dimensions or type - additional_regression_features contain 1-D vectors, have same number of entries as regression_data and be of type pd.Series, pd.Dataframe or np.ndarray.')
return additional_features_array<|docstring|>This function generates a 2D array of the additional features from the list supplied by the user.
Note: It assumes that each list element is 1D
Args:
additional_regression_features(list): a list of features to be added to the regression problem. Each element of the list must have the same number of entries as self.number_of_samples
Returns:
additional_features_array(NumPy Array): an array of additional training features with len(additional_regression_features) columns to be considered during regression.
Raises:
Exception:
* when additional_regression_features is not a list
Exception:
* when the entries in additional_regression_features are not of type 1-D NumPy Array or Pandas Series
Exception:
* when the lengths of the entries in additional_regression_features do not match the number of rows in self.regression_data<|endoftext|>
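A hedged example of what a user-supplied additional_regression_features list might look like: each entry is a 1-D vector with one value per training sample, which is exactly what the validation above requires; the feature choices (sin(x1), x1*exp(x2)) are arbitrary assumptions.

import numpy as np
import pandas as pd

regression_data = np.array([[0.0, 1.0, 3.0],
                            [0.5, 2.0, 5.5],
                            [1.0, 3.0, 9.0]])          # columns: x1, x2, y
x1, x2 = regression_data[:, 0], regression_data[:, 1]

additional_regression_features = [
    np.sin(x1),                                        # 1-D NumPy array, 3 entries
    pd.Series(x1 * np.exp(x2)),                        # 1-D pandas Series, 3 entries
]
additional_features_array = np.column_stack(
    [np.asarray(f) for f in additional_regression_features])
print(additional_features_array.shape)                 # (3, 2)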
3bb8da395cf327fc817a6c74ca9990663a1c9e0aa08ddd40d6fa376940b4e4ef | def polynomial_regression_fitting(self, additional_regression_features=None):
'\n\n polynomial_regression_fitting is the core method which is called in the PolynomialRegression class.\n It ties together all the other functions in the class.\n\n For each polynomial order, it\n - calls the function user_defined_terms to generate the array of additional features (when required),\n - calls the function training_test_data_creation to generate the training and test data sets,\n - calls the function polyregression to determine the optimal weight vector and the fitting errors,\n - determines whether the new fit improves is the best so far by the crossvalidation error of the current fit to the previous best,\n - calls the function surrogate_performance to calculate the errors and R-values of the current fit, and\n - returns results to user.\n\n When adaptive sampling is done, the function also\n - selects the adaptive samples to be added to the training data based on the magnitudes of the prediction errors of individual samples in self.original_data, and\n - determines when the the stopping conditions have been satisfied.\n\n\n The following stopping conditions are considered when adaptive sampling is in use:\n - The maximum number of training samples allowed by the user has been exceeded\n - Mean absolute error ~= 0\n - Mean squared error ~= 0\n - R^2 = 1\n - The preset iteration number given by the user has been reached\n - All available points in self.original_data have been used for training.\n\n Keyword Args:\n additional_regression_features(<list>): Additional features the user wants the algorithm to consider during regression.\n It should be noted that adaptive sampling is not available when additional features have been supplied by the user, i.e. when len(additional_regression_features) > 0.\n\n Returns:\n results: Python object containing the results of the polynomial regression process including the polynomial order\n (results.polynomial_order), polynomial coefficients (results.optimal_weights_array) and fit errors (results.errors).\n See information on ResultReport class for details on contents.\n\n '
best_error = 1e+20
train_error_fit = 1e+20
phi_best = 0
order_best = 0
if ((additional_regression_features is None) or (len(additional_regression_features) == 0)):
print('max_fraction_training_samples set at ', self.max_fraction_training_samples)
print('Number of adaptive samples (no_adaptive_samples) set at ', self.no_adaptive_samples)
print('Maximum number of iterations (Max_iter) set at: ', self.max_iter)
(training_data, cross_val_data) = self.training_test_data_creation()
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nInitial surrogate model is of order', order_best, (' with a cross-val error of %4f' % best_error))
(sorted_comparison_vector, mae_error, mse_error, r_square, r_square_adj) = self.surrogate_performance(phi_best, order_best)
print('Initial Regression Model Performance:\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R^2: %4f' % r_square), (' / Adjusted R^2: %4f' % r_square_adj))
(order_opt, train_error_opt, best_error_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, phi_opt) = (order_best, train_error_fit, best_error, mae_error, mse_error, r_square, r_square_adj, phi_best)
eps_neg = 1e-06
eps_pos = 0.999999
iteration_number = 1
stopping_criterion = int(np.ceil((self.max_fraction_training_samples * self.original_data.shape[0])))
vector_of_results = np.zeros((stopping_criterion, 9))
while ((self.regression_data.shape[0] < stopping_criterion) and (mae_error > eps_neg) and (mse_error > eps_neg) and (r_square < eps_pos) and (iteration_number < self.max_iter) and ((self.regression_data.shape[0] + self.no_adaptive_samples) < self.original_data.shape[0])):
print('\n-------------------------------------------------')
print('\nIteration ', iteration_number)
best_error = 1e+20
scv_input_data = sorted_comparison_vector[(:, :(- 2))]
sorted_comparison_vector_unique = scv_input_data[np.all(np.any((scv_input_data - self.regression_data[(:, None)]), axis=2), axis=0)]
adaptive_samples = sorted_comparison_vector_unique[((- self.no_adaptive_samples):, :)]
self.regression_data = np.concatenate((self.regression_data, adaptive_samples), axis=0)
self.number_of_samples = self.regression_data.shape[0]
print('\n', self.no_adaptive_samples, ' additional points added to training data. New number of training samples: ', self.regression_data.shape[0])
(training_data, cross_val_data) = self.training_test_data_creation()
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nThe best regression model is of order', order_best, (' with a cross-val error of %4f' % best_error))
(sorted_comparison_vector, mae_error, mse_error, r_square, r_square_adj) = self.surrogate_performance(phi_best, order_best)
print('Regression performance on full data in iteration', iteration_number, '\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R_sq: %4f' % r_square), (' / Adjusted R^2: %4f' % r_square_adj))
if (r_square_adj > r_square_adj_opt):
(phi_opt, order_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, train_error_opt, best_error_opt) = (phi_best, order_best, mae_error, mse_error, r_square, r_square_adj, train_error_fit, best_error)
print('New solution found.')
else:
print('Previous solution retained.')
vector_of_results[(iteration_number, :)] = [iteration_number, order_opt, train_error_opt, best_error_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, self.regression_data.shape[0]]
iteration_number += 1
vector_of_results = vector_of_results[(~ np.all((vector_of_results == 0), axis=1))]
beta_vector = np.round(phi_opt, 6)
if (r_square_adj_opt < 0.95):
print('\nPolynomial regression performs poorly for this dataset.')
else:
print('\nPolynomial regression generates a good surrogate model for the input data.')
if (iteration_number > 1):
(_, _, _, _) = self.error_plotting(vector_of_results)
print('\n-------------------------------------------------\n-------------------------------------------------')
print('Best solution found: ', '\nOrder: ', order_opt, (' / MAE: %4f' % mae_error_opt), (' / MSE: %4f' % mse_error_opt), (' / R_sq: %4f' % r_square_opt), (' / Adjusted R^2: %4f' % r_square_adj_opt))
dataframe_coeffs = self.results_generation(beta_vector, order_opt)
vector_of_results_df = pd.DataFrame({'Iteration_number': vector_of_results[(:, 0)], 'Polynomial order': vector_of_results[(:, 1)], 'Training error': vector_of_results[(:, 2)], 'Cross-val error': vector_of_results[(:, 3)], 'MAE': vector_of_results[(:, 4)], 'MSE': vector_of_results[(:, 5)], 'R2': vector_of_results[(:, 6)], 'Adjusted R2': vector_of_results[(:, 7)], 'Number of training samples': vector_of_results[(:, 8)]})
extra_terms_feature_vector = list((self.feature_list[i] for i in self.regression_data_columns))
self.optimal_weights_array = phi_opt
self.final_polynomial_order = order_opt
self.errors = {'MAE': mae_error_opt, 'MSE': mse_error_opt, 'R2': r_square_opt, 'Adjusted R2': r_square_adj_opt}
self.number_of_iterations = iteration_number
self.iteration_summary = vector_of_results_df
self.additional_features_data = None
self.final_training_data = self.regression_data
self.dataframe_of_optimal_weights_polynomial = dataframe_coeffs
self.dataframe_of_optimal_weights_extra_terms = []
self.extra_terms_feature_vector = extra_terms_feature_vector
if (r_square_opt > 0.95):
self.fit_status = 'ok'
else:
warnings.warn('Polynomial regression generates poor fit for the dataset')
self.fit_status = 'poor'
self.pickle_save({'model': self})
return self
else:
print('No iterations will be run.')
number_additional_features = len(additional_regression_features)
additional_features_array = self.user_defined_terms(additional_regression_features)
(training_data, cross_val_data) = self.training_test_data_creation(additional_features_array)
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))], training_data[('training_extras_' + str(cv_number))], cross_val_data[('test_extras_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nBest surrogate model is of order', order_best, (' with a cross-val S.S. Error of %4f' % best_error))
self.original_data = self.regression_data
(_, mae_error, mse_error, r_square, _) = self.surrogate_performance(phi_best, order_best, additional_features_array)
beta_vector = np.round(phi_best, 6)
dataframe_coeffs = self.results_generation(beta_vector, order_best)
extra_terms_coeffs = pd.Series()
print('\nThe coefficients of the extra terms in additional_regression_features are:\n')
for af in range(number_additional_features, 0, (- 1)):
print('Coeff. additional_regression_features[', ((number_additional_features - af) + 1), ']: ', beta_vector[((len(beta_vector) - af), 0)])
col_name = (('Coeff. additional_regression_features[' + str(((number_additional_features - af) + 1))) + ']')
extra_terms_coeffs = extra_terms_coeffs.append(pd.Series({col_name: beta_vector[((len(beta_vector) - af), 0)]}))
print('\nRegression model performance on training data:\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R^2: %4f' % r_square))
extra_terms_feature_vector = list((self.feature_list[i] for i in self.regression_data_columns))
self.optimal_weights_array = phi_best
self.final_polynomial_order = order_best
self.errors = {'MAE': mae_error, 'MSE': mse_error, 'R2': r_square}
self.number_of_iterations = []
self.iteration_summary = []
self.additional_features_data = additional_features_array
self.final_training_data = self.regression_data
self.dataframe_of_optimal_weights_polynomial = dataframe_coeffs
self.dataframe_of_optimal_weights_extra_terms = extra_terms_coeffs
self.extra_terms_feature_vector = extra_terms_feature_vector
if (r_square > 0.95):
self.fit_status = 'ok'
else:
warnings.warn('Polynomial regression generates poor fit for the dataset')
self.fit_status = 'poor'
self.pickle_save({'model': self})
return self | polynomial_regression_fitting is the core method which is called in the PolynomialRegression class.
It ties together all the other functions in the class.
For each polynomial order, it
- calls the function user_defined_terms to generate the array of additional features (when required),
- calls the function training_test_data_creation to generate the training and test data sets,
- calls the function polyregression to determine the optimal weight vector and the fitting errors,
- determines whether the new fit is the best so far by comparing the cross-validation error of the current fit to that of the previous best,
- calls the function surrogate_performance to calculate the errors and R-values of the current fit, and
- returns results to user.
When adaptive sampling is done, the function also
- selects the adaptive samples to be added to the training data based on the magnitudes of the prediction errors of individual samples in self.original_data, and
- determines when the stopping conditions have been satisfied.
The following stopping conditions are considered when adaptive sampling is in use:
- The maximum number of training samples allowed by the user has been exceeded
- Mean absolute error ~= 0
- Mean squared error ~= 0
- R^2 = 1
- The preset iteration number given by the user has been reached
- All available points in self.original_data have been used for training.
Keyword Args:
additional_regression_features(<list>): Additional features the user wants the algorithm to consider during regression.
It should be noted that adaptive sampling is not available when additional features have been supplied by the user, i.e. when len(additional_regression_features) > 0.
Returns:
results: Python object containing the results of the polynomial regression process including the polynomial order
(results.polynomial_order), polynomial coefficients (results.optimal_weights_array) and fit errors (results.errors).
See information on ResultReport class for details on contents. | idaes/surrogate/pysmo/polynomial_regression.py | polynomial_regression_fitting | adowling2/idaes-pse | 112 | python | def polynomial_regression_fitting(self, additional_regression_features=None):
'\n\n polynomial_regression_fitting is the core method which is called in the PolynomialRegression class.\n It ties together all the other functions in the class.\n\n For each polynomial order, it\n - calls the function user_defined_terms to generate the array of additional features (when required),\n - calls the function training_test_data_creation to generate the training and test data sets,\n - calls the function polyregression to determine the optimal weight vector and the fitting errors,\n - determines whether the new fit improves is the best so far by the crossvalidation error of the current fit to the previous best,\n - calls the function surrogate_performance to calculate the errors and R-values of the current fit, and\n - returns results to user.\n\n When adaptive sampling is done, the function also\n - selects the adaptive samples to be added to the training data based on the magnitudes of the prediction errors of individual samples in self.original_data, and\n - determines when the the stopping conditions have been satisfied.\n\n\n The following stopping conditions are considered when adaptive sampling is in use:\n - The maximum number of training samples allowed by the user has been exceeded\n - Mean absolute error ~= 0\n - Mean squared error ~= 0\n - R^2 = 1\n - The preset iteration number given by the user has been reached\n - All available points in self.original_data have been used for training.\n\n Keyword Args:\n additional_regression_features(<list>): Additional features the user wants the algorithm to consider during regression.\n It should be noted that adaptive sampling is not available when additional features have been supplied by the user, i.e. when len(additional_regression_features) > 0.\n\n Returns:\n results: Python object containing the results of the polynomial regression process including the polynomial order\n (results.polynomial_order), polynomial coefficients (results.optimal_weights_array) and fit errors (results.errors).\n See information on ResultReport class for details on contents.\n\n '
best_error = 1e+20
train_error_fit = 1e+20
phi_best = 0
order_best = 0
if ((additional_regression_features is None) or (len(additional_regression_features) == 0)):
print('max_fraction_training_samples set at ', self.max_fraction_training_samples)
print('Number of adaptive samples (no_adaptive_samples) set at ', self.no_adaptive_samples)
print('Maximum number of iterations (Max_iter) set at: ', self.max_iter)
(training_data, cross_val_data) = self.training_test_data_creation()
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nInitial surrogate model is of order', order_best, (' with a cross-val error of %4f' % best_error))
(sorted_comparison_vector, mae_error, mse_error, r_square, r_square_adj) = self.surrogate_performance(phi_best, order_best)
print('Initial Regression Model Performance:\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R^2: %4f' % r_square), (' / Adjusted R^2: %4f' % r_square_adj))
(order_opt, train_error_opt, best_error_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, phi_opt) = (order_best, train_error_fit, best_error, mae_error, mse_error, r_square, r_square_adj, phi_best)
eps_neg = 1e-06
eps_pos = 0.999999
iteration_number = 1
stopping_criterion = int(np.ceil((self.max_fraction_training_samples * self.original_data.shape[0])))
vector_of_results = np.zeros((stopping_criterion, 9))
while ((self.regression_data.shape[0] < stopping_criterion) and (mae_error > eps_neg) and (mse_error > eps_neg) and (r_square < eps_pos) and (iteration_number < self.max_iter) and ((self.regression_data.shape[0] + self.no_adaptive_samples) < self.original_data.shape[0])):
print('\n-------------------------------------------------')
print('\nIteration ', iteration_number)
best_error = 1e+20
scv_input_data = sorted_comparison_vector[(:, :(- 2))]
sorted_comparison_vector_unique = scv_input_data[np.all(np.any((scv_input_data - self.regression_data[(:, None)]), axis=2), axis=0)]
adaptive_samples = sorted_comparison_vector_unique[((- self.no_adaptive_samples):, :)]
self.regression_data = np.concatenate((self.regression_data, adaptive_samples), axis=0)
self.number_of_samples = self.regression_data.shape[0]
print('\n', self.no_adaptive_samples, ' additional points added to training data. New number of training samples: ', self.regression_data.shape[0])
(training_data, cross_val_data) = self.training_test_data_creation()
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nThe best regression model is of order', order_best, (' with a cross-val error of %4f' % best_error))
(sorted_comparison_vector, mae_error, mse_error, r_square, r_square_adj) = self.surrogate_performance(phi_best, order_best)
print('Regression performance on full data in iteration', iteration_number, '\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R_sq: %4f' % r_square), (' / Adjusted R^2: %4f' % r_square_adj))
if (r_square_adj > r_square_adj_opt):
(phi_opt, order_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, train_error_opt, best_error_opt) = (phi_best, order_best, mae_error, mse_error, r_square, r_square_adj, train_error_fit, best_error)
print('New solution found.')
else:
print('Previous solution retained.')
vector_of_results[(iteration_number, :)] = [iteration_number, order_opt, train_error_opt, best_error_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, self.regression_data.shape[0]]
iteration_number += 1
vector_of_results = vector_of_results[(~ np.all((vector_of_results == 0), axis=1))]
beta_vector = np.round(phi_opt, 6)
if (r_square_adj_opt < 0.95):
print('\nPolynomial regression performs poorly for this dataset.')
else:
print('\nPolynomial regression generates a good surrogate model for the input data.')
if (iteration_number > 1):
(_, _, _, _) = self.error_plotting(vector_of_results)
print('\n-------------------------------------------------\n-------------------------------------------------')
print('Best solution found: ', '\nOrder: ', order_opt, (' / MAE: %4f' % mae_error_opt), (' / MSE: %4f' % mse_error_opt), (' / R_sq: %4f' % r_square_opt), (' / Adjusted R^2: %4f' % r_square_adj_opt))
dataframe_coeffs = self.results_generation(beta_vector, order_opt)
vector_of_results_df = pd.DataFrame({'Iteration_number': vector_of_results[(:, 0)], 'Polynomial order': vector_of_results[(:, 1)], 'Training error': vector_of_results[(:, 2)], 'Cross-val error': vector_of_results[(:, 3)], 'MAE': vector_of_results[(:, 4)], 'MSE': vector_of_results[(:, 5)], 'R2': vector_of_results[(:, 6)], 'Adjusted R2': vector_of_results[(:, 7)], 'Number of training samples': vector_of_results[(:, 8)]})
extra_terms_feature_vector = list((self.feature_list[i] for i in self.regression_data_columns))
self.optimal_weights_array = phi_opt
self.final_polynomial_order = order_opt
self.errors = {'MAE': mae_error_opt, 'MSE': mse_error_opt, 'R2': r_square_opt, 'Adjusted R2': r_square_adj_opt}
self.number_of_iterations = iteration_number
self.iteration_summary = vector_of_results_df
self.additional_features_data = None
self.final_training_data = self.regression_data
self.dataframe_of_optimal_weights_polynomial = dataframe_coeffs
self.dataframe_of_optimal_weights_extra_terms = []
self.extra_terms_feature_vector = extra_terms_feature_vector
if (r_square_opt > 0.95):
self.fit_status = 'ok'
else:
warnings.warn('Polynomial regression generates poor fit for the dataset')
self.fit_status = 'poor'
self.pickle_save({'model': self})
return self
else:
print('No iterations will be run.')
number_additional_features = len(additional_regression_features)
additional_features_array = self.user_defined_terms(additional_regression_features)
(training_data, cross_val_data) = self.training_test_data_creation(additional_features_array)
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))], training_data[('training_extras_' + str(cv_number))], cross_val_data[('test_extras_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nBest surrogate model is of order', order_best, (' with a cross-val S.S. Error of %4f' % best_error))
self.original_data = self.regression_data
(_, mae_error, mse_error, r_square, _) = self.surrogate_performance(phi_best, order_best, additional_features_array)
beta_vector = np.round(phi_best, 6)
dataframe_coeffs = self.results_generation(beta_vector, order_best)
extra_terms_coeffs = pd.Series()
print('\nThe coefficients of the extra terms in additional_regression_features are:\n')
for af in range(number_additional_features, 0, (- 1)):
print('Coeff. additional_regression_features[', ((number_additional_features - af) + 1), ']: ', beta_vector[((len(beta_vector) - af), 0)])
col_name = (('Coeff. additional_regression_features[' + str(((number_additional_features - af) + 1))) + ']')
extra_terms_coeffs = extra_terms_coeffs.append(pd.Series({col_name: beta_vector[((len(beta_vector) - af), 0)]}))
print('\nRegression model performance on training data:\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R^2: %4f' % r_square))
extra_terms_feature_vector = list((self.feature_list[i] for i in self.regression_data_columns))
self.optimal_weights_array = phi_best
self.final_polynomial_order = order_best
self.errors = {'MAE': mae_error, 'MSE': mse_error, 'R2': r_square}
self.number_of_iterations = []
self.iteration_summary = []
self.additional_features_data = additional_features_array
self.final_training_data = self.regression_data
self.dataframe_of_optimal_weights_polynomial = dataframe_coeffs
self.dataframe_of_optimal_weights_extra_terms = extra_terms_coeffs
self.extra_terms_feature_vector = extra_terms_feature_vector
if (r_square > 0.95):
self.fit_status = 'ok'
else:
warnings.warn('Polynomial regression generates poor fit for the dataset')
self.fit_status = 'poor'
self.pickle_save({'model': self})
return self | def polynomial_regression_fitting(self, additional_regression_features=None):
'\n\n polynomial_regression_fitting is the core method which is called in the PolynomialRegression class.\n It ties together all the other functions in the class.\n\n For each polynomial order, it\n - calls the function user_defined_terms to generate the array of additional features (when required),\n - calls the function training_test_data_creation to generate the training and test data sets,\n - calls the function polyregression to determine the optimal weight vector and the fitting errors,\n - determines whether the new fit improves is the best so far by the crossvalidation error of the current fit to the previous best,\n - calls the function surrogate_performance to calculate the errors and R-values of the current fit, and\n - returns results to user.\n\n When adaptive sampling is done, the function also\n - selects the adaptive samples to be added to the training data based on the magnitudes of the prediction errors of individual samples in self.original_data, and\n - determines when the the stopping conditions have been satisfied.\n\n\n The following stopping conditions are considered when adaptive sampling is in use:\n - The maximum number of training samples allowed by the user has been exceeded\n - Mean absolute error ~= 0\n - Mean squared error ~= 0\n - R^2 = 1\n - The preset iteration number given by the user has been reached\n - All available points in self.original_data have been used for training.\n\n Keyword Args:\n additional_regression_features(<list>): Additional features the user wants the algorithm to consider during regression.\n It should be noted that adaptive sampling is not available when additional features have been supplied by the user, i.e. when len(additional_regression_features) > 0.\n\n Returns:\n results: Python object containing the results of the polynomial regression process including the polynomial order\n (results.polynomial_order), polynomial coefficients (results.optimal_weights_array) and fit errors (results.errors).\n See information on ResultReport class for details on contents.\n\n '
best_error = 1e+20
train_error_fit = 1e+20
phi_best = 0
order_best = 0
if ((additional_regression_features is None) or (len(additional_regression_features) == 0)):
print('max_fraction_training_samples set at ', self.max_fraction_training_samples)
print('Number of adaptive samples (no_adaptive_samples) set at ', self.no_adaptive_samples)
print('Maximum number of iterations (Max_iter) set at: ', self.max_iter)
(training_data, cross_val_data) = self.training_test_data_creation()
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nInitial surrogate model is of order', order_best, (' with a cross-val error of %4f' % best_error))
(sorted_comparison_vector, mae_error, mse_error, r_square, r_square_adj) = self.surrogate_performance(phi_best, order_best)
print('Initial Regression Model Performance:\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R^2: %4f' % r_square), (' / Adjusted R^2: %4f' % r_square_adj))
(order_opt, train_error_opt, best_error_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, phi_opt) = (order_best, train_error_fit, best_error, mae_error, mse_error, r_square, r_square_adj, phi_best)
eps_neg = 1e-06
eps_pos = 0.999999
iteration_number = 1
stopping_criterion = int(np.ceil((self.max_fraction_training_samples * self.original_data.shape[0])))
vector_of_results = np.zeros((stopping_criterion, 9))
while ((self.regression_data.shape[0] < stopping_criterion) and (mae_error > eps_neg) and (mse_error > eps_neg) and (r_square < eps_pos) and (iteration_number < self.max_iter) and ((self.regression_data.shape[0] + self.no_adaptive_samples) < self.original_data.shape[0])):
print('\n-------------------------------------------------')
print('\nIteration ', iteration_number)
best_error = 1e+20
scv_input_data = sorted_comparison_vector[(:, :(- 2))]
sorted_comparison_vector_unique = scv_input_data[np.all(np.any((scv_input_data - self.regression_data[(:, None)]), axis=2), axis=0)]
adaptive_samples = sorted_comparison_vector_unique[((- self.no_adaptive_samples):, :)]
self.regression_data = np.concatenate((self.regression_data, adaptive_samples), axis=0)
self.number_of_samples = self.regression_data.shape[0]
print('\n', self.no_adaptive_samples, ' additional points added to training data. New number of training samples: ', self.regression_data.shape[0])
(training_data, cross_val_data) = self.training_test_data_creation()
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nThe best regression model is of order', order_best, (' with a cross-val error of %4f' % best_error))
(sorted_comparison_vector, mae_error, mse_error, r_square, r_square_adj) = self.surrogate_performance(phi_best, order_best)
print('Regression performance on full data in iteration', iteration_number, '\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R_sq: %4f' % r_square), (' / Adjusted R^2: %4f' % r_square_adj))
if (r_square_adj > r_square_adj_opt):
(phi_opt, order_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, train_error_opt, best_error_opt) = (phi_best, order_best, mae_error, mse_error, r_square, r_square_adj, train_error_fit, best_error)
print('New solution found.')
else:
print('Previous solution retained.')
vector_of_results[(iteration_number, :)] = [iteration_number, order_opt, train_error_opt, best_error_opt, mae_error_opt, mse_error_opt, r_square_opt, r_square_adj_opt, self.regression_data.shape[0]]
iteration_number += 1
vector_of_results = vector_of_results[(~ np.all((vector_of_results == 0), axis=1))]
beta_vector = np.round(phi_opt, 6)
if (r_square_adj_opt < 0.95):
print('\nPolynomial regression performs poorly for this dataset.')
else:
print('\nPolynomial regression generates a good surrogate model for the input data.')
if (iteration_number > 1):
(_, _, _, _) = self.error_plotting(vector_of_results)
print('\n-------------------------------------------------\n-------------------------------------------------')
print('Best solution found: ', '\nOrder: ', order_opt, (' / MAE: %4f' % mae_error_opt), (' / MSE: %4f' % mse_error_opt), (' / R_sq: %4f' % r_square_opt), (' / Adjusted R^2: %4f' % r_square_adj_opt))
dataframe_coeffs = self.results_generation(beta_vector, order_opt)
vector_of_results_df = pd.DataFrame({'Iteration_number': vector_of_results[(:, 0)], 'Polynomial order': vector_of_results[(:, 1)], 'Training error': vector_of_results[(:, 2)], 'Cross-val error': vector_of_results[(:, 3)], 'MAE': vector_of_results[(:, 4)], 'MSE': vector_of_results[(:, 5)], 'R2': vector_of_results[(:, 6)], 'Adjusted R2': vector_of_results[(:, 7)], 'Number of training samples': vector_of_results[(:, 8)]})
extra_terms_feature_vector = list((self.feature_list[i] for i in self.regression_data_columns))
self.optimal_weights_array = phi_opt
self.final_polynomial_order = order_opt
self.errors = {'MAE': mae_error_opt, 'MSE': mse_error_opt, 'R2': r_square_opt, 'Adjusted R2': r_square_adj_opt}
self.number_of_iterations = iteration_number
self.iteration_summary = vector_of_results_df
self.additional_features_data = None
self.final_training_data = self.regression_data
self.dataframe_of_optimal_weights_polynomial = dataframe_coeffs
self.dataframe_of_optimal_weights_extra_terms = []
self.extra_terms_feature_vector = extra_terms_feature_vector
if (r_square_opt > 0.95):
self.fit_status = 'ok'
else:
warnings.warn('Polynomial regression generates poor fit for the dataset')
self.fit_status = 'poor'
self.pickle_save({'model': self})
return self
else:
print('No iterations will be run.')
number_additional_features = len(additional_regression_features)
additional_features_array = self.user_defined_terms(additional_regression_features)
(training_data, cross_val_data) = self.training_test_data_creation(additional_features_array)
for poly_order in range(1, (self.max_polynomial_order + 1)):
for cv_number in range(1, (self.number_of_crossvalidations + 1)):
(phi, train_error, cv_error) = self.polyregression(poly_order, training_data[('training_set_' + str(cv_number))], cross_val_data[('test_set_' + str(cv_number))], training_data[('training_extras_' + str(cv_number))], cross_val_data[('test_extras_' + str(cv_number))])
if (cv_error < best_error):
best_error = cv_error
phi_best = phi
order_best = poly_order
train_error_fit = train_error
print('\nBest surrogate model is of order', order_best, (' with a cross-val S.S. Error of %4f' % best_error))
self.original_data = self.regression_data
(_, mae_error, mse_error, r_square, _) = self.surrogate_performance(phi_best, order_best, additional_features_array)
beta_vector = np.round(phi_best, 6)
dataframe_coeffs = self.results_generation(beta_vector, order_best)
extra_terms_coeffs = pd.Series()
print('\nThe coefficients of the extra terms in additional_regression_features are:\n')
for af in range(number_additional_features, 0, (- 1)):
print('Coeff. additional_regression_features[', ((number_additional_features - af) + 1), ']: ', beta_vector[((len(beta_vector) - af), 0)])
col_name = (('Coeff. additional_regression_features[' + str(((number_additional_features - af) + 1))) + ']')
extra_terms_coeffs = extra_terms_coeffs.append(pd.Series({col_name: beta_vector[((len(beta_vector) - af), 0)]}))
print('\nRegression model performance on training data:\nOrder: ', order_best, (' / MAE: %4f' % mae_error), (' / MSE: %4f' % mse_error), (' / R^2: %4f' % r_square))
extra_terms_feature_vector = list((self.feature_list[i] for i in self.regression_data_columns))
self.optimal_weights_array = phi_best
self.final_polynomial_order = order_best
self.errors = {'MAE': mae_error, 'MSE': mse_error, 'R2': r_square}
self.number_of_iterations = []
self.iteration_summary = []
self.additional_features_data = additional_features_array
self.final_training_data = self.regression_data
self.dataframe_of_optimal_weights_polynomial = dataframe_coeffs
self.dataframe_of_optimal_weights_extra_terms = extra_terms_coeffs
self.extra_terms_feature_vector = extra_terms_feature_vector
if (r_square > 0.95):
self.fit_status = 'ok'
else:
warnings.warn('Polynomial regression generates poor fit for the dataset')
self.fit_status = 'poor'
self.pickle_save({'model': self})
return self<|docstring|>polynomial_regression_fitting is the core method which is called in the PolynomialRegression class.
It ties together all the other functions in the class.
For each polynomial order, it
- calls the function user_defined_terms to generate the array of additional features (when required),
- calls the function training_test_data_creation to generate the training and test data sets,
- calls the function polyregression to determine the optimal weight vector and the fitting errors,
- determines whether the new fit is the best so far by comparing the cross-validation error of the current fit to that of the previous best,
- calls the function surrogate_performance to calculate the errors and R-values of the current fit, and
- returns results to user.
When adaptive sampling is done, the function also
- selects the adaptive samples to be added to the training data based on the magnitudes of the prediction errors of individual samples in self.original_data, and
- determines when the stopping conditions have been satisfied.
The following stopping conditions are considered when adaptive sampling is in use:
- The maximum number of training samples allowed by the user has been exceeded
- Mean absolute error ~= 0
- Mean squared error ~= 0
- R^2 = 1
- The preset iteration number given by the user has been reached
- All available points in self.original_data have been used for training.
Keyword Args:
additional_regression_features(<list>): Additional features the user wants the algorithm to consider during regression.
It should be noted that adaptive sampling is not available when additional features have been supplied by the user, i.e. when len(additional_regression_features) > 0.
Returns:
results: Python object containing the results of the polynomial regression process including the polynomial order
(results.polynomial_order), polynomial coefficients (results.optimal_weights_array) and fit errors (results.errors).
See information on ResultReport class for details on contents.<|endoftext|> |
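The record above describes selecting the polynomial order by cross-validation during fitting. Below is a minimal, self-contained sketch of that idea, not the IDAES implementation; the toy data, split sizes and numpy helpers are assumptions made purely for illustration.

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(-2, 2, 60)
y = 1.0 + 2.0 * x - 0.5 * x ** 3 + rng.normal(scale=0.1, size=x.shape)

# Stand-in for training_test_data_creation: a simple 75/25 split.
idx = rng.permutation(x.size)
train, test = idx[:45], idx[45:]

best_order, best_cv_error = None, np.inf
for order in range(1, 6):
    coeffs = np.polyfit(x[train], y[train], order)                    # plays the role of polyregression
    cv_error = np.mean((np.polyval(coeffs, x[test]) - y[test]) ** 2)  # cross-validation error
    if cv_error < best_cv_error:                                      # keep the best fit so far
        best_order, best_cv_error = order, cv_error

print(best_order, best_cv_error)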
5d06b46721ce6c652d6877c732e9318bcd69cb9f47d314a4d76393b966aa502e | def get_feature_vector(self):
"\n\n The ``get_feature_vector`` method generates the list of regression features from the column headers of the input dataset.\n\n Returns:\n Pyomo IndexedParam : An indexed parameter list of the variables supplied in the original data\n\n\n **Example:**\n\n .. code-block:: python\n\n # Create a small dataframe with three columns ('one', 'two', 'three') and two rows (A, B)\n >>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['one', 'two', 'three'])\n\n # Initialize the **PolynomialRegression** class and print the column headers for the variables\n >>> f = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=1, multinomials=True, training_split=0.8)\n >>> p = f.get_feature_vector()\n >>> for i in p.keys():\n >>> print(i)\n one\n two\n\n "
p = Param(self.regression_data_columns, mutable=True, initialize=0)
p.index_set().construct()
p.construct()
self.feature_list = p
return p | The ``get_feature_vector`` method generates the list of regression features from the column headers of the input dataset.
Returns:
Pyomo IndexedParam : An indexed parameter list of the variables supplied in the original data
**Example:**
.. code-block:: python
# Create a small dataframe with three columns ('one', 'two', 'three') and two rows (A, B)
>>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['one', 'two', 'three'])
# Initialize the **PolynomialRegression** class and print the column headers for the variables
>>> f = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=1, multinomials=True, training_split=0.8)
>>> p = f.get_feature_vector()
>>> for i in p.keys():
>>> print(i)
one
two | idaes/surrogate/pysmo/polynomial_regression.py | get_feature_vector | adowling2/idaes-pse | 112 | python | def get_feature_vector(self):
"\n\n The ``get_feature_vector`` method generates the list of regression features from the column headers of the input dataset.\n\n Returns:\n Pyomo IndexedParam : An indexed parameter list of the variables supplied in the original data\n\n\n **Example:**\n\n .. code-block:: python\n\n # Create a small dataframe with three columns ('one', 'two', 'three') and two rows (A, B)\n >>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['one', 'two', 'three'])\n\n # Initialize the **PolynomialRegression** class and print the column headers for the variables\n >>> f = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=1, multinomials=True, training_split=0.8)\n >>> p = f.get_feature_vector()\n >>> for i in p.keys():\n >>> print(i)\n one\n two\n\n "
p = Param(self.regression_data_columns, mutable=True, initialize=0)
p.index_set().construct()
p.construct()
self.feature_list = p
return p | def get_feature_vector(self):
"\n\n The ``get_feature_vector`` method generates the list of regression features from the column headers of the input dataset.\n\n Returns:\n Pyomo IndexedParam : An indexed parameter list of the variables supplied in the original data\n\n\n **Example:**\n\n .. code-block:: python\n\n # Create a small dataframe with three columns ('one', 'two', 'three') and two rows (A, B)\n >>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['one', 'two', 'three'])\n\n # Initialize the **PolynomialRegression** class and print the column headers for the variables\n >>> f = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=1, multinomials=True, training_split=0.8)\n >>> p = f.get_feature_vector()\n >>> for i in p.keys():\n >>> print(i)\n one\n two\n\n "
p = Param(self.regression_data_columns, mutable=True, initialize=0)
p.index_set().construct()
p.construct()
self.feature_list = p
return p<|docstring|>The ``get_feature_vector`` method generates the list of regression features from the column headers of the input dataset.
Returns:
Pyomo IndexedParam : An indexed parameter list of the variables supplied in the original data
**Example:**
.. code-block:: python
# Create a small dataframe with three columns ('one', 'two', 'three') and two rows (A, B)
>>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['one', 'two', 'three'])
# Initialize the **PolynomialRegression** class and print the column headers for the variables
>>> f = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=1, multinomials=True, training_split=0.8)
>>> p = f.get_feature_vector()
>>> for i in p.keys():
>>> print(i)
one
two<|endoftext|> |
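Note that pd.DataFrame.from_items, used in the example above, has been removed from recent pandas releases; an equivalent construction of the same frame (an assumption about the reader's pandas version, not a change to the documented API) is:

import pandas as pd

xy_data = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'], columns=['one', 'two', 'three'])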
d16fb0fc2cb73296ebbdc7f1cd0cf64235fb2cf9e603df35b6d810cb19cfae55 | def set_additional_terms(self, term_list):
"\n\n ``set_additional_terms`` accepts additional user-defined features for consideration during regression.\n\n Args:\n term_list (list) : List of additional terms to be considered as regression features. Each term in the list must be a Pyomo-supported intrinsic function.\n\n\n **Example:**\n\n .. code-block:: python\n\n # To add the sine and cosine of a variable with header 'X1' in the dataset as additional regression features:\n >>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['X1', 'X2', 'Y'])\n >>> A = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=5)\n >>> p = A.get_feature_vector()\n >>> A.set_additional_terms([ pyo.sin(p['X1']) , pyo.cos(p['X1']) ])\n\n "
self.additional_term_expressions = term_list | ``set_additional_terms`` accepts additional user-defined features for consideration during regression.
Args:
term_list (list) : List of additional terms to be considered as regression features. Each term in the list must be a Pyomo-supported intrinsic function.
**Example:**
.. code-block:: python
# To add the sine and cosine of a variable with header 'X1' in the dataset as additional regression features:
>>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['X1', 'X2', 'Y'])
>>> A = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=5)
>>> p = A.get_feature_vector()
>>> A.set_additional_terms([ pyo.sin(p['X1']) , pyo.cos(p['X1']) ]) | idaes/surrogate/pysmo/polynomial_regression.py | set_additional_terms | adowling2/idaes-pse | 112 | python | def set_additional_terms(self, term_list):
"\n\n ``set_additional_terms`` accepts additional user-defined features for consideration during regression.\n\n Args:\n term_list (list) : List of additional terms to be considered as regression features. Each term in the list must be a Pyomo-supported intrinsic function.\n\n\n **Example:**\n\n .. code-block:: python\n\n # To add the sine and cosine of a variable with header 'X1' in the dataset as additional regression features:\n >>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['X1', 'X2', 'Y'])\n >>> A = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=5)\n >>> p = A.get_feature_vector()\n >>> A.set_additional_terms([ pyo.sin(p['X1']) , pyo.cos(p['X1']) ])\n\n "
self.additional_term_expressions = term_list | def set_additional_terms(self, term_list):
"\n\n ``set_additional_terms`` accepts additional user-defined features for consideration during regression.\n\n Args:\n term_list (list) : List of additional terms to be considered as regression features. Each term in the list must be a Pyomo-supported intrinsic function.\n\n\n **Example:**\n\n .. code-block:: python\n\n # To add the sine and cosine of a variable with header 'X1' in the dataset as additional regression features:\n >>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['X1', 'X2', 'Y'])\n >>> A = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=5)\n >>> p = A.get_feature_vector()\n >>> A.set_additional_terms([ pyo.sin(p['X1']) , pyo.cos(p['X1']) ])\n\n "
self.additional_term_expressions = term_list<|docstring|>``set_additional_terms`` accepts additional user-defined features for consideration during regression.
Args:
term_list (list) : List of additional terms to be considered as regression features. Each term in the list must be a Pyomo-supported intrinsic function.
**Example:**
.. code-block:: python
# To add the sine and cosine of a variable with header 'X1' in the dataset as additional regression features:
>>> xy_data = pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['X1', 'X2', 'Y'])
>>> A = PolynomialRegression(xy_data, xy_data, maximum_polynomial_order=5)
>>> p = A.get_feature_vector()
>>> A.set_additional_terms([ pyo.sin(p['X1']) , pyo.cos(p['X1']) ])<|endoftext|> |
e94c08fcfe39517b15d52abdf34d56b54f9f0e53aa486e2ff22811fe5f247c19 | def training(self):
'\n\n The ``training`` method trains a polynomial model to an input dataset.\n It calls the core method which is called in the PolynomialRegression class (polynomial_regression_fitting).\n It accepts no user input, inheriting the information passed in class initialization.\n\n Returns:\n tuple : Python Object (**results**) containing the results of the polynomial regression process including:\n - the polynomial order (**self.final_polynomial_order**)\n - polynomial coefficients (**self.optimal_weights_array**), and\n - MAE and MSE errors as well as the :math:`R^{2}` (**results.errors**).\n\n '
cMap = ComponentMap()
for (i, col) in enumerate(self.regression_data_columns):
cMap[self.feature_list[col]] = self.regression_data[(:, i)]
npe = NumpyEvaluator(cMap)
additional_data = list((npe.walk_expression(term) for term in self.additional_term_expressions))
return self.polynomial_regression_fitting(additional_data) | The ``training`` method trains a polynomial model to an input dataset.
It calls polynomial_regression_fitting, the core method of the PolynomialRegression class.
It accepts no user input, inheriting the information passed in class initialization.
Returns:
tuple : Python Object (**results**) containing the results of the polynomial regression process including:
- the polynomial order (**self.final_polynomial_order**)
- polynomial coefficients (**self.optimal_weights_array**), and
- MAE and MSE errors as well as the :math:`R^{2}` (**results.errors**). | idaes/surrogate/pysmo/polynomial_regression.py | training | adowling2/idaes-pse | 112 | python | def training(self):
'\n\n The ``training`` method trains a polynomial model to an input dataset.\n It calls the core method which is called in the PolynomialRegression class (polynomial_regression_fitting).\n It accepts no user input, inheriting the information passed in class initialization.\n\n Returns:\n tuple : Python Object (**results**) containing the results of the polynomial regression process including:\n - the polynomial order (**self.final_polynomial_order**)\n - polynomial coefficients (**self.optimal_weights_array**), and\n - MAE and MSE errors as well as the :math:`R^{2}` (**results.errors**).\n\n '
cMap = ComponentMap()
for (i, col) in enumerate(self.regression_data_columns):
cMap[self.feature_list[col]] = self.regression_data[(:, i)]
npe = NumpyEvaluator(cMap)
additional_data = list((npe.walk_expression(term) for term in self.additional_term_expressions))
return self.polynomial_regression_fitting(additional_data) | def training(self):
'\n\n The ``training`` method trains a polynomial model to an input dataset.\n It calls the core method which is called in the PolynomialRegression class (polynomial_regression_fitting).\n It accepts no user input, inheriting the information passed in class initialization.\n\n Returns:\n tuple : Python Object (**results**) containing the results of the polynomial regression process including:\n - the polynomial order (**self.final_polynomial_order**)\n - polynomial coefficients (**self.optimal_weights_array**), and\n - MAE and MSE errors as well as the :math:`R^{2}` (**results.errors**).\n\n '
cMap = ComponentMap()
for (i, col) in enumerate(self.regression_data_columns):
cMap[self.feature_list[col]] = self.regression_data[(:, i)]
npe = NumpyEvaluator(cMap)
additional_data = list((npe.walk_expression(term) for term in self.additional_term_expressions))
return self.polynomial_regression_fitting(additional_data)<|docstring|>The ``training`` method trains a polynomial model to an input dataset.
It calls polynomial_regression_fitting, the core method of the PolynomialRegression class.
It accepts no user input, inheriting the information passed in class initialization.
Returns:
tuple : Python Object (**results**) containing the results of the polynomial regression process including:
- the polynomial order (**self.final_polynomial_order**)
- polynomial coefficients (**self.optimal_weights_array**), and
- MAE and MSE errors as well as the :math:`R^{2}` (**results.errors**).<|endoftext|> |
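A hedged end-to-end sketch of the workflow described above: the import path is inferred from the repository layout (idaes/surrogate/pysmo/polynomial_regression.py), and the toy dataset, column names and polynomial order are invented for illustration.

import numpy as np
import pandas as pd
from idaes.surrogate.pysmo.polynomial_regression import PolynomialRegression

x = np.linspace(0, 1, 50)
data = pd.DataFrame({'X1': x, 'Y': 3.0 * x ** 2 + 0.5})   # last column is the output

poly = PolynomialRegression(data, data, maximum_polynomial_order=3)
p = poly.get_feature_vector()   # indexed Pyomo Param keyed by the input column names
results = poly.training()       # runs the cross-validated fitting described above
print(results.errors)           # e.g. {'MAE': ..., 'MSE': ..., 'R2': ...}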
9e587068683deb055ad6db97cbfc0ac1f00016cb3d4fabd6d861debc0122bb54 | def generate_expression(self, variable_list):
'\n\n The ``generate_expression`` method returns the Pyomo expression for the polynomial model trained.\n\n The expression is constructed based on a supplied list of variables **variable_list** and the output of ``training``.\n\n Args:\n variable_list(list) : List of input variables to be used in generating expression. This can be the a list generated from the results of ``get_feature_vector``. The user can also choose to supply a new list of the appropriate length.\n\n Returns:\n Pyomo Expression : Pyomo expression of the polynomial model based on the variables provided in **variable_list**.\n\n '
terms = PolynomialRegression.polygeneration(self.final_polynomial_order, self.multinomials, np.array([variable_list])).transpose()
n = len(terms)
ans = sum(((w * t) for (w, t) in zip(np.nditer(self.optimal_weights_array), np.nditer(terms, flags=['refs_ok']))))
user_term_map = dict(((id(a), b) for (a, b) in zip(self.extra_terms_feature_vector, variable_list)))
if (len(self.additional_term_expressions) > 0):
for (w, expr) in zip(np.nditer(self.optimal_weights_array[n:]), self.additional_term_expressions):
ans += (float(w) * replace_expressions(expr, user_term_map))
return ans | The ``generate_expression`` method returns the Pyomo expression for the polynomial model trained.
The expression is constructed based on a supplied list of variables **variable_list** and the output of ``training``.
Args:
variable_list(list) : List of input variables to be used in generating expression. This can be a list generated from the results of ``get_feature_vector``. The user can also choose to supply a new list of the appropriate length.
Returns:
Pyomo Expression : Pyomo expression of the polynomial model based on the variables provided in **variable_list**. | idaes/surrogate/pysmo/polynomial_regression.py | generate_expression | adowling2/idaes-pse | 112 | python | def generate_expression(self, variable_list):
'\n\n The ``generate_expression`` method returns the Pyomo expression for the polynomial model trained.\n\n The expression is constructed based on a supplied list of variables **variable_list** and the output of ``training``.\n\n Args:\n variable_list(list) : List of input variables to be used in generating expression. This can be the a list generated from the results of ``get_feature_vector``. The user can also choose to supply a new list of the appropriate length.\n\n Returns:\n Pyomo Expression : Pyomo expression of the polynomial model based on the variables provided in **variable_list**.\n\n '
terms = PolynomialRegression.polygeneration(self.final_polynomial_order, self.multinomials, np.array([variable_list])).transpose()
n = len(terms)
ans = sum(((w * t) for (w, t) in zip(np.nditer(self.optimal_weights_array), np.nditer(terms, flags=['refs_ok']))))
user_term_map = dict(((id(a), b) for (a, b) in zip(self.extra_terms_feature_vector, variable_list)))
if (len(self.additional_term_expressions) > 0):
for (w, expr) in zip(np.nditer(self.optimal_weights_array[n:]), self.additional_term_expressions):
ans += (float(w) * replace_expressions(expr, user_term_map))
return ans | def generate_expression(self, variable_list):
'\n\n The ``generate_expression`` method returns the Pyomo expression for the polynomial model trained.\n\n The expression is constructed based on a supplied list of variables **variable_list** and the output of ``training``.\n\n Args:\n variable_list(list) : List of input variables to be used in generating expression. This can be the a list generated from the results of ``get_feature_vector``. The user can also choose to supply a new list of the appropriate length.\n\n Returns:\n Pyomo Expression : Pyomo expression of the polynomial model based on the variables provided in **variable_list**.\n\n '
terms = PolynomialRegression.polygeneration(self.final_polynomial_order, self.multinomials, np.array([variable_list])).transpose()
n = len(terms)
ans = sum(((w * t) for (w, t) in zip(np.nditer(self.optimal_weights_array), np.nditer(terms, flags=['refs_ok']))))
user_term_map = dict(((id(a), b) for (a, b) in zip(self.extra_terms_feature_vector, variable_list)))
if (len(self.additional_term_expressions) > 0):
for (w, expr) in zip(np.nditer(self.optimal_weights_array[n:]), self.additional_term_expressions):
ans += (float(w) * replace_expressions(expr, user_term_map))
return ans<|docstring|>The ``generate_expression`` method returns the Pyomo expression for the polynomial model trained.
The expression is constructed based on a supplied list of variables **variable_list** and the output of ``training``.
Args:
variable_list(list) : List of input variables to be used in generating expression. This can be a list generated from the results of ``get_feature_vector``. The user can also choose to supply a new list of the appropriate length.
Returns:
Pyomo Expression : Pyomo expression of the polynomial model based on the variables provided in **variable_list**.<|endoftext|> |
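Continuing the sketch above, the trained object can embed its surrogate in a Pyomo model via generate_expression; the variable name and bounds below are illustrative assumptions.

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=0.5, bounds=(0, 1))
# 'poly' is the trained PolynomialRegression object from the earlier sketch;
# the list passed in must have one entry per input feature (here just 'X1').
m.surrogate_obj = pyo.Objective(expr=poly.generate_expression([m.x]), sense=pyo.minimize)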
74a8410046e19f612c81e5318a86707d8f26cdab411e55ebde48704a1ddc1c6a | def predict_output(self, x_data):
'\n\n The ``predict_output`` method generates output predictions for input data x_data based a previously generated polynomial fitting.\n\n Args:\n x_data : Numpy array of designs for which the output is to be evaluated/predicted.\n\n Returns:\n Numpy Array : Output variable predictions based on the polynomial fit.\n\n '
nf = x_data.shape[1]
x_list = [i for i in range(0, nf)]
import pyomo.environ as aml
m = aml.ConcreteModel()
i = aml.Set(initialize=x_list)
m.xx = aml.Var(i)
m.o2 = aml.Objective(expr=self.generate_expression([m.xx[i] for i in x_list]))
y_eq = np.zeros((x_data.shape[0], 1))
for j in range(0, x_data.shape[0]):
for i in x_list:
m.xx[i] = x_data[(j, i)]
y_eq[(j, 0)] = aml.value(m.o2([m.xx[i] for i in x_list]))
return y_eq | The ``predict_output`` method generates output predictions for input data x_data based on a previously generated polynomial fitting.
Args:
x_data : Numpy array of designs for which the output is to be evaluated/predicted.
Returns:
Numpy Array : Output variable predictions based on the polynomial fit. | idaes/surrogate/pysmo/polynomial_regression.py | predict_output | adowling2/idaes-pse | 112 | python | def predict_output(self, x_data):
'\n\n The ``predict_output`` method generates output predictions for input data x_data based a previously generated polynomial fitting.\n\n Args:\n x_data : Numpy array of designs for which the output is to be evaluated/predicted.\n\n Returns:\n Numpy Array : Output variable predictions based on the polynomial fit.\n\n '
nf = x_data.shape[1]
x_list = [i for i in range(0, nf)]
import pyomo.environ as aml
m = aml.ConcreteModel()
i = aml.Set(initialize=x_list)
m.xx = aml.Var(i)
m.o2 = aml.Objective(expr=self.generate_expression([m.xx[i] for i in x_list]))
y_eq = np.zeros((x_data.shape[0], 1))
for j in range(0, x_data.shape[0]):
for i in x_list:
m.xx[i] = x_data[(j, i)]
y_eq[(j, 0)] = aml.value(m.o2([m.xx[i] for i in x_list]))
return y_eq | def predict_output(self, x_data):
'\n\n The ``predict_output`` method generates output predictions for input data x_data based a previously generated polynomial fitting.\n\n Args:\n x_data : Numpy array of designs for which the output is to be evaluated/predicted.\n\n Returns:\n Numpy Array : Output variable predictions based on the polynomial fit.\n\n '
nf = x_data.shape[1]
x_list = [i for i in range(0, nf)]
import pyomo.environ as aml
m = aml.ConcreteModel()
i = aml.Set(initialize=x_list)
m.xx = aml.Var(i)
m.o2 = aml.Objective(expr=self.generate_expression([m.xx[i] for i in x_list]))
y_eq = np.zeros((x_data.shape[0], 1))
for j in range(0, x_data.shape[0]):
for i in x_list:
m.xx[i] = x_data[(j, i)]
y_eq[(j, 0)] = aml.value(m.o2([m.xx[i] for i in x_list]))
return y_eq<|docstring|>The ``predict_output`` method generates output predictions for input data x_data based on a previously generated polynomial fitting.
Args:
x_data : Numpy array of designs for which the output is to be evaluated/predicted.
Returns:
Numpy Array : Output variable predictions based on the polynomial fit.<|endoftext|> |
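Continuing the same sketch, predict_output evaluates the fitted surrogate at new sample points; the input array shape (one row per sample, one column per input feature) is inferred from the method body above.

import numpy as np

x_new = np.array([[0.1], [0.5], [0.9]])   # three new samples of the single input 'X1'
y_new = poly.predict_output(x_new)        # returns an (n, 1) array of predictions
print(y_new.shape)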
0888acd06ef5ea34a1120ce9011d9d3f98185940523fd02a20070581b7850552 | def pickle_save(self, solutions):
'\n The training method saves the results of the run in a pickle object. It saves an object with two elements: the setup (index[0]) and the results (index[1]).\n '
try:
filehandler = open(self.filename, 'wb')
pickle.dump(solutions, filehandler)
print('\nResults saved in ', str(self.filename))
except:
raise IOError('File could not be saved.') | The training method saves the results of the run in a pickle object. It saves an object with two elements: the setup (index[0]) and the results (index[1]). | idaes/surrogate/pysmo/polynomial_regression.py | pickle_save | adowling2/idaes-pse | 112 | python | def pickle_save(self, solutions):
'\n \n '
try:
filehandler = open(self.filename, 'wb')
pickle.dump(solutions, filehandler)
print('\nResults saved in ', str(self.filename))
except:
raise IOError('File could not be saved.') | def pickle_save(self, solutions):
'\n \n '
try:
filehandler = open(self.filename, 'wb')
pickle.dump(solutions, filehandler)
print('\nResults saved in ', str(self.filename))
except:
raise IOError('File could not be saved.')<|docstring|>The training method saves the results of the run in a pickle object. It saves an object with two elements: the setup (index[0]) and the results (index[1]).<|endoftext|> |
f998d7cea1944057d55997bb20e35dfa984f29a5152db4a44afbc51860c61e75 | @staticmethod
def pickle_load(solution_file):
"\n pickle_load loads the results of a saved run 'file.obj'. It returns an array of two elements: the setup (index[0]) and the results (index[1]).\n\n Input arguments:\n solution_file : Pickle object file containing previous solution to be loaded.\n\n "
try:
filehandler = open(solution_file, 'rb')
return pickle.load(filehandler)
except:
raise Exception('File could not be loaded.') | pickle_load loads the results of a saved run 'file.obj'. It returns an array of two elements: the setup (index[0]) and the results (index[1]).
Input arguments:
solution_file : Pickle object file containing previous solution to be loaded. | idaes/surrogate/pysmo/polynomial_regression.py | pickle_load | adowling2/idaes-pse | 112 | python | @staticmethod
def pickle_load(solution_file):
"\n pickle_load loads the results of a saved run 'file.obj'. It returns an array of two elements: the setup (index[0]) and the results (index[1]).\n\n Input arguments:\n solution_file : Pickle object file containing previous solution to be loaded.\n\n "
try:
filehandler = open(solution_file, 'rb')
return pickle.load(filehandler)
except:
raise Exception('File could not be loaded.') | @staticmethod
def pickle_load(solution_file):
"\n pickle_load loads the results of a saved run 'file.obj'. It returns an array of two elements: the setup (index[0]) and the results (index[1]).\n\n Input arguments:\n solution_file : Pickle object file containing previous solution to be loaded.\n\n "
try:
filehandler = open(solution_file, 'rb')
return pickle.load(filehandler)
except:
raise Exception('File could not be loaded.')<|docstring|>pickle_load loads the results of a saved run 'file.obj'. It returns an array of two elements: the setup (index[0]) and the results (index[1]).
Input arguments:
solution_file : Pickle object file containing previous solution to be loaded.<|endoftext|> |
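A hypothetical save/load round trip for the two pickle helpers above (the class name PolynomialRegression and the import path are assumptions inferred from the record's file idaes/surrogate/pysmo/polynomial_regression.py; the two-element [setup, results] layout comes from the docstrings):
from idaes.surrogate.pysmo.polynomial_regression import PolynomialRegression  # assumed import

# pickle_save writes the solution to self.filename (see the code above);
# pickle_load restores a saved run 'file.obj' later without re-training.
previous_run = PolynomialRegression.pickle_load('previous_solution.obj')      # file name is illustrative
setup, results = previous_run[0], previous_run[1]                             # index[0] = setup, index[1] = results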
6004b2d54a6a18d8c9e174f1f2b1085e276f0075185fd8f99f5e7f3d320376c3 | def parity_residual_plots(self):
'\n\n inputs:\n\n Returns:\n\n '
y_predicted = self.predict_output(self.final_training_data[(:, :(- 1))])
fig1 = plt.figure(figsize=(16, 9), tight_layout=True)
ax = fig1.add_subplot(121)
ax.plot(self.final_training_data[(:, (- 1))], self.final_training_data[(:, (- 1))], '-')
ax.plot(self.final_training_data[(:, (- 1))], y_predicted, 'o')
ax.set_xlabel('True data', fontsize=12)
ax.set_ylabel('Surrogate values', fontsize=12)
ax.set_title('Parity plot', fontsize=12)
ax2 = fig1.add_subplot(122)
ax2.plot(self.final_training_data[(:, (- 1))], (self.final_training_data[(:, (- 1))] - y_predicted[(:,)].reshape(y_predicted.shape[0])), 's', mfc='w', mec='m', ms=6)
ax2.axhline(y=0, xmin=0, xmax=1)
ax2.set_xlabel('True data', fontsize=12)
ax2.set_ylabel('Residuals', fontsize=12)
ax2.set_title('Residual plot', fontsize=12)
plt.show()
return | inputs:
Returns: | idaes/surrogate/pysmo/polynomial_regression.py | parity_residual_plots | adowling2/idaes-pse | 112 | python | def parity_residual_plots(self):
'\n\n inputs:\n\n Returns:\n\n '
y_predicted = self.predict_output(self.final_training_data[(:, :(- 1))])
fig1 = plt.figure(figsize=(16, 9), tight_layout=True)
ax = fig1.add_subplot(121)
ax.plot(self.final_training_data[(:, (- 1))], self.final_training_data[(:, (- 1))], '-')
ax.plot(self.final_training_data[(:, (- 1))], y_predicted, 'o')
ax.set_xlabel('True data', fontsize=12)
ax.set_ylabel('Surrogate values', fontsize=12)
ax.set_title('Parity plot', fontsize=12)
ax2 = fig1.add_subplot(122)
ax2.plot(self.final_training_data[(:, (- 1))], (self.final_training_data[(:, (- 1))] - y_predicted[(:,)].reshape(y_predicted.shape[0])), 's', mfc='w', mec='m', ms=6)
ax2.axhline(y=0, xmin=0, xmax=1)
ax2.set_xlabel('True data', fontsize=12)
ax2.set_ylabel('Residuals', fontsize=12)
ax2.set_title('Residual plot', fontsize=12)
plt.show()
return | def parity_residual_plots(self):
'\n\n inputs:\n\n Returns:\n\n '
y_predicted = self.predict_output(self.final_training_data[(:, :(- 1))])
fig1 = plt.figure(figsize=(16, 9), tight_layout=True)
ax = fig1.add_subplot(121)
ax.plot(self.final_training_data[(:, (- 1))], self.final_training_data[(:, (- 1))], '-')
ax.plot(self.final_training_data[(:, (- 1))], y_predicted, 'o')
ax.set_xlabel('True data', fontsize=12)
ax.set_ylabel('Surrogate values', fontsize=12)
ax.set_title('Parity plot', fontsize=12)
ax2 = fig1.add_subplot(122)
ax2.plot(self.final_training_data[(:, (- 1))], (self.final_training_data[(:, (- 1))] - y_predicted[(:,)].reshape(y_predicted.shape[0])), 's', mfc='w', mec='m', ms=6)
ax2.axhline(y=0, xmin=0, xmax=1)
ax2.set_xlabel('True data', fontsize=12)
ax2.set_ylabel('Residuals', fontsize=12)
ax2.set_title('Residual plot', fontsize=12)
plt.show()
return<|docstring|>inputs:
Returns:<|endoftext|> |
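The stub docstring above says little; from the code, the method re-predicts the training outputs and draws a parity plot (true vs. surrogate values) next to a residual plot (true minus predicted, with a zero reference line). A minimal call sketch, with `poly` again a placeholder for a trained surrogate:
# Points far from the diagonal in the parity panel, or residuals far from the zero line,
# flag regions where the polynomial fit is poor.
poly.parity_residual_plots()   # calls plt.show() internally, as in the code above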
050a1689c3a68cc41e3ecafe8fd05c2eeb6e8f7a118697a554074d3358d07399 | def confint_regression(self, confidence=0.95):
'\n The ``confint_regression`` method prints the confidence intervals for the regression parameters.\n\n Args:\n confidence : Required confidence interval level, default = 0.95 (95%)\n\n '
from scipy.stats import t
data = self.final_training_data
y_pred = self.predict_output(data[(:, :(- 1))])
dof = ((data.shape[0] - len(self.optimal_weights_array)) + 1)
ssr = np.sum(((data[(:, (- 1))] - y_pred[(:, 0)]) ** 2))
sig_sq = (ssr / dof)
if ((self.additional_features_data is None) or (len(self.additional_features_data) == 0)):
x_exp = self.polygeneration(self.final_polynomial_order, self.multinomials, data[(:, :(- 1))])
else:
x_exp = self.polygeneration(self.final_polynomial_order, self.multinomials, data[(:, :(- 1))], additional_x_training_data=self.additional_features_data)
covar = (sig_sq * np.linalg.pinv((x_exp.transpose() @ x_exp)))
ss_reg_params = np.sqrt(np.diag(covar))
t_dist = t.ppf(((1 + confidence) / 2), dof)
c_data = np.zeros((self.optimal_weights_array.shape[0], 4))
c_data[(:, 0)] = self.optimal_weights_array[(:, 0)]
c_data[(:, 1)] = ss_reg_params[(:,)]
c_data[(:, 2)] = (self.optimal_weights_array[(:, 0)] - (t_dist * ss_reg_params[(:,)]))
c_data[(:, 3)] = (self.optimal_weights_array[(:, 0)] + (t_dist * ss_reg_params[(:,)]))
headers = ['Regression coeff.', 'Std. error', 'Conf. int. lower', 'Conf. int. upper']
c_data_df = pd.DataFrame(data=c_data, columns=headers)
print(c_data_df)
return c_data_df | The ``confint_regression`` method prints the confidence intervals for the regression parameters.
Args:
confidence : Required confidence interval level, default = 0.95 (95%) | idaes/surrogate/pysmo/polynomial_regression.py | confint_regression | adowling2/idaes-pse | 112 | python | def confint_regression(self, confidence=0.95):
'\n The ``confint_regression`` method prints the confidence intervals for the regression parameters.\n\n Args:\n confidence : Required confidence interval level, default = 0.95 (95%)\n\n '
from scipy.stats import t
data = self.final_training_data
y_pred = self.predict_output(data[(:, :(- 1))])
dof = ((data.shape[0] - len(self.optimal_weights_array)) + 1)
ssr = np.sum(((data[(:, (- 1))] - y_pred[(:, 0)]) ** 2))
sig_sq = (ssr / dof)
if ((self.additional_features_data is None) or (len(self.additional_features_data) == 0)):
x_exp = self.polygeneration(self.final_polynomial_order, self.multinomials, data[(:, :(- 1))])
else:
x_exp = self.polygeneration(self.final_polynomial_order, self.multinomials, data[(:, :(- 1))], additional_x_training_data=self.additional_features_data)
covar = (sig_sq * np.linalg.pinv((x_exp.transpose() @ x_exp)))
ss_reg_params = np.sqrt(np.diag(covar))
t_dist = t.ppf(((1 + confidence) / 2), dof)
c_data = np.zeros((self.optimal_weights_array.shape[0], 4))
c_data[(:, 0)] = self.optimal_weights_array[(:, 0)]
c_data[(:, 1)] = ss_reg_params[(:,)]
c_data[(:, 2)] = (self.optimal_weights_array[(:, 0)] - (t_dist * ss_reg_params[(:,)]))
c_data[(:, 3)] = (self.optimal_weights_array[(:, 0)] + (t_dist * ss_reg_params[(:,)]))
headers = ['Regression coeff.', 'Std. error', 'Conf. int. lower', 'Conf. int. upper']
c_data_df = pd.DataFrame(data=c_data, columns=headers)
print(c_data_df)
return c_data_df | def confint_regression(self, confidence=0.95):
'\n The ``confint_regression`` method prints the confidence intervals for the regression parameters.\n\n Args:\n confidence : Required confidence interval level, default = 0.95 (95%)\n\n '
from scipy.stats import t
data = self.final_training_data
y_pred = self.predict_output(data[(:, :(- 1))])
dof = ((data.shape[0] - len(self.optimal_weights_array)) + 1)
ssr = np.sum(((data[(:, (- 1))] - y_pred[(:, 0)]) ** 2))
sig_sq = (ssr / dof)
if ((self.additional_features_data is None) or (len(self.additional_features_data) == 0)):
x_exp = self.polygeneration(self.final_polynomial_order, self.multinomials, data[(:, :(- 1))])
else:
x_exp = self.polygeneration(self.final_polynomial_order, self.multinomials, data[(:, :(- 1))], additional_x_training_data=self.additional_features_data)
covar = (sig_sq * np.linalg.pinv((x_exp.transpose() @ x_exp)))
ss_reg_params = np.sqrt(np.diag(covar))
t_dist = t.ppf(((1 + confidence) / 2), dof)
c_data = np.zeros((self.optimal_weights_array.shape[0], 4))
c_data[(:, 0)] = self.optimal_weights_array[(:, 0)]
c_data[(:, 1)] = ss_reg_params[(:,)]
c_data[(:, 2)] = (self.optimal_weights_array[(:, 0)] - (t_dist * ss_reg_params[(:,)]))
c_data[(:, 3)] = (self.optimal_weights_array[(:, 0)] + (t_dist * ss_reg_params[(:,)]))
headers = ['Regression coeff.', 'Std. error', 'Conf. int. lower', 'Conf. int. upper']
c_data_df = pd.DataFrame(data=c_data, columns=headers)
print(c_data_df)
return c_data_df<|docstring|>The ``confint_regression`` method prints the confidence intervals for the regression parameters.
Args:
confidence : Required confidence interval level, default = 0.95 (95%)<|endoftext|> |
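A usage sketch for confint_regression (illustrative; `poly` is again a placeholder for a trained surrogate). The column names are the ones constructed in the code above:
ci = poly.confint_regression(confidence=0.95)        # pandas DataFrame, one row per regression coefficient

# Intervals that straddle zero hint that a coefficient is not significant at the chosen level.
straddles_zero = ci[(ci['Conf. int. lower'] < 0) & (ci['Conf. int. upper'] > 0)]
print(straddles_zero)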
16af2f921d534f85f20ead2033a4f1e190bd8148424bd58972a654b21eb41f21 | def test_is_leaf_module_positive(self):
'With an actual leaf module'
conv1 = nn.Conv2d(1, 10, 5)
assert utils.is_leaf_module(conv1) | With an actual leaf module | TrainingExtensions/torch/test/python/test_quantizer.py | test_is_leaf_module_positive | lcybuzz/aimet | 945 | python | def test_is_leaf_module_positive(self):
conv1 = nn.Conv2d(1, 10, 5)
assert utils.is_leaf_module(conv1) | def test_is_leaf_module_positive(self):
conv1 = nn.Conv2d(1, 10, 5)
assert utils.is_leaf_module(conv1)<|docstring|>With an actual leaf module<|endoftext|> |
d8b576a0792f60e1bff8f14c2b09ea89e67b7604f63f206090d761a0aebd4524 | def test_is_leaf_module_negative(self):
'With a non-leaf module'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
net = Net()
model = net.to(torch.device('cpu'))
assert (not utils.is_leaf_module(model)) | With a non-leaf module | TrainingExtensions/torch/test/python/test_quantizer.py | test_is_leaf_module_negative | lcybuzz/aimet | 945 | python | def test_is_leaf_module_negative(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
net = Net()
model = net.to(torch.device('cpu'))
assert (not utils.is_leaf_module(model)) | def test_is_leaf_module_negative(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
net = Net()
model = net.to(torch.device('cpu'))
assert (not utils.is_leaf_module(model))<|docstring|>With a non-leaf module<|endoftext|> |
baca75aeb06f68dd10f98d3a5b4ad5bfa5cf92c72dd54d99d3cda0eb708894ca | def test_is_quantizable_module_positive(self):
'With a quantizable module'
conv1 = nn.Conv2d(1, 10, 5)
assert QuantizationSimModel._is_quantizable_module(conv1) | With a quantizable module | TrainingExtensions/torch/test/python/test_quantizer.py | test_is_quantizable_module_positive | lcybuzz/aimet | 945 | python | def test_is_quantizable_module_positive(self):
conv1 = nn.Conv2d(1, 10, 5)
assert QuantizationSimModel._is_quantizable_module(conv1) | def test_is_quantizable_module_positive(self):
conv1 = nn.Conv2d(1, 10, 5)
assert QuantizationSimModel._is_quantizable_module(conv1)<|docstring|>With a quantizable module<|endoftext|> |
9ad782ed7543a502bb0801535fd14233cb9a2c4d04dbd34e230055f0f444ea83 | def test_is_quantizable_module_negative(self):
'With a non-quantizable module'
conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
assert (not QuantizationSimModel._is_quantizable_module(conv1)) | With a non-quantizable module | TrainingExtensions/torch/test/python/test_quantizer.py | test_is_quantizable_module_negative | lcybuzz/aimet | 945 | python | def test_is_quantizable_module_negative(self):
conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
assert (not QuantizationSimModel._is_quantizable_module(conv1)) | def test_is_quantizable_module_negative(self):
conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
assert (not QuantizationSimModel._is_quantizable_module(conv1))<|docstring|>With a non-quantizable module<|endoftext|> |
70a0c39179e05ac872956acf2f04590de01e2e9cb63c12195204d80886eb96b7 | def verify_quantization_wrappers(self, original_model, quantized_model, quant_scheme=QuantScheme.post_training_tf_enhanced):
'Test utility to determine if quantization wrappers were added correctly'
orig_modules = [(name, module) for (name, module) in original_model.named_modules() if (len(list(module.modules())) == 1)]
quant_modules = [(name, module) for (name, module) in quantized_model.named_modules() if isinstance(module, QcQuantizeWrapper)]
for (i, orig_mod_tuple) in enumerate(orig_modules):
quant_mod_tuple = quant_modules[i]
if ('_module_to_wrap' in orig_mod_tuple[0]):
continue
assert (orig_mod_tuple[0] == quant_mod_tuple[0]), 'Quantized model has an incorrectly named module'
if (quant_scheme in [QuantScheme.post_training_tf, QuantScheme.post_training_tf_enhanced]):
assert (str(type(quant_mod_tuple[1]).__name__) == 'StaticGridQuantWrapper')
assert (len(list(quant_mod_tuple[1].modules())) == 2)
child = list(quant_mod_tuple[1].modules())[1]
logger.debug('{} -> {}'.format(type(child), type(orig_mod_tuple[1])))
assert (type(child) == type(orig_mod_tuple[1])) | Test utility to determine if quantization wrappers were added correctly | TrainingExtensions/torch/test/python/test_quantizer.py | verify_quantization_wrappers | lcybuzz/aimet | 945 | python | def verify_quantization_wrappers(self, original_model, quantized_model, quant_scheme=QuantScheme.post_training_tf_enhanced):
orig_modules = [(name, module) for (name, module) in original_model.named_modules() if (len(list(module.modules())) == 1)]
quant_modules = [(name, module) for (name, module) in quantized_model.named_modules() if isinstance(module, QcQuantizeWrapper)]
for (i, orig_mod_tuple) in enumerate(orig_modules):
quant_mod_tuple = quant_modules[i]
if ('_module_to_wrap' in orig_mod_tuple[0]):
continue
assert (orig_mod_tuple[0] == quant_mod_tuple[0]), 'Quantized model has an incorrectly named module'
if (quant_scheme in [QuantScheme.post_training_tf, QuantScheme.post_training_tf_enhanced]):
assert (str(type(quant_mod_tuple[1]).__name__) == 'StaticGridQuantWrapper')
assert (len(list(quant_mod_tuple[1].modules())) == 2)
child = list(quant_mod_tuple[1].modules())[1]
logger.debug('{} -> {}'.format(type(child), type(orig_mod_tuple[1])))
assert (type(child) == type(orig_mod_tuple[1])) | def verify_quantization_wrappers(self, original_model, quantized_model, quant_scheme=QuantScheme.post_training_tf_enhanced):
orig_modules = [(name, module) for (name, module) in original_model.named_modules() if (len(list(module.modules())) == 1)]
quant_modules = [(name, module) for (name, module) in quantized_model.named_modules() if isinstance(module, QcQuantizeWrapper)]
for (i, orig_mod_tuple) in enumerate(orig_modules):
quant_mod_tuple = quant_modules[i]
if ('_module_to_wrap' in orig_mod_tuple[0]):
continue
assert (orig_mod_tuple[0] == quant_mod_tuple[0]), 'Quantized model has an incorrectly named module'
if (quant_scheme in [QuantScheme.post_training_tf, QuantScheme.post_training_tf_enhanced]):
assert (str(type(quant_mod_tuple[1]).__name__) == 'StaticGridQuantWrapper')
assert (len(list(quant_mod_tuple[1].modules())) == 2)
child = list(quant_mod_tuple[1].modules())[1]
logger.debug('{} -> {}'.format(type(child), type(orig_mod_tuple[1])))
assert (type(child) == type(orig_mod_tuple[1]))<|docstring|>Test utility to determine if quantization wrappers were added correctly<|endoftext|> |
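The helper above is called by each test record that follows with the original model and the wrapped sim.model; the call pattern (taken verbatim from those records) is:
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model)   # module names and wrapped types must line up one-to-one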
c66a747cf384f91ce8c9066e9be2f076cf727c1798a21e121a68401c2e68316e | def test_add_quantization_wrappers_one_deep(self):
'With a one-deep model'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.conv2(x)
x = self.conv2_drop(x)
x = x.view(x.size(0), (- 1))
x = self.fc1(x)
x = self.fc2(x)
return x
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | With a one-deep model | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_one_deep | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_one_deep(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.conv2(x)
x = self.conv2_drop(x)
x = x.view(x.size(0), (- 1))
x = self.fc1(x)
x = self.fc2(x)
return x
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | def test_add_quantization_wrappers_one_deep(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.conv2(x)
x = self.conv2_drop(x)
x = x.view(x.size(0), (- 1))
x = self.fc1(x)
x = self.fc2(x)
return x
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model)<|docstring|>With a one-deep model<|endoftext|> |
b3b91631a0f83b5099df3f4b28d58eaf63b52a79bef54a790322f8d1ee91e71c | def test_add_quantization_wrappers_with_preexisting_quantization_layers(self):
'With a one-deep model'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='stochastic', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.conv2(x)
x = self.conv2_drop(x)
x = x.view(x.size(0), (- 1))
x = self.fc1(x)
x = self.fc2(x)
return x
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
sim._add_quantization_wrappers(model, num_inout_tensors={}, default_data_type=QuantizationDataType.int)
self.verify_quantization_wrappers(model, sim.model) | With a one-deep model | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_with_preexisting_quantization_layers | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_with_preexisting_quantization_layers(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='stochastic', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.conv2(x)
x = self.conv2_drop(x)
x = x.view(x.size(0), (- 1))
x = self.fc1(x)
x = self.fc2(x)
return x
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
sim._add_quantization_wrappers(model, num_inout_tensors={}, default_data_type=QuantizationDataType.int)
self.verify_quantization_wrappers(model, sim.model) | def test_add_quantization_wrappers_with_preexisting_quantization_layers(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='stochastic', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.conv2(x)
x = self.conv2_drop(x)
x = x.view(x.size(0), (- 1))
x = self.fc1(x)
x = self.fc2(x)
return x
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
sim._add_quantization_wrappers(model, num_inout_tensors={}, default_data_type=QuantizationDataType.int)
self.verify_quantization_wrappers(model, sim.model)<|docstring|>With a one-deep model<|endoftext|> |
76d98c75fa30af8cc8a5c61c157e9a900737826f9390871ef1e6b47e917f2df6 | def test_add_quantization_wrappers_two_deep(self):
'With a one-deep model'
class SubNet(nn.Module):
def __init__(self):
super(SubNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = SubNet()
self.fc1 = nn.Linear(320, 50)
self.SubNet2 = SubNet()
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | With a one-deep model | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_two_deep | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_two_deep(self):
class SubNet(nn.Module):
def __init__(self):
super(SubNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = SubNet()
self.fc1 = nn.Linear(320, 50)
self.SubNet2 = SubNet()
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | def test_add_quantization_wrappers_two_deep(self):
class SubNet(nn.Module):
def __init__(self):
super(SubNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = SubNet()
self.fc1 = nn.Linear(320, 50)
self.SubNet2 = SubNet()
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model)<|docstring|>With a one-deep model<|endoftext|> |
db6b8c9b9da707265e42ca94b09ba5924246f013e1135073cfc13cb2d47e45fa | def test_add_quantization_wrappers_with_sequentials(self):
'With a one-deep model'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5))
self.fc1 = nn.Linear(320, 50)
self.subnet2 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5))
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
print(model)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
print(sim.model)
self.verify_quantization_wrappers(model, sim.model) | With a one-deep model | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_with_sequentials | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_with_sequentials(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5))
self.fc1 = nn.Linear(320, 50)
self.subnet2 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5))
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
print(model)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
print(sim.model)
self.verify_quantization_wrappers(model, sim.model) | def test_add_quantization_wrappers_with_sequentials(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5))
self.fc1 = nn.Linear(320, 50)
self.subnet2 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5))
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
print(model)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
print(sim.model)
self.verify_quantization_wrappers(model, sim.model)<|docstring|>With a one-deep model<|endoftext|> |
1327bb71aa9de3fff4a67a6d22567ebd9c385ee174afd0b934f4ac9513f0fbc8 | def test_add_quantization_wrappers_with_sequential_two_deep(self):
'With a one-deep model'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5)), nn.Conv2d(1, 10, 5))
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | With a one-deep model | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_with_sequential_two_deep | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_with_sequential_two_deep(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5)), nn.Conv2d(1, 10, 5))
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | def test_add_quantization_wrappers_with_sequential_two_deep(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5)), nn.Conv2d(1, 10, 5))
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, *inputs):
return self.conv1(inputs[0])
net = Net()
model = net.to(torch.device('cpu'))
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model)<|docstring|>With a one-deep model<|endoftext|> |
75881b6245e2490ac79c9aabde54caa151cf6c5e38870b545aa7a07c3ea028b7 | def test_add_quantization_wrappers_with_modulelist(self):
'With a one-deep model using ModuleList'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | With a one-deep model using ModuleList | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_with_modulelist | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_with_modulelist(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model) | def test_add_quantization_wrappers_with_modulelist(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 12, 12))
self.verify_quantization_wrappers(model, sim.model)<|docstring|>With a one-deep model using ModuleList<|endoftext|> |
271d34dc62d338354b6bc5919075c748e21211c5679fec84cd6f98f69ee66efd | def test_add_quantization_wrappers_with_modulelist_two_deep(self):
'With a two-deep model using ModuleList'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(3, 32, kernel_size=3)])
self.layers_deep = nn.ModuleList([nn.ModuleList([nn.BatchNorm2d(10), nn.ReLU()]), nn.Linear(3, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
print(model)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 3, 12, 12))
print(sim.model)
self.verify_quantization_wrappers(model, sim.model) | With a two-deep model using ModuleList | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_with_modulelist_two_deep | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_with_modulelist_two_deep(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(3, 32, kernel_size=3)])
self.layers_deep = nn.ModuleList([nn.ModuleList([nn.BatchNorm2d(10), nn.ReLU()]), nn.Linear(3, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
print(model)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 3, 12, 12))
print(sim.model)
self.verify_quantization_wrappers(model, sim.model) | def test_add_quantization_wrappers_with_modulelist_two_deep(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(3, 32, kernel_size=3)])
self.layers_deep = nn.ModuleList([nn.ModuleList([nn.BatchNorm2d(10), nn.ReLU()]), nn.Linear(3, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
print(model)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 3, 12, 12))
print(sim.model)
self.verify_quantization_wrappers(model, sim.model)<|docstring|>With a two-deep model using ModuleList<|endoftext|> |
9b88ee261e1b04096cd826d58b5de0291f4725729d821fdb0b1c49d34935aa37 | def test_add_quantization_wrappers_with_modulelist_with_layers_to_ignore(self):
'With a two-deep model using ModuleList and layers_to_ignore'
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(3, 32, kernel_size=3)])
self.layers_deep = nn.ModuleList([nn.ModuleList([nn.BatchNorm2d(10), nn.ReLU()]), nn.Linear(3, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 3, 12, 12))
layers_to_exclude = [sim.model.layers_deep[1], sim.model.layers_deep[3]]
sim.exclude_layers_from_quantization(layers_to_exclude)
print(sim.model)
assert isinstance(sim.model.layers[0]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers[1]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers[2]._module_to_wrap, nn.Conv2d)
assert isinstance(sim.model.layers_deep[0][0]._module_to_wrap, nn.BatchNorm2d)
assert isinstance(sim.model.layers_deep[0][1]._module_to_wrap, nn.ReLU)
assert isinstance(sim.model.layers_deep[1], nn.Linear)
assert isinstance(sim.model.layers_deep[2]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers_deep[3], nn.Conv2d) | With a two-deep model using ModuleList and layers_to_ignore | TrainingExtensions/torch/test/python/test_quantizer.py | test_add_quantization_wrappers_with_modulelist_with_layers_to_ignore | lcybuzz/aimet | 945 | python | def test_add_quantization_wrappers_with_modulelist_with_layers_to_ignore(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(3, 32, kernel_size=3)])
self.layers_deep = nn.ModuleList([nn.ModuleList([nn.BatchNorm2d(10), nn.ReLU()]), nn.Linear(3, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 3, 12, 12))
layers_to_exclude = [sim.model.layers_deep[1], sim.model.layers_deep[3]]
sim.exclude_layers_from_quantization(layers_to_exclude)
print(sim.model)
assert isinstance(sim.model.layers[0]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers[1]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers[2]._module_to_wrap, nn.Conv2d)
assert isinstance(sim.model.layers_deep[0][0]._module_to_wrap, nn.BatchNorm2d)
assert isinstance(sim.model.layers_deep[0][1]._module_to_wrap, nn.ReLU)
assert isinstance(sim.model.layers_deep[1], nn.Linear)
assert isinstance(sim.model.layers_deep[2]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers_deep[3], nn.Conv2d) | def test_add_quantization_wrappers_with_modulelist_with_layers_to_ignore(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.layers = nn.ModuleList([nn.Linear(1, 32), nn.Linear(32, 64), nn.Conv2d(3, 32, kernel_size=3)])
self.layers_deep = nn.ModuleList([nn.ModuleList([nn.BatchNorm2d(10), nn.ReLU()]), nn.Linear(3, 32), nn.Linear(32, 64), nn.Conv2d(1, 32, 5), StaticGridQuantWrapper(nn.Conv2d(1, 10, 5), weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)])
def forward(self, *inputs):
return self.layers[2](inputs[0])
model = Net()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 3, 12, 12))
layers_to_exclude = [sim.model.layers_deep[1], sim.model.layers_deep[3]]
sim.exclude_layers_from_quantization(layers_to_exclude)
print(sim.model)
assert isinstance(sim.model.layers[0]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers[1]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers[2]._module_to_wrap, nn.Conv2d)
assert isinstance(sim.model.layers_deep[0][0]._module_to_wrap, nn.BatchNorm2d)
assert isinstance(sim.model.layers_deep[0][1]._module_to_wrap, nn.ReLU)
assert isinstance(sim.model.layers_deep[1], nn.Linear)
assert isinstance(sim.model.layers_deep[2]._module_to_wrap, nn.Linear)
assert isinstance(sim.model.layers_deep[3], nn.Conv2d)<|docstring|>With a two-deep model using ModuleList and layers_to_ignore<|endoftext|> |
943bcfa4a4cc3327415d12547c074b80df0adb1d03c7c75f66316542d103e1b6 | def test_model_with_two_inputs(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.conv1_a.param_quantizers['weight'].use_symmetric_encodings = True
sim.model.conv1_a.param_quantizers['weight'].use_strict_symmetric = True
sim.compute_encodings(forward_pass, None)
model(*dummy_input)
sim.export('./data/', 'two_input_model', dummy_input) | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_with_two_inputs | lcybuzz/aimet | 945 | python | def test_model_with_two_inputs(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.conv1_a.param_quantizers['weight'].use_symmetric_encodings = True
sim.model.conv1_a.param_quantizers['weight'].use_strict_symmetric = True
sim.compute_encodings(forward_pass, None)
model(*dummy_input)
sim.export('./data/', 'two_input_model', dummy_input) | def test_model_with_two_inputs(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.conv1_a.param_quantizers['weight'].use_symmetric_encodings = True
sim.model.conv1_a.param_quantizers['weight'].use_strict_symmetric = True
sim.compute_encodings(forward_pass, None)
model(*dummy_input)
sim.export('./data/', 'two_input_model', dummy_input)<|docstring|>Model with more than 1 input<|endoftext|> |
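The QuantizationSimModel records in this part of the corpus repeat one core workflow; distilled, and with the import path stated as an assumption (the records show the calls but not the imports), it is:
import torch
from aimet_torch.quantsim import QuantizationSimModel   # import path assumed, not shown in the records

def forward_pass(model, _):
    model.eval()
    with torch.no_grad():
        model(torch.rand(1, 1, 28, 28))                  # representative calibration data

model = torch.nn.Sequential(torch.nn.Conv2d(1, 10, kernel_size=5), torch.nn.ReLU())
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
sim.compute_encodings(forward_pass, None)                # calibrate quantizer encodings
sim.export('./data/', 'example_model', torch.rand(1, 1, 28, 28))   # writes the .onnx and .encodings files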
e1a48f6526085f3caaccdd56304d0163855856af5bfb3e80531b0ad8183f204e | def test_model_with_two_inputs_fp16(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, default_output_bw=16, default_param_bw=16, dummy_input=dummy_input, default_data_type=QuantizationDataType.float)
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'two_input_model_fp16', dummy_input) | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_with_two_inputs_fp16 | lcybuzz/aimet | 945 | python | def test_model_with_two_inputs_fp16(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, default_output_bw=16, default_param_bw=16, dummy_input=dummy_input, default_data_type=QuantizationDataType.float)
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'two_input_model_fp16', dummy_input) | def test_model_with_two_inputs_fp16(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, default_output_bw=16, default_param_bw=16, dummy_input=dummy_input, default_data_type=QuantizationDataType.float)
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'two_input_model_fp16', dummy_input)<|docstring|>Model with more than 1 input<|endoftext|> |
34f855480654dc2fa6d41c4a4961fea4e9ed3aec57338770128003be90e57a24 | def test_model_with_two_inputs_per_channel(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
assert isinstance(sim.model.fc2.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.fc2.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.fc2.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
assert (len(sim.model.conv1_a.param_quantizers['weight'].encoding) == 10)
assert (len(sim.model.fc2.param_quantizers['weight'].encoding) == 10)
model(*dummy_input)
assert (sim.model.conv1_a.param_quantizers['weight'].encoding[0] != sim.model.conv1_a.param_quantizers['weight'].encoding[1])
assert (sim.model.fc2.param_quantizers['weight'].encoding[0] != sim.model.fc2.param_quantizers['weight'].encoding[1])
sim.export('./data/', 'two_input_model_per_channel', dummy_input)
with open('./data/two_input_model_per_channel.encodings', 'r') as encodings_file:
encodings = json.load(encodings_file)
assert (len(encodings['param_encodings']) == 10)
assert (len(encodings['param_encodings']['conv1_a.bias']) == 1)
assert (len(encodings['param_encodings']['conv1_a.weight']) == 10)
assert (encodings['param_encodings']['conv1_a.weight'][1]['bitwidth'] == 8)
assert (encodings['param_encodings']['conv1_a.weight'][1]['is_symmetric'] == 'False') | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_with_two_inputs_per_channel | lcybuzz/aimet | 945 | python | def test_model_with_two_inputs_per_channel(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
assert isinstance(sim.model.fc2.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.fc2.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.fc2.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
assert (len(sim.model.conv1_a.param_quantizers['weight'].encoding) == 10)
assert (len(sim.model.fc2.param_quantizers['weight'].encoding) == 10)
model(*dummy_input)
assert (sim.model.conv1_a.param_quantizers['weight'].encoding[0] != sim.model.conv1_a.param_quantizers['weight'].encoding[1])
assert (sim.model.fc2.param_quantizers['weight'].encoding[0] != sim.model.fc2.param_quantizers['weight'].encoding[1])
sim.export('./data/', 'two_input_model_per_channel', dummy_input)
with open('./data/two_input_model_per_channel.encodings', 'r') as encodings_file:
encodings = json.load(encodings_file)
assert (len(encodings['param_encodings']) == 10)
assert (len(encodings['param_encodings']['conv1_a.bias']) == 1)
assert (len(encodings['param_encodings']['conv1_a.weight']) == 10)
assert (encodings['param_encodings']['conv1_a.weight'][1]['bitwidth'] == 8)
assert (encodings['param_encodings']['conv1_a.weight'][1]['is_symmetric'] == 'False') | def test_model_with_two_inputs_per_channel(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
assert isinstance(sim.model.fc2.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.fc2.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.fc2.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
assert (len(sim.model.conv1_a.param_quantizers['weight'].encoding) == 10)
assert (len(sim.model.fc2.param_quantizers['weight'].encoding) == 10)
model(*dummy_input)
assert (sim.model.conv1_a.param_quantizers['weight'].encoding[0] != sim.model.conv1_a.param_quantizers['weight'].encoding[1])
assert (sim.model.fc2.param_quantizers['weight'].encoding[0] != sim.model.fc2.param_quantizers['weight'].encoding[1])
sim.export('./data/', 'two_input_model_per_channel', dummy_input)
with open('./data/two_input_model_per_channel.encodings', 'r') as encodings_file:
encodings = json.load(encodings_file)
assert (len(encodings['param_encodings']) == 10)
assert (len(encodings['param_encodings']['conv1_a.bias']) == 1)
assert (len(encodings['param_encodings']['conv1_a.weight']) == 10)
assert (encodings['param_encodings']['conv1_a.weight'][1]['bitwidth'] == 8)
assert (encodings['param_encodings']['conv1_a.weight'][1]['is_symmetric'] == 'False')<|docstring|>Model with more than 1 input<|endoftext|> |
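Per-channel quantization, as exercised in the record above, adds one step to that workflow; continuing the sketch (sim and forward_pass as before):
for wrapper in sim.quant_wrappers():
    wrapper.enable_per_channel_quantization()            # switch parameter quantizers to per-channel grids
sim.compute_encodings(forward_pass, None)

# Each weight quantizer now holds one encoding per output channel (10 for the record's
# conv1_a layer), and the exported .encodings JSON lists them per channel rather than
# as a single tensor-wide entry.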
f97c272f8fd439507df148d741967b3d808bd6f665fe115a599a3deb5c574013 | def test_model_with_two_inputs_per_channel_qat(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward() | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_with_two_inputs_per_channel_qat | lcybuzz/aimet | 945 | python | def test_model_with_two_inputs_per_channel_qat(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward() | def test_model_with_two_inputs_per_channel_qat(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward()<|docstring|>Model with more than 1 input<|endoftext|> |
ace9793fa02f5c1805794ecf8dde4a8877257b015e04798654b3c2ed177035c1 | def test_model_with_two_inputs_per_channel_fp16_qat(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input, default_output_bw=16, default_param_bw=16, default_data_type=QuantizationDataType.float)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward() | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_with_two_inputs_per_channel_fp16_qat | lcybuzz/aimet | 945 | python | def test_model_with_two_inputs_per_channel_fp16_qat(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input, default_output_bw=16, default_param_bw=16, default_data_type=QuantizationDataType.float)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward() | def test_model_with_two_inputs_per_channel_fp16_qat(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input, default_output_bw=16, default_param_bw=16, default_data_type=QuantizationDataType.float)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward()<|docstring|>Model with more than 1 input<|endoftext|> |
ea3f9ed7129a8cdc3d9e0e5f1cd49ea5c81538a6200bcc2e3d221919b44b1104 | def test_model_transposed_conv_per_channel_qat(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTransposeConv()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward() | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_transposed_conv_per_channel_qat | lcybuzz/aimet | 945 | python | def test_model_transposed_conv_per_channel_qat(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTransposeConv()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward() | def test_model_transposed_conv_per_channel_qat(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTransposeConv()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
for wrapper in sim.quant_wrappers():
wrapper.enable_per_channel_quantization()
assert isinstance(sim.model.conv1_a.param_quantizers['weight'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.param_quantizers['bias'], StaticGridPerChannelQuantizer)
assert isinstance(sim.model.conv1_a.output_quantizers[0], StaticGridPerTensorQuantizer)
sim.compute_encodings(forward_pass, None)
sim.model.train()
output = sim.model(*dummy_input)
loss = output.flatten().sum()
loss.backward()<|docstring|>Model with more than 1 input<|endoftext|> |
8527256cbfb162fbd4568d7901d0146128d55799aa56f0c8dd3307bc776d0f17 | def test_model_with_two_inputs_one_to_add(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 100, 100), torch.rand(32, 10, 22, 22))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
model = ModelWithTwoInputsOneToAdd()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
assert (2 == len(sim.model.add.input_quantizers))
assert (not sim.model.add.input_quantizers[0].enabled)
assert (not sim.model.add.input_quantizers[1].enabled)
sim.model.add.input_quantizers[1].enabled = True
sim.compute_encodings(forward_pass, None)
print(sim)
sim.export('./data/', 'two_input_model_one_with_add', dummy_input)
onnx_model = onnx.load('./data/two_input_model_one_with_add.onnx')
for node in onnx_model.graph.node:
if (node.name == 'add'):
break
assert (2 == len(node.input))
model_input_tensor = node.input[1]
with open('./data/two_input_model_one_with_add.encodings', 'r') as encodings_file:
encodings = json.load(encodings_file)
assert (model_input_tensor in encodings['activation_encodings'])
activation_enc = encodings['activation_encodings'][model_input_tensor]
assert isinstance(activation_enc[0]['offset'], int)
param_enc = encodings['param_encodings']['conv1_a.weight']
assert isinstance(param_enc[0]['offset'], int) | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_with_two_inputs_one_to_add | lcybuzz/aimet | 945 | python | def test_model_with_two_inputs_one_to_add(self):
dummy_input = (torch.rand(32, 1, 100, 100), torch.rand(32, 10, 22, 22))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
model = ModelWithTwoInputsOneToAdd()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
assert (2 == len(sim.model.add.input_quantizers))
assert (not sim.model.add.input_quantizers[0].enabled)
assert (not sim.model.add.input_quantizers[1].enabled)
sim.model.add.input_quantizers[1].enabled = True
sim.compute_encodings(forward_pass, None)
print(sim)
sim.export('./data/', 'two_input_model_one_with_add', dummy_input)
onnx_model = onnx.load('./data/two_input_model_one_with_add.onnx')
for node in onnx_model.graph.node:
if (node.name == 'add'):
break
assert (2 == len(node.input))
model_input_tensor = node.input[1]
with open('./data/two_input_model_one_with_add.encodings', 'r') as encodings_file:
encodings = json.load(encodings_file)
assert (model_input_tensor in encodings['activation_encodings'])
activation_enc = encodings['activation_encodings'][model_input_tensor]
assert isinstance(activation_enc[0]['offset'], int)
param_enc = encodings['param_encodings']['conv1_a.weight']
assert isinstance(param_enc[0]['offset'], int) | def test_model_with_two_inputs_one_to_add(self):
dummy_input = (torch.rand(32, 1, 100, 100), torch.rand(32, 10, 22, 22))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
model = ModelWithTwoInputsOneToAdd()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
assert (2 == len(sim.model.add.input_quantizers))
assert (not sim.model.add.input_quantizers[0].enabled)
assert (not sim.model.add.input_quantizers[1].enabled)
sim.model.add.input_quantizers[1].enabled = True
sim.compute_encodings(forward_pass, None)
print(sim)
sim.export('./data/', 'two_input_model_one_with_add', dummy_input)
onnx_model = onnx.load('./data/two_input_model_one_with_add.onnx')
for node in onnx_model.graph.node:
if (node.name == 'add'):
break
assert (2 == len(node.input))
model_input_tensor = node.input[1]
with open('./data/two_input_model_one_with_add.encodings', 'r') as encodings_file:
encodings = json.load(encodings_file)
assert (model_input_tensor in encodings['activation_encodings'])
activation_enc = encodings['activation_encodings'][model_input_tensor]
assert isinstance(activation_enc[0]['offset'], int)
param_enc = encodings['param_encodings']['conv1_a.weight']
assert isinstance(param_enc[0]['offset'], int)<|docstring|>Model with more than 1 input<|endoftext|> |
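The export test above boils down to a small pattern: export the calibrated sim, then cross-check the ONNX graph against the generated encodings file. A hedged sketch of that pattern, reusing sim and dummy_input from the sketch above (file names and the output directory are placeholders):

import json
import onnx

sim.export('./data/', 'tiny_net', dummy_input)           # writes tiny_net.onnx, tiny_net.pth and tiny_net.encodings
onnx_model = onnx.load('./data/tiny_net.onnx')
with open('./data/tiny_net.encodings') as f:
    encodings = json.load(f)
# activation encodings are keyed by ONNX tensor name, param encodings by the torch parameter name
print(len(onnx_model.graph.node))
print(list(encodings['activation_encodings'])[:3])
print(next(iter(encodings['param_encodings'].items())))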
0fb7302a850542cf9f923b8b3c47b35e05ebdc3d6ce8b84d43a2059415453a4a | def test_export_unified_encoding_format(self):
' test export functionality on ResNet18 '
resnet18 = models.resnet18()
resnet18.eval()
dummy_input = torch.randn(1, 3, 224, 224)
sim = QuantizationSimModel(resnet18, dummy_input=dummy_input)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'resnet18', dummy_input)
with open('./data/resnet18.encodings') as json_file:
encoding_data = json.load(json_file)
print(encoding_data)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '124')
assert isinstance(encoding_data['activation_encodings']['124'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list) | test export functionality on ResNet18 | TrainingExtensions/torch/test/python/test_quantizer.py | test_export_unified_encoding_format | lcybuzz/aimet | 945 | python | def test_export_unified_encoding_format(self):
' '
resnet18 = models.resnet18()
resnet18.eval()
dummy_input = torch.randn(1, 3, 224, 224)
sim = QuantizationSimModel(resnet18, dummy_input=dummy_input)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'resnet18', dummy_input)
with open('./data/resnet18.encodings') as json_file:
encoding_data = json.load(json_file)
print(encoding_data)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '124')
assert isinstance(encoding_data['activation_encodings']['124'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list) | def test_export_unified_encoding_format(self):
' '
resnet18 = models.resnet18()
resnet18.eval()
dummy_input = torch.randn(1, 3, 224, 224)
sim = QuantizationSimModel(resnet18, dummy_input=dummy_input)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'resnet18', dummy_input)
with open('./data/resnet18.encodings') as json_file:
encoding_data = json.load(json_file)
print(encoding_data)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '124')
assert isinstance(encoding_data['activation_encodings']['124'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list)<|docstring|>test export functionality on ResNet18<|endoftext|> |
b47faa294ee3badc90dbbce6022462218113c6a4e731359c366487bf3e2a555b | def test_export_to_torch_script(self):
' test export functionality on ResNet50 '
resnet50 = models.resnet50()
resnet50.eval()
dummy_input = torch.randn(1, 3, 224, 224)
sim = QuantizationSimModel(resnet50, dummy_input)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(torch.randn(1, 3, 224, 224))
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'resnet50', dummy_input, onnx_export_args=None)
with open('./data/resnet50.encodings') as json_file:
encoding_data = json.load(json_file)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '103')
assert isinstance(encoding_data['activation_encodings']['103'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list)
with open('./data/resnet50.encodings.yaml') as yaml_file:
encoding_data = yaml.load(yaml_file, Loader=yaml.FullLoader)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '103')
assert isinstance(encoding_data['activation_encodings']['103'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list) | test export functionality on ResNet50 | TrainingExtensions/torch/test/python/test_quantizer.py | test_export_to_torch_script | lcybuzz/aimet | 945 | python | def test_export_to_torch_script(self):
' '
resnet50 = models.resnet50()
resnet50.eval()
dummy_input = torch.randn(1, 3, 224, 224)
sim = QuantizationSimModel(resnet50, dummy_input)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(torch.randn(1, 3, 224, 224))
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'resnet50', dummy_input, onnx_export_args=None)
with open('./data/resnet50.encodings') as json_file:
encoding_data = json.load(json_file)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '103')
assert isinstance(encoding_data['activation_encodings']['103'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list)
with open('./data/resnet50.encodings.yaml') as yaml_file:
encoding_data = yaml.load(yaml_file, Loader=yaml.FullLoader)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '103')
assert isinstance(encoding_data['activation_encodings']['103'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list) | def test_export_to_torch_script(self):
' '
resnet50 = models.resnet50()
resnet50.eval()
dummy_input = torch.randn(1, 3, 224, 224)
sim = QuantizationSimModel(resnet50, dummy_input)
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(torch.randn(1, 3, 224, 224))
sim.compute_encodings(forward_pass, None)
sim.export('./data/', 'resnet50', dummy_input, onnx_export_args=None)
with open('./data/resnet50.encodings') as json_file:
encoding_data = json.load(json_file)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '103')
assert isinstance(encoding_data['activation_encodings']['103'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list)
with open('./data/resnet50.encodings.yaml') as yaml_file:
encoding_data = yaml.load(yaml_file, Loader=yaml.FullLoader)
activation_keys = list(encoding_data['activation_encodings'].keys())
assert (activation_keys[0] == '103')
assert isinstance(encoding_data['activation_encodings']['103'], list)
param_keys = list(encoding_data['param_encodings'].keys())
assert (param_keys[2] == 'conv1.weight')
assert isinstance(encoding_data['param_encodings']['conv1.weight'], list)<|docstring|>test export functionality on ResNet50<|endoftext|>
2ff501e7b4e576c810f0178da37cc9dd5b444031bee6371cd425b54c43e5f208 | def test_export_to_onnx(self):
'Exporting encodings and model'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
sim.model.conv1_a.param_quantizers['weight'].encoding.max = 10
sim.model.conv1_a.output_quantizers[0].encoding.max = 30
sim.export('./data/', 'two_input_model', dummy_input)
with open('./data/two_input_model.encodings', 'r') as fp:
encodings = json.load(fp)
activation_encodings = encodings['activation_encodings']
param_encodings = encodings['param_encodings']
assert (16 == len(activation_encodings))
assert ('conv1_a.bias' in param_encodings)
assert (param_encodings['conv1_a.bias'][0]['bitwidth'] == 32)
assert (7 == len(param_encodings['conv1_a.weight'][0]))
assert (10 == param_encodings['conv1_a.weight'][0]['max'])
with open('./data/two_input_model.encodings.yaml', 'r') as fp_yaml:
encodings = yaml.load(fp_yaml, Loader=yaml.FullLoader)
activation_encodings = encodings['activation_encodings']
param_encodings = encodings['param_encodings']
assert (16 == len(activation_encodings))
assert ('conv1_a.bias' in param_encodings)
assert (param_encodings['conv1_a.bias'][0]['bitwidth'] == 32)
assert (7 == len(param_encodings['conv1_a.weight'][0]))
assert (10 == param_encodings['conv1_a.weight'][0]['max'])
loaded_model = torch.load('./data/two_input_model.pth')
loaded_model(torch.rand(1, 1, 28, 28), torch.rand(1, 1, 28, 28)) | Exporting encodings and model | TrainingExtensions/torch/test/python/test_quantizer.py | test_export_to_onnx | lcybuzz/aimet | 945 | python | def test_export_to_onnx(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
sim.model.conv1_a.param_quantizers['weight'].encoding.max = 10
sim.model.conv1_a.output_quantizers[0].encoding.max = 30
sim.export('./data/', 'two_input_model', dummy_input)
with open('./data/two_input_model.encodings', 'r') as fp:
encodings = json.load(fp)
activation_encodings = encodings['activation_encodings']
param_encodings = encodings['param_encodings']
assert (16 == len(activation_encodings))
assert ('conv1_a.bias' in param_encodings)
assert (param_encodings['conv1_a.bias'][0]['bitwidth'] == 32)
assert (7 == len(param_encodings['conv1_a.weight'][0]))
assert (10 == param_encodings['conv1_a.weight'][0]['max'])
with open('./data/two_input_model.encodings.yaml', 'r') as fp_yaml:
encodings = yaml.load(fp_yaml, Loader=yaml.FullLoader)
activation_encodings = encodings['activation_encodings']
param_encodings = encodings['param_encodings']
assert (16 == len(activation_encodings))
assert ('conv1_a.bias' in param_encodings)
assert (param_encodings['conv1_a.bias'][0]['bitwidth'] == 32)
assert (7 == len(param_encodings['conv1_a.weight'][0]))
assert (10 == param_encodings['conv1_a.weight'][0]['max'])
loaded_model = torch.load('./data/two_input_model.pth')
loaded_model(torch.rand(1, 1, 28, 28), torch.rand(1, 1, 28, 28)) | def test_export_to_onnx(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
sim.model.conv1_a.param_quantizers['weight'].encoding.max = 10
sim.model.conv1_a.output_quantizers[0].encoding.max = 30
sim.export('./data/', 'two_input_model', dummy_input)
with open('./data/two_input_model.encodings', 'r') as fp:
encodings = json.load(fp)
activation_encodings = encodings['activation_encodings']
param_encodings = encodings['param_encodings']
assert (16 == len(activation_encodings))
assert ('conv1_a.bias' in param_encodings)
assert (param_encodings['conv1_a.bias'][0]['bitwidth'] == 32)
assert (7 == len(param_encodings['conv1_a.weight'][0]))
assert (10 == param_encodings['conv1_a.weight'][0]['max'])
with open('./data/two_input_model.encodings.yaml', 'r') as fp_yaml:
encodings = yaml.load(fp_yaml, Loader=yaml.FullLoader)
activation_encodings = encodings['activation_encodings']
param_encodings = encodings['param_encodings']
assert (16 == len(activation_encodings))
assert ('conv1_a.bias' in param_encodings)
assert (param_encodings['conv1_a.bias'][0]['bitwidth'] == 32)
assert (7 == len(param_encodings['conv1_a.weight'][0]))
assert (10 == param_encodings['conv1_a.weight'][0]['max'])
loaded_model = torch.load('./data/two_input_model.pth')
loaded_model(torch.rand(1, 1, 28, 28), torch.rand(1, 1, 28, 28))<|docstring|>Exporting encodings and model<|endoftext|> |
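Two details the record above relies on can be condensed into a short sketch, again reusing the sim, dummy_input and TinyNet layer name from the earlier sketch (layer name and paths are illustrative): an individual quantizer's encoding can be overridden before export, and the exported .pth reloads as a plain torch module with the quantization wrappers removed:

sim.model.conv.param_quantizers['weight'].encoding.max = 10   # manual override; the test checks it lands in the exported encodings
sim.export('./data/', 'tiny_net', dummy_input)
loaded = torch.load('./data/tiny_net.pth')                    # exported model without quantization wrappers
_ = loaded(dummy_input)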
db9f0d819062658faac52f630aeb6f6de7eceb9db3a2cc36bbcd93caa86d538d | def test_quantizing_models_with_funtional_add_ops(self):
'\n Testing models with add functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | Testing models with add functional ops
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_models_with_funtional_add_ops | lcybuzz/aimet | 945 | python | def test_quantizing_models_with_funtional_add_ops(self):
'\n Testing models with add functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | def test_quantizing_models_with_funtional_add_ops(self):
'\n Testing models with add functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim)<|docstring|>Testing models with add functional ops
:return:<|endoftext|> |
a9239420d9260bd4699694028e17ea7c19074d6476daf3c56dbc960774bcf292 | def test_quantizing_models_with_module_add_ops(self):
'\n Testing models with add functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
self.add1 = elementwise_ops.Add()
self.add2 = elementwise_ops.Add()
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = self.add1(ya, yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = self.add2(ya, yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert (not sim.model.conv3.input_quantizer.enabled)
assert sim.model.add1.output_quantizer.enabled
print(sim) | Testing models with add functional ops
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_models_with_module_add_ops | lcybuzz/aimet | 945 | python | def test_quantizing_models_with_module_add_ops(self):
'\n Testing models with add functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
self.add1 = elementwise_ops.Add()
self.add2 = elementwise_ops.Add()
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = self.add1(ya, yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = self.add2(ya, yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert (not sim.model.conv3.input_quantizer.enabled)
assert sim.model.add1.output_quantizer.enabled
print(sim) | def test_quantizing_models_with_module_add_ops(self):
'\n Testing models with add functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
self.add1 = elementwise_ops.Add()
self.add2 = elementwise_ops.Add()
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = self.add1(ya, yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = self.add2(ya, yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert (not sim.model.conv3.input_quantizer.enabled)
assert sim.model.add1.output_quantizer.enabled
print(sim)<|docstring|>Testing models with add functional ops
:return:<|endoftext|> |
703c4025b2e51f3c79910f44632cf9e94a66aceffa816cbd28ff7bf7c0dad835 | def test_quantizing_models_with_add_followed_by_split(self):
'\n Testing models with add functional ops followed by a split\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv4a.input_quantizer.enabled
assert sim.model.conv4b.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | Testing models with add functional ops followed by a split
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_models_with_add_followed_by_split | lcybuzz/aimet | 945 | python | def test_quantizing_models_with_add_followed_by_split(self):
'\n Testing models with add functional ops followed by a split\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv4a.input_quantizer.enabled
assert sim.model.conv4b.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | def test_quantizing_models_with_add_followed_by_split(self):
'\n Testing models with add functional ops followed by a split\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv4a.input_quantizer.enabled
assert sim.model.conv4b.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim)<|docstring|>Testing models with add functional ops followed by a split
:return:<|endoftext|> |
de412eb18e6736ecac55c8dfd998768736808168cfb974ff3632f2d5c266d0d3 | def test_quantizing_models_with_add_followed_by_add(self):
'\n Testing models with add functional ops followed by a split and then another add.\n This is similar to the resnet architecture where there are no ops on the residual connection\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5, padding=2)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
ya = self.conv4a(x)
yb = x
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv4a.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | Testing models with add functional ops followed by a split and then another add.
This is similar to the resnet architecture where there are no ops on the residual connection
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_models_with_add_followed_by_add | lcybuzz/aimet | 945 | python | def test_quantizing_models_with_add_followed_by_add(self):
'\n Testing models with add functional ops followed by a split and then another add.\n This is similar to the resnet architecture where there are no ops on the residual connection\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5, padding=2)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
ya = self.conv4a(x)
yb = x
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv4a.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | def test_quantizing_models_with_add_followed_by_add(self):
'\n Testing models with add functional ops followed by a split and then another add.\n This is similar to the resnet architecture where there are no ops on the residual connection\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5, padding=2)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya + yb)
ya = self.conv4a(x)
yb = x
x = (ya + yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv4a.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim)<|docstring|>Testing models with add functional ops followed by a split and then another add.
This is similar to the resnet architecture where there are no ops on the residual connection
:return:<|endoftext|> |
0854a4563d2f258a2bfc0a48af2d66ce85befe11601410eac76d9e9f043eb98c | def test_quantizing_models_with_mul_ops(self):
'\n Testing models with elementwise multiply functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya * yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya * yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | Testing models with elementwise multiply functional ops
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_models_with_mul_ops | lcybuzz/aimet | 945 | python | def test_quantizing_models_with_mul_ops(self):
'\n Testing models with elementwise multiply functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya * yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya * yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | def test_quantizing_models_with_mul_ops(self):
'\n Testing models with elementwise multiply functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya * yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya * yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim)<|docstring|>Testing models with elementwise multiply functional ops
:return:<|endoftext|> |
9f02d1e78bd1392b11b7feca3d18ccb4aa76f1d77126bd462d374cda152cd265 | def test_quantizing_models_with_div_ops(self):
'\n Testing models with elementwise division functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya / yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya / yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | Testing models with elementwise division functional ops
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_models_with_div_ops | lcybuzz/aimet | 945 | python | def test_quantizing_models_with_div_ops(self):
'\n Testing models with elementwise division functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya / yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya / yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | def test_quantizing_models_with_div_ops(self):
'\n Testing models with elementwise division functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 20, kernel_size=5)
self.conv2b = nn.Conv2d(10, 20, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 20, kernel_size=5)
self.conv4b = nn.Conv2d(20, 20, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = (ya / yb)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = (ya / yb)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim)<|docstring|>Testing models with elementwise division functional ops
:return:<|endoftext|> |
4209d2e81d9c043f930982178b3d0e0cf3729835b9af8e6a9dcf027f292f5962 | def test_quantizing_models_with_concat_ops(self):
'\n Testing models with concat functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 10, kernel_size=5)
self.conv2b = nn.Conv2d(10, 10, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 10, kernel_size=5)
self.conv4b = nn.Conv2d(20, 10, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = torch.cat((ya, yb), 1)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = torch.cat((ya, yb), 1)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | Testing models with concat functional ops
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_models_with_concat_ops | lcybuzz/aimet | 945 | python | def test_quantizing_models_with_concat_ops(self):
'\n Testing models with concat functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 10, kernel_size=5)
self.conv2b = nn.Conv2d(10, 10, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 10, kernel_size=5)
self.conv4b = nn.Conv2d(20, 10, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = torch.cat((ya, yb), 1)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = torch.cat((ya, yb), 1)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim) | def test_quantizing_models_with_concat_ops(self):
'\n Testing models with concat functional ops\n :return:\n '
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2a = nn.Conv2d(10, 10, kernel_size=5)
self.conv2b = nn.Conv2d(10, 10, kernel_size=5)
self.conv3 = nn.Conv2d(20, 20, kernel_size=5)
self.conv4a = nn.Conv2d(20, 10, kernel_size=5)
self.conv4b = nn.Conv2d(20, 10, kernel_size=5)
self.conv5 = nn.Conv2d(20, 20, kernel_size=5)
def forward(self, input):
x = self.conv1(input)
ya = self.conv2a(x)
yb = self.conv2b(x)
x = torch.cat((ya, yb), 1)
x = self.conv3(x)
ya = self.conv4a(x)
yb = self.conv4b(x)
x = torch.cat((ya, yb), 1)
x = self.conv5(x)
return x
model = Net()
model(torch.rand(1, 3, 28, 28))
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=torch.rand(1, 3, 28, 28))
assert sim.model.conv3.input_quantizer.enabled
assert sim.model.conv5.input_quantizer.enabled
print(sim)<|docstring|>Testing models with concat functional ops
:return:<|endoftext|> |
8f6bced28b7844da9ff8134293fc6e51ac41b4bd04ccbaf954f059bf06f17d14 | def test_layers_to_ignore(self):
' Test the capability to skip quantizing the layers specified by the user'
model = SmallMnist()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
layers_to_ignore = [sim.model.conv1, sim.model.fc2]
sim.exclude_layers_from_quantization(layers_to_ignore)
sim.compute_encodings(dummy_forward_pass, None)
assert isinstance(sim.model.conv1, nn.Conv2d)
assert (not isinstance(sim.model.conv2, nn.Conv2d))
assert isinstance(sim.model.fc2, nn.Linear) | Test the capability to skip quantizing the layers specified by the user | TrainingExtensions/torch/test/python/test_quantizer.py | test_layers_to_ignore | lcybuzz/aimet | 945 | python | def test_layers_to_ignore(self):
' '
model = SmallMnist()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
layers_to_ignore = [sim.model.conv1, sim.model.fc2]
sim.exclude_layers_from_quantization(layers_to_ignore)
sim.compute_encodings(dummy_forward_pass, None)
assert isinstance(sim.model.conv1, nn.Conv2d)
assert (not isinstance(sim.model.conv2, nn.Conv2d))
assert isinstance(sim.model.fc2, nn.Linear) | def test_layers_to_ignore(self):
' '
model = SmallMnist()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
layers_to_ignore = [sim.model.conv1, sim.model.fc2]
sim.exclude_layers_from_quantization(layers_to_ignore)
sim.compute_encodings(dummy_forward_pass, None)
assert isinstance(sim.model.conv1, nn.Conv2d)
assert (not isinstance(sim.model.conv2, nn.Conv2d))
assert isinstance(sim.model.fc2, nn.Linear)<|docstring|>Test the capability to skip quantizing the layers specified by the user<|endoftext|> |
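The exclusion mechanism checked above reduces to one call on the sim object. A sketch under the assumption of a sim built over a model that exposes conv1 and fc2 attributes (the layer names and calibration callback are illustrative):

sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.exclude_layers_from_quantization([sim.model.conv1, sim.model.fc2])
sim.compute_encodings(lambda m, _: m(dummy_input), None)
# excluded layers are plain nn modules again, so they are not quantized
assert isinstance(sim.model.conv1, torch.nn.Conv2d)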
dccd666b327c7f0871abe3a8d8a281f67a05bb9ecea5d96d64a38bc38751542f | def test_ste_gradient_math_tensors(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = [(- 0.25), (- 0.25)]
c_enc_max = [1.0, 1.0]
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]])
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_1, grad_out_1)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]])
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]])
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_2, grad_out_2)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]])
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_3, grad_out_3) | Unit test to validate custom gradient computation with auto grad computation.
:return: None | TrainingExtensions/torch/test/python/test_quantizer.py | test_ste_gradient_math_tensors | lcybuzz/aimet | 945 | python | def test_ste_gradient_math_tensors(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = [(- 0.25), (- 0.25)]
c_enc_max = [1.0, 1.0]
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]])
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_1, grad_out_1)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]])
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]])
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_2, grad_out_2)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]])
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_3, grad_out_3) | def test_ste_gradient_math_tensors(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = [(- 0.25), (- 0.25)]
c_enc_max = [1.0, 1.0]
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]])
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_1, grad_out_1)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]])
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]])
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_2, grad_out_2)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]])
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_3, grad_out_3)<|docstring|>Unit test to validate custom gradient computation with auto grad computation.
:return: None<|endoftext|> |
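The expected gradients in the STE tests above follow a single rule: the upstream gradient passes through wherever the input lies inside [enc_min, enc_max] (inclusive) and is zeroed outside. A plain-torch reproduction of that rule, independent of the library's internal compute_dloss_by_dx, which recovers expected_grad_1 from the first case:

import torch

def ste_mask_grad(x, grad, enc_min, enc_max):
    inside = (x >= enc_min) & (x <= enc_max)
    return torch.where(inside, grad, torch.zeros_like(grad))

x = torch.tensor([[1.0, 1.5], [0.125, -0.12]])
grad = torch.ones_like(x)
print(ste_mask_grad(x, grad, -0.25, 1.0))   # tensor([[1., 0.], [1., 1.]])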
331281c4e7726f0cda8089958931664c32df662caf87a482f3f9953af0d175bf | @pytest.mark.cuda
def test_ste_gradient_math_tensors_cuda(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = [(- 0.25), (- 0.25)]
c_enc_max = [1.0, 1.0]
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]]).cuda()
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]]).cuda()
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]]).cuda()
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_1, grad_out_1)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]]).cuda()
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]]).cuda()
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_2, grad_out_2)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]]).cuda()
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]]).cuda()
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_3, grad_out_3) | Unit test to validate custom gradient computation with auto grad computation.
:return: None | TrainingExtensions/torch/test/python/test_quantizer.py | test_ste_gradient_math_tensors_cuda | lcybuzz/aimet | 945 | python | @pytest.mark.cuda
def test_ste_gradient_math_tensors_cuda(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = [(- 0.25), (- 0.25)]
c_enc_max = [1.0, 1.0]
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]]).cuda()
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]]).cuda()
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]]).cuda()
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_1, grad_out_1)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]]).cuda()
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]]).cuda()
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_2, grad_out_2)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]]).cuda()
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]]).cuda()
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_3, grad_out_3) | @pytest.mark.cuda
def test_ste_gradient_math_tensors_cuda(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = [(- 0.25), (- 0.25)]
c_enc_max = [1.0, 1.0]
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]]).cuda()
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]]).cuda()
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]]).cuda()
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_1, grad_out_1)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]]).cuda()
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]]).cuda()
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_2, grad_out_2)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]]).cuda()
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]]).cuda()
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert torch.allclose(expected_grad_3, grad_out_3)<|docstring|>Unit test to validate custom gradient computation with auto grad computation.
:return: None<|endoftext|> |
2e4a39fae7030fe79e764d524a80bf58c25cb3cce46238757d17354416815dd1 | def test_ste_gradient_math(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = (- 0.25)
c_enc_max = 1.0
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]])
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]])
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]])
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]])
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_1, grad_out_1)
assert np.allclose(expected_grad_2, grad_out_2)
assert np.allclose(expected_grad_3, grad_out_3) | Unit test to validate custom gradient computation with auto grad computation.
:return: None | TrainingExtensions/torch/test/python/test_quantizer.py | test_ste_gradient_math | lcybuzz/aimet | 945 | python | def test_ste_gradient_math(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = (- 0.25)
c_enc_max = 1.0
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]])
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]])
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]])
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]])
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_1, grad_out_1)
assert np.allclose(expected_grad_2, grad_out_2)
assert np.allclose(expected_grad_3, grad_out_3) | def test_ste_gradient_math(self):
'\n Unit test to validate custom gradient computation with auto grad computation.\n :return: None\n '
c_enc_min = (- 0.25)
c_enc_max = 1.0
grad = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
custom_input_1 = torch.Tensor([[1.0, 1.5], [0.125, (- 0.12)]])
expected_grad_1 = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
grad_out_1 = compute_dloss_by_dx(custom_input_1, grad, c_enc_min, c_enc_max)
custom_input_2 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.3)]])
expected_grad_2 = torch.Tensor([[1.0, 1.0], [1.0, 0.0]])
grad_out_2 = compute_dloss_by_dx(custom_input_2, grad, c_enc_min, c_enc_max)
custom_input_3 = torch.Tensor([[1.0, 0.5], [0.125, (- 0.25)]])
expected_grad_3 = torch.Tensor([[1.0, 1.0], [1.0, 1.0]])
grad_out_3 = compute_dloss_by_dx(custom_input_3, grad, c_enc_min, c_enc_max)
assert np.allclose(expected_grad_1, grad_out_1)
assert np.allclose(expected_grad_2, grad_out_2)
assert np.allclose(expected_grad_3, grad_out_3)<|docstring|>Unit test to validate custom gradient computation with auto grad computation.
:return: None<|endoftext|> |
6cd466d9c71b0f739e3f29df059c5867465e303ea76104f390d78eae6db76de3 | def test_changing_param_quantizer_settings(self):
' Test that changing param quantizer settings takes effect after computing encodings is run '
model = SmallMnist()
old_weight = model.conv1.weight.detach().clone()
model.conv1.weight = torch.nn.Parameter((old_weight + (0.9 * torch.abs(torch.min(old_weight)))), requires_grad=False)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
assert (not sim.model.conv1.param_quantizers['weight'].encoding)
sim.compute_encodings(dummy_forward_pass, None)
asym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
asym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert (8 == sim.model.conv1.param_quantizers['weight'].encoding.bw)
assert (not (sim.model.conv1.param_quantizers['weight'].encoding.offset in [(- 127), (- 128)]))
sim.model.conv1.param_quantizers['weight'].use_symmetric_encodings = True
sim.model.conv1.param_quantizers['weight'].bitwidth = 4
sim.compute_encodings(dummy_forward_pass, None)
sym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
sym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert (4 == sim.model.conv1.param_quantizers['weight'].encoding.bw)
assert (sim.model.conv1.param_quantizers['weight'].encoding.offset in [(- 7), (- 8)])
assert (not (asym_min == sym_min))
assert (not (asym_max == sym_max)) | Test that changing param quantizer settings takes effect after computing encodings is run | TrainingExtensions/torch/test/python/test_quantizer.py | test_changing_param_quantizer_settings | lcybuzz/aimet | 945 | python | def test_changing_param_quantizer_settings(self):
' '
model = SmallMnist()
old_weight = model.conv1.weight.detach().clone()
model.conv1.weight = torch.nn.Parameter((old_weight + (0.9 * torch.abs(torch.min(old_weight)))), requires_grad=False)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
assert (not sim.model.conv1.param_quantizers['weight'].encoding)
sim.compute_encodings(dummy_forward_pass, None)
asym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
asym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert (8 == sim.model.conv1.param_quantizers['weight'].encoding.bw)
assert (not (sim.model.conv1.param_quantizers['weight'].encoding.offset in [(- 127), (- 128)]))
sim.model.conv1.param_quantizers['weight'].use_symmetric_encodings = True
sim.model.conv1.param_quantizers['weight'].bitwidth = 4
sim.compute_encodings(dummy_forward_pass, None)
sym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
sym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert (4 == sim.model.conv1.param_quantizers['weight'].encoding.bw)
assert (sim.model.conv1.param_quantizers['weight'].encoding.offset in [(- 7), (- 8)])
assert (not (asym_min == sym_min))
assert (not (asym_max == sym_max)) | def test_changing_param_quantizer_settings(self):
' '
model = SmallMnist()
old_weight = model.conv1.weight.detach().clone()
model.conv1.weight = torch.nn.Parameter((old_weight + (0.9 * torch.abs(torch.min(old_weight)))), requires_grad=False)
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
assert (not sim.model.conv1.param_quantizers['weight'].encoding)
sim.compute_encodings(dummy_forward_pass, None)
asym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
asym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert (8 == sim.model.conv1.param_quantizers['weight'].encoding.bw)
assert (not (sim.model.conv1.param_quantizers['weight'].encoding.offset in [(- 127), (- 128)]))
sim.model.conv1.param_quantizers['weight'].use_symmetric_encodings = True
sim.model.conv1.param_quantizers['weight'].bitwidth = 4
sim.compute_encodings(dummy_forward_pass, None)
sym_min = sim.model.conv1.param_quantizers['weight'].encoding.min
sym_max = sim.model.conv1.param_quantizers['weight'].encoding.max
assert (4 == sim.model.conv1.param_quantizers['weight'].encoding.bw)
assert (sim.model.conv1.param_quantizers['weight'].encoding.offset in [(- 7), (- 8)])
assert (not (asym_min == sym_min))
assert (not (asym_max == sym_max))<|docstring|>Test that changing param quantizer settings takes effect after computing encodings is run<|endoftext|> |
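The pattern above can be stated compactly: changing a parameter quantizer's settings only takes effect once compute_encodings is run again. A sketch continuing from a calibrated sim (the layer name is illustrative; the attributes are the ones the test touches):

quantizer = sim.model.conv1.param_quantizers['weight']
quantizer.use_symmetric_encodings = True
quantizer.bitwidth = 4
sim.compute_encodings(lambda m, _: m(dummy_input), None)   # recalibrate so the new settings are reflected in the encoding
print(quantizer.encoding.bw, quantizer.encoding.min, quantizer.encoding.max, quantizer.encoding.offset)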
d8cf080b539178f0fc294278c47e426207b8a6cb85242c4d7bf590e2f4ab9820 | def test_compute_encodings_on_subset_of_modules(self):
' Test that computing encodings on a subset of modules causes remaining quantized modules to be set to\n passThrough mode. '
def dummy_forward_pass(model, _):
conv1_out = model.conv1(torch.randn((1, 1, 28, 28)))
relu1_out = model.relu1(conv1_out)
model = SmallMnist()
model.eval()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
sim.compute_encodings(dummy_forward_pass, None)
for (name, module) in sim.model.named_modules():
if isinstance(module, StaticGridQuantWrapper):
assert (QcQuantizeOpMode.ACTIVE == module._mode)
if (name == 'relu1'):
assert module.output_quantizers[0].enabled
elif (name in ['conv2', 'conv2_drop', 'relu2', 'relu3', 'dropout', 'fc2', 'log_softmax']):
assert (not module.output_quantizers[0].enabled) | Test that computing encodings on a subset of modules causes remaining quantized modules to be set to
passThrough mode. | TrainingExtensions/torch/test/python/test_quantizer.py | test_compute_encodings_on_subset_of_modules | lcybuzz/aimet | 945 | python | def test_compute_encodings_on_subset_of_modules(self):
' Test that computing encodings on a subset of modules causes remaining quantized modules to be set to\n passThrough mode. '
def dummy_forward_pass(model, _):
conv1_out = model.conv1(torch.randn((1, 1, 28, 28)))
relu1_out = model.relu1(conv1_out)
model = SmallMnist()
model.eval()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
sim.compute_encodings(dummy_forward_pass, None)
for (name, module) in sim.model.named_modules():
if isinstance(module, StaticGridQuantWrapper):
assert (QcQuantizeOpMode.ACTIVE == module._mode)
if (name == 'relu1'):
assert module.output_quantizers[0].enabled
elif (name in ['conv2', 'conv2_drop', 'relu2', 'relu3', 'dropout', 'fc2', 'log_softmax']):
assert (not module.output_quantizers[0].enabled) | def test_compute_encodings_on_subset_of_modules(self):
' Test that computing encodings on a subset of modules causes remaining quantized modules to be set to\n passThrough mode. '
def dummy_forward_pass(model, _):
conv1_out = model.conv1(torch.randn((1, 1, 28, 28)))
relu1_out = model.relu1(conv1_out)
model = SmallMnist()
model.eval()
sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))
sim.compute_encodings(dummy_forward_pass, None)
for (name, module) in sim.model.named_modules():
if isinstance(module, StaticGridQuantWrapper):
assert (QcQuantizeOpMode.ACTIVE == module._mode)
if (name == 'relu1'):
assert module.output_quantizers[0].enabled
elif (name in ['conv2', 'conv2_drop', 'relu2', 'relu3', 'dropout', 'fc2', 'log_softmax']):
assert (not module.output_quantizers[0].enabled)<|docstring|>Test that computing encodings on a subset of modules causes remaining quantized modules to be set to
passThrough mode.<|endoftext|> |
91b6b2dab6ab084a2e7d6485081064766e1de3d3769bd516001c4f0b8321fa1a | def test_connected_graph_is_none(self):
' Test that an assertion is thrown when connected graph is not able to be built. '
def raise_trace_error(_self, _model, _inputs):
raise torch.jit.TracingCheckError(None, None)
model = SmallMnist()
model.eval()
with unittest.mock.patch.object(ConnectedGraph, '__init__', raise_trace_error):
with unittest.mock.patch.object(ConnectedGraph, '__del__', (lambda _self: None)):
with pytest.raises(AssertionError):
_ = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28)) | Test that an assertion is thrown when connected graph is not able to be built. | TrainingExtensions/torch/test/python/test_quantizer.py | test_connected_graph_is_none | lcybuzz/aimet | 945 | python | def test_connected_graph_is_none(self):
' '
def raise_trace_error(_self, _model, _inputs):
raise torch.jit.TracingCheckError(None, None)
model = SmallMnist()
model.eval()
with unittest.mock.patch.object(ConnectedGraph, '__init__', raise_trace_error):
with unittest.mock.patch.object(ConnectedGraph, '__del__', (lambda _self: None)):
with pytest.raises(AssertionError):
_ = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28)) | def test_connected_graph_is_none(self):
' '
def raise_trace_error(_self, _model, _inputs):
raise torch.jit.TracingCheckError(None, None)
model = SmallMnist()
model.eval()
with unittest.mock.patch.object(ConnectedGraph, '__init__', raise_trace_error):
with unittest.mock.patch.object(ConnectedGraph, '__del__', (lambda _self: None)):
with pytest.raises(AssertionError):
_ = QuantizationSimModel(model, dummy_input=torch.rand(1, 1, 28, 28))<|docstring|>Test that an assertion is thrown when connected graph is not able to be built.<|endoftext|> |
d236ae217e76fe72b29672e7cc63b4c630ed7ef3cb9bf690fd1b26a0aa291641 | def test_rnn_quantization(self):
' Test quantizing a model with rnn layer '
model = SingleLayerRNNModel()
dummy_input = torch.randn(10, 1, 3)
sim = QuantizationSimModel(model, dummy_input)
assert isinstance(sim.model.rnn, QcQuantizeRecurrent) | Test quantizing a model with rnn layer | TrainingExtensions/torch/test/python/test_quantizer.py | test_rnn_quantization | lcybuzz/aimet | 945 | python | def test_rnn_quantization(self):
' '
model = SingleLayerRNNModel()
dummy_input = torch.randn(10, 1, 3)
sim = QuantizationSimModel(model, dummy_input)
assert isinstance(sim.model.rnn, QcQuantizeRecurrent) | def test_rnn_quantization(self):
' '
model = SingleLayerRNNModel()
dummy_input = torch.randn(10, 1, 3)
sim = QuantizationSimModel(model, dummy_input)
assert isinstance(sim.model.rnn, QcQuantizeRecurrent)<|docstring|>Test quantizing a model with rnn layer<|endoftext|> |
e78bbdc6876de6eb02cee47bf90b57d4ee46985c3a7d93b3c436b6b5e0e673b4 | def test_quantizing_qc_quantize_module(self):
' Test that qc_quantize_module is identified as not quantizable '
qc_quantize_module = QcQuantizeRecurrent(torch.nn.RNN(input_size=3, hidden_size=5, num_layers=1), weight_bw=16, activation_bw=16, quant_scheme=QuantScheme.post_training_tf, round_mode='nearest', data_type=QuantizationDataType.int)
assert (not QuantizationSimModel._is_quantizable_module(qc_quantize_module)) | Test that qc_quantize_module is identified as not quantizable | TrainingExtensions/torch/test/python/test_quantizer.py | test_quantizing_qc_quantize_module | lcybuzz/aimet | 945 | python | def test_quantizing_qc_quantize_module(self):
' '
qc_quantize_module = QcQuantizeRecurrent(torch.nn.RNN(input_size=3, hidden_size=5, num_layers=1), weight_bw=16, activation_bw=16, quant_scheme=QuantScheme.post_training_tf, round_mode='nearest', data_type=QuantizationDataType.int)
assert (not QuantizationSimModel._is_quantizable_module(qc_quantize_module)) | def test_quantizing_qc_quantize_module(self):
' '
qc_quantize_module = QcQuantizeRecurrent(torch.nn.RNN(input_size=3, hidden_size=5, num_layers=1), weight_bw=16, activation_bw=16, quant_scheme=QuantScheme.post_training_tf, round_mode='nearest', data_type=QuantizationDataType.int)
assert (not QuantizationSimModel._is_quantizable_module(qc_quantize_module))<|docstring|>Test that qc_quantize_module is identified as not quantizable<|endoftext|> |
cd2ce9a6150cbbb0dae5c0c538bc517abbf6be992c5cb79d4b8fb76bedfc68a4 | def test_export_recurrent_model(self):
' Test export functionality with recurrent models '
models = [TwoLayerBidirectionaRNNModel(), TwoLayerBidirectionalLSTMModel(), TwoLayerBidirectionalGRUModel()]
dummy_input = torch.randn(10, 1, 3)
def forward_pass(model, args):
model.eval()
model(dummy_input)
for model in models:
sim = QuantizationSimModel(model, dummy_input)
sim.compute_encodings(forward_pass, None)
with torch.no_grad():
sim.model.recurrent.weight_ih_l0[0][0] = 1
edited_weight = sim.model.recurrent.weight_ih_l0.detach().clone()
assert (not torch.equal(edited_weight, sim.model.recurrent.module_to_quantize.weight_ih_l0))
sim.export('./data', 'recurrent_save', dummy_input)
exported_model = torch.load('./data/recurrent_save.pth')
assert isinstance(exported_model.recurrent, (torch.nn.RNN, torch.nn.LSTM, torch.nn.GRU))
assert torch.equal(edited_weight, exported_model.recurrent.weight_ih_l0)
with open('./data/recurrent_save.encodings') as f:
encodings = json.load(f)
assert (8 == len(encodings['activation_encodings']))
assert (4 == len(encodings['param_encodings']))
os.remove('./data/recurrent_save.pth')
os.remove('./data/recurrent_save.onnx')
os.remove('./data/recurrent_save.encodings') | Test export functionality with recurrent models | TrainingExtensions/torch/test/python/test_quantizer.py | test_export_recurrent_model | lcybuzz/aimet | 945 | python | def test_export_recurrent_model(self):
' '
models = [TwoLayerBidirectionaRNNModel(), TwoLayerBidirectionalLSTMModel(), TwoLayerBidirectionalGRUModel()]
dummy_input = torch.randn(10, 1, 3)
def forward_pass(model, args):
model.eval()
model(dummy_input)
for model in models:
sim = QuantizationSimModel(model, dummy_input)
sim.compute_encodings(forward_pass, None)
with torch.no_grad():
sim.model.recurrent.weight_ih_l0[0][0] = 1
edited_weight = sim.model.recurrent.weight_ih_l0.detach().clone()
assert (not torch.equal(edited_weight, sim.model.recurrent.module_to_quantize.weight_ih_l0))
sim.export('./data', 'recurrent_save', dummy_input)
exported_model = torch.load('./data/recurrent_save.pth')
assert isinstance(exported_model.recurrent, (torch.nn.RNN, torch.nn.LSTM, torch.nn.GRU))
assert torch.equal(edited_weight, exported_model.recurrent.weight_ih_l0)
with open('./data/recurrent_save.encodings') as f:
encodings = json.load(f)
assert (8 == len(encodings['activation_encodings']))
assert (4 == len(encodings['param_encodings']))
os.remove('./data/recurrent_save.pth')
os.remove('./data/recurrent_save.onnx')
os.remove('./data/recurrent_save.encodings') | def test_export_recurrent_model(self):
' '
models = [TwoLayerBidirectionaRNNModel(), TwoLayerBidirectionalLSTMModel(), TwoLayerBidirectionalGRUModel()]
dummy_input = torch.randn(10, 1, 3)
def forward_pass(model, args):
model.eval()
model(dummy_input)
for model in models:
sim = QuantizationSimModel(model, dummy_input)
sim.compute_encodings(forward_pass, None)
with torch.no_grad():
sim.model.recurrent.weight_ih_l0[0][0] = 1
edited_weight = sim.model.recurrent.weight_ih_l0.detach().clone()
assert (not torch.equal(edited_weight, sim.model.recurrent.module_to_quantize.weight_ih_l0))
sim.export('./data', 'recurrent_save', dummy_input)
exported_model = torch.load('./data/recurrent_save.pth')
assert isinstance(exported_model.recurrent, (torch.nn.RNN, torch.nn.LSTM, torch.nn.GRU))
assert torch.equal(edited_weight, exported_model.recurrent.weight_ih_l0)
with open('./data/recurrent_save.encodings') as f:
encodings = json.load(f)
assert (8 == len(encodings['activation_encodings']))
assert (4 == len(encodings['param_encodings']))
os.remove('./data/recurrent_save.pth')
os.remove('./data/recurrent_save.onnx')
os.remove('./data/recurrent_save.encodings')<|docstring|>Test export functionality with recurrent models<|endoftext|> |
852fe40d60720f57f8e27d3ba17f2e2c29b6c82446adf6b52688d323819efcf1 | def test_set_and_freeze_param_encoding(self):
' Test set and freeze parameter encoding '
conv1 = torch.nn.Conv2d(4, 4, 1)
quant_module = StaticGridQuantWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
param_encodings = {'conv1.weight': [{'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}]}
quant_module.set_and_freeze_param_encoding('conv1', param_encodings)
assert (quant_module.param_quantizers['weight'].encoding.bw == 4)
assert (quant_module.param_quantizers['weight'].encoding.offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding.delta == 0.038)
assert (not quant_module.param_quantizers['weight'].use_symmetric_encodings)
assert (quant_module.param_quantizers['weight'].bitwidth == 4)
quant_module.reset_encodings()
assert quant_module.param_quantizers['weight'].encoding | Test set and freeze parameter encoding | TrainingExtensions/torch/test/python/test_quantizer.py | test_set_and_freeze_param_encoding | lcybuzz/aimet | 945 | python | def test_set_and_freeze_param_encoding(self):
' '
conv1 = torch.nn.Conv2d(4, 4, 1)
quant_module = StaticGridQuantWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
param_encodings = {'conv1.weight': [{'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}]}
quant_module.set_and_freeze_param_encoding('conv1', param_encodings)
assert (quant_module.param_quantizers['weight'].encoding.bw == 4)
assert (quant_module.param_quantizers['weight'].encoding.offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding.delta == 0.038)
assert (not quant_module.param_quantizers['weight'].use_symmetric_encodings)
assert (quant_module.param_quantizers['weight'].bitwidth == 4)
quant_module.reset_encodings()
assert quant_module.param_quantizers['weight'].encoding | def test_set_and_freeze_param_encoding(self):
' '
conv1 = torch.nn.Conv2d(4, 4, 1)
quant_module = StaticGridQuantWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
param_encodings = {'conv1.weight': [{'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}]}
quant_module.set_and_freeze_param_encoding('conv1', param_encodings)
assert (quant_module.param_quantizers['weight'].encoding.bw == 4)
assert (quant_module.param_quantizers['weight'].encoding.offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding.delta == 0.038)
assert (not quant_module.param_quantizers['weight'].use_symmetric_encodings)
assert (quant_module.param_quantizers['weight'].bitwidth == 4)
quant_module.reset_encodings()
assert quant_module.param_quantizers['weight'].encoding<|docstring|>Test set and freeze parameter encoding<|endoftext|> |
aeaad972f3bacf7cad608a472c14436d10f6084cede8901ef1862c8ac297fd57 | def test_set_and_freeze_param_encoding_per_channel(self):
' Test set and freeze parameter encoding for per-channel encodings '
conv1 = torch.nn.Conv2d(4, 4, 1)
quant_module = StaticGridQuantWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
quant_module.enable_per_channel_quantization()
param_encodings = {'conv1.weight': [{'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}]}
quant_module.set_and_freeze_param_encoding('conv1', param_encodings)
assert (len(quant_module.param_quantizers['weight'].encoding) == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[0].delta == 0.038)
assert (quant_module.param_quantizers['weight'].encoding[3].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[3].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[3].delta == 0.038)
assert (not quant_module.param_quantizers['weight'].use_symmetric_encodings)
assert (quant_module.param_quantizers['weight'].bitwidth == 4)
quant_module.reset_encodings()
assert (len(quant_module.param_quantizers['weight'].encoding) == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[0].delta == 0.038)
assert (quant_module.param_quantizers['weight'].encoding[3].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[3].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[3].delta == 0.038) | Test set and freeze parameter encoding for per-channel encodings | TrainingExtensions/torch/test/python/test_quantizer.py | test_set_and_freeze_param_encoding_per_channel | lcybuzz/aimet | 945 | python | def test_set_and_freeze_param_encoding_per_channel(self):
' '
conv1 = torch.nn.Conv2d(4, 4, 1)
quant_module = StaticGridQuantWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
quant_module.enable_per_channel_quantization()
param_encodings = {'conv1.weight': [{'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}]}
quant_module.set_and_freeze_param_encoding('conv1', param_encodings)
assert (len(quant_module.param_quantizers['weight'].encoding) == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[0].delta == 0.038)
assert (quant_module.param_quantizers['weight'].encoding[3].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[3].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[3].delta == 0.038)
assert (not quant_module.param_quantizers['weight'].use_symmetric_encodings)
assert (quant_module.param_quantizers['weight'].bitwidth == 4)
quant_module.reset_encodings()
assert (len(quant_module.param_quantizers['weight'].encoding) == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[0].delta == 0.038)
assert (quant_module.param_quantizers['weight'].encoding[3].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[3].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[3].delta == 0.038) | def test_set_and_freeze_param_encoding_per_channel(self):
' '
conv1 = torch.nn.Conv2d(4, 4, 1)
quant_module = StaticGridQuantWrapper(conv1, weight_bw=8, activation_bw=8, round_mode='nearest', quant_scheme=QuantScheme.post_training_tf_enhanced, data_type=QuantizationDataType.int)
quant_module.enable_per_channel_quantization()
param_encodings = {'conv1.weight': [{'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}, {'bitwidth': 4, 'is_symmetric': 'False', 'max': 0.3, 'min': (- 0.2), 'offset': (- 7.0), 'scale': 0.038}]}
quant_module.set_and_freeze_param_encoding('conv1', param_encodings)
assert (len(quant_module.param_quantizers['weight'].encoding) == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[0].delta == 0.038)
assert (quant_module.param_quantizers['weight'].encoding[3].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[3].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[3].delta == 0.038)
assert (not quant_module.param_quantizers['weight'].use_symmetric_encodings)
assert (quant_module.param_quantizers['weight'].bitwidth == 4)
quant_module.reset_encodings()
assert (len(quant_module.param_quantizers['weight'].encoding) == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[0].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[0].delta == 0.038)
assert (quant_module.param_quantizers['weight'].encoding[3].bw == 4)
assert (quant_module.param_quantizers['weight'].encoding[3].offset == (- 7.0))
assert (quant_module.param_quantizers['weight'].encoding[3].delta == 0.038)<|docstring|>Test set and freeze parameter encoding for per-channel encodings<|endoftext|> |
e1f0883537493e7c94e96902f85caa7f90009dc687c1ad92372949ac8eb57cc1 | def test_compute_encoding_with_given_bitwidth(self):
'\n Test functionality to compute encoding for given bitwidth\n '
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([1.203197181224823, 0], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert ((- 1.2031972414) == round(encoding_dict['min'], 10))
assert (1.2031972408 == round(encoding_dict['max'], 10))
assert (round(encoding_dict['scale'], 14) == 5.6028e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([0.7796169519533523, (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert ((- 0.9791506533) == round(encoding_dict['min'], 10))
assert (0.9791506529 == round(encoding_dict['max'], 10))
assert (round(encoding_dict['scale'], 14) == 4.5595e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([(- 0.7796169519533523), (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert (round(encoding_dict['scale'], 14) == 4.5595e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([(- 0.7796169519533523), (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.float)
assert (16 == encoding_dict['bitwidth'])
assert ('float' == encoding_dict['dtype']) | Test functionality to compute encoding for given bitwidth | TrainingExtensions/torch/test/python/test_quantizer.py | test_compute_encoding_with_given_bitwidth | lcybuzz/aimet | 945 | python | def test_compute_encoding_with_given_bitwidth(self):
'\n \n '
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([1.203197181224823, 0], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert ((- 1.2031972414) == round(encoding_dict['min'], 10))
assert (1.2031972408 == round(encoding_dict['max'], 10))
assert (round(encoding_dict['scale'], 14) == 5.6028e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([0.7796169519533523, (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert ((- 0.9791506533) == round(encoding_dict['min'], 10))
assert (0.9791506529 == round(encoding_dict['max'], 10))
assert (round(encoding_dict['scale'], 14) == 4.5595e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([(- 0.7796169519533523), (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert (round(encoding_dict['scale'], 14) == 4.5595e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([(- 0.7796169519533523), (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.float)
assert (16 == encoding_dict['bitwidth'])
assert ('float' == encoding_dict['dtype']) | def test_compute_encoding_with_given_bitwidth(self):
'\n \n '
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([1.203197181224823, 0], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert ((- 1.2031972414) == round(encoding_dict['min'], 10))
assert (1.2031972408 == round(encoding_dict['max'], 10))
assert (round(encoding_dict['scale'], 14) == 5.6028e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([0.7796169519533523, (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert ((- 0.9791506533) == round(encoding_dict['min'], 10))
assert (0.9791506529 == round(encoding_dict['max'], 10))
assert (round(encoding_dict['scale'], 14) == 4.5595e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([(- 0.7796169519533523), (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.int)
assert ((- 2147483648) == encoding_dict['offset'])
assert (round(encoding_dict['scale'], 14) == 4.5595e-10)
encoding_dict = QuantizationSimModel.generate_symmetric_encoding_dict_for_disabled_param(torch.as_tensor(np.array([(- 0.7796169519533523), (- 0.9791506528745285)], dtype='float32')), data_type=QuantizationDataType.float)
assert (16 == encoding_dict['bitwidth'])
assert ('float' == encoding_dict['dtype'])<|docstring|>Test functionality to compute encoding for given bitwidth<|endoftext|> |
a6877ab1ce8b9bae2936c668d97f2b44c8aa7c038a7cece06d4b36aa8aafc387 | def test_export_dict_input_output(self):
' test export functionality on dictionary input and output '
dummy_input = {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10)}
model = InputOutputDictModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.mul1.output_quantizers[0].enabled = True
sim.model.mul2.output_quantizers[0].enabled = True
sim.model.mul3.output_quantizers[0].enabled = True
sim.compute_encodings(forward_pass, None)
o_names = ['ab', 'bc', 'ca']
sim.export('./data/', 'dict_input_output_model', dummy_input, onnx_export_args=OnnxExportApiArgs(input_names=list(dummy_input.keys()), output_names=o_names, opset_version=12))
with open('./data/dict_input_output_model.encodings') as json_file:
encoding_data = json.load(json_file)
print(encoding_data)
onnx_model = onnx.load('./data/dict_input_output_model.onnx')
for inp in onnx_model.graph.input:
assert (inp.name in ['a', 'b', 'c'])
for (exp, act) in zip(o_names, onnx_model.graph.output):
assert (exp == act.name)
for tensor_name in encoding_data['activation_encodings'].keys():
assert (tensor_name in o_names) | test export functionality on dictionary input and output | TrainingExtensions/torch/test/python/test_quantizer.py | test_export_dict_input_output | lcybuzz/aimet | 945 | python | def test_export_dict_input_output(self):
' '
dummy_input = {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10)}
model = InputOutputDictModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.mul1.output_quantizers[0].enabled = True
sim.model.mul2.output_quantizers[0].enabled = True
sim.model.mul3.output_quantizers[0].enabled = True
sim.compute_encodings(forward_pass, None)
o_names = ['ab', 'bc', 'ca']
sim.export('./data/', 'dict_input_output_model', dummy_input, onnx_export_args=OnnxExportApiArgs(input_names=list(dummy_input.keys()), output_names=o_names, opset_version=12))
with open('./data/dict_input_output_model.encodings') as json_file:
encoding_data = json.load(json_file)
print(encoding_data)
onnx_model = onnx.load('./data/dict_input_output_model.onnx')
for inp in onnx_model.graph.input:
assert (inp.name in ['a', 'b', 'c'])
for (exp, act) in zip(o_names, onnx_model.graph.output):
assert (exp == act.name)
for tensor_name in encoding_data['activation_encodings'].keys():
assert (tensor_name in o_names) | def test_export_dict_input_output(self):
' '
dummy_input = {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10)}
model = InputOutputDictModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.mul1.output_quantizers[0].enabled = True
sim.model.mul2.output_quantizers[0].enabled = True
sim.model.mul3.output_quantizers[0].enabled = True
sim.compute_encodings(forward_pass, None)
o_names = ['ab', 'bc', 'ca']
sim.export('./data/', 'dict_input_output_model', dummy_input, onnx_export_args=OnnxExportApiArgs(input_names=list(dummy_input.keys()), output_names=o_names, opset_version=12))
with open('./data/dict_input_output_model.encodings') as json_file:
encoding_data = json.load(json_file)
print(encoding_data)
onnx_model = onnx.load('./data/dict_input_output_model.onnx')
for inp in onnx_model.graph.input:
assert (inp.name in ['a', 'b', 'c'])
for (exp, act) in zip(o_names, onnx_model.graph.output):
assert (exp == act.name)
for tensor_name in encoding_data['activation_encodings'].keys():
assert (tensor_name in o_names)<|docstring|>test export functionality on dictionary input and output<|endoftext|> |
7c8762a0969bc849381a4e1be41bac5dd50af5f3dbc93ccab9532454b5d42736 | def test_compute_encoding_fp16(self):
'\n Test encodings generated for fp16\n '
dummy_input = {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10)}
model = InputOutputDictModel()
sim = QuantizationSimModel(model, default_output_bw=16, default_param_bw=16, dummy_input=dummy_input, default_data_type=QuantizationDataType.float)
quantizer = sim.model.mul1.input_quantizer
enc_dict = sim._create_encoding_dict(encoding=None, quantizer=quantizer)
assert (enc_dict['dtype'] == 'float')
assert (enc_dict['bitwidth'] == 16)
assert ('min' not in enc_dict)
assert ('max' not in enc_dict)
assert ('scale' not in enc_dict)
assert ('offset' not in enc_dict)
assert ('is_symmetric' not in enc_dict) | Test encodings generated for fp16 | TrainingExtensions/torch/test/python/test_quantizer.py | test_compute_encoding_fp16 | lcybuzz/aimet | 945 | python | def test_compute_encoding_fp16(self):
'\n \n '
dummy_input = {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10)}
model = InputOutputDictModel()
sim = QuantizationSimModel(model, default_output_bw=16, default_param_bw=16, dummy_input=dummy_input, default_data_type=QuantizationDataType.float)
quantizer = sim.model.mul1.input_quantizer
enc_dict = sim._create_encoding_dict(encoding=None, quantizer=quantizer)
assert (enc_dict['dtype'] == 'float')
assert (enc_dict['bitwidth'] == 16)
assert ('min' not in enc_dict)
assert ('max' not in enc_dict)
assert ('scale' not in enc_dict)
assert ('offset' not in enc_dict)
assert ('is_symmetric' not in enc_dict) | def test_compute_encoding_fp16(self):
'\n \n '
dummy_input = {'a': torch.randn(1, 10, 10, 10), 'b': torch.randn(1, 10, 10, 10), 'c': torch.randn(1, 10, 10, 10)}
model = InputOutputDictModel()
sim = QuantizationSimModel(model, default_output_bw=16, default_param_bw=16, dummy_input=dummy_input, default_data_type=QuantizationDataType.float)
quantizer = sim.model.mul1.input_quantizer
enc_dict = sim._create_encoding_dict(encoding=None, quantizer=quantizer)
assert (enc_dict['dtype'] == 'float')
assert (enc_dict['bitwidth'] == 16)
assert ('min' not in enc_dict)
assert ('max' not in enc_dict)
assert ('scale' not in enc_dict)
assert ('offset' not in enc_dict)
assert ('is_symmetric' not in enc_dict)<|docstring|>Test encodings generated for fp16<|endoftext|> |
54fe067e8ca4127acb043344570ca6d328dea91ad7faa8ceec0834d7649565f5 | def test_mapping_encoding_for_torch_module_with_multiple_onnx_ops(self):
'\n Test the input and output encoding map to input/output at subgraph level when atorch module generates\n multiple onnx ops i.e. a sub-graph\n '
dummy_input = torch.randn(1, 4, 256, 512)
model = SoftMaxAvgPoolModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.sfmax.output_quantizers[0].enabled = True
sim.model.sfmax.input_quantizers[0].enabled = True
sim.model.avgpool.output_quantizers[0].enabled = True
sim.model.avgpool.input_quantizers[0].enabled = True
sim.compute_encodings(forward_pass, None)
sim.export('./data', 'sfmaxavgpool_model', dummy_input)
with open('./data/sfmaxavgpool_model.encodings') as json_file:
encoding_data = json.load(json_file)
assert (not set(encoding_data['activation_encodings'].keys()).symmetric_difference(('4', '9', 't.1'))) | Test the input and output encoding map to input/output at subgraph level when atorch module generates
multiple onnx ops i.e. a sub-graph | TrainingExtensions/torch/test/python/test_quantizer.py | test_mapping_encoding_for_torch_module_with_multiple_onnx_ops | lcybuzz/aimet | 945 | python | def test_mapping_encoding_for_torch_module_with_multiple_onnx_ops(self):
'\n Test the input and output encoding map to input/output at subgraph level when atorch module generates\n multiple onnx ops i.e. a sub-graph\n '
dummy_input = torch.randn(1, 4, 256, 512)
model = SoftMaxAvgPoolModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.sfmax.output_quantizers[0].enabled = True
sim.model.sfmax.input_quantizers[0].enabled = True
sim.model.avgpool.output_quantizers[0].enabled = True
sim.model.avgpool.input_quantizers[0].enabled = True
sim.compute_encodings(forward_pass, None)
sim.export('./data', 'sfmaxavgpool_model', dummy_input)
with open('./data/sfmaxavgpool_model.encodings') as json_file:
encoding_data = json.load(json_file)
assert (not set(encoding_data['activation_encodings'].keys()).symmetric_difference(('4', '9', 't.1'))) | def test_mapping_encoding_for_torch_module_with_multiple_onnx_ops(self):
'\n Test the input and output encoding map to input/output at subgraph level when atorch module generates\n multiple onnx ops i.e. a sub-graph\n '
dummy_input = torch.randn(1, 4, 256, 512)
model = SoftMaxAvgPoolModel()
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(dummy_input)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.model.sfmax.output_quantizers[0].enabled = True
sim.model.sfmax.input_quantizers[0].enabled = True
sim.model.avgpool.output_quantizers[0].enabled = True
sim.model.avgpool.input_quantizers[0].enabled = True
sim.compute_encodings(forward_pass, None)
sim.export('./data', 'sfmaxavgpool_model', dummy_input)
with open('./data/sfmaxavgpool_model.encodings') as json_file:
encoding_data = json.load(json_file)
assert (not set(encoding_data['activation_encodings'].keys()).symmetric_difference(('4', '9', 't.1')))<|docstring|>Test the input and output encoding map to input/output at subgraph level when atorch module generates
multiple onnx ops i.e. a sub-graph<|endoftext|> |
e304e7ab8ff823d712fa8b70d1822bc254af204d3a910e882b0c563e36f784a3 | def test_model_with_two_inputs(self):
'Model with more than 1 input'
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.training_range_learning_with_tf_init, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
assert sim.model.conv1_a.output_quantizer.encoding
forward_pass(sim.model, None) | Model with more than 1 input | TrainingExtensions/torch/test/python/test_quantizer.py | test_model_with_two_inputs | lcybuzz/aimet | 945 | python | def test_model_with_two_inputs(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.training_range_learning_with_tf_init, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
assert sim.model.conv1_a.output_quantizer.encoding
forward_pass(sim.model, None) | def test_model_with_two_inputs(self):
dummy_input = (torch.rand(32, 1, 28, 28), torch.rand(32, 1, 28, 28))
def forward_pass(model, args):
model.eval()
with torch.no_grad():
model(*dummy_input)
model = ModelWithTwoInputs()
sim = QuantizationSimModel(model, quant_scheme=QuantScheme.training_range_learning_with_tf_init, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
assert sim.model.conv1_a.output_quantizer.encoding
forward_pass(sim.model, None)<|docstring|>Model with more than 1 input<|endoftext|> |
1fb93c178813adf609d3352eea8f34b3212cebb81091463a9bc290d6421e9be3 | def test_export_prelu_weight_encoding(self):
' Test that prelu weight is exported correctly '
model = PreluModel()
dummy_input = torch.rand(1, 3, 8, 8)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings((lambda model, _: model(dummy_input)), None)
sim.export('./data', 'prelu_model', dummy_input=dummy_input)
with open('./data/prelu_model.encodings') as json_file:
encoding_data = json.load(json_file)
assert ('prelu.weight' in encoding_data['param_encodings'].keys()) | Test that prelu weight is exported correctly | TrainingExtensions/torch/test/python/test_quantizer.py | test_export_prelu_weight_encoding | lcybuzz/aimet | 945 | python | def test_export_prelu_weight_encoding(self):
' '
model = PreluModel()
dummy_input = torch.rand(1, 3, 8, 8)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings((lambda model, _: model(dummy_input)), None)
sim.export('./data', 'prelu_model', dummy_input=dummy_input)
with open('./data/prelu_model.encodings') as json_file:
encoding_data = json.load(json_file)
assert ('prelu.weight' in encoding_data['param_encodings'].keys()) | def test_export_prelu_weight_encoding(self):
' '
model = PreluModel()
dummy_input = torch.rand(1, 3, 8, 8)
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings((lambda model, _: model(dummy_input)), None)
sim.export('./data', 'prelu_model', dummy_input=dummy_input)
with open('./data/prelu_model.encodings') as json_file:
encoding_data = json.load(json_file)
assert ('prelu.weight' in encoding_data['param_encodings'].keys())<|docstring|>Test that prelu weight is exported correctly<|endoftext|> |
052fcc00661cfee0195717945c061f68ee877580e6ddb3cf849b6b967fa873aa | def test_transformer_mask_override(self):
'\n test logic to override mask for a custom block with mask op\n :return:\n '
class AttnBlock(nn.Module):
def __init__(self):
super(AttnBlock, self).__init__()
self.add = elementwise_ops.Add()
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
x = self.add(x1, x2)
return self.softmax(x)
class DummyAttnBlockModel(nn.Module):
def __init__(self):
super(DummyAttnBlockModel, self).__init__()
self.block = AttnBlock()
def forward(self, x1, x2):
return self.block(x1, x2)
dummy_input = (torch.FloatTensor(32, 1, 100, 100).uniform_((- 6000), (- 4000)), torch.FloatTensor(32, 1, 100, 100).uniform_((- 5000), (- 4000)))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
model = DummyAttnBlockModel()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
old_encoding_min = sim.model.block.add.output_quantizer.encoding.min
transformer_utils.register_attention_mask_override('AttnBlock', 'add')
sim.compute_encodings(forward_pass, None)
new_encoding_min = sim.model.block.add.output_quantizer.encoding.min
assert (old_encoding_min != new_encoding_min)
assert (new_encoding_min == (- 6)) | test logic to override mask for a custom block with mask op
:return: | TrainingExtensions/torch/test/python/test_quantizer.py | test_transformer_mask_override | lcybuzz/aimet | 945 | python | def test_transformer_mask_override(self):
'\n test logic to override mask for a custom block with mask op\n :return:\n '
class AttnBlock(nn.Module):
def __init__(self):
super(AttnBlock, self).__init__()
self.add = elementwise_ops.Add()
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
x = self.add(x1, x2)
return self.softmax(x)
class DummyAttnBlockModel(nn.Module):
def __init__(self):
super(DummyAttnBlockModel, self).__init__()
self.block = AttnBlock()
def forward(self, x1, x2):
return self.block(x1, x2)
dummy_input = (torch.FloatTensor(32, 1, 100, 100).uniform_((- 6000), (- 4000)), torch.FloatTensor(32, 1, 100, 100).uniform_((- 5000), (- 4000)))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
model = DummyAttnBlockModel()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
old_encoding_min = sim.model.block.add.output_quantizer.encoding.min
transformer_utils.register_attention_mask_override('AttnBlock', 'add')
sim.compute_encodings(forward_pass, None)
new_encoding_min = sim.model.block.add.output_quantizer.encoding.min
assert (old_encoding_min != new_encoding_min)
assert (new_encoding_min == (- 6)) | def test_transformer_mask_override(self):
'\n test logic to override mask for a custom block with mask op\n :return:\n '
class AttnBlock(nn.Module):
def __init__(self):
super(AttnBlock, self).__init__()
self.add = elementwise_ops.Add()
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, x1, x2):
x = self.add(x1, x2)
return self.softmax(x)
class DummyAttnBlockModel(nn.Module):
def __init__(self):
super(DummyAttnBlockModel, self).__init__()
self.block = AttnBlock()
def forward(self, x1, x2):
return self.block(x1, x2)
dummy_input = (torch.FloatTensor(32, 1, 100, 100).uniform_((- 6000), (- 4000)), torch.FloatTensor(32, 1, 100, 100).uniform_((- 5000), (- 4000)))
def forward_pass(sim_model, _):
sim_model.eval()
with torch.no_grad():
sim_model(*dummy_input)
model = DummyAttnBlockModel()
sim = QuantizationSimModel(model, dummy_input=dummy_input)
sim.compute_encodings(forward_pass, None)
old_encoding_min = sim.model.block.add.output_quantizer.encoding.min
transformer_utils.register_attention_mask_override('AttnBlock', 'add')
sim.compute_encodings(forward_pass, None)
new_encoding_min = sim.model.block.add.output_quantizer.encoding.min
assert (old_encoding_min != new_encoding_min)
assert (new_encoding_min == (- 6))<|docstring|>test logic to override mask for a custom block with mask op
:return:<|endoftext|> |
777150cc1ad0c1e04f6807fecb58d4f52bfd204bcb9e98c2196050bc3240b36e | def __init__(self, *args, **kwargs):
'Initialize problem report object.'
super().__init__(*args, **kwargs) | Initialize problem report object. | aries_cloudagent/protocols/issue_credential/v1_0/messages/credential_problem_report.py | __init__ | LisandroV/aries-cloudagent-python | 1 | python | def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) | def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)<|docstring|>Initialize problem report object.<|endoftext|> |
d4cac6e8f3bb7c558aef2ece87e971e9be0d92b22254e887d20445c7ba8e3b50 | @validates_schema
def validate_fields(self, data, **kwargs):
'\n Validate schema fields.\n\n Args:\n data: The data to validate\n\n '
if (data.get('description', {}).get('code', '') != ProblemReportReason.ISSUANCE_ABANDONED.value):
raise ValidationError(f'Value for description.code must be {ProblemReportReason.ISSUANCE_ABANDONED.value}') | Validate schema fields.
Args:
data: The data to validate | aries_cloudagent/protocols/issue_credential/v1_0/messages/credential_problem_report.py | validate_fields | LisandroV/aries-cloudagent-python | 1 | python | @validates_schema
def validate_fields(self, data, **kwargs):
'\n Validate schema fields.\n\n Args:\n data: The data to validate\n\n '
if (data.get('description', {}).get('code', ) != ProblemReportReason.ISSUANCE_ABANDONED.value):
raise ValidationError(f'Value for description.code must be {ProblemReportReason.ISSUANCE_ABANDONED.value}') | @validates_schema
def validate_fields(self, data, **kwargs):
'\n Validate schema fields.\n\n Args:\n data: The data to validate\n\n '
if (data.get('description', {}).get('code', ) != ProblemReportReason.ISSUANCE_ABANDONED.value):
raise ValidationError(f'Value for description.code must be {ProblemReportReason.ISSUANCE_ABANDONED.value}')<|docstring|>Validate schema fields.
Args:
data: The data to validate<|endoftext|> |
913cc7056a8b1438bb41be84e2517358cdab3d1c68967597d0f151e9177dbe68 | async def handle_manage_servers(self, message):
'Updates server info and adds new servers'
if (not self.get_server(name=message.server.name)):
real_server = self.get_server(server=message.server)
if real_server:
real_server.name = message.server.name
storage.write_server_data(real_server)
else:
new_server = server.Server(name=message.server.name, id=message.server.id, mods=[message.server.owner.id])
self.local_servers.append(new_server)
storage.write_server(new_server) | Updates server info and adds new servers | discordbot.py | handle_manage_servers | CantSayIHave/OllieBotCore_v3 | 4 | python | async def handle_manage_servers(self, message):
if (not self.get_server(name=message.server.name)):
real_server = self.get_server(server=message.server)
if real_server:
real_server.name = message.server.name
storage.write_server_data(real_server)
else:
new_server = server.Server(name=message.server.name, id=message.server.id, mods=[message.server.owner.id])
self.local_servers.append(new_server)
storage.write_server(new_server) | async def handle_manage_servers(self, message):
if (not self.get_server(name=message.server.name)):
real_server = self.get_server(server=message.server)
if real_server:
real_server.name = message.server.name
storage.write_server_data(real_server)
else:
new_server = server.Server(name=message.server.name, id=message.server.id, mods=[message.server.owner.id])
self.local_servers.append(new_server)
storage.write_server(new_server)<|docstring|>Updates server info and adds new servers<|endoftext|> |
fde356db68769963ecbb65569f5d8271f4a0d781408fb5a323095929a2b6f2b8 | def test_high_perm(self, func):
'Decorator for generic server-based high permission test\n\n Passes found :class:`Server` object as first arg, expects a :class:`Context`\n from above\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
(await self.send_message(ctx.message.author, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
if (not self.has_high_permissions(ctx.message.author, in_server)):
(await self.send_message(ctx.message.channel, "Sorry, but you don't have access to this command"))
return
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator | Decorator for generic server-based high permission test
Passes found :class:`Server` object as first arg, expects a :class:`Context`
from above | discordbot.py | test_high_perm | CantSayIHave/OllieBotCore_v3 | 4 | python | def test_high_perm(self, func):
'Decorator for generic server-based high permission test\n\n Passes found :class:`Server` object as first arg, expects a :class:`Context`\n from above\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
(await self.send_message(ctx.message.author, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
if (not self.has_high_permissions(ctx.message.author, in_server)):
(await self.send_message(ctx.message.channel, "Sorry, but you don't have access to this command"))
return
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator | def test_high_perm(self, func):
'Decorator for generic server-based high permission test\n\n Passes found :class:`Server` object as first arg, expects a :class:`Context`\n from above\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
(await self.send_message(ctx.message.author, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
if (not self.has_high_permissions(ctx.message.author, in_server)):
(await self.send_message(ctx.message.channel, "Sorry, but you don't have access to this command"))
return
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator<|docstring|>Decorator for generic server-based high permission test
Passes found :class:`Server` object as first arg, expects a :class:`Context`
from above<|endoftext|> |
5359f2a746c1a485b1421dfa049ae6f4f06aa5a7853761076af28b1ec211af9d | def test_server(self, func):
'Decorator for testing for server\n\n Passes found :class:`Server` object as second arg\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
(await self.send_message(ctx.message.author, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator | Decorator for testing for server
Passes found :class:`Server` object as second arg | discordbot.py | test_server | CantSayIHave/OllieBotCore_v3 | 4 | python | def test_server(self, func):
'Decorator for testing for server\n\n Passes found :class:`Server` object as second arg\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
(await self.send_message(ctx.message.author, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator | def test_server(self, func):
'Decorator for testing for server\n\n Passes found :class:`Server` object as second arg\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
(await self.send_message(ctx.message.author, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator<|docstring|>Decorator for testing for server
Passes found :class:`Server` object as second arg<|endoftext|> |
a149e42cdce6e3c9936052086e745f4cf11f66b1845175b43f3b83eca983eb02 | def test_admin(self, func):
'Decorator for testing for server\n\n Passes found :class:`Server` object as second arg\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
if (not self.check_admin(ctx.message.author)):
return
(await self.send_message(ctx.message.channel, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator | Decorator for testing for server
Passes found :class:`Server` object as second arg | discordbot.py | test_admin | CantSayIHave/OllieBotCore_v3 | 4 | python | def test_admin(self, func):
'Decorator for testing for server\n\n Passes found :class:`Server` object as second arg\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
if (not self.check_admin(ctx.message.author)):
return
(await self.send_message(ctx.message.channel, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator | def test_admin(self, func):
'Decorator for testing for server\n\n Passes found :class:`Server` object as second arg\n\n '
async def decorator(ctx, *args, **kwargs):
if (not ctx.message.server):
if (not self.check_admin(ctx.message.author)):
return
(await self.send_message(ctx.message.channel, 'Sorry, but this command is only accessible from a server'))
return
in_server = self.get_server(server=ctx.message.server)
(await func(in_server, ctx, *args, **kwargs))
decorator.__name__ = func.__name__
sig = inspect.signature(func)
decorator.__signature__ = sig.replace(parameters=tuple(sig.parameters.values())[1:])
return decorator<|docstring|>Decorator for testing for server
Passes found :class:`Server` object as second arg<|endoftext|> |