body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
d136c8177a772f0738fdd383246db46419efedc01f764a59b07413aa6eb236ab | def create_electricity_market_for_fuel_prep(self):
' This function fills the electricity market that supplies battery charging operations\n and hydrogen production through electrolysis.\n '
try:
losses_to_low = float(self.bs.losses[self.country]['LV'])
except KeyError:
losses_to_low = float(self.bs.losses['RER']['LV'])
for (y, year) in enumerate(self.scope['year']):
m = np.array(self.mix[y]).reshape((- 1), 15, 1)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[self.elec_map[t]] for t in self.elec_map], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((m * (- 1)) * losses_to_low)
self.A[:, self.inputs[('transmission network construction, electricity, high voltage', 'CH', 'kilometer', 'transmission network, electricity, high voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))]] = ((6.58e-09 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('transmission network construction, electricity, medium voltage', 'CH', 'kilometer', 'transmission network, electricity, medium voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))]] = ((1.86e-08 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('transmission network construction, long-distance', 'UCTE', 'kilometer', 'transmission network, long-distance')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))]] = ((3.17e-10 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('distribution network construction, electricity, low voltage', 'CH', 'kilometer', 'distribution network, electricity, low voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))]] = ((8.74e-08 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('market for sulfur hexafluoride, liquid', 'RER', 'kilogram', 'sulfur hexafluoride, liquid')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))]] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)
self.A[:, self.inputs[('Sulfur hexafluoride', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))]] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low) | This function fills the electricity market that supplies battery charging operations
and hydrogen production through electrolysis. | carculator/inventory.py | create_electricity_market_for_fuel_prep | rena-nong/carculator | 1 | python | def create_electricity_market_for_fuel_prep(self):
' This function fills the electricity market that supplies battery charging operations\n and hydrogen production through electrolysis.\n '
try:
losses_to_low = float(self.bs.losses[self.country]['LV'])
except KeyError:
losses_to_low = float(self.bs.losses['RER']['LV'])
for (y, year) in enumerate(self.scope['year']):
m = np.array(self.mix[y]).reshape((- 1), 15, 1)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[self.elec_map[t]] for t in self.elec_map], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((m * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, electricity, high voltage', 'CH', 'kilometer', 'transmission network, electricity, high voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((6.58e-09 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, electricity, medium voltage', 'CH', 'kilometer', 'transmission network, electricity, medium voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((1.86e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, long-distance', 'UCTE', 'kilometer', 'transmission network, long-distance')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((3.17e-10 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('distribution network construction, electricity, low voltage', 'CH', 'kilometer', 'distribution network, electricity, low voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((8.74e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('market for sulfur hexafluoride, liquid', 'RER', 'kilogram', 'sulfur hexafluoride, liquid')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('Sulfur hexafluoride', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low) | def create_electricity_market_for_fuel_prep(self):
' This function fills the electricity market that supplies battery charging operations\n and hydrogen production through electrolysis.\n '
try:
losses_to_low = float(self.bs.losses[self.country]['LV'])
except KeyError:
losses_to_low = float(self.bs.losses['RER']['LV'])
for (y, year) in enumerate(self.scope['year']):
m = np.array(self.mix[y]).reshape((- 1), 15, 1)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[self.elec_map[t]] for t in self.elec_map], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((m * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, electricity, high voltage', 'CH', 'kilometer', 'transmission network, electricity, high voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((6.58e-09 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, electricity, medium voltage', 'CH', 'kilometer', 'transmission network, electricity, medium voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((1.86e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, long-distance', 'UCTE', 'kilometer', 'transmission network, long-distance')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((3.17e-10 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('distribution network construction, electricity, low voltage', 'CH', 'kilometer', 'distribution network, electricity, low voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = ((8.74e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('market for sulfur hexafluoride, liquid', 'RER', 'kilogram', 'sulfur hexafluoride, liquid')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('Sulfur hexafluoride', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for fuel preparation' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)<|docstring|>This function fills the electricity market that supplies battery charging operations
and hydrogen production through electrolysis.<|endoftext|> |
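The function above writes the year-specific electricity mix into the technosphere matrix `A` as negative, loss-corrected inputs to every "electricity market for fuel preparation" column. Below is a minimal standalone NumPy sketch of that `np.ix_` broadcast; all shapes, row/column indices and the loss factor are invented for illustration and are not the carculator API.

```python
import numpy as np

iterations, n_techs = 2, 15
A = np.zeros((iterations, 20, 10))           # (iterations, products, activities)
mix = np.random.dirichlet(np.ones(n_techs), size=iterations)  # technology shares, each row sums to 1
losses_to_low = 1.07                         # assumed ~7 % losses down to low voltage

tech_rows = list(range(n_techs))             # rows of the 15 electricity technologies
market_cols = [4, 5, 6]                      # columns of the per-year fuel-preparation markets

m = mix.reshape(-1, n_techs, 1)              # (iterations, 15, 1) broadcasts across the market columns
A[np.ix_(np.arange(iterations), tech_rows, market_cols)] = m * -1 * losses_to_low

# every market column now draws one loss-corrected kWh worth of technology inputs
print(A[:, tech_rows, :][:, :, market_cols].sum(axis=1))   # ≈ -1.07 everywhere
```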
b6849d08b40dcab8a054435f4fb0d84689024d0cf3ab5c2840cb2c7d027236af | def create_electricity_market_for_battery_production(self):
'\n This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells\n :return:\n '
battery_origin = self.background_configuration['energy storage']['electric']['origin']
if (battery_origin != 'custom electricity mix'):
try:
losses_to_low = float(self.bs.losses[battery_origin]['LV'])
except KeyError:
losses_to_low = float(self.bs.losses['CN']['LV'])
if (battery_origin not in self.bs.electricity_mix.country.values):
print('The electricity mix for {} could not be found. Average Chinese electricity mix is used for battery manufacture instead.'.format(self.country))
battery_origin = 'CN'
mix_battery_manufacturing = self.bs.electricity_mix.sel(country=battery_origin, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=self.scope['year'], kwargs={'fill_value': 'extrapolate'}).values
else:
mix_battery_manufacturing = self.mix
losses_to_low = 1.1
for (y, year) in enumerate(self.scope['year']):
m = np.array(mix_battery_manufacturing[y]).reshape((- 1), 15, 1)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[self.elec_map[t]] for t in self.elec_map], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((m * losses_to_low) * (- 1))
self.A[:, self.inputs[('transmission network construction, electricity, high voltage', 'CH', 'kilometer', 'transmission network, electricity, high voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))]] = ((6.58e-09 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('transmission network construction, electricity, medium voltage', 'CH', 'kilometer', 'transmission network, electricity, medium voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))]] = ((1.86e-08 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('transmission network construction, long-distance', 'UCTE', 'kilometer', 'transmission network, long-distance')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))]] = ((3.17e-10 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('distribution network construction, electricity, low voltage', 'CH', 'kilometer', 'distribution network, electricity, low voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))]] = ((8.74e-08 * (- 1)) * losses_to_low)
self.A[:, self.inputs[('market for sulfur hexafluoride, liquid', 'RER', 'kilogram', 'sulfur hexafluoride, liquid')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))]] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)
self.A[:, self.inputs[('Sulfur hexafluoride', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))]] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low) | This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells
:return: | carculator/inventory.py | create_electricity_market_for_battery_production | rena-nong/carculator | 1 | python | def create_electricity_market_for_battery_production(self):
'\n This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells\n :return:\n '
battery_origin = self.background_configuration['energy storage']['electric']['origin']
if (battery_origin != 'custom electricity mix'):
try:
losses_to_low = float(self.bs.losses[battery_origin]['LV'])
except KeyError:
losses_to_low = float(self.bs.losses['CN']['LV'])
if (battery_origin not in self.bs.electricity_mix.country.values):
print('The electricity mix for {} could not be found. Average Chinese electricity mix is used for battery manufacture instead.'.format(self.country))
battery_origin = 'CN'
mix_battery_manufacturing = self.bs.electricity_mix.sel(country=battery_origin, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=self.scope['year'], kwargs={'fill_value': 'extrapolate'}).values
else:
mix_battery_manufacturing = self.mix
losses_to_low = 1.1
for (y, year) in enumerate(self.scope['year']):
m = np.array(mix_battery_manufacturing[y]).reshape((- 1), 15, 1)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[self.elec_map[t]] for t in self.elec_map], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((m * losses_to_low) * (- 1))
self.A[(:, self.inputs[('transmission network construction, electricity, high voltage', 'CH', 'kilometer', 'transmission network, electricity, high voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((6.58e-09 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, electricity, medium voltage', 'CH', 'kilometer', 'transmission network, electricity, medium voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((1.86e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, long-distance', 'UCTE', 'kilometer', 'transmission network, long-distance')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((3.17e-10 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('distribution network construction, electricity, low voltage', 'CH', 'kilometer', 'distribution network, electricity, low voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((8.74e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('market for sulfur hexafluoride, liquid', 'RER', 'kilogram', 'sulfur hexafluoride, liquid')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('Sulfur hexafluoride', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low) | def create_electricity_market_for_battery_production(self):
'\n This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells\n :return:\n '
battery_origin = self.background_configuration['energy storage']['electric']['origin']
if (battery_origin != 'custom electricity mix'):
try:
losses_to_low = float(self.bs.losses[battery_origin]['LV'])
except KeyError:
losses_to_low = float(self.bs.losses['CN']['LV'])
if (battery_origin not in self.bs.electricity_mix.country.values):
print('The electricity mix for {} could not be found. Average Chinese electricity mix is used for battery manufacture instead.'.format(self.country))
battery_origin = 'CN'
mix_battery_manufacturing = self.bs.electricity_mix.sel(country=battery_origin, variable=['Hydro', 'Nuclear', 'Gas', 'Solar', 'Wind', 'Biomass', 'Coal', 'Oil', 'Geothermal', 'Waste', 'Biogas CCS', 'Biomass CCS', 'Coal CCS', 'Gas CCS', 'Wood CCS']).interp(year=self.scope['year'], kwargs={'fill_value': 'extrapolate'}).values
else:
mix_battery_manufacturing = self.mix
losses_to_low = 1.1
for (y, year) in enumerate(self.scope['year']):
m = np.array(mix_battery_manufacturing[y]).reshape((- 1), 15, 1)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[self.elec_map[t]] for t in self.elec_map], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((m * losses_to_low) * (- 1))
self.A[(:, self.inputs[('transmission network construction, electricity, high voltage', 'CH', 'kilometer', 'transmission network, electricity, high voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((6.58e-09 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, electricity, medium voltage', 'CH', 'kilometer', 'transmission network, electricity, medium voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((1.86e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('transmission network construction, long-distance', 'UCTE', 'kilometer', 'transmission network, long-distance')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((3.17e-10 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('distribution network construction, electricity, low voltage', 'CH', 'kilometer', 'distribution network, electricity, low voltage')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = ((8.74e-08 * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('market for sulfur hexafluoride, liquid', 'RER', 'kilogram', 'sulfur hexafluoride, liquid')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)
self.A[(:, self.inputs[('Sulfur hexafluoride', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('electricity market for energy storage production' in i[0]))])] = (((5.4e-08 + 2.99e-09) * (- 1)) * losses_to_low)<|docstring|>This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells
:return:<|endoftext|> |
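The battery-production market above selects a country's mix from a background xarray array and extrapolates it to the scope years. The following toy sketch mirrors that `.sel(...).interp(year=..., kwargs={"fill_value": "extrapolate"})` pattern; the data, technology names and years are invented and the real background dataset looks different.

```python
import numpy as np
import xarray as xr

techs = ["Hydro", "Nuclear", "Gas", "Solar", "Wind"]
mix = xr.DataArray(
    np.random.dirichlet(np.ones(len(techs)), size=(2, 3)),
    dims=("country", "year", "variable"),
    coords={"country": ["CN", "NO"], "year": [2010, 2020, 2030], "variable": techs},
)

battery_origin = "CN"
scope_years = [2025, 2040]                   # 2040 lies outside the data and is extrapolated
mix_battery_manufacturing = (
    mix.sel(country=battery_origin)
    .interp(year=scope_years, kwargs={"fill_value": "extrapolate"})
    .values
)
print(mix_battery_manufacturing.shape)       # (2, 5): one vector of shares per scope year
```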
45287ae3111093efbd0c7588e18c83555e8797190ab8f02124ed31f5cbbafe96 | def set_actual_range(self):
'\n Set the actual range considering the blend.\n Liquid bio-fuels and synthetic fuels typically have a lower calorific value. Hence, the need to recalculate\n the vehicle range.\n Modifies parameter `range` of `array` in place\n '
if {'ICEV-p', 'HEV-p', 'PHEV-p'}.intersection(set(self.scope['powertrain'])):
for (y, year) in enumerate(self.scope['year']):
share_primary = self.fuel_blends['petrol']['primary']['share'][y]
lhv_primary = self.fuel_blends['petrol']['primary']['lhv']
share_secondary = self.fuel_blends['petrol']['secondary']['share'][y]
lhv_secondary = self.fuel_blends['petrol']['secondary']['lhv']
if ('tertiary' in self.fuel_blends['petrol']):
share_tertiary = self.fuel_blends['petrol']['tertiary']['share'][y]
lhv_tertiary = self.fuel_blends['petrol']['tertiary']['lhv']
else:
share_tertiary = 0
lhv_tertiary = 0
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'], year, method='and')
self.array.values[self.array_inputs['range'], :, index] = ((((((self.array.values[self.array_inputs['fuel mass'], :, index] * share_primary) * lhv_primary) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_secondary) * lhv_secondary)) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_tertiary) * lhv_tertiary)) * 1000) / self.array.values[self.array_inputs['TtW energy'], :, index])
if {'ICEV-d', 'HEV-d', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
for (y, year) in enumerate(self.scope['year']):
share_primary = self.fuel_blends['diesel']['primary']['share'][y]
lhv_primary = self.fuel_blends['diesel']['primary']['lhv']
share_secondary = self.fuel_blends['diesel']['secondary']['share'][y]
lhv_secondary = self.fuel_blends['diesel']['secondary']['lhv']
if ('tertiary' in self.fuel_blends['diesel']):
share_tertiary = self.fuel_blends['diesel']['tertiary']['share'][y]
lhv_tertiary = self.fuel_blends['diesel']['tertiary']['lhv']
else:
share_tertiary = 0
lhv_tertiary = 0
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'], year, method='and')
self.array.values[self.array_inputs['range'], :, index] = ((((((self.array.values[self.array_inputs['fuel mass'], :, index] * share_primary) * lhv_primary) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_secondary) * lhv_secondary)) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_tertiary) * lhv_tertiary)) * 1000) / self.array.values[self.array_inputs['TtW energy'], :, index]) | Set the actual range considering the blend.
Liquid bio-fuels and synthetic fuels typically have a lower calorific value. Hence, the need to recalculate
the vehicle range.
Modifies parameter `range` of `array` in place | carculator/inventory.py | set_actual_range | rena-nong/carculator | 1 | python | def set_actual_range(self):
'\n Set the actual range considering the blend.\n Liquid bio-fuels and synthetic fuels typically have a lower calorific value. Hence, the need to recalculate\n the vehicle range.\n Modifies parameter `range` of `array` in place\n '
if {'ICEV-p', 'HEV-p', 'PHEV-p'}.intersection(set(self.scope['powertrain'])):
for (y, year) in enumerate(self.scope['year']):
share_primary = self.fuel_blends['petrol']['primary']['share'][y]
lhv_primary = self.fuel_blends['petrol']['primary']['lhv']
share_secondary = self.fuel_blends['petrol']['secondary']['share'][y]
lhv_secondary = self.fuel_blends['petrol']['secondary']['lhv']
if ('tertiary' in self.fuel_blends['petrol']):
share_tertiary = self.fuel_blends['petrol']['tertiary']['share'][y]
lhv_tertiary = self.fuel_blends['petrol']['tertiary']['lhv']
else:
share_tertiary = 0
lhv_tertiary = 0
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'], year, method='and')
self.array.values[self.array_inputs['range'], :, index] = ((((((self.array.values[self.array_inputs['fuel mass'], :, index] * share_primary) * lhv_primary) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_secondary) * lhv_secondary)) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_tertiary) * lhv_tertiary)) * 1000) / self.array.values[self.array_inputs['TtW energy'], :, index])
if {'ICEV-d', 'HEV-d', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
for (y, year) in enumerate(self.scope['year']):
share_primary = self.fuel_blends['diesel']['primary']['share'][y]
lhv_primary = self.fuel_blends['diesel']['primary']['lhv']
share_secondary = self.fuel_blends['diesel']['secondary']['share'][y]
lhv_secondary = self.fuel_blends['diesel']['secondary']['lhv']
if ('tertiary' in self.fuel_blends['diesel']):
share_tertiary = self.fuel_blends['diesel']['tertiary']['share'][y]
lhv_tertiary = self.fuel_blends['diesel']['tertiary']['lhv']
else:
share_tertiary = 0
lhv_tertiary = 0
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'], year, method='and')
self.array.values[self.array_inputs['range'], :, index] = ((((((self.array.values[self.array_inputs['fuel mass'], :, index] * share_primary) * lhv_primary) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_secondary) * lhv_secondary)) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_tertiary) * lhv_tertiary)) * 1000) / self.array.values[self.array_inputs['TtW energy'], :, index]) | def set_actual_range(self):
'\n Set the actual range considering the blend.\n Liquid bio-fuels and synthetic fuels typically have a lower calorific value. Hence, the need to recalculate\n the vehicle range.\n Modifies parameter `range` of `array` in place\n '
if {'ICEV-p', 'HEV-p', 'PHEV-p'}.intersection(set(self.scope['powertrain'])):
for (y, year) in enumerate(self.scope['year']):
share_primary = self.fuel_blends['petrol']['primary']['share'][y]
lhv_primary = self.fuel_blends['petrol']['primary']['lhv']
share_secondary = self.fuel_blends['petrol']['secondary']['share'][y]
lhv_secondary = self.fuel_blends['petrol']['secondary']['lhv']
if ('tertiary' in self.fuel_blends['petrol']):
share_tertiary = self.fuel_blends['petrol']['tertiary']['share'][y]
lhv_tertiary = self.fuel_blends['petrol']['tertiary']['lhv']
else:
share_tertiary = 0
lhv_tertiary = 0
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'], year, method='and')
self.array.values[self.array_inputs['range'], :, index] = ((((((self.array.values[self.array_inputs['fuel mass'], :, index] * share_primary) * lhv_primary) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_secondary) * lhv_secondary)) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_tertiary) * lhv_tertiary)) * 1000) / self.array.values[self.array_inputs['TtW energy'], :, index])
if {'ICEV-d', 'HEV-d', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
for (y, year) in enumerate(self.scope['year']):
share_primary = self.fuel_blends['diesel']['primary']['share'][y]
lhv_primary = self.fuel_blends['diesel']['primary']['lhv']
share_secondary = self.fuel_blends['diesel']['secondary']['share'][y]
lhv_secondary = self.fuel_blends['diesel']['secondary']['lhv']
if ('tertiary' in self.fuel_blends['diesel']):
share_tertiary = self.fuel_blends['diesel']['tertiary']['share'][y]
lhv_tertiary = self.fuel_blends['diesel']['tertiary']['lhv']
else:
share_tertiary = 0
lhv_tertiary = 0
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'], year, method='and')
self.array.values[self.array_inputs['range'], :, index] = ((((((self.array.values[self.array_inputs['fuel mass'], :, index] * share_primary) * lhv_primary) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_secondary) * lhv_secondary)) + ((self.array.values[self.array_inputs['fuel mass'], :, index] * share_tertiary) * lhv_tertiary)) * 1000) / self.array.values[self.array_inputs['TtW energy'], :, index])<|docstring|>Set the actual range considering the blend.
Liquid bio-fuels and synthetic fuels typically have a lower calorific value. Hence, the need to recalculate
the vehicle range.
Modifies parameter `range` of `array` in place<|endoftext|> |
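In words, the assignment above computes range as the blended on-board fuel energy divided by the tank-to-wheel energy demand. A worked example with purely illustrative numbers (not values taken from the model):

```python
fuel_mass = 43.0             # kg of fuel on board (illustrative)
ttw_energy = 2500.0          # tank-to-wheel demand in kJ/km (illustrative)
share_primary, lhv_primary = 0.9, 42.4      # fossil petrol, MJ/kg
share_secondary, lhv_secondary = 0.1, 26.8  # bioethanol component, MJ/kg
share_tertiary, lhv_tertiary = 0.0, 0.0     # no third component in this example

onboard_energy = fuel_mass * (
    share_primary * lhv_primary
    + share_secondary * lhv_secondary
    + share_tertiary * lhv_tertiary
)                            # MJ carried in the tank
vehicle_range = onboard_energy * 1000 / ttw_energy   # MJ -> kJ, then divide by kJ/km
print(round(vehicle_range))  # ~702 km
```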
094ba0d4eb48c25ba263488a2529367143767c5f3d59d9ee81469176152861b6 | def define_fuel_blends(self):
'\n This function defines fuel blends from what is passed in `background_configuration`.\n It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values\n and CO2 emission factors of the fuels used.\n :return:\n '
fuels_lhv = {'petrol': 42.4, 'bioethanol - wheat straw': 26.8, 'bioethanol - maize starch': 26.8, 'bioethanol - sugarbeet': 26.8, 'bioethanol - forest residues': 26.8, 'synthetic gasoline': 42.4, 'diesel': 42.8, 'biodiesel - cooking oil': 31.7, 'biodiesel - algae': 31.7, 'biodiesel - rapeseed oil': 31.7, 'biodiesel - palm oil': 31.7, 'synthetic diesel': 43.3, 'synthetic diesel - energy allocation': 43.3, 'cng': 55.5, 'biogas - sewage sludge': 55.5, 'biogas - biowaste': 55.5, 'syngas': 55.5}
fuels_CO2 = {'petrol': 3.18, 'bioethanol - wheat straw': 1.91, 'bioethanol - maize starch': 1.91, 'bioethanol - sugarbeet': 1.91, 'bioethanol - forest residues': 1.91, 'synthetic gasoline': 3.18, 'diesel': 3.14, 'biodiesel - cooking oil': 2.85, 'biodiesel - palm oil': 2.85, 'biodiesel - rapeseed oil': 2.85, 'biodiesel - algae': 2.85, 'synthetic diesel': 3.16, 'synthetic diesel - energy allocation': 3.16, 'cng': 2.65, 'biogas - sewage sludge': 2.65, 'biogas - biowaste': 2.65, 'syngas': 2.65}
if {'ICEV-p', 'HEV-p', 'PHEV-p'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'petrol'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[secondary], 'CO2': fuels_CO2[secondary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'ICEV-d', 'HEV-d', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'diesel'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[secondary], 'CO2': fuels_CO2[secondary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'ICEV-g'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'cng'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'FCEV'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'hydrogen'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share}, 'secondary': {'type': secondary, 'share': secondary_share}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share}
if {'BEV', 'PHEV-p', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'electricity'
self.create_fuel_markets(fuel_type) | This function defines fuel blends from what is passed in `background_configuration`.
It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values
and CO2 emission factors of the fuels used.
:return: | carculator/inventory.py | define_fuel_blends | rena-nong/carculator | 1 | python | def define_fuel_blends(self):
'\n This function defines fuel blends from what is passed in `background_configuration`.\n It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values\n and CO2 emission factors of the fuels used.\n :return:\n '
fuels_lhv = {'petrol': 42.4, 'bioethanol - wheat straw': 26.8, 'bioethanol - maize starch': 26.8, 'bioethanol - sugarbeet': 26.8, 'bioethanol - forest residues': 26.8, 'synthetic gasoline': 42.4, 'diesel': 42.8, 'biodiesel - cooking oil': 31.7, 'biodiesel - algae': 31.7, 'biodiesel - rapeseed oil': 31.7, 'biodiesel - palm oil': 31.7, 'synthetic diesel': 43.3, 'synthetic diesel - energy allocation': 43.3, 'cng': 55.5, 'biogas - sewage sludge': 55.5, 'biogas - biowaste': 55.5, 'syngas': 55.5}
fuels_CO2 = {'petrol': 3.18, 'bioethanol - wheat straw': 1.91, 'bioethanol - maize starch': 1.91, 'bioethanol - sugarbeet': 1.91, 'bioethanol - forest residues': 1.91, 'synthetic gasoline': 3.18, 'diesel': 3.14, 'biodiesel - cooking oil': 2.85, 'biodiesel - palm oil': 2.85, 'biodiesel - rapeseed oil': 2.85, 'biodiesel - algae': 2.85, 'synthetic diesel': 3.16, 'synthetic diesel - energy allocation': 3.16, 'cng': 2.65, 'biogas - sewage sludge': 2.65, 'biogas - biowaste': 2.65, 'syngas': 2.65}
if {'ICEV-p', 'HEV-p', 'PHEV-p'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'petrol'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[secondary], 'CO2': fuels_CO2[secondary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'ICEV-d', 'HEV-d', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'diesel'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[secondary], 'CO2': fuels_CO2[secondary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'ICEV-g'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'cng'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'FCEV'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'hydrogen'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share}, 'secondary': {'type': secondary, 'share': secondary_share}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share}
if {'BEV', 'PHEV-p', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'electricity'
self.create_fuel_markets(fuel_type) | def define_fuel_blends(self):
'\n This function defines fuel blends from what is passed in `background_configuration`.\n It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values\n and CO2 emission factors of the fuels used.\n :return:\n '
fuels_lhv = {'petrol': 42.4, 'bioethanol - wheat straw': 26.8, 'bioethanol - maize starch': 26.8, 'bioethanol - sugarbeet': 26.8, 'bioethanol - forest residues': 26.8, 'synthetic gasoline': 42.4, 'diesel': 42.8, 'biodiesel - cooking oil': 31.7, 'biodiesel - algae': 31.7, 'biodiesel - rapeseed oil': 31.7, 'biodiesel - palm oil': 31.7, 'synthetic diesel': 43.3, 'synthetic diesel - energy allocation': 43.3, 'cng': 55.5, 'biogas - sewage sludge': 55.5, 'biogas - biowaste': 55.5, 'syngas': 55.5}
fuels_CO2 = {'petrol': 3.18, 'bioethanol - wheat straw': 1.91, 'bioethanol - maize starch': 1.91, 'bioethanol - sugarbeet': 1.91, 'bioethanol - forest residues': 1.91, 'synthetic gasoline': 3.18, 'diesel': 3.14, 'biodiesel - cooking oil': 2.85, 'biodiesel - palm oil': 2.85, 'biodiesel - rapeseed oil': 2.85, 'biodiesel - algae': 2.85, 'synthetic diesel': 3.16, 'synthetic diesel - energy allocation': 3.16, 'cng': 2.65, 'biogas - sewage sludge': 2.65, 'biogas - biowaste': 2.65, 'syngas': 2.65}
if {'ICEV-p', 'HEV-p', 'PHEV-p'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'petrol'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[secondary], 'CO2': fuels_CO2[secondary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'ICEV-d', 'HEV-d', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'diesel'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[secondary], 'CO2': fuels_CO2[secondary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'ICEV-g'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'cng'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}, 'secondary': {'type': secondary, 'share': secondary_share, 'lhv': fuels_lhv[primary], 'CO2': fuels_CO2[primary]}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share, 'lhv': fuels_lhv[tertiary], 'CO2': fuels_CO2[tertiary]}
if {'FCEV'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'hydrogen'
(primary, secondary, primary_share, secondary_share, tertiary, tertiary_share) = self.find_fuel_shares(fuel_type)
self.create_fuel_markets(fuel_type, primary, secondary, tertiary, primary_share, secondary_share, tertiary_share)
self.fuel_blends[fuel_type] = {'primary': {'type': primary, 'share': primary_share}, 'secondary': {'type': secondary, 'share': secondary_share}}
if tertiary:
self.fuel_blends[fuel_type]['tertiary'] = {'type': tertiary, 'share': tertiary_share}
if {'BEV', 'PHEV-p', 'PHEV-d'}.intersection(set(self.scope['powertrain'])):
fuel_type = 'electricity'
self.create_fuel_markets(fuel_type)<|docstring|>This function defines fuel blends from what is passed in `background_configuration`.
It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values
and CO2 emission factors of the fuels used.
:return:<|endoftext|> |
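A stripped-down sketch of how one entry of `self.fuel_blends` is assembled from the LHV and CO2 lookup tables shown above; the blend shares per scope year are hypothetical.

```python
fuels_lhv = {"petrol": 42.4, "bioethanol - wheat straw": 26.8}
fuels_CO2 = {"petrol": 3.18, "bioethanol - wheat straw": 1.91}

primary, secondary = "petrol", "bioethanol - wheat straw"
primary_share, secondary_share = [0.95, 0.9], [0.05, 0.1]   # one share per scope year (assumed)

fuel_blends = {
    "petrol": {
        "primary": {"type": primary, "share": primary_share,
                    "lhv": fuels_lhv[primary], "CO2": fuels_CO2[primary]},
        "secondary": {"type": secondary, "share": secondary_share,
                      "lhv": fuels_lhv[secondary], "CO2": fuels_CO2[secondary]},
    }
}
print(fuel_blends["petrol"]["secondary"]["lhv"])  # 26.8
```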
2c8771d34dc4927441b17fb8852d27243cb4bf632e8a312e8e8b5fc1d6c06b19 | def get_sulfur_content(self, location, fuel, year):
'\n Return the sulfur content in the fuel.\n If a region is passed, the average sulfur content over\n the countries the region contains is returned.\n :param location: str. A country or region ISO code\n :param fuel: str. "diesel" or "gasoline\n :return: float. Sulfur content in ppm.\n '
try:
int(year)
except ValueError:
raise ValueError('The year for which to fetch sulfur concentration values is not valid.')
if (location in self.bs.sulfur.country.values):
sulfur_concentration = self.bs.sulfur.sel(country=location, year=year, fuel=fuel).sum().values
else:
list_countries = self.geo.iam_to_ecoinvent_location(location)
list_countries = [c for c in list_countries if (c in self.bs.sulfur.country.values)]
if (len(list_countries) > 0):
sulfur_concentration = self.bs.sulfur.sel(country=list_countries, year=year, fuel=fuel).mean().values
else:
print('The sulfur content for {} fuel in {} could not be found. European average sulfur content is used instead.'.format(fuel, location))
sulfur_concentration = self.bs.sulfur.sel(country='RER', year=year, fuel=fuel).sum().values
return sulfur_concentration | Return the sulfur content in the fuel.
If a region is passed, the average sulfur content over
the countries the region contains is returned.
:param location: str. A country or region ISO code
:param fuel: str. "diesel" or "gasoline"
:return: float. Sulfur content in ppm. | carculator/inventory.py | get_sulfur_content | rena-nong/carculator | 1 | python | def get_sulfur_content(self, location, fuel, year):
'\n Return the sulfur content in the fuel.\n If a region is passed, the average sulfur content over\n the countries the region contains is returned.\n :param location: str. A country or region ISO code\n :param fuel: str. "diesel" or "gasoline\n :return: float. Sulfur content in ppm.\n '
try:
int(year)
except ValueError:
raise ValueError('The year for which to fetch sulfur concentration values is not valid.')
if (location in self.bs.sulfur.country.values):
sulfur_concentration = self.bs.sulfur.sel(country=location, year=year, fuel=fuel).sum().values
else:
list_countries = self.geo.iam_to_ecoinvent_location(location)
list_countries = [c for c in list_countries if (c in self.bs.sulfur.country.values)]
if (len(list_countries) > 0):
sulfur_concentration = self.bs.sulfur.sel(country=list_countries, year=year, fuel=fuel).mean().values
else:
print('The sulfur content for {} fuel in {} could not be found. European average sulfur content is used instead.'.format(fuel, location))
sulfur_concentration = self.bs.sulfur.sel(country='RER', year=year, fuel=fuel).sum().values
return sulfur_concentration | def get_sulfur_content(self, location, fuel, year):
'\n Return the sulfur content in the fuel.\n If a region is passed, the average sulfur content over\n the countries the region contains is returned.\n :param location: str. A country or region ISO code\n :param fuel: str. "diesel" or "gasoline\n :return: float. Sulfur content in ppm.\n '
try:
int(year)
except ValueError:
raise ValueError('The year for which to fetch sulfur concentration values is not valid.')
if (location in self.bs.sulfur.country.values):
sulfur_concentration = self.bs.sulfur.sel(country=location, year=year, fuel=fuel).sum().values
else:
list_countries = self.geo.iam_to_ecoinvent_location(location)
list_countries = [c for c in list_countries if (c in self.bs.sulfur.country.values)]
if (len(list_countries) > 0):
sulfur_concentration = self.bs.sulfur.sel(country=list_countries, year=year, fuel=fuel).mean().values
else:
print('The sulfur content for {} fuel in {} could not be found. European average sulfur content is used instead.'.format(fuel, location))
sulfur_concentration = self.bs.sulfur.sel(country='RER', year=year, fuel=fuel).sum().values
return sulfur_concentration<|docstring|>Return the sulfur content in the fuel.
If a region is passed, the average sulfur content over
the countries the region contains is returned.
:param location: str. A country or region ISO code
:param fuel: str. "diesel" or "gasoline"
:return: float. Sulfur content in ppm.<|endoftext|> |
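The lookup-with-fallback logic above (direct country hit, else an average over the region's member countries, else the European "RER" average) can be sketched with a plain dictionary instead of the xarray background data used by the class. Numbers, years and the region membership below are invented.

```python
sulfur_ppm = {
    ("CH", 2020, "diesel"): 8.0,
    ("DE", 2020, "diesel"): 10.0,
    ("RER", 2020, "diesel"): 9.0,
}

def sulfur_content(location, fuel, year, region_members=("CH", "DE")):
    """Sulfur content in ppm; averages over a region's countries, else falls back to RER."""
    if (location, year, fuel) in sulfur_ppm:
        return sulfur_ppm[(location, year, fuel)]
    members = [c for c in region_members if (c, year, fuel) in sulfur_ppm]
    if members:
        return sum(sulfur_ppm[(c, year, fuel)] for c in members) / len(members)
    return sulfur_ppm[("RER", year, fuel)]   # European average as the last resort

print(sulfur_content("EUR", "diesel", 2020))  # 9.0, the mean of CH and DE
```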
b73f03d444dbd53f10cc9c9439e3304b5f12637e37010b4b06f3f7058d98306a | def create_fuel_markets(self, fuel_type, primary=None, secondary=None, tertiary=None, primary_share=None, secondary_share=None, tertiary_share=None):
'\n This function creates markets for fuel, considering a given blend, a given fuel type and a given year.\n It also adds separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.\n :return:\n '
d_dataset_name = {'petrol': 'fuel supply for gasoline vehicles, ', 'diesel': 'fuel supply for diesel vehicles, ', 'cng': 'fuel supply for gas vehicles, ', 'hydrogen': 'fuel supply for hydrogen vehicles, ', 'electricity': 'electricity supply for electric vehicles, '}
if (fuel_type != 'electricity'):
for (y, year) in enumerate(self.scope['year']):
dataset_name = (d_dataset_name[fuel_type] + str(year))
fuel_market_index = [self.inputs[i] for i in self.inputs if (i[0] == dataset_name)][0]
try:
primary_fuel_activity_index = self.inputs[self.fuel_dictionary[primary]['name']]
secondary_fuel_activity_index = self.inputs[self.fuel_dictionary[secondary]['name']]
except KeyError:
raise KeyError('One of the primary or secondary fuels specified in the fuel blend for {} is not valid.'.format(fuel_type))
self.A[:, primary_fuel_activity_index, fuel_market_index] = ((- 1) * primary_share[y])
self.A[:, secondary_fuel_activity_index, fuel_market_index] = ((- 1) * secondary_share[y])
additional_electricity = ((self.fuel_dictionary[primary]['additional electricity'] * primary_share[y]) + (self.fuel_dictionary[secondary]['additional electricity'] * secondary_share[y]))
if tertiary:
tertiary_fuel_activity_index = self.inputs[self.fuel_dictionary[tertiary]['name']]
self.A[:, tertiary_fuel_activity_index, fuel_market_index] = ((- 1) * tertiary_share[y])
additional_electricity += (self.fuel_dictionary[tertiary]['additional electricity'] * tertiary_share[y])
if (additional_electricity > 0):
electricity_mix_index = [self.inputs[i] for i in self.inputs if (i[0] == ('electricity market for fuel preparation, ' + str(year)))][0]
self.A[:, electricity_mix_index, fuel_market_index] = ((- 1) * additional_electricity)
else:
for year in self.scope['year']:
dataset_name = (d_dataset_name[fuel_type] + str(year))
electricity_market_index = [self.inputs[i] for i in self.inputs if (i[0] == dataset_name)][0]
electricity_mix_index = [self.inputs[i] for i in self.inputs if (i[0] == ('electricity market for fuel preparation, ' + str(year)))][0]
self.A[:, electricity_mix_index, electricity_market_index] = (- 1) | This function creates markets for fuel, considering a given blend, a given fuel type and a given year.
It also adds separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.
:return: | carculator/inventory.py | create_fuel_markets | rena-nong/carculator | 1 | python | def create_fuel_markets(self, fuel_type, primary=None, secondary=None, tertiary=None, primary_share=None, secondary_share=None, tertiary_share=None):
'\n This function creates markets for fuel, considering a given blend, a given fuel type and a given year.\n It also adds separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.\n :return:\n '
d_dataset_name = {'petrol': 'fuel supply for gasoline vehicles, ', 'diesel': 'fuel supply for diesel vehicles, ', 'cng': 'fuel supply for gas vehicles, ', 'hydrogen': 'fuel supply for hydrogen vehicles, ', 'electricity': 'electricity supply for electric vehicles, '}
if (fuel_type != 'electricity'):
for (y, year) in enumerate(self.scope['year']):
dataset_name = (d_dataset_name[fuel_type] + str(year))
fuel_market_index = [self.inputs[i] for i in self.inputs if (i[0] == dataset_name)][0]
try:
primary_fuel_activity_index = self.inputs[self.fuel_dictionary[primary]['name']]
secondary_fuel_activity_index = self.inputs[self.fuel_dictionary[secondary]['name']]
except KeyError:
raise KeyError('One of the primary or secondary fuels specified in the fuel blend for {} is not valid.'.format(fuel_type))
self.A[:, primary_fuel_activity_index, fuel_market_index] = ((- 1) * primary_share[y])
self.A[:, secondary_fuel_activity_index, fuel_market_index] = ((- 1) * secondary_share[y])
additional_electricity = ((self.fuel_dictionary[primary]['additional electricity'] * primary_share[y]) + (self.fuel_dictionary[secondary]['additional electricity'] * secondary_share[y]))
if tertiary:
tertiary_fuel_activity_index = self.inputs[self.fuel_dictionary[tertiary]['name']]
self.A[:, tertiary_fuel_activity_index, fuel_market_index] = ((- 1) * tertiary_share[y])
additional_electricity += (self.fuel_dictionary[tertiary]['additional electricity'] * tertiary_share[y])
if (additional_electricity > 0):
electricity_mix_index = [self.inputs[i] for i in self.inputs if (i[0] == ('electricity market for fuel preparation, ' + str(year)))][0]
self.A[:, electricity_mix_index, fuel_market_index] = ((- 1) * additional_electricity)
else:
for year in self.scope['year']:
dataset_name = (d_dataset_name[fuel_type] + str(year))
electricity_market_index = [self.inputs[i] for i in self.inputs if (i[0] == dataset_name)][0]
electricity_mix_index = [self.inputs[i] for i in self.inputs if (i[0] == ('electricity market for fuel preparation, ' + str(year)))][0]
self.A[:, electricity_mix_index, electricity_market_index] = (- 1) | def create_fuel_markets(self, fuel_type, primary=None, secondary=None, tertiary=None, primary_share=None, secondary_share=None, tertiary_share=None):
'\n This function creates markets for fuel, considering a given blend, a given fuel type and a given year.\n It also adds separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.\n :return:\n '
d_dataset_name = {'petrol': 'fuel supply for gasoline vehicles, ', 'diesel': 'fuel supply for diesel vehicles, ', 'cng': 'fuel supply for gas vehicles, ', 'hydrogen': 'fuel supply for hydrogen vehicles, ', 'electricity': 'electricity supply for electric vehicles, '}
if (fuel_type != 'electricity'):
for (y, year) in enumerate(self.scope['year']):
dataset_name = (d_dataset_name[fuel_type] + str(year))
fuel_market_index = [self.inputs[i] for i in self.inputs if (i[0] == dataset_name)][0]
try:
primary_fuel_activity_index = self.inputs[self.fuel_dictionary[primary]['name']]
secondary_fuel_activity_index = self.inputs[self.fuel_dictionary[secondary]['name']]
except KeyError:
raise KeyError('One of the primary or secondary fuels specified in the fuel blend for {} is not valid.'.format(fuel_type))
self.A[:, primary_fuel_activity_index, fuel_market_index] = ((- 1) * primary_share[y])
self.A[:, secondary_fuel_activity_index, fuel_market_index] = ((- 1) * secondary_share[y])
additional_electricity = ((self.fuel_dictionary[primary]['additional electricity'] * primary_share[y]) + (self.fuel_dictionary[secondary]['additional electricity'] * secondary_share[y]))
if tertiary:
tertiary_fuel_activity_index = self.inputs[self.fuel_dictionary[tertiary]['name']]
self.A[:, tertiary_fuel_activity_index, fuel_market_index] = ((- 1) * tertiary_share[y])
additional_electricity += (self.fuel_dictionary[tertiary]['additional electricity'] * tertiary_share[y])
if (additional_electricity > 0):
electricity_mix_index = [self.inputs[i] for i in self.inputs if (i[0] == ('electricity market for fuel preparation, ' + str(year)))][0]
self.A[:, electricity_mix_index, fuel_market_index] = ((- 1) * additional_electricity)
else:
for year in self.scope['year']:
dataset_name = (d_dataset_name[fuel_type] + str(year))
electricity_market_index = [self.inputs[i] for i in self.inputs if (i[0] == dataset_name)][0]
electricity_mix_index = [self.inputs[i] for i in self.inputs if (i[0] == ('electricity market for fuel preparation, ' + str(year)))][0]
self.A[:, electricity_mix_index, electricity_market_index] = (- 1)<|docstring|>This function creates markets for fuel, considering a given blend, a given fuel type and a given year.
It also adds separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.
:return:<|endoftext|> |
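Each yearly fuel market above is a column of `A` that receives the blend components as negative inputs, plus an extra electricity input when a component needs it. A self-contained NumPy sketch of that column fill, with made-up indices, shares and electricity demand:

```python
import numpy as np

iterations, n_products, n_activities = 2, 8, 8
A = np.zeros((iterations, n_products, n_activities))

fuel_market_index = 5                      # column of e.g. "fuel supply for gasoline vehicles, 2020"
primary_idx, secondary_idx, elec_idx = 0, 1, 2
primary_share, secondary_share = 0.9, 0.1  # assumed blend for one scope year
additional_electricity = 0.02              # assumed extra kWh per kg of blended fuel

A[:, primary_idx, fuel_market_index] = -1 * primary_share
A[:, secondary_idx, fuel_market_index] = -1 * secondary_share
if additional_electricity > 0:
    A[:, elec_idx, fuel_market_index] = -1 * additional_electricity

print(A[0, :, fuel_market_index])          # [-0.9, -0.1, -0.02, 0, 0, 0, 0, 0]
```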
224d07899ad25842055dea3c2a5527ceebe1b2c96cb1e8c0194501185c7faef4 | def find_inputs(self, value_in, value_out, find_input_by='name', zero_out_input=False):
"\n Finds the exchange inputs to a specified functional unit\n :param find_input_by: can be 'name' or 'unit'\n :param value_in: value to look for\n :param value_out: functional unit output\n :return: indices of all inputs to FU, indices of inputs of intereste\n :rtype: tuple\n "
if isinstance(value_out, str):
value_out = [value_out]
index_output = [self.inputs[i] for val in value_out for i in self.inputs if (val.lower() in i[0].lower())]
f = np.float32(np.zeros(np.shape(self.A)[1]))
f[index_output] = 1
X = np.float32(sparse.linalg.spsolve(self.A[0], f.T))
ind_inputs = np.nonzero(X)[0]
if (find_input_by == 'name'):
ins = [i for i in ind_inputs if (value_in.lower() in self.rev_inputs[i][0].lower())]
if (find_input_by == 'unit'):
ins = [i for i in ind_inputs if (value_in.lower() in self.rev_inputs[i][2].lower())]
outs = [i for i in ind_inputs if (i not in ins)]
sum_supplied = X[ins].sum()
if zero_out_input:
self.A[np.ix_(np.arange(0, self.A.shape[0]), ins, outs)] *= 0
else:
return sum_supplied | Finds the exchange inputs to a specified functional unit
:param find_input_by: can be 'name' or 'unit'
:param value_in: value to look for
:param value_out: functional unit output
:return: indices of all inputs to FU, indices of inputs of interest
:rtype: tuple | carculator/inventory.py | find_inputs | rena-nong/carculator | 1 | python | def find_inputs(self, value_in, value_out, find_input_by='name', zero_out_input=False):
"\n Finds the exchange inputs to a specified functional unit\n :param find_input_by: can be 'name' or 'unit'\n :param value_in: value to look for\n :param value_out: functional unit output\n :return: indices of all inputs to FU, indices of inputs of intereste\n :rtype: tuple\n "
if isinstance(value_out, str):
value_out = [value_out]
index_output = [self.inputs[i] for val in value_out for i in self.inputs if (val.lower() in i[0].lower())]
f = np.float32(np.zeros(np.shape(self.A)[1]))
f[index_output] = 1
X = np.float32(sparse.linalg.spsolve(self.A[0], f.T))
ind_inputs = np.nonzero(X)[0]
if (find_input_by == 'name'):
ins = [i for i in ind_inputs if (value_in.lower() in self.rev_inputs[i][0].lower())]
if (find_input_by == 'unit'):
ins = [i for i in ind_inputs if (value_in.lower() in self.rev_inputs[i][2].lower())]
outs = [i for i in ind_inputs if (i not in ins)]
sum_supplied = X[ins].sum()
if zero_out_input:
self.A[np.ix_(np.arange(0, self.A.shape[0]), ins, outs)] *= 0
else:
return sum_supplied | def find_inputs(self, value_in, value_out, find_input_by='name', zero_out_input=False):
"\n Finds the exchange inputs to a specified functional unit\n :param find_input_by: can be 'name' or 'unit'\n :param value_in: value to look for\n :param value_out: functional unit output\n :return: indices of all inputs to FU, indices of inputs of intereste\n :rtype: tuple\n "
if isinstance(value_out, str):
value_out = [value_out]
index_output = [self.inputs[i] for val in value_out for i in self.inputs if (val.lower() in i[0].lower())]
f = np.float32(np.zeros(np.shape(self.A)[1]))
f[index_output] = 1
X = np.float32(sparse.linalg.spsolve(self.A[0], f.T))
ind_inputs = np.nonzero(X)[0]
if (find_input_by == 'name'):
ins = [i for i in ind_inputs if (value_in.lower() in self.rev_inputs[i][0].lower())]
if (find_input_by == 'unit'):
ins = [i for i in ind_inputs if (value_in.lower() in self.rev_inputs[i][2].lower())]
outs = [i for i in ind_inputs if (i not in ins)]
sum_supplied = X[ins].sum()
if zero_out_input:
self.A[np.ix_(np.arange(0, self.A.shape[0]), ins, outs)] *= 0
else:
return sum_supplied<|docstring|>Finds the exchange inputs to a specified functional unit
:param find_input_by: can be 'name' or 'unit'
:param value_in: value to look for
:param value_out: functional unit output
:return: indices of all inputs to FU, indices of inputs of interest
:rtype: tuple<|endoftext|> |
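`find_inputs` works by solving the linear system A·x = f for the supply vector x and then summing the entries whose name (or unit) matches the query. A tiny hand-made example of that idea with scipy.sparse; the 3×3 matrix, coefficients and activity labels are invented.

```python
import numpy as np
from scipy import sparse
from scipy.sparse import linalg as splinalg

# Rows are products, columns are activities; the diagonal holds each activity's own output.
# 0 = car transport (km), 1 = electricity (kWh), 2 = steel (kg)
A = sparse.csc_matrix(np.array([
    [1.0,   0.0,   0.0],
    [-0.2,  1.0,   0.0],     # the car needs 0.2 kWh of electricity per km
    [-0.05, -0.01, 1.0],     # steel inputs to the car and to electricity
]))

f = np.zeros(3)
f[0] = 1.0                                # functional unit: 1 km of car transport
X = splinalg.spsolve(A, f)                # supply of every activity for that demand
ins = [1]                                 # indices whose supply we want (here: electricity)
print(float(X[ins].sum()))                # 0.2 kWh supplied per km
```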
674b4f06b11eb8d6584f7e1737b306f73d8891ff7a3b0df3e68ed304cc4f4ee8 | def set_inputs_in_A_matrix(self, array):
'\n Fill-in the A matrix. Does not return anything. Modifies in place.\n Shape of the A matrix (values, products, activities).\n\n :param array: :attr:`array` from :class:`CarModel` class\n '
self.A[:, self.inputs[('market for glider, passenger car', 'GLO', 'kilogram', 'glider, passenger car')], (- self.number_of_cars):] = ((array[self.array_inputs['glider base mass'], :] / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('Glider lightweighting', 'GLO', 'kilogram', 'Glider lightweighting')], (- self.number_of_cars):] = (((array[self.array_inputs['lightweighting'], :] * array[self.array_inputs['glider base mass'], :]) / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('maintenance, passenger car', 'RER', 'unit', 'passenger car maintenance')], (- self.number_of_cars):] = (((array[self.array_inputs['curb mass'], :] / 1240) / 150000) * (- 1))
self.A[:, self.inputs[('market for manual dismantling of used electric passenger car', 'GLO', 'unit', 'manual dismantling of used electric passenger car')], (- self.number_of_cars):] = (((array[self.array_inputs['curb mass'], :] * (1 - array[self.array_inputs['combustion power share'], :])) / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('market for used Li-ion battery', 'GLO', 'kilogram', 'used Li-ion battery')], (- self.number_of_cars):] = (array[self.array_inputs['energy battery mass'], :] / array[self.array_inputs['lifetime kilometers'], :])
self.A[:, self.inputs[('market for manual dismantling of used passenger car with internal combustion engine', 'GLO', 'unit', 'manual dismantling of used passenger car with internal combustion engine')], (- self.number_of_cars):] = (((array[self.array_inputs['curb mass'], :] * array[self.array_inputs['combustion power share'], :]) / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('market for charger, electric passenger car', 'GLO', 'kilogram', 'charger, electric passenger car')], (- self.number_of_cars):] = ((array[self.array_inputs['charger mass'], :] / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('market for converter, for electric passenger car', 'GLO', 'kilogram', 'converter, for electric passenger car')], (- self.number_of_cars):] = ((array[self.array_inputs['converter mass'], :] / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('market for electric motor, electric passenger car', 'GLO', 'kilogram', 'electric motor, electric passenger car')], (- self.number_of_cars):] = ((array[self.array_inputs['electric engine mass'], :] / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('market for inverter, for electric passenger car', 'GLO', 'kilogram', 'inverter, for electric passenger car')], (- self.number_of_cars):] = ((array[self.array_inputs['inverter mass'], :] / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
self.A[:, self.inputs[('market for power distribution unit, for electric passenger car', 'GLO', 'kilogram', 'power distribution unit, for electric passenger car')], (- self.number_of_cars):] = ((array[self.array_inputs['power distribution unit mass'], :] / array[self.array_inputs['lifetime kilometers'], :]) * (- 1))
l_elec_pt = ['charger mass', 'converter mass', 'inverter mass', 'power distribution unit mass', 'electric engine mass', 'fuel cell stack mass', 'fuel cell ancillary BoP mass', 'fuel cell essential BoP mass', 'battery cell mass', 'battery BoP mass']
self.A[(:, self.inputs[('market for used powertrain from electric passenger car, manual dismantling', 'GLO', 'kilogram', 'used powertrain from electric passenger car, manual dismantling')], (- self.number_of_cars):)] = (array[([self.array_inputs[l] for l in l_elec_pt], :)].sum(axis=0) / array[(self.array_inputs['lifetime kilometers'], :)])
self.A[(:, self.inputs[('market for internal combustion engine, passenger car', 'GLO', 'kilogram', 'internal combustion engine, for passenger car')], (- self.number_of_cars):)] = ((array[([self.array_inputs[l] for l in ['combustion engine mass', 'powertrain mass']], :)].sum(axis=0) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('Ancillary BoP', 'GLO', 'kilogram', 'Ancillary BoP')], (- self.number_of_cars):)] = (((array[(self.array_inputs['fuel cell ancillary BoP mass'], :)] * (1 + array[(self.array_inputs['fuel cell lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('Essential BoP', 'GLO', 'kilogram', 'Essential BoP')], (- self.number_of_cars):)] = (((array[(self.array_inputs['fuel cell essential BoP mass'], :)] * (1 + array[(self.array_inputs['fuel cell lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('Stack', 'GLO', 'kilowatt', 'Stack')], (- self.number_of_cars):)] = ((((array[(self.array_inputs['fuel cell stack mass'], :)] / 0.51) * (1 + array[(self.array_inputs['fuel cell lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
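# Energy storage: battery BoP and cells, with chemistry and production origin read from
# background_configuration and replacements over the vehicle lifetime accounted for. The generic
# medium-voltage electricity input of the cell dataset is zeroed out, and a year-specific
# "electricity market for energy storage production" is charged to each transport activity in
# proportion to its cell demand and the cell production electricity.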
print('****************** IMPORTANT BACKGROUND PARAMETERS ******************', end='\n * ')
print(('The country of use is ' + self.country), end='\n * ')
battery_tech = self.background_configuration['energy storage']['electric']['type']
battery_origin = self.background_configuration['energy storage']['electric']['origin']
print((((('Power and energy batteries produced in ' + battery_origin) + ' using ') + battery_tech) + ' chemistry.'), end='\n * ')
self.A[(:, self.inputs[('Battery BoP', 'GLO', 'kilogram', 'Battery BoP')], (- self.number_of_cars):)] = (((array[(self.array_inputs['battery BoP mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
battery_cell_label = (('Battery cell, ' + battery_tech), 'GLO', 'kilogram', 'Battery cell')
self.A[(:, self.inputs[battery_cell_label], (- self.number_of_cars):)] = (((array[(self.array_inputs['battery cell mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market group for electricity, medium voltage', 'World', 'kilowatt hour', 'electricity, medium voltage')], self.inputs[battery_cell_label])] = 0
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(y)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity market for energy storage production' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]))])] = (array[(self.array_inputs['battery cell production electricity'], :, index)].T * self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]))])]).reshape(self.iterations, 1, (- 1))
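# Fuel tanks: HDPE tanks for liquid-fuel powertrains, a glass-fibre/polyamide tank for ICEV-g,
# and a hydrogen tank for FCEV (carbon fiber by default, otherwise HDPE- or aluminium-lined,
# depending on background_configuration).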
index_A = [self.inputs[c] for c in self.inputs if any(((ele in c[0]) for ele in ['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d']))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])
self.A[(:, self.inputs[('polyethylene production, high density, granulate', 'RER', 'kilogram', 'polyethylene, high density, granulate')], index_A)] = ((array[(self.array_inputs['fuel tank mass'], :, index)] / array[(self.array_inputs['lifetime kilometers'], :, index)]) * (- 1)).T
index = self.get_index_vehicle_from_array('ICEV-g')
self.A[(:, self.inputs[('glass fibre reinforced plastic production, polyamide, injection moulded', 'RER', 'kilogram', 'glass fibre reinforced plastic, polyamide, injection moulded')], self.index_cng)] = ((array[(self.array_inputs['fuel tank mass'], :, index)] / array[(self.array_inputs['lifetime kilometers'], :, index)]) * (- 1)).T
if ('hydrogen' in self.background_configuration['energy storage']):
hydro_tank_technology = self.background_configuration['energy storage']['hydrogen']['type']
else:
hydro_tank_technology = 'carbon fiber'
dict_tank_map = {'carbon fiber': ('Fuel tank, compressed hydrogen gas, 700bar', 'GLO', 'kilogram', 'Fuel tank, compressed hydrogen gas, 700bar'), 'hdpe': ('Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner', 'RER', 'kilogram', 'Hydrogen tank'), 'aluminium': ('Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner', 'RER', 'kilogram', 'Hydrogen tank')}
index = self.get_index_vehicle_from_array('FCEV')
self.A[(:, self.inputs[dict_tank_map[hydro_tank_technology]], self.index_fuel_cell)] = ((array[(self.array_inputs['fuel tank mass'], :, index)] / array[(self.array_inputs['lifetime kilometers'], :, index)]) * (- 1)).T
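# Electricity use phase: report the renewable share and GHG intensity of the mix for each year,
# then charge the charging electricity (kWh/km) of BEV and PHEV vehicles to the year-specific
# "electricity supply for electric vehicles" datasets.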
(sum_renew, co2_intensity_tech) = self.define_renewable_rate_in_mix()
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
print(((((((('in ' + str(year)) + ', % of renewable: ') + str(np.round((sum_renew[y] * 100), 0))) + '%') + ', GHG intensity per kWh: ') + str(int(np.sum((co2_intensity_tech[y] * self.mix[y]))))) + ' g. CO2-eq.'), end=end_str)
if any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in self.scope['powertrain']))):
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(['BEV', 'PHEV-p', 'PHEV-d'], y, method='and')
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity supply for electric vehicles' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]) and any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in i[0]))))])] = (array[(self.array_inputs['electricity consumption'], :, index)] * (- 1)).T.reshape(self.iterations, 1, (- 1))
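# Hydrogen (FCEV): report the blend composition per year and charge the hydrogen demand
# (fuel mass divided by range, i.e. kg/km) to the year-specific "fuel supply for hydrogen
# vehicles" datasets.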
if ('FCEV' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('FCEV')
if ('tertiary' in self.fuel_blends['hydrogen']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type'], self.fuel_blends['hydrogen']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['hydrogen']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('FCEV' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for hydrogen vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
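# Compressed gas (ICEV-g): report the blend per year, charge the gas demand including
# pump-to-tank leakage, account for the leaked methane itself, and split tailpipe CO2
# between the fossil and biogenic shares of the blend.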
if ('ICEV-g' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('ICEV-g')
if ('tertiary' in self.fuel_blends['cng']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type'], self.fuel_blends['cng']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['cng']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['cng']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('ICEV-g' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gas vehicles' in i[0]))], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (1 + array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)])) * (- 1)).T
self.A[(:, self.inputs[('Methane, fossil', ('air',), 'kilogram')], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_fossil = (self.fuel_blends['cng']['primary']['CO2'] * self.fuel_blends['cng']['primary']['share'][y])
if (self.fuel_blends['cng']['secondary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['secondary']['CO2'] * self.fuel_blends['cng']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['tertiary']['CO2'] * self.fuel_blends['cng']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_non_fossil = (self.fuel_blends['cng']['primary']['CO2'] * self.fuel_blends['cng']['primary']['share'][y])
if (self.fuel_blends['cng']['secondary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['cng']['secondary']['share'][y] * self.fuel_blends['cng']['secondary']['CO2'])
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['cng']['tertiary']['share'][y] * self.fuel_blends['cng']['tertiary']['CO2'])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
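# Diesel (ICEV-d, PHEV-d, HEV-d): blend reporting, fuel demand per km, fossil and biogenic
# tailpipe CO2, SO2 derived from the country- and year-specific sulfur content, and trace
# heavy-metal exhaust emissions.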
if [i for i in self.scope['powertrain'] if (i in ['ICEV-d', 'PHEV-d', 'HEV-d'])]:
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'])
if ('tertiary' in self.fuel_blends['diesel']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type'], self.fuel_blends['diesel']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['diesel']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'PHEV-d', 'HEV-d'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for diesel vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'diesel', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['secondary']['share'][y] * self.fuel_blends['diesel']['secondary']['CO2'])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['tertiary']['share'][y] * self.fuel_blends['diesel']['tertiary']['CO2'])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
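# Petrol (ICEV-p, HEV-p, PHEV-p): same structure as the diesel block above.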
if [i for i in self.scope['powertrain'] if (i in ['ICEV-p', 'HEV-p', 'PHEV-p'])]:
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'])
if ('tertiary' in self.fuel_blends['petrol']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type'], self.fuel_blends['petrol']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['petrol']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
for (y, year) in enumerate(self.scope['year']):
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-p', 'HEV-p', 'PHEV-p'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gasoline vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] == 'petrol'):
share_fossil = self.fuel_blends['petrol']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'petrol', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['tertiary']['share'][y] * self.fuel_blends['petrol']['tertiary']['CO2'])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
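# Non-exhaust and infrastructure flows: road, tyre and brake wear (brake wear reduced to 20%
# for electrified powertrains), road construction and maintenance, remaining tailpipe and
# noise emissions, and refrigerant (R134a) losses.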
self.A[(:, self.inputs[('market for road wear emissions, passenger car', 'GLO', 'kilogram', 'road wear emissions, passenger car')], (- self.number_of_cars):)] = (array[(self.array_inputs['driving mass'], :)] * 1e-08)
self.A[(:, self.inputs[('market for tyre wear emissions, passenger car', 'GLO', 'kilogram', 'tyre wear emissions, passenger car')], (- self.number_of_cars):)] = (array[(self.array_inputs['driving mass'], :)] * 6e-08)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'ICEV-p', 'ICEV-g'])))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'ICEV-g'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :, index)].T * 5e-09)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])))]
index = self.get_index_vehicle_from_array(['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = ((array[(self.array_inputs['driving mass'], :, index)].T * 5e-09) * 0.2)
self.A[(:, self.inputs[('market for road', 'GLO', 'meter-year', 'road')], (- self.number_of_cars):)] = ((5.37e-07 * array[(self.array_inputs['driving mass'], :)]) * (- 1))
self.A[(:, self.inputs[('market for road maintenance', 'RER', 'meter-year', 'road maintenance')], (- self.number_of_cars):)] = (0.00129 * (- 1))
self.A[(:, self.index_emissions, (- self.number_of_cars):)] = (array[[self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]] for x in self.index_emissions]] * (- 1)).transpose([1, 0, 2])
self.A[(:, self.index_noise, (- self.number_of_cars):)] = (array[[self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]] for x in self.index_noise]] * (- 1)).transpose([1, 0, 2])
self.A[(:, self.inputs[('Ethane, 1,1,1,2-tetrafluoro-, HFC-134a', ('air',), 'kilogram')], (- self.number_of_cars):)] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
self.A[(:, self.inputs[('market for refrigerant R134a', 'GLO', 'kilogram', 'refrigerant R134a')], (- self.number_of_cars):)] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
print('*********************************************************************') | Fill-in the A matrix. Does not return anything. Modifies in place.
Shape of the A matrix (values, products, activities).
:param array: :attr:`array` from :class:`CarModel` class | carculator/inventory.py | set_inputs_in_A_matrix | rena-nong/carculator | 1 | python | def set_inputs_in_A_matrix(self, array):
'\n Fill in the A matrix. Does not return anything. Modifies in place.\n Shape of the A matrix (values, products, activities).\n\n :param array: :attr:`array` from :class:`CarModel` class\n '
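# Hedged usage note (not part of the original source): this method is meant to be
# called on an inventory object that already owns self.A, self.inputs, self.array_inputs
# and self.scope, passing the raw parameter array of the car model, for example
# something like self.set_inputs_in_A_matrix(array) from the routine that assembles
# the inventory. The exchanges below are written per vehicle-kilometre (hence the
# repeated divisions by 'lifetime kilometers' or 'range'), and inputs carry a negative sign.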
self.A[(:, self.inputs[('market for glider, passenger car', 'GLO', 'kilogram', 'glider, passenger car')], (- self.number_of_cars):)] = ((array[(self.array_inputs['glider base mass'], :)] / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('Glider lightweighting', 'GLO', 'kilogram', 'Glider lightweighting')], (- self.number_of_cars):)] = (((array[(self.array_inputs['lightweighting'], :)] * array[(self.array_inputs['glider base mass'], :)]) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('maintenance, passenger car', 'RER', 'unit', 'passenger car maintenance')], (- self.number_of_cars):)] = (((array[(self.array_inputs['curb mass'], :)] / 1240) / 150000) * (- 1))
self.A[(:, self.inputs[('market for manual dismantling of used electric passenger car', 'GLO', 'unit', 'manual dismantling of used electric passenger car')], (- self.number_of_cars):)] = (((array[(self.array_inputs['curb mass'], :)] * (1 - array[(self.array_inputs['combustion power share'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market for used Li-ion battery', 'GLO', 'kilogram', 'used Li-ion battery')], (- self.number_of_cars):)] = (array[(self.array_inputs['energy battery mass'], :)] / array[(self.array_inputs['lifetime kilometers'], :)])
self.A[(:, self.inputs[('market for manual dismantling of used passenger car with internal combustion engine', 'GLO', 'unit', 'manual dismantling of used passenger car with internal combustion engine')], (- self.number_of_cars):)] = (((array[(self.array_inputs['curb mass'], :)] * array[(self.array_inputs['combustion power share'], :)]) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market for charger, electric passenger car', 'GLO', 'kilogram', 'charger, electric passenger car')], (- self.number_of_cars):)] = ((array[(self.array_inputs['charger mass'], :)] / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market for converter, for electric passenger car', 'GLO', 'kilogram', 'converter, for electric passenger car')], (- self.number_of_cars):)] = ((array[(self.array_inputs['converter mass'], :)] / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market for electric motor, electric passenger car', 'GLO', 'kilogram', 'electric motor, electric passenger car')], (- self.number_of_cars):)] = ((array[(self.array_inputs['electric engine mass'], :)] / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market for inverter, for electric passenger car', 'GLO', 'kilogram', 'inverter, for electric passenger car')], (- self.number_of_cars):)] = ((array[(self.array_inputs['inverter mass'], :)] / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market for power distribution unit, for electric passenger car', 'GLO', 'kilogram', 'power distribution unit, for electric passenger car')], (- self.number_of_cars):)] = ((array[(self.array_inputs['power distribution unit mass'], :)] / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
l_elec_pt = ['charger mass', 'converter mass', 'inverter mass', 'power distribution unit mass', 'electric engine mass', 'fuel cell stack mass', 'fuel cell ancillary BoP mass', 'fuel cell essential BoP mass', 'battery cell mass', 'battery BoP mass']
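# The masses listed in l_elec_pt are summed and divided by lifetime kilometres in the
# next exchange, so the dismantled electric powertrain (chargers, power electronics,
# motor, fuel cell components, battery cell and battery BoP) is booked as a single
# per-km end-of-life flow.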
self.A[(:, self.inputs[('market for used powertrain from electric passenger car, manual dismantling', 'GLO', 'kilogram', 'used powertrain from electric passenger car, manual dismantling')], (- self.number_of_cars):)] = (array[([self.array_inputs[l] for l in l_elec_pt], :)].sum(axis=0) / array[(self.array_inputs['lifetime kilometers'], :)])
self.A[(:, self.inputs[('market for internal combustion engine, passenger car', 'GLO', 'kilogram', 'internal combustion engine, for passenger car')], (- self.number_of_cars):)] = ((array[([self.array_inputs[l] for l in ['combustion engine mass', 'powertrain mass']], :)].sum(axis=0) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('Ancillary BoP', 'GLO', 'kilogram', 'Ancillary BoP')], (- self.number_of_cars):)] = (((array[(self.array_inputs['fuel cell ancillary BoP mass'], :)] * (1 + array[(self.array_inputs['fuel cell lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('Essential BoP', 'GLO', 'kilogram', 'Essential BoP')], (- self.number_of_cars):)] = (((array[(self.array_inputs['fuel cell essential BoP mass'], :)] * (1 + array[(self.array_inputs['fuel cell lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('Stack', 'GLO', 'kilowatt', 'Stack')], (- self.number_of_cars):)] = ((((array[(self.array_inputs['fuel cell stack mass'], :)] / 0.51) * (1 + array[(self.array_inputs['fuel cell lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
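# The stack exchange is expressed in kilowatts: dividing the stack mass by 0.51 implies
# an assumed specific mass of about 0.51 kg per kW of fuel cell power. The factor
# (1 + 'fuel cell lifetime replacements') also covers stacks replaced during the
# vehicle lifetime, as done for the battery below.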
print('****************** IMPORTANT BACKGROUND PARAMETERS ******************', end='\n * ')
print(('The country of use is ' + self.country), end='\n * ')
battery_tech = self.background_configuration['energy storage']['electric']['type']
battery_origin = self.background_configuration['energy storage']['electric']['origin']
print((((('Power and energy batteries produced in ' + battery_origin) + ' using ') + battery_tech) + ' chemistry.'), end='\n * ')
self.A[(:, self.inputs[('Battery BoP', 'GLO', 'kilogram', 'Battery BoP')], (- self.number_of_cars):)] = (((array[(self.array_inputs['battery BoP mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
battery_cell_label = (('Battery cell, ' + battery_tech), 'GLO', 'kilogram', 'Battery cell')
self.A[(:, self.inputs[battery_cell_label], (- self.number_of_cars):)] = (((array[(self.array_inputs['battery cell mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) / array[(self.array_inputs['lifetime kilometers'], :)]) * (- 1))
self.A[(:, self.inputs[('market group for electricity, medium voltage', 'World', 'kilowatt hour', 'electricity, medium voltage')], self.inputs[battery_cell_label])] = 0
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(y)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity market for energy storage production' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]))])] = (array[(self.array_inputs['battery cell production electricity'], :, index)].T * self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]))])]).reshape(self.iterations, 1, (- 1))
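# In the block above, np.ix_ builds an open mesh so a (iterations x 1 x n_activities)
# slab can be assigned at once: the electricity demand per kg of cell (presumably kWh
# per kg) is multiplied by the battery-cell exchange filled just above (kg of cell per
# km, negative), giving the year-specific electricity input per vehicle-km.
# Minimal, self-contained illustration of the same indexing pattern (hypothetical
# shapes, unrelated to this class):
# import numpy as np
# A = np.zeros((2, 4, 3))
# A[np.ix_(np.arange(2), [1], [0, 2])] = np.ones((2, 1, 2))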
index_A = [self.inputs[c] for c in self.inputs if any(((ele in c[0]) for ele in ['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d']))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])
self.A[(:, self.inputs[('polyethylene production, high density, granulate', 'RER', 'kilogram', 'polyethylene, high density, granulate')], index_A)] = ((array[(self.array_inputs['fuel tank mass'], :, index)] / array[(self.array_inputs['lifetime kilometers'], :, index)]) * (- 1)).T
index = self.get_index_vehicle_from_array('ICEV-g')
self.A[(:, self.inputs[('glass fibre reinforced plastic production, polyamide, injection moulded', 'RER', 'kilogram', 'glass fibre reinforced plastic, polyamide, injection moulded')], self.index_cng)] = ((array[(self.array_inputs['fuel tank mass'], :, index)] / array[(self.array_inputs['lifetime kilometers'], :, index)]) * (- 1)).T
if ('hydrogen' in self.background_configuration['energy storage']):
hydro_tank_technology = self.background_configuration['energy storage']['hydrogen']['type']
else:
hydro_tank_technology = 'carbon fiber'
dict_tank_map = {'carbon fiber': ('Fuel tank, compressed hydrogen gas, 700bar', 'GLO', 'kilogram', 'Fuel tank, compressed hydrogen gas, 700bar'), 'hdpe': ('Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner', 'RER', 'kilogram', 'Hydrogen tank'), 'aluminium': ('Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner', 'RER', 'kilogram', 'Hydrogen tank')}
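# The hydrogen tank dataset is picked according to the liner material declared in the
# background configuration; when nothing is specified, the carbon-fibre tank is used
# by default.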
index = self.get_index_vehicle_from_array('FCEV')
self.A[(:, self.inputs[dict_tank_map[hydro_tank_technology]], self.index_fuel_cell)] = ((array[(self.array_inputs['fuel tank mass'], :, index)] / array[(self.array_inputs['lifetime kilometers'], :, index)]) * (- 1)).T
(sum_renew, co2_intensity_tech) = self.define_renewable_rate_in_mix()
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
print(((((((('in ' + str(year)) + ', % of renewable: ') + str(np.round((sum_renew[y] * 100), 0))) + '%') + ', GHG intensity per kWh: ') + str(int(np.sum((co2_intensity_tech[y] * self.mix[y]))))) + ' g. CO2-eq.'), end=end_str)
if any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in self.scope['powertrain']))):
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(['BEV', 'PHEV-p', 'PHEV-d'], y, method='and')
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity supply for electric vehicles' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]) and any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in i[0]))))])] = (array[(self.array_inputs['electricity consumption'], :, index)] * (- 1)).T.reshape(self.iterations, 1, (- 1))
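# For battery and plug-in hybrid vehicles, the year-specific 'electricity supply for
# electric vehicles' market is drawn at the rate of the 'electricity consumption'
# parameter (presumably kWh per km), with the usual negative sign for an input.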
if ('FCEV' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('FCEV')
if ('tertiary' in self.fuel_blends['hydrogen']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type'], self.fuel_blends['hydrogen']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['hydrogen']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('FCEV' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for hydrogen vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if ('ICEV-g' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('ICEV-g')
if ('tertiary' in self.fuel_blends['cng']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type'], self.fuel_blends['cng']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['cng']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['cng']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('ICEV-g' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gas vehicles' in i[0]))], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (1 + array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)])) * (- 1)).T
self.A[(:, self.inputs[('Methane, fossil', ('air',), 'kilogram')], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)]) * (- 1)).T
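# Pump-to-tank losses for compressed gas, covered by the two exchanges above: the gas
# supplied at the pump is scaled up by (1 + leakage share), and the leaked share is
# emitted directly to air as fossil methane.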
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_fossil = (self.fuel_blends['cng']['primary']['CO2'] * self.fuel_blends['cng']['primary']['share'][y])
if (self.fuel_blends['cng']['secondary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['secondary']['CO2'] * self.fuel_blends['cng']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['tertiary']['CO2'] * self.fuel_blends['cng']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
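# Illustrative, made-up numbers for the exchange above: with a 15 kg gas charge giving
# a 400 km range, an 80% fossil share and 2.68 kg CO2 per kg of fuel, the entry would
# be 15 * 0.8 * 2.68 / 400, i.e. about 0.080 kg of fossil CO2 per vehicle-km.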
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_non_fossil = (self.fuel_blends['cng']['primary']['CO2'] * self.fuel_blends['cng']['primary']['share'][y])
if (self.fuel_blends['cng']['secondary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['cng']['secondary']['share'][y] * self.fuel_blends['cng']['secondary']['CO2'])
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['cng']['tertiary']['share'][y] * self.fuel_blends['cng']['tertiary']['CO2'])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if [i for i in self.scope['powertrain'] if (i in ['ICEV-d', 'PHEV-d', 'HEV-d'])]:
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'])
if ('tertiary' in self.fuel_blends['diesel']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type'], self.fuel_blends['diesel']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['diesel']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'PHEV-d', 'HEV-d'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for diesel vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'diesel', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
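# Sulfur contained in the fossil share of the fuel leaves the tailpipe as SO2; the
# 64 / 32 factor converts the mass of sulfur (molar mass 32 g/mol) into the mass of
# sulfur dioxide (molar mass 64 g/mol).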
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['secondary']['share'][y] * self.fuel_blends['diesel']['secondary']['CO2'])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['tertiary']['share'][y] * self.fuel_blends['diesel']['tertiary']['CO2'])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if [i for i in self.scope['powertrain'] if (i in ['ICEV-p', 'HEV-p', 'PHEV-p'])]:
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'])
if ('tertiary' in self.fuel_blends['petrol']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type'], self.fuel_blends['petrol']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['petrol']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
for (y, year) in enumerate(self.scope['year']):
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-p', 'HEV-p', 'PHEV-p'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gasoline vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] == 'petrol'):
share_fossil = self.fuel_blends['petrol']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'petrol', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['tertiary']['share'][y] * self.fuel_blends['petrol']['tertiary']['CO2'])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('market for road wear emissions, passenger car', 'GLO', 'kilogram', 'road wear emissions, passenger car')], (- self.number_of_cars):)] = (array[(self.array_inputs['driving mass'], :)] * 1e-08)
self.A[(:, self.inputs[('market for tyre wear emissions, passenger car', 'GLO', 'kilogram', 'tyre wear emissions, passenger car')], (- self.number_of_cars):)] = (array[(self.array_inputs['driving mass'], :)] * 6e-08)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'ICEV-p', 'ICEV-g'])))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'ICEV-g'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :, index)].T * 5e-09)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])))]
index = self.get_index_vehicle_from_array(['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = ((array[(self.array_inputs['driving mass'], :, index)].T * 5e-09) * 0.2)
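# Brake wear for fully or partly electrified powertrains is cut to 20% of the
# conventional value (factor 0.2), presumably to account for regenerative braking.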
self.A[(:, self.inputs[('market for road', 'GLO', 'meter-year', 'road')], (- self.number_of_cars):)] = ((5.37e-07 * array[(self.array_inputs['driving mass'], :)]) * (- 1))
self.A[(:, self.inputs[('market for road maintenance', 'RER', 'meter-year', 'road maintenance')], (- self.number_of_cars):)] = (0.00129 * (- 1))
self.A[(:, self.index_emissions, (- self.number_of_cars):)] = (array[[self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]] for x in self.index_emissions]] * (- 1)).transpose([1, 0, 2])
self.A[(:, self.index_noise, (- self.number_of_cars):)] = (array[[self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]] for x in self.index_noise]] * (- 1)).transpose([1, 0, 2])
self.A[(:, self.inputs[('Ethane, 1,1,1,2-tetrafluoro-, HFC-134a', ('air',), 'kilogram')], (- self.number_of_cars):)] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
self.A[(:, self.inputs[('market for refrigerant R134a', 'GLO', 'kilogram', 'refrigerant R134a')], (- self.number_of_cars):)] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
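# Air conditioning refrigerant: apparently 0.053 kg of R134a per year, divided by the
# annual mileage to obtain a per-km value, booked twice, once as the refrigerant refill
# input and once as the matching HFC-134a emission to air (assuming the refill is
# eventually lost to the atmosphere).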
print('*********************************************************************')<|docstring|>Fill-in the A matrix. Does not return anything. Modifies in place.
Shape of the A matrix (values, products, activities).
:param array: :attr:`array` from :class:`CarModel` class<|endoftext|> |
e97394747f44ae23d8c972fd38bddbef5a3d095dbd4489dd40fc5f2b7228f77c | def set_inputs_in_A_matrix_for_export(self, array):
'\n Fill in the A matrix. Does not return anything. Modifies in place.\n Shape of the A matrix (values, products, activities).\n\n :param array: :attr:`array` from :class:`CarModel` class\n '
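# Export variant: in contrast to set_inputs_in_A_matrix, most exchanges below are
# written per vehicle rather than per kilometre; the per-km scaling happens only once,
# further down, where each 'Passenger car' activity is linked to its
# 'transport, passenger car' activity with a -1 / 'lifetime kilometers' coefficient.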
self.A[(:, self.inputs[('market for glider, passenger car', 'GLO', 'kilogram', 'glider, passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['glider base mass'], :)] * (- 1))
self.A[(:, self.inputs[('Glider lightweighting', 'GLO', 'kilogram', 'Glider lightweighting')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['lightweighting'], :)] * array[(self.array_inputs['glider base mass'], :)]) * (- 1))
self.A[(:, self.inputs[('maintenance, passenger car', 'RER', 'unit', 'passenger car maintenance')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (((array[(self.array_inputs['curb mass'], :)] / 1240) / 150000) * (- 1))
self.A[(:, self.inputs[('market for manual dismantling of used electric passenger car', 'GLO', 'unit', 'manual dismantling of used electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['curb mass'], :)] * (1 - array[(self.array_inputs['combustion power share'], :)]))
self.A[(:, self.inputs[('market for used Li-ion battery', 'GLO', 'kilogram', 'used Li-ion battery')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = array[(self.array_inputs['energy battery mass'], :)]
self.A[(:, self.inputs[('market for manual dismantling of used passenger car with internal combustion engine', 'GLO', 'unit', 'manual dismantling of used passenger car with internal combustion engine')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['curb mass'], :)] * array[(self.array_inputs['combustion power share'], :)]) * (- 1))
self.A[(:, self.inputs[('market for charger, electric passenger car', 'GLO', 'kilogram', 'charger, electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['charger mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for converter, for electric passenger car', 'GLO', 'kilogram', 'converter, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['converter mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for electric motor, electric passenger car', 'GLO', 'kilogram', 'electric motor, electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['electric engine mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for inverter, for electric passenger car', 'GLO', 'kilogram', 'inverter, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['inverter mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for power distribution unit, for electric passenger car', 'GLO', 'kilogram', 'power distribution unit, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['power distribution unit mass'], :)] * (- 1))
l_elec_pt = ['charger mass', 'converter mass', 'inverter mass', 'power distribution unit mass', 'electric engine mass', 'fuel cell stack mass', 'fuel cell ancillary BoP mass', 'fuel cell essential BoP mass', 'battery cell mass', 'battery BoP mass']
self.A[(:, self.inputs[('market for used powertrain from electric passenger car, manual dismantling', 'GLO', 'kilogram', 'used powertrain from electric passenger car, manual dismantling')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = array[([self.array_inputs[l] for l in l_elec_pt], :)].sum(axis=0)
self.A[(:, self.inputs[('market for internal combustion engine, passenger car', 'GLO', 'kilogram', 'internal combustion engine, for passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[([self.array_inputs[l] for l in ['combustion engine mass', 'powertrain mass']], :)].sum(axis=0) * (- 1))
self.A[(:, self.inputs[('Ancillary BoP', 'GLO', 'kilogram', 'Ancillary BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell ancillary BoP mass'], :)] * (- 1))
self.A[(:, self.inputs[('Essential BoP', 'GLO', 'kilogram', 'Essential BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell essential BoP mass'], :)] * (- 1))
self.A[(:, self.inputs[('Stack', 'GLO', 'kilowatt', 'Stack')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell stack mass'], :)] * (- 1))
print('****************** IMPORTANT BACKGROUND PARAMETERS ******************', end='\n * ')
print(('The country of use is ' + self.country), end='\n * ')
battery_tech = self.background_configuration['energy storage']['electric']['type']
battery_origin = self.background_configuration['energy storage']['electric']['origin']
print((((('Power and energy batteries produced in ' + battery_origin) + ' using ') + battery_tech) + ' chemistry.'), end='\n * ')
self.A[(:, self.inputs[('Battery BoP', 'GLO', 'kilogram', 'Battery BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['battery BoP mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) * (- 1))
battery_cell_label = (('Battery cell, ' + battery_tech), 'GLO', 'kilogram', 'Battery cell')
self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['battery cell mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) * (- 1))
self.A[(:, self.inputs[('market group for electricity, medium voltage', 'World', 'kilowatt hour', 'electricity, medium voltage')], self.inputs[battery_cell_label])] = 0
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(y)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity market for energy storage production' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('Passenger car' in i[0]))])] = (array[(self.array_inputs['battery cell production electricity'], :, index)].T * self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('Passenger car' in i[0]))])]).reshape(self.iterations, 1, (- 1))
index_A = [self.inputs[c] for c in self.inputs if (any(((ele in c[0]) for ele in ['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])
self.A[(:, self.inputs[('polyethylene production, high density, granulate', 'RER', 'kilogram', 'polyethylene, high density, granulate')], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
index_A = [self.inputs[c] for c in self.inputs if (('ICEV-g' in c[0]) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array('ICEV-g')
self.A[(:, self.inputs[('glass fibre reinforced plastic production, polyamide, injection moulded', 'RER', 'kilogram', 'glass fibre reinforced plastic, polyamide, injection moulded')], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
if ('hydrogen' in self.background_configuration['energy storage']):
hydro_tank_technology = self.background_configuration['energy storage']['hydrogen']['type']
else:
hydro_tank_technology = 'carbon fiber'
dict_tank_map = {'carbon fiber': ('Fuel tank, compressed hydrogen gas, 700bar', 'GLO', 'kilogram', 'Fuel tank, compressed hydrogen gas, 700bar'), 'hdpe': ('Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner', 'RER', 'kilogram', 'Hydrogen tank'), 'aluminium': ('Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner', 'RER', 'kilogram', 'Hydrogen tank')}
index_A = [self.inputs[c] for c in self.inputs if (('FCEV' in c[0]) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array('FCEV')
self.A[(:, self.inputs[dict_tank_map[hydro_tank_technology]], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
self.A[(:, [self.inputs[c] for c in self.inputs if ('Passenger car' in c[0])], [self.inputs[c] for c in self.inputs if ('transport, passenger car' in c[0])])] = ((- 1) / array[self.array_inputs['lifetime kilometers']])
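# This is the single place where the per-vehicle inventory above is scaled to the
# functional unit: each transport activity consumes 1 / 'lifetime kilometers' of the
# corresponding passenger car per kilometre driven.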
(sum_renew, co2_intensity_tech) = self.define_renewable_rate_in_mix()
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
print(((((((('in ' + str(year)) + ', % of renewable: ') + str(np.round((sum_renew[y] * 100), 0))) + '%') + ', GHG intensity per kWh: ') + str(int(np.sum((co2_intensity_tech[y] * self.mix[y]))))) + ' g. CO2-eq.'), end=end_str)
if any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in self.scope['powertrain']))):
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(['BEV', 'PHEV-p', 'PHEV-d'], y, method='and')
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity supply for electric vehicles' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]) and any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in i[0]))))])] = (array[(self.array_inputs['electricity consumption'], :, index)] * (- 1)).T.reshape(self.iterations, 1, (- 1))
if ('FCEV' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('FCEV')
if ('tertiary' in self.fuel_blends['hydrogen']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type'], self.fuel_blends['hydrogen']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['hydrogen']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('FCEV' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for hydrogen vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if ('ICEV-g' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('ICEV-g')
if ('tertiary' in self.fuel_blends['cng']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type'], self.fuel_blends['cng']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['cng']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['cng']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('ICEV-g' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gas vehicles' in i[0]))], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (1 + array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)])) * (- 1)).T
self.A[(:, self.inputs[('Methane, fossil', ('air',), 'kilogram')], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['primary']['CO2'] * self.fuel_blends['cng']['primary']['share'][y])
if (self.fuel_blends['cng']['secondary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['secondary']['CO2'] * self.fuel_blends['cng']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['tertiary']['CO2'] * self.fuel_blends['cng']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['primary']['CO2']
if (self.fuel_blends['cng']['secondary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['secondary']['CO2']
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['tertiary']['CO2']
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if [i for i in self.scope['powertrain'] if (i in ['ICEV-d', 'PHEV-d', 'HEV-d'])]:
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'])
if ('tertiary' in self.fuel_blends['diesel']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type'], self.fuel_blends['diesel']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['diesel']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger car' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'PHEV-d', 'HEV-d'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for diesel vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'diesel', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if [i for i in self.scope['powertrain'] if (i in ['ICEV-p', 'HEV-p', 'PHEV-p'])]:
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'])
if ('tertiary' in self.fuel_blends['petrol']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type'], self.fuel_blends['petrol']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['petrol']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
for (y, year) in enumerate(self.scope['year']):
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-p', 'HEV-p', 'PHEV-p'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gasoline vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_fossil = (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'petrol', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
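# Non-exhaust emissions: road wear, tyre wear and brake wear, scaled with driving mass.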
ind_A = [self.inputs[i] for i in self.inputs if ('transport, passenger' in i[0])]
self.A[(:, self.inputs[('market for road wear emissions, passenger car', 'GLO', 'kilogram', 'road wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :)] * 1e-08)
self.A[(:, self.inputs[('market for tyre wear emissions, passenger car', 'GLO', 'kilogram', 'tyre wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :)] * 6e-08)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'ICEV-p', 'ICEV-g'])))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'ICEV-g'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :, index)].T * 5e-09)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])))]
index = self.get_index_vehicle_from_array(['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = ((array[(self.array_inputs['driving mass'], :, index)].T * 5e-09) * 0.2)
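# Road construction and maintenance, allocated per vehicle-kilometer.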
self.A[(:, self.inputs[('market for road', 'GLO', 'meter-year', 'road')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((5.37e-07 * array[(self.array_inputs['driving mass'], :)]) * (- 1))
self.A[(:, self.inputs[('market for road maintenance', 'RER', 'meter-year', 'road maintenance')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (0.00129 * (- 1))
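# Remaining tailpipe pollutants and noise emissions, taken from the car model array.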
self.A[np.ix_(np.arange(self.iterations), self.index_emissions, [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (array[[self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]] for x in self.index_emissions]] * (- 1)).transpose([1, 0, 2])
self.A[np.ix_(np.arange(self.iterations), self.index_noise, [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (array[[self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]] for x in self.index_noise]] * (- 1)).transpose([1, 0, 2])
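# Refrigerant R134a: 0.053 kg lost to air and refilled per year, distributed over annual mileage.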
self.A[(:, self.inputs[('Ethane, 1,1,1,2-tetrafluoro-, HFC-134a', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
self.A[(:, self.inputs[('market for refrigerant R134a', 'GLO', 'kilogram', 'refrigerant R134a')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
print('*********************************************************************') | Fill-in the A matrix. Does not return anything. Modifies in place.
Shape of the A matrix (values, products, activities).
:param array: :attr:`array` from :class:`CarModel` class | carculator/inventory.py | set_inputs_in_A_matrix_for_export | rena-nong/carculator | 1 | python | def set_inputs_in_A_matrix_for_export(self, array):
'\n Fill-in the A matrix. Does not return anything. Modifies in place.\n Shape of the A matrix (values, products, activities).\n\n :param array: :attr:`array` from :class:`CarModel` class\n '
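# Glider, lightweighting and maintenance, scaled with glider base mass and curb mass.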
self.A[(:, self.inputs[('market for glider, passenger car', 'GLO', 'kilogram', 'glider, passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['glider base mass'], :)] * (- 1))
self.A[(:, self.inputs[('Glider lightweighting', 'GLO', 'kilogram', 'Glider lightweighting')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['lightweighting'], :)] * array[(self.array_inputs['glider base mass'], :)]) * (- 1))
self.A[(:, self.inputs[('maintenance, passenger car', 'RER', 'unit', 'passenger car maintenance')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (((array[(self.array_inputs['curb mass'], :)] / 1240) / 150000) * (- 1))
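# End-of-life: manual dismantling and used Li-ion battery flows.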
self.A[(:, self.inputs[('market for manual dismantling of used electric passenger car', 'GLO', 'unit', 'manual dismantling of used electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['curb mass'], :)] * (1 - array[(self.array_inputs['combustion power share'], :)]))
self.A[(:, self.inputs[('market for used Li-ion battery', 'GLO', 'kilogram', 'used Li-ion battery')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = array[(self.array_inputs['energy battery mass'], :)]
self.A[(:, self.inputs[('market for manual dismantling of used passenger car with internal combustion engine', 'GLO', 'unit', 'manual dismantling of used passenger car with internal combustion engine')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['curb mass'], :)] * array[(self.array_inputs['combustion power share'], :)]) * (- 1))
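# Electric drivetrain components (charger, converter, motor, inverter, power distribution unit) and their end-of-life treatment.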
self.A[(:, self.inputs[('market for charger, electric passenger car', 'GLO', 'kilogram', 'charger, electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['charger mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for converter, for electric passenger car', 'GLO', 'kilogram', 'converter, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['converter mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for electric motor, electric passenger car', 'GLO', 'kilogram', 'electric motor, electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['electric engine mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for inverter, for electric passenger car', 'GLO', 'kilogram', 'inverter, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['inverter mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for power distribution unit, for electric passenger car', 'GLO', 'kilogram', 'power distribution unit, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['power distribution unit mass'], :)] * (- 1))
l_elec_pt = ['charger mass', 'converter mass', 'inverter mass', 'power distribution unit mass', 'electric engine mass', 'fuel cell stack mass', 'fuel cell ancillary BoP mass', 'fuel cell essential BoP mass', 'battery cell mass', 'battery BoP mass']
self.A[(:, self.inputs[('market for used powertrain from electric passenger car, manual dismantling', 'GLO', 'kilogram', 'used powertrain from electric passenger car, manual dismantling')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = array[([self.array_inputs[l] for l in l_elec_pt], :)].sum(axis=0)
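# Internal combustion engine/powertrain and fuel cell system (stack and balance-of-plant).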
self.A[(:, self.inputs[('market for internal combustion engine, passenger car', 'GLO', 'kilogram', 'internal combustion engine, for passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[([self.array_inputs[l] for l in ['combustion engine mass', 'powertrain mass']], :)].sum(axis=0) * (- 1))
self.A[(:, self.inputs[('Ancillary BoP', 'GLO', 'kilogram', 'Ancillary BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell ancillary BoP mass'], :)] * (- 1))
self.A[(:, self.inputs[('Essential BoP', 'GLO', 'kilogram', 'Essential BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell essential BoP mass'], :)] * (- 1))
self.A[(:, self.inputs[('Stack', 'GLO', 'kilowatt', 'Stack')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell stack mass'], :)] * (- 1))
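# Report key background assumptions: country of use, battery chemistry and production origin.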
print('****************** IMPORTANT BACKGROUND PARAMETERS ******************', end='\n * ')
print(('The country of use is ' + self.country), end='\n * ')
battery_tech = self.background_configuration['energy storage']['electric']['type']
battery_origin = self.background_configuration['energy storage']['electric']['origin']
print((((('Power and energy batteries produced in ' + battery_origin) + ' using ') + battery_tech) + ' chemistry.'), end='\n * ')
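# Battery balance-of-plant and battery cells, including replacements over the vehicle lifetime.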
self.A[(:, self.inputs[('Battery BoP', 'GLO', 'kilogram', 'Battery BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['battery BoP mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) * (- 1))
battery_cell_label = (('Battery cell, ' + battery_tech), 'GLO', 'kilogram', 'Battery cell')
self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['battery cell mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) * (- 1))
self.A[(:, self.inputs[('market group for electricity, medium voltage', 'World', 'kilowatt hour', 'electricity, medium voltage')], self.inputs[battery_cell_label])] = 0
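# The battery cell dataset's generic medium-voltage electricity input is zeroed above and replaced below by a year-specific electricity market for energy storage production.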
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(y)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity market for energy storage production' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('Passenger car' in i[0]))])] = (array[(self.array_inputs['battery cell production electricity'], :, index)].T * self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('Passenger car' in i[0]))])]).reshape(self.iterations, 1, (- 1))
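# Fuel tanks: HDPE tanks for liquid fuels, glass-fibre reinforced tanks for CNG, compressed-hydrogen tanks for FCEV.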
index_A = [self.inputs[c] for c in self.inputs if (any(((ele in c[0]) for ele in ['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])
self.A[(:, self.inputs[('polyethylene production, high density, granulate', 'RER', 'kilogram', 'polyethylene, high density, granulate')], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
index_A = [self.inputs[c] for c in self.inputs if (('ICEV-g' in c[0]) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array('ICEV-g')
self.A[(:, self.inputs[('glass fibre reinforced plastic production, polyamide, injection moulded', 'RER', 'kilogram', 'glass fibre reinforced plastic, polyamide, injection moulded')], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
if ('hydrogen' in self.background_configuration['energy storage']):
hydro_tank_technology = self.background_configuration['energy storage']['hydrogen']['type']
else:
hydro_tank_technology = 'carbon fiber'
dict_tank_map = {'carbon fiber': ('Fuel tank, compressed hydrogen gas, 700bar', 'GLO', 'kilogram', 'Fuel tank, compressed hydrogen gas, 700bar'), 'hdpe': ('Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner', 'RER', 'kilogram', 'Hydrogen tank'), 'aluminium': ('Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner', 'RER', 'kilogram', 'Hydrogen tank')}
index_A = [self.inputs[c] for c in self.inputs if (('FCEV' in c[0]) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array('FCEV')
self.A[(:, self.inputs[dict_tank_map[hydro_tank_technology]], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
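# Amortize vehicle production over lifetime kilometers (reference flow: one vehicle-kilometer).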
self.A[(:, [self.inputs[c] for c in self.inputs if ('Passenger car' in c[0])], [self.inputs[c] for c in self.inputs if ('transport, passenger car' in c[0])])] = ((- 1) / array[self.array_inputs['lifetime kilometers']])
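# Report the electricity mix used: renewable share and GHG intensity per kWh, per year.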
(sum_renew, co2_intensity_tech) = self.define_renewable_rate_in_mix()
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
print(((((((('in ' + str(year)) + ', % of renewable: ') + str(np.round((sum_renew[y] * 100), 0))) + '%') + ', GHG intensity per kWh: ') + str(int(np.sum((co2_intensity_tech[y] * self.mix[y]))))) + ' g. CO2-eq.'), end=end_str)
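# Electricity consumption for charging BEV and PHEV, per km and per year.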
if any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in self.scope['powertrain']))):
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(['BEV', 'PHEV-p', 'PHEV-d'], y, method='and')
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity supply for electric vehicles' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]) and any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in i[0]))))])] = (array[(self.array_inputs['electricity consumption'], :, index)] * (- 1)).T.reshape(self.iterations, 1, (- 1))
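# Hydrogen supply for FCEV, per km, following the hydrogen blend shares.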
if ('FCEV' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('FCEV')
if ('tertiary' in self.fuel_blends['hydrogen']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type'], self.fuel_blends['hydrogen']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['hydrogen']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('FCEV' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for hydrogen vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
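# CNG supply for ICEV-g, including pump-to-tank leakage, with combustion CO2 split into fossil and biogenic fractions.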
if ('ICEV-g' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('ICEV-g')
if ('tertiary' in self.fuel_blends['cng']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type'], self.fuel_blends['cng']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['cng']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['cng']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('ICEV-g' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gas vehicles' in i[0]))], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (1 + array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)])) * (- 1)).T
self.A[(:, self.inputs[('Methane, fossil', ('air',), 'kilogram')], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['primary']['CO2'] * self.fuel_blends['cng']['primary']['share'][y])
if (self.fuel_blends['cng']['secondary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['secondary']['CO2'] * self.fuel_blends['cng']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['tertiary']['CO2'] * self.fuel_blends['cng']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['primary']['CO2']
if (self.fuel_blends['cng']['secondary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['secondary']['CO2']
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['tertiary']['CO2']
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
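# Diesel supply and combustion emissions (CO2, SO2, trace metals) for ICEV-d, PHEV-d and HEV-d.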
if [i for i in self.scope['powertrain'] if (i in ['ICEV-d', 'PHEV-d', 'HEV-d'])]:
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'])
if ('tertiary' in self.fuel_blends['diesel']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type'], self.fuel_blends['diesel']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['diesel']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger car' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'PHEV-d', 'HEV-d'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for diesel vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'diesel', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
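# Petrol supply and combustion emissions (CO2, SO2, trace metals) for ICEV-p, HEV-p and PHEV-p.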
if [i for i in self.scope['powertrain'] if (i in ['ICEV-p', 'HEV-p', 'PHEV-p'])]:
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'])
if ('tertiary' in self.fuel_blends['petrol']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type'], self.fuel_blends['petrol']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['petrol']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
for (y, year) in enumerate(self.scope['year']):
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-p', 'HEV-p', 'PHEV-p'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gasoline vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'petrol', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
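# Non-exhaust emissions: road, tyre and brake wear scaled with driving mass; brake wear is reduced (factor 0.2) for hybrid and electric powertrains.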
ind_A = [self.inputs[i] for i in self.inputs if ('transport, passenger' in i[0])]
self.A[(:, self.inputs[('market for road wear emissions, passenger car', 'GLO', 'kilogram', 'road wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :)] * 1e-08)
self.A[(:, self.inputs[('market for tyre wear emissions, passenger car', 'GLO', 'kilogram', 'tyre wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :)] * 6e-08)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'ICEV-p', 'ICEV-g'])))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'ICEV-g'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :, index)].T * 5e-09)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])))]
index = self.get_index_vehicle_from_array(['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = ((array[(self.array_inputs['driving mass'], :, index)].T * 5e-09) * 0.2)
self.A[(:, self.inputs[('market for road', 'GLO', 'meter-year', 'road')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((5.37e-07 * array[(self.array_inputs['driving mass'], :)]) * (- 1))
self.A[(:, self.inputs[('market for road maintenance', 'RER', 'meter-year', 'road maintenance')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (0.00129 * (- 1))
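# Other exhaust pollutants and noise emissions mapped from the car model array.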
self.A[np.ix_(np.arange(self.iterations), self.index_emissions, [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (array[[self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]] for x in self.index_emissions]] * (- 1)).transpose([1, 0, 2])
self.A[np.ix_(np.arange(self.iterations), self.index_noise, [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (array[[self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]] for x in self.index_noise]] * (- 1)).transpose([1, 0, 2])
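# Refrigerant R134a: 0.053 kg leaked to air and refilled per year, spread over annual mileage.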
self.A[(:, self.inputs[('Ethane, 1,1,1,2-tetrafluoro-, HFC-134a', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
self.A[(:, self.inputs[('market for refrigerant R134a', 'GLO', 'kilogram', 'refrigerant R134a')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
print('*********************************************************************') | def set_inputs_in_A_matrix_for_export(self, array):
'\n Fill-in the A matrix. Does not return anything. Modifies in place.\n Shape of the A matrix (values, products, activities).\n\n :param array: :attr:`array` from :class:`CarModel` class\n '
self.A[(:, self.inputs[('market for glider, passenger car', 'GLO', 'kilogram', 'glider, passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['glider base mass'], :)] * (- 1))
self.A[(:, self.inputs[('Glider lightweighting', 'GLO', 'kilogram', 'Glider lightweighting')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['lightweighting'], :)] * array[(self.array_inputs['glider base mass'], :)]) * (- 1))
self.A[(:, self.inputs[('maintenance, passenger car', 'RER', 'unit', 'passenger car maintenance')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (((array[(self.array_inputs['curb mass'], :)] / 1240) / 150000) * (- 1))
self.A[(:, self.inputs[('market for manual dismantling of used electric passenger car', 'GLO', 'unit', 'manual dismantling of used electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['curb mass'], :)] * (1 - array[(self.array_inputs['combustion power share'], :)]))
self.A[(:, self.inputs[('market for used Li-ion battery', 'GLO', 'kilogram', 'used Li-ion battery')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = array[(self.array_inputs['energy battery mass'], :)]
self.A[(:, self.inputs[('market for manual dismantling of used passenger car with internal combustion engine', 'GLO', 'unit', 'manual dismantling of used passenger car with internal combustion engine')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['curb mass'], :)] * array[(self.array_inputs['combustion power share'], :)]) * (- 1))
self.A[(:, self.inputs[('market for charger, electric passenger car', 'GLO', 'kilogram', 'charger, electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['charger mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for converter, for electric passenger car', 'GLO', 'kilogram', 'converter, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['converter mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for electric motor, electric passenger car', 'GLO', 'kilogram', 'electric motor, electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['electric engine mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for inverter, for electric passenger car', 'GLO', 'kilogram', 'inverter, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['inverter mass'], :)] * (- 1))
self.A[(:, self.inputs[('market for power distribution unit, for electric passenger car', 'GLO', 'kilogram', 'power distribution unit, for electric passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['power distribution unit mass'], :)] * (- 1))
l_elec_pt = ['charger mass', 'converter mass', 'inverter mass', 'power distribution unit mass', 'electric engine mass', 'fuel cell stack mass', 'fuel cell ancillary BoP mass', 'fuel cell essential BoP mass', 'battery cell mass', 'battery BoP mass']
self.A[(:, self.inputs[('market for used powertrain from electric passenger car, manual dismantling', 'GLO', 'kilogram', 'used powertrain from electric passenger car, manual dismantling')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = array[([self.array_inputs[l] for l in l_elec_pt], :)].sum(axis=0)
self.A[(:, self.inputs[('market for internal combustion engine, passenger car', 'GLO', 'kilogram', 'internal combustion engine, for passenger car')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[([self.array_inputs[l] for l in ['combustion engine mass', 'powertrain mass']], :)].sum(axis=0) * (- 1))
self.A[(:, self.inputs[('Ancillary BoP', 'GLO', 'kilogram', 'Ancillary BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell ancillary BoP mass'], :)] * (- 1))
self.A[(:, self.inputs[('Essential BoP', 'GLO', 'kilogram', 'Essential BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell essential BoP mass'], :)] * (- 1))
self.A[(:, self.inputs[('Stack', 'GLO', 'kilowatt', 'Stack')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = (array[(self.array_inputs['fuel cell stack mass'], :)] * (- 1))
print('****************** IMPORTANT BACKGROUND PARAMETERS ******************', end='\n * ')
print(('The country of use is ' + self.country), end='\n * ')
battery_tech = self.background_configuration['energy storage']['electric']['type']
battery_origin = self.background_configuration['energy storage']['electric']['origin']
print((((('Power and energy batteries produced in ' + battery_origin) + ' using ') + battery_tech) + ' chemistry.'), end='\n * ')
self.A[(:, self.inputs[('Battery BoP', 'GLO', 'kilogram', 'Battery BoP')], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['battery BoP mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) * (- 1))
battery_cell_label = (('Battery cell, ' + battery_tech), 'GLO', 'kilogram', 'Battery cell')
self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ('Passenger car' in i[0])])] = ((array[(self.array_inputs['battery cell mass'], :)] * (1 + array[(self.array_inputs['battery lifetime replacements'], :)])) * (- 1))
self.A[(:, self.inputs[('market group for electricity, medium voltage', 'World', 'kilowatt hour', 'electricity, medium voltage')], self.inputs[battery_cell_label])] = 0
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(y)
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity market for energy storage production' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('Passenger car' in i[0]))])] = (array[(self.array_inputs['battery cell production electricity'], :, index)].T * self.A[(:, self.inputs[battery_cell_label], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('Passenger car' in i[0]))])]).reshape(self.iterations, 1, (- 1))
index_A = [self.inputs[c] for c in self.inputs if (any(((ele in c[0]) for ele in ['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'HEV-p', 'PHEV-p', 'PHEV-d', 'HEV-d'])
self.A[(:, self.inputs[('polyethylene production, high density, granulate', 'RER', 'kilogram', 'polyethylene, high density, granulate')], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
index_A = [self.inputs[c] for c in self.inputs if (('ICEV-g' in c[0]) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array('ICEV-g')
self.A[(:, self.inputs[('glass fibre reinforced plastic production, polyamide, injection moulded', 'RER', 'kilogram', 'glass fibre reinforced plastic, polyamide, injection moulded')], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
if ('hydrogen' in self.background_configuration['energy storage']):
hydro_tank_technology = self.background_configuration['energy storage']['hydrogen']['type']
else:
hydro_tank_technology = 'carbon fiber'
dict_tank_map = {'carbon fiber': ('Fuel tank, compressed hydrogen gas, 700bar', 'GLO', 'kilogram', 'Fuel tank, compressed hydrogen gas, 700bar'), 'hdpe': ('Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner', 'RER', 'kilogram', 'Hydrogen tank'), 'aluminium': ('Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner', 'RER', 'kilogram', 'Hydrogen tank')}
index_A = [self.inputs[c] for c in self.inputs if (('FCEV' in c[0]) and ('Passenger car' in c[0]))]
index = self.get_index_vehicle_from_array('FCEV')
self.A[(:, self.inputs[dict_tank_map[hydro_tank_technology]], index_A)] = (array[(self.array_inputs['fuel tank mass'], :, index)] * (- 1)).T
self.A[(:, [self.inputs[c] for c in self.inputs if ('Passenger car' in c[0])], [self.inputs[c] for c in self.inputs if ('transport, passenger car' in c[0])])] = ((- 1) / array[self.array_inputs['lifetime kilometers']])
(sum_renew, co2_intensity_tech) = self.define_renewable_rate_in_mix()
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
print(((((((('in ' + str(year)) + ', % of renewable: ') + str(np.round((sum_renew[y] * 100), 0))) + '%') + ', GHG intensity per kWh: ') + str(int(np.sum((co2_intensity_tech[y] * self.mix[y]))))) + ' g. CO2-eq.'), end=end_str)
if any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in self.scope['powertrain']))):
for y in self.scope['year']:
index = self.get_index_vehicle_from_array(['BEV', 'PHEV-p', 'PHEV-d'], y, method='and')
self.A[np.ix_(np.arange(self.iterations), [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('electricity supply for electric vehicles' in i[0]))], [self.inputs[i] for i in self.inputs if ((str(y) in i[0]) and ('transport, passenger' in i[0]) and any((True for x in ['BEV', 'PHEV-p', 'PHEV-d'] if (x in i[0]))))])] = (array[(self.array_inputs['electricity consumption'], :, index)] * (- 1)).T.reshape(self.iterations, 1, (- 1))
if ('FCEV' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('FCEV')
if ('tertiary' in self.fuel_blends['hydrogen']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type'], self.fuel_blends['hydrogen']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['hydrogen']['primary']['type'], self.fuel_blends['hydrogen']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['hydrogen']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['hydrogen']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['hydrogen']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('FCEV' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for hydrogen vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if ('ICEV-g' in self.scope['powertrain']):
index = self.get_index_vehicle_from_array('ICEV-g')
if ('tertiary' in self.fuel_blends['cng']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type'], self.fuel_blends['cng']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['cng']['primary']['type'], self.fuel_blends['cng']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['cng']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['cng']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['cng']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and ('ICEV-g' in i[0]))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gas vehicles' in i[0]))], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (1 + array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)])) * (- 1)).T
self.A[(:, self.inputs[('Methane, fossil', ('air',), 'kilogram')], ind_A)] = (((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * array[(self.array_inputs['CNG pump-to-tank leakage'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['primary']['CO2'] * self.fuel_blends['cng']['primary']['share'][y])
if (self.fuel_blends['cng']['secondary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['secondary']['CO2'] * self.fuel_blends['cng']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] == 'cng'):
share_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['cng']['tertiary']['CO2'] * self.fuel_blends['cng']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['cng']['primary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['primary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['primary']['CO2']
if (self.fuel_blends['cng']['secondary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['secondary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['secondary']['CO2']
if ('tertiary' in self.fuel_blends['cng']):
if (self.fuel_blends['cng']['tertiary']['type'] != 'cng'):
share_non_fossil += self.fuel_blends['cng']['tertiary']['share'][y]
CO2_non_fossil = self.fuel_blends['cng']['tertiary']['CO2']
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if [i for i in self.scope['powertrain'] if (i in ['ICEV-d', 'PHEV-d', 'HEV-d'])]:
index = self.get_index_vehicle_from_array(['ICEV-d', 'PHEV-d', 'HEV-d'])
if ('tertiary' in self.fuel_blends['diesel']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type'], self.fuel_blends['diesel']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['diesel']['primary']['type'], self.fuel_blends['diesel']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['diesel']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['diesel']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['diesel']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger car' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'PHEV-d', 'HEV-d'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for diesel vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] == 'diesel'):
share_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'diesel', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['diesel']['primary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['primary']['CO2'] * self.fuel_blends['diesel']['primary']['share'][y])
if (self.fuel_blends['diesel']['secondary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['secondary']['CO2'] * self.fuel_blends['diesel']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['diesel']):
if (self.fuel_blends['diesel']['tertiary']['type'] != 'diesel'):
share_non_fossil += self.fuel_blends['diesel']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['diesel']['tertiary']['CO2'] * self.fuel_blends['diesel']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
if [i for i in self.scope['powertrain'] if (i in ['ICEV-p', 'HEV-p', 'PHEV-p'])]:
index = self.get_index_vehicle_from_array(['ICEV-p', 'HEV-p', 'PHEV-p'])
if ('tertiary' in self.fuel_blends['petrol']):
print('{} is completed by {} and {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type'], self.fuel_blends['petrol']['tertiary']['type']), end='\n \t * ')
else:
print('{} is completed by {}.'.format(self.fuel_blends['petrol']['primary']['type'], self.fuel_blends['petrol']['secondary']['type']), end='\n \t * ')
for (y, year) in enumerate(self.scope['year']):
if ((y + 1) == len(self.scope['year'])):
end_str = '\n * '
else:
end_str = '\n \t * '
if ('tertiary' in self.fuel_blends['petrol']):
print(((((((('in ' + str(year)) + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%') + ' _________________ ') + str(np.round((self.fuel_blends['petrol']['tertiary']['share'][y] * 100), 0))) + '%'), end=end_str)
else:
print((((('in ' + str(year)) + ' _________________________________________ ') + str(np.round((self.fuel_blends['petrol']['secondary']['share'][y] * 100), 0))) + '%'), end=end_str)
for (y, year) in enumerate(self.scope['year']):
ind_A = [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-p', 'HEV-p', 'PHEV-p'])))]
ind_array = [x for x in self.get_index_vehicle_from_array(year) if (x in index)]
self.A[(:, [self.inputs[i] for i in self.inputs if ((str(year) in i[0]) and ('fuel supply for gasoline vehicles' in i[0]))], ind_A)] = ((array[(self.array_inputs['fuel mass'], :, ind_array)] / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_fossil = 0
CO2_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_fossil = (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] == 'petrol'):
share_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, fossil', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * CO2_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
sulfur_concentration = self.get_sulfur_content(self.country, 'petrol', year)
self.A[(:, self.inputs[('Sulfur dioxide', ('air',), 'kilogram')], ind_A)] = (((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * sulfur_concentration) * (64 / 32)) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
share_non_fossil = 0
CO2_non_fossil = 0
if (self.fuel_blends['petrol']['primary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['primary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['primary']['CO2'] * self.fuel_blends['petrol']['primary']['share'][y])
if (self.fuel_blends['petrol']['secondary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['secondary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['secondary']['CO2'] * self.fuel_blends['petrol']['secondary']['share'][y])
if ('tertiary' in self.fuel_blends['petrol']):
if (self.fuel_blends['petrol']['tertiary']['type'] != 'petrol'):
share_non_fossil += self.fuel_blends['petrol']['tertiary']['share'][y]
CO2_non_fossil += (self.fuel_blends['petrol']['tertiary']['CO2'] * self.fuel_blends['petrol']['tertiary']['share'][y])
self.A[(:, self.inputs[('Carbon dioxide, from soil or biomass stock', ('air',), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_non_fossil) * CO2_non_fossil) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Cadmium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Copper', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1.7e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 5e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Nickel', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 7e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Selenium', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-08) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Zinc', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-06) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
self.A[(:, self.inputs[('Chromium VI', ('air', 'urban air close to ground'), 'kilogram')], ind_A)] = ((((array[(self.array_inputs['fuel mass'], :, ind_array)] * share_fossil) * 1e-10) / array[(self.array_inputs['range'], :, ind_array)]) * (- 1)).T
ind_A = [self.inputs[i] for i in self.inputs if ('transport, passenger' in i[0])]
self.A[(:, self.inputs[('market for road wear emissions, passenger car', 'GLO', 'kilogram', 'road wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :)] * 1e-08)
self.A[(:, self.inputs[('market for tyre wear emissions, passenger car', 'GLO', 'kilogram', 'tyre wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :)] * 6e-08)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['ICEV-d', 'ICEV-p', 'ICEV-g'])))]
index = self.get_index_vehicle_from_array(['ICEV-d', 'ICEV-p', 'ICEV-g'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = (array[(self.array_inputs['driving mass'], :, index)].T * 5e-09)
ind_A = [self.inputs[i] for i in self.inputs if (('transport, passenger' in i[0]) and any(((x in i[0]) for x in ['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])))]
index = self.get_index_vehicle_from_array(['BEV', 'FCEV', 'HEV-p', 'HEV-d', 'PHEV-p', 'PHEV-d'])
self.A[(:, self.inputs[('market for brake wear emissions, passenger car', 'GLO', 'kilogram', 'brake wear emissions, passenger car')], ind_A)] = ((array[(self.array_inputs['driving mass'], :, index)].T * 5e-09) * 0.2)
self.A[(:, self.inputs[('market for road', 'GLO', 'meter-year', 'road')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((5.37e-07 * array[(self.array_inputs['driving mass'], :)]) * (- 1))
self.A[(:, self.inputs[('market for road maintenance', 'RER', 'meter-year', 'road maintenance')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (0.00129 * (- 1))
self.A[np.ix_(np.arange(self.iterations), self.index_emissions, [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (array[[self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]] for x in self.index_emissions]] * (- 1)).transpose([1, 0, 2])
self.A[np.ix_(np.arange(self.iterations), self.index_noise, [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = (array[[self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]] for x in self.index_noise]] * (- 1)).transpose([1, 0, 2])
self.A[(:, self.inputs[('Ethane, 1,1,1,2-tetrafluoro-, HFC-134a', ('air',), 'kilogram')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
self.A[(:, self.inputs[('market for refrigerant R134a', 'GLO', 'kilogram', 'refrigerant R134a')], [self.inputs[i] for i in self.inputs if ('transport, passenger car' in i[0])])] = ((0.053 / self.array.values[self.array_inputs['kilometers per year']]) * (- 1))
print('*********************************************************************')<|docstring|>Fill-in the A matrix. Does not return anything. Modifies in place.
Shape of the A matrix (values, products, activities).
:param array: :attr:`array` from :class:`CarModel` class<|endoftext|> |
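The exhaust rows above all follow the same per-kilometre arithmetic: lifetime fuel mass x blend share x emission factor, divided by lifetime range, entered with a negative sign in the A matrix. A toy numeric version of that calculation (the figures are illustrative only, not values from the model):

import numpy as np

fuel_mass = np.array([560.0, 610.0])        # kg of fuel burned over the vehicle lifetime
share_fossil = 0.9                          # share of fossil fuel in the blend
co2_per_kg_fuel = 3.15                      # kg CO2 per kg of fossil fuel burned
lifetime_km = np.array([150000.0, 180000.0])

co2_per_km = fuel_mass * share_fossil * co2_per_kg_fuel / lifetime_km
a_matrix_entry = -co2_per_km                # exchanges are entered with a negative sign
print(a_matrix_entry)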
a053c18baafbfed19c3ae99bd992a190ac036c6dd469b74980610011c984b628 | def select_heat_supplier(self, heat_supplier):
'\n The heat supply is an important aspect of direct air capture.\n Here, we can change the supplier of heat.\n :param heat_supplier: by default "waste heat". Must be one of "waste heat", "biomass heat",\n "natural gas heat", "market heat".\n :type heat_supplier: str\n :return:\n '
d_heat_suppliers = {'waste heat': ('heat, from municipal waste incineration to generic market for heat district or industrial, other than natural gas', 'CH', 'megajoule', 'heat, district or industrial, other than natural gas'), 'biomass heat': ('heat production, hardwood chips from forest, at furnace 1000kW, state-of-the-art 2014', 'CH', 'megajoule', 'heat, district or industrial, other than natural gas'), 'natural gas heat': ('market group for heat, central or small-scale, natural gas', 'RER', 'megajoule', 'heat, central or small-scale, natural gas'), 'market heat': ('market for heat, from steam, in chemical industry', 'RER', 'megajoule', 'heat, from steam, in chemical industry')}
air_capture = self.inputs[('carbon dioxide, captured from atmosphere', 'RER', 'kilogram', 'carbon dioxide, captured from the atmosphere')]
methanol_distillation = self.inputs[('Methanol distillation', 'RER', 'kilogram', 'Purified methanol')]
all_inds = [self.inputs[i] for i in list(d_heat_suppliers.values())]
heat_amount = self.A[np.ix_(range(self.A.shape[0]), all_inds, [air_capture])].sum()
self.A[np.ix_(range(self.A.shape[0]), all_inds, [air_capture])] = 0
ind = self.inputs[d_heat_suppliers[heat_supplier]]
self.A[np.ix_(range(self.A.shape[0]), [ind], [air_capture])] = heat_amount
heat_amount = self.A[np.ix_(range(self.A.shape[0]), all_inds, [methanol_distillation])].sum()
self.A[np.ix_(range(self.A.shape[0]), all_inds, [methanol_distillation])] = 0
ind = self.inputs[d_heat_suppliers[heat_supplier]]
self.A[np.ix_(range(self.A.shape[0]), [ind], [methanol_distillation])] = heat_amount | The heat supply is an important aspect of direct air capture.
Here, we can change the supplier of heat.
:param heat_supplier: by default "waste heat". Must be one of "waste heat", "biomass heat",
"natural gas heat", "market heat".
:type heat_supplier: str
:return: | carculator/inventory.py | select_heat_supplier | rena-nong/carculator | 1 | python | def select_heat_supplier(self, heat_supplier):
'\n The heat supply is an important aspect of direct air capture.\n Here, we can change the supplier of heat.\n :param heat_supplier: by default "waste heat". Must be one of "waste heat", "biomass heat",\n "natural gas heat", "market heat".\n :type heat_supplier: str\n :return:\n '
d_heat_suppliers = {'waste heat': ('heat, from municipal waste incineration to generic market for heat district or industrial, other than natural gas', 'CH', 'megajoule', 'heat, district or industrial, other than natural gas'), 'biomass heat': ('heat production, hardwood chips from forest, at furnace 1000kW, state-of-the-art 2014', 'CH', 'megajoule', 'heat, district or industrial, other than natural gas'), 'natural gas heat': ('market group for heat, central or small-scale, natural gas', 'RER', 'megajoule', 'heat, central or small-scale, natural gas'), 'market heat': ('market for heat, from steam, in chemical industry', 'RER', 'megajoule', 'heat, from steam, in chemical industry')}
air_capture = self.inputs[('carbon dioxide, captured from atmosphere', 'RER', 'kilogram', 'carbon dioxide, captured from the atmosphere')]
methanol_distillation = self.inputs[('Methanol distillation', 'RER', 'kilogram', 'Purified methanol')]
all_inds = [self.inputs[i] for i in list(d_heat_suppliers.values())]
heat_amount = self.A[np.ix_(range(self.A.shape[0]), all_inds, [air_capture])].sum()
self.A[np.ix_(range(self.A.shape[0]), all_inds, [air_capture])] = 0
ind = self.inputs[d_heat_suppliers[heat_supplier]]
self.A[np.ix_(range(self.A.shape[0]), [ind], [air_capture])] = heat_amount
heat_amount = self.A[np.ix_(range(self.A.shape[0]), all_inds, [methanol_distillation])].sum()
self.A[np.ix_(range(self.A.shape[0]), all_inds, [methanol_distillation])] = 0
ind = self.inputs[d_heat_suppliers[heat_supplier]]
self.A[np.ix_(range(self.A.shape[0]), [ind], [methanol_distillation])] = heat_amount | def select_heat_supplier(self, heat_supplier):
'\n The heat supply is an important aspect of direct air capture.\n Here, we can change the supplier of heat.\n :param heat_supplier: by default "waste heat". Must be one of "waste heat", "biomass heat",\n "natural gas heat", "market heat".\n :type heat_supplier: str\n :return:\n '
d_heat_suppliers = {'waste heat': ('heat, from municipal waste incineration to generic market for heat district or industrial, other than natural gas', 'CH', 'megajoule', 'heat, district or industrial, other than natural gas'), 'biomass heat': ('heat production, hardwood chips from forest, at furnace 1000kW, state-of-the-art 2014', 'CH', 'megajoule', 'heat, district or industrial, other than natural gas'), 'natural gas heat': ('market group for heat, central or small-scale, natural gas', 'RER', 'megajoule', 'heat, central or small-scale, natural gas'), 'market heat': ('market for heat, from steam, in chemical industry', 'RER', 'megajoule', 'heat, from steam, in chemical industry')}
air_capture = self.inputs[('carbon dioxide, captured from atmosphere', 'RER', 'kilogram', 'carbon dioxide, captured from the atmosphere')]
methanol_distillation = self.inputs[('Methanol distillation', 'RER', 'kilogram', 'Purified methanol')]
all_inds = [self.inputs[i] for i in list(d_heat_suppliers.values())]
heat_amount = self.A[np.ix_(range(self.A.shape[0]), all_inds, [air_capture])].sum()
self.A[np.ix_(range(self.A.shape[0]), all_inds, [air_capture])] = 0
ind = self.inputs[d_heat_suppliers[heat_supplier]]
self.A[np.ix_(range(self.A.shape[0]), [ind], [air_capture])] = heat_amount
heat_amount = self.A[np.ix_(range(self.A.shape[0]), all_inds, [methanol_distillation])].sum()
self.A[np.ix_(range(self.A.shape[0]), all_inds, [methanol_distillation])] = 0
ind = self.inputs[d_heat_suppliers[heat_supplier]]
self.A[np.ix_(range(self.A.shape[0]), [ind], [methanol_distillation])] = heat_amount<|docstring|>The heat supply is an important aspect of direct air capture.
Here, we can change the supplier of heat.
:param heat_supplier: by default "waste heat". Must be one of "waste heat", "biomass heat",
"natural gas heat", "market heat".
:type heat_supplier: str
:return:<|endoftext|> |
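The rerouting done by select_heat_supplier amounts to pooling the heat exchanges of all candidate suppliers for a given column and reassigning the total to the chosen supplier. A standalone toy version of that step, with made-up matrix indices:

import numpy as np

A = np.zeros((1, 5, 3))
A[0, [1, 2], 0] = [0.4, 0.6]                # two heat suppliers currently feeding column 0 (air capture)
chosen, candidates, col = 2, [1, 2, 3, 4], 0

heat_amount = A[np.ix_(range(A.shape[0]), candidates, [col])].sum()
A[np.ix_(range(A.shape[0]), candidates, [col])] = 0
A[np.ix_(range(A.shape[0]), [chosen], [col])] = heat_amount   # all heat now comes from the chosen supplier
print(A[0, :, 0])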
4723af1b0692eca50429a0b5922435a55f4c5efd85e46919f1730b3793beef8d | def report(filename, limit, lemmas, dbname=db, documents=None, most=True, display_format='html'):
'generate report and save to file\n '
if lemmas:
print('lemmas')
direction = ''
if most:
direction = 'DESC'
if (not lemmas):
sql = 'SELECT s.sentence, d.document, w.word\n FROM lemma_word_sentence lws\n LEFT JOIN sentence s\n ON s.id=lws.sentence_id\n LEFT JOIN document d \n ON d.id=s.document_id\n LEFT JOIN word w \n ON w.id=lws.word_id\n \n LEFT JOIN \n (SELECT w.id , w.word\n FROM word w \n JOIN lemma_word_sentence lws\n ON w.id=lws.word_id\n GROUP BY w.id\n ORDER BY\n SUM(lws.count) {}\n LIMIT {}) ranking\n ON lws.word_id=ranking.id\n WHERE ranking.id IS NOT NULL\n ORDER BY w.word, d.document, s.sentence {}\n '.format(direction, limit, direction)
else:
sql = 'SELECT s.sentence, d.document, l.lemma\n FROM lemma_word_sentence lws\n LEFT JOIN sentence s\n ON s.id=lws.sentence_id\n LEFT JOIN document d \n ON d.id=s.document_id\n LEFT JOIN lemma l\n ON l.id=lws.lemma_id\n \n LEFT JOIN \n (SELECT l.id , l.lemma\n FROM lemma l\n JOIN lemma_word_sentence lws\n ON l.id=lws.lemma_id\n GROUP BY l.id\n ORDER BY\n SUM(lws.count) {}\n LIMIT {}) ranking\n ON lws.lemma_id=ranking.id\n WHERE ranking.id IS NOT NULL\n ORDER BY l.lemma, d.document, s.sentence {}\n '.format(direction, limit, direction)
df = pd.read_sql_query(sql, conn)
if documents:
df.set_index('document', drop=False, inplace=True)
df = df[df.index.isin(documents)]
if (not lemmas):
df['count'] = df['word'].groupby(df['word']).transform('count')
df.sort_values(by=['count', 'word', 'document'], ascending=[False, True, True], inplace=True)
df = df[['word', 'document', 'sentence', 'count']]
else:
df['count'] = df['lemma'].groupby(df['lemma']).transform('count')
df.sort_values(by=['count', 'lemma', 'document'], ascending=[False, True, True], inplace=True)
df = df[['lemma', 'document', 'sentence', 'count']]
print(df.shape)
if (display_format == 'html'):
df.to_html(open((filename + '.html'), 'w'), index=False)
webbrowser.open(('file://' + os.path.realpath((filename + '.html'))))
elif (display_format == 'csv'):
df.to_csv((filename + '.csv'), index=False) | generate report and save to file | report.py | report | ImKogan/nlp | 0 | python | def report(filename, limit, lemmas, dbname=db, documents=None, most=True, display_format='html'):
'\n '
if lemmas:
print('lemmas')
direction =
if most:
direction = 'DESC'
if (not lemmas):
sql = 'SELECT s.sentence, d.document, w.word\n FROM lemma_word_sentence lws\n LEFT JOIN sentence s\n ON s.id=lws.sentence_id\n LEFT JOIN document d \n ON d.id=s.document_id\n LEFT JOIN word w \n ON w.id=lws.word_id\n \n LEFT JOIN \n (SELECT w.id , w.word\n FROM word w \n JOIN lemma_word_sentence lws\n ON w.id=lws.word_id\n GROUP BY w.id\n ORDER BY\n SUM(lws.count) {}\n LIMIT {}) ranking\n ON lws.word_id=ranking.id\n WHERE ranking.id IS NOT NULL\n ORDER BY w.word, d.document, s.sentence {}\n '.format(direction, limit, direction)
else:
sql = 'SELECT s.sentence, d.document, l.lemma\n FROM lemma_word_sentence lws\n LEFT JOIN sentence s\n ON s.id=lws.sentence_id\n LEFT JOIN document d \n ON d.id=s.document_id\n LEFT JOIN lemma l\n ON l.id=lws.lemma_id\n \n LEFT JOIN \n (SELECT l.id , l.lemma\n FROM lemma l\n JOIN lemma_word_sentence lws\n ON l.id=lws.lemma_id\n GROUP BY l.id\n ORDER BY\n SUM(lws.count) {}\n LIMIT {}) ranking\n ON lws.lemma_id=ranking.id\n WHERE ranking.id IS NOT NULL\n ORDER BY l.lemma, d.document, s.sentence {}\n '.format(direction, limit, direction)
df = pd.read_sql_query(sql, conn)
if documents:
df.set_index('document', drop=False, inplace=True)
df = df[df.index.isin(documents)]
if (not lemmas):
df['count'] = df['word'].groupby(df['word']).transform('count')
df.sort_values(by=['count', 'word', 'document'], ascending=[False, True, True], inplace=True)
df = df[['word', 'document', 'sentence', 'count']]
else:
df['count'] = df['lemma'].groupby(df['lemma']).transform('count')
df.sort_values(by=['count', 'lemma', 'document'], ascending=[False, True, True], inplace=True)
df = df[['lemma', 'document', 'sentence', 'count']]
print(df.shape)
if (display_format == 'html'):
df.to_html(open((filename + '.html'), 'w'), index=False)
webbrowser.open(('file://' + os.path.realpath((filename + '.html'))))
elif (display_format == 'csv'):
df.to_csv((filename + '.csv'), index=False) | def report(filename, limit, lemmas, dbname=db, documents=None, most=True, display_format='html'):
'\n '
if lemmas:
print('lemmas')
direction =
if most:
direction = 'DESC'
if (not lemmas):
sql = 'SELECT s.sentence, d.document, w.word\n FROM lemma_word_sentence lws\n LEFT JOIN sentence s\n ON s.id=lws.sentence_id\n LEFT JOIN document d \n ON d.id=s.document_id\n LEFT JOIN word w \n ON w.id=lws.word_id\n \n LEFT JOIN \n (SELECT w.id , w.word\n FROM word w \n JOIN lemma_word_sentence lws\n ON w.id=lws.word_id\n GROUP BY w.id\n ORDER BY\n SUM(lws.count) {}\n LIMIT {}) ranking\n ON lws.word_id=ranking.id\n WHERE ranking.id IS NOT NULL\n ORDER BY w.word, d.document, s.sentence {}\n '.format(direction, limit, direction)
else:
sql = 'SELECT s.sentence, d.document, l.lemma\n FROM lemma_word_sentence lws\n LEFT JOIN sentence s\n ON s.id=lws.sentence_id\n LEFT JOIN document d \n ON d.id=s.document_id\n LEFT JOIN lemma l\n ON l.id=lws.lemma_id\n \n LEFT JOIN \n (SELECT l.id , l.lemma\n FROM lemma l\n JOIN lemma_word_sentence lws\n ON l.id=lws.lemma_id\n GROUP BY l.id\n ORDER BY\n SUM(lws.count) {}\n LIMIT {}) ranking\n ON lws.lemma_id=ranking.id\n WHERE ranking.id IS NOT NULL\n ORDER BY l.lemma, d.document, s.sentence {}\n '.format(direction, limit, direction)
df = pd.read_sql_query(sql, conn)
if documents:
df.set_index('document', drop=False, inplace=True)
df = df[df.index.isin(documents)]
if (not lemmas):
df['count'] = df['word'].groupby(df['word']).transform('count')
df.sort_values(by=['count', 'word', 'document'], ascending=[False, True, True], inplace=True)
df = df[['word', 'document', 'sentence', 'count']]
else:
df['count'] = df['lemma'].groupby(df['lemma']).transform('count')
df.sort_values(by=['count', 'lemma', 'document'], ascending=[False, True, True], inplace=True)
df = df[['lemma', 'document', 'sentence', 'count']]
print(df.shape)
if (display_format == 'html'):
df.to_html(open((filename + '.html'), 'w'), index=False)
webbrowser.open(('file://' + os.path.realpath((filename + '.html'))))
elif (display_format == 'csv'):
df.to_csv((filename + '.csv'), index=False)<|docstring|>generate report and save to file<|endoftext|> |
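A call sketch for report, assuming the module-level sqlite connection conn, the default database name db and the lemma/word/sentence tables the query relies on are already set up:

report('top_lemmas', limit=25, lemmas=True, most=True, display_format='csv')     # 25 most frequent lemmas -> top_lemmas.csv
report('rare_words', limit=25, lemmas=False, most=False, display_format='html')  # 25 least frequent words -> opens in browser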
37607367ab1efa0d102b7248213bbb670ac7ac6cf672141f195caa19ba038d52 | def __init__(self, img_size=(480, 892), render_type='naive'):
'\n img_size: List or Tuple with two elements: h, w\n '
assert (render_type in self._render_types), 'render_type:{} is not supported!'.format(render_type)
self.render_type = render_type
self.img_size = img_size
(self.ylim, self.xlim) = img_size | img_size: List or Tuple with two elemets: h, w | plugin/packnet/pipelines.py | __init__ | a1600012888/mmdetection3d | 0 | python | def __init__(self, img_size=(480, 892), render_type='naive'):
'\n \n '
assert (render_type in self._render_types), 'render_type:{} is not supported!'.format(render_type)
self.render_type = render_type
self.img_size = img_size
(self.ylim, self.xlim) = img_size | def __init__(self, img_size=(480, 892), render_type='naive'):
'\n \n '
assert (render_type in self._render_types), 'render_type:{} is not supported!'.format(render_type)
self.render_type = render_type
self.img_size = img_size
(self.ylim, self.xlim) = img_size<|docstring|>img_size: List or Tuple with two elements: h, w<|endoftext|>
727d71ad7c24d9f875d78690b9ec5ba4155f2095a33a9a5a4044848f10b51d34 | def sort_points(self, points):
'\n sort the points according to their depth in descending order\n '
depth = points[(:, 2)]
idx = np.argsort(depth)
idx = idx[::(- 1)]
new_points = points[idx]
return new_points | sort the points accroding to their depth in descending order | plugin/packnet/pipelines.py | sort_points | a1600012888/mmdetection3d | 0 | python | def sort_points(self, points):
'\n \n '
depth = points[(:, 2)]
idx = np.argsort(depth)
idx = idx[::(- 1)]
new_points = points[idx]
return new_points | def sort_points(self, points):
'\n \n '
depth = points[(:, 2)]
idx = np.argsort(depth)
idx = idx[::(- 1)]
new_points = points[idx]
return new_points<|docstring|>sort the points according to their depth in descending order<|endoftext|>
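The sort key is the third column (depth), largest value first; a standalone check of the same logic with a toy point array:

import numpy as np

points = np.array([[10.0, 20.0, 1.5],
                   [11.0, 21.0, 7.0],
                   [12.0, 22.0, 3.2]])
order = np.argsort(points[:, 2])[::-1]      # indices from deepest to nearest
print(points[order])                        # rows now come out with depths 7.0, 3.2, 1.5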
8256e2ed8339e8141d7e859dd3c8002bd431f8101baf15ffc9a429e5b5847aa3 | def naive_depth_render(self, points, depth_map):
'\n for float coord, use its int version\n '
points = self.sort_points(points)
x_cords = ((points[(:, 0)] * self.xlim) / 1600.0)
y_cords = ((points[(:, 1)] * self.ylim) / 900.0)
depth = points[(:, 2)]
depth = np.clip(depth, a_min=1e-05, a_max=99999)
x_cords = x_cords.astype(np.int)
y_cords = y_cords.astype(np.int)
depth_map[(y_cords, x_cords)] = points[(:, 2)]
return depth_map | for float cord, use its int version | plugin/packnet/pipelines.py | naive_depth_render | a1600012888/mmdetection3d | 0 | python | def naive_depth_render(self, points, depth_map):
'\n \n '
points = self.sort_points(points)
x_cords = ((points[(:, 0)] * self.xlim) / 1600.0)
y_cords = ((points[(:, 1)] * self.ylim) / 900.0)
depth = points[(:, 2)]
depth = np.clip(depth, a_min=1e-05, a_max=99999)
x_cords = x_cords.astype(np.int)
y_cords = y_cords.astype(np.int)
depth_map[(y_cords, x_cords)] = points[(:, 2)]
return depth_map | def naive_depth_render(self, points, depth_map):
'\n \n '
points = self.sort_points(points)
x_cords = ((points[(:, 0)] * self.xlim) / 1600.0)
y_cords = ((points[(:, 1)] * self.ylim) / 900.0)
depth = points[(:, 2)]
depth = np.clip(depth, a_min=1e-05, a_max=99999)
x_cords = x_cords.astype(np.int)
y_cords = y_cords.astype(np.int)
depth_map[(y_cords, x_cords)] = points[(:, 2)]
return depth_map<|docstring|>for float coord, use its int version<|endoftext|>
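A standalone sketch of the same painter's-style depth rendering on a tiny map: points are drawn far-to-near so the nearest depth wins at each integer pixel. The sizes here are toy values, not the 1600x900 source image assumed by the method:

import numpy as np

h, w = 4, 6
depth_map = np.zeros((h, w))
points = np.array([[2.9, 1.1, 8.0],         # (x, y, depth); both points land on pixel (y=1, x=2)
                   [2.4, 1.6, 3.0]])
points = points[np.argsort(points[:, 2])[::-1]]           # farthest first
xs, ys = points[:, 0].astype(int), points[:, 1].astype(int)
depth_map[ys, xs] = points[:, 2]                          # later (nearer) writes overwrite earlier ones
print(depth_map[1, 2])                                    # 3.0: the nearer point wrote last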
34087245f3dd50ca7ced4c489a9bc4e5da72764e300e57fa8a142bc0ba40dd64 | def split_path(path):
'Convert PATH to (parent-path, name), unless it is None.\n '
return (posixpath.split(path) if (path is not None) else None) | Convert PATH to (parent-path, name), unless it is None. | notes/move-tracking/path_pairs_to_eid_map.py | split_path | auycro/subversion | 3 | python | def split_path(path):
'\n '
return (posixpath.split(path) if (path is not None) else None) | def split_path(path):
'\n '
return (posixpath.split(path) if (path is not None) else None)<|docstring|>Convert PATH to (parent-path, name), unless it is None.<|endoftext|> |
95cc59cdd466bfdff9f938a9197c850e8fe4ba43fdbd051dc3d958e14b945199 | def add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name):
'Add a (parent_eid, name) entry for SIDE:EID, and for each of its parent\n paths that lacks an EID, up to a path that has an EID.\n Add this same mapping to the other side as well, but without caring\n whether the parent element exists on the other side. ### Is this right?\n '
parent_eid = mapping.find_eid_from_relpath(side, parent_path)
if (parent_eid < 0):
parent_eid = add_new(mapping, side, parent_path)
loc = (parent_eid, name)
mapping.set_peid_loc(side, eid, loc)
return loc | Add a (parent_eid, name) entry for SIDE:EID, and for each of its parent
paths that lacks an EID, up to a path that has an EID.
Add this same mapping to the other side as well, but without caring
whether the parent element exists on the other side. ### Is this right? | notes/move-tracking/path_pairs_to_eid_map.py | add_eid_mapping_and_make_parents | auycro/subversion | 3 | python | def add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name):
'Add a (parent_eid, name) entry for SIDE:EID, and for each of its parent\n paths that lacks an EID, up to a path that has an EID.\n Add this same mapping to the other side as well, but without caring\n whether the parent element exists on the other side. ### Is this right?\n '
parent_eid = mapping.find_eid_from_relpath(side, parent_path)
if (parent_eid < 0):
parent_eid = add_new(mapping, side, parent_path)
loc = (parent_eid, name)
mapping.set_peid_loc(side, eid, loc)
return loc | def add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name):
'Add a (parent_eid, name) entry for SIDE:EID, and for each of its parent\n paths that lacks an EID, up to a path that has an EID.\n Add this same mapping to the other side as well, but without caring\n whether the parent element exists on the other side. ### Is this right?\n '
parent_eid = mapping.find_eid_from_relpath(side, parent_path)
if (parent_eid < 0):
parent_eid = add_new(mapping, side, parent_path)
loc = (parent_eid, name)
mapping.set_peid_loc(side, eid, loc)
return loc<|docstring|>Add a (parent_eid, name) entry for SIDE:EID, and for each of its parent
paths that lacks an EID, up to a path that has an EID.
Add this same mapping to the other side as well, but without caring
whether the parent element exists on the other side. ### Is this right?<|endoftext|> |
1e1f2e05456fe028c17405f25618818dbf4106f5ca03fe6aa6a3ec31bb5fef5e | def add_new(mapping, side, path):
'Add a new EID and (parent_eid, name) entry for PATH, and for each\n of its parents that lacks an EID.\n\n Add this same mapping to the other side as well, but without caring\n whether the parent element exists on the other side.\n ### Why is this right?\n '
eid = get_next_eid()
(parent_path, name) = posixpath.split(path)
loc = add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name)
if (not mapping.has_peid_loc((1 - side), loc)):
mapping.set_peid_loc((1 - side), eid, loc)
return eid | Add a new EID and (parent_eid, name) entry for PATH, and for each
of its parents that lacks an EID.
Add this same mapping to the other side as well, but without caring
whether the parent element exists on the other side.
### Why is this right? | notes/move-tracking/path_pairs_to_eid_map.py | add_new | auycro/subversion | 3 | python | def add_new(mapping, side, path):
'Add a new EID and (parent_eid, name) entry for PATH, and for each\n of its parents that lacks an EID.\n\n Add this same mapping to the other side as well, but without caring\n whether the parent element exists on the other side.\n ### Why is this right?\n '
eid = get_next_eid()
(parent_path, name) = posixpath.split(path)
loc = add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name)
if (not mapping.has_peid_loc((1 - side), loc)):
mapping.set_peid_loc((1 - side), eid, loc)
return eid | def add_new(mapping, side, path):
'Add a new EID and (parent_eid, name) entry for PATH, and for each\n of its parents that lacks an EID.\n\n Add this same mapping to the other side as well, but without caring\n whether the parent element exists on the other side.\n ### Why is this right?\n '
eid = get_next_eid()
(parent_path, name) = posixpath.split(path)
loc = add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name)
if (not mapping.has_peid_loc((1 - side), loc)):
mapping.set_peid_loc((1 - side), eid, loc)
return eid<|docstring|>Add a new EID and (parent_eid, name) entry for PATH, and for each
of its parents that lacks an EID.
Add this same mapping to the other side as well, but without caring
whether the parent element exists on the other side.
### Why is this right?<|endoftext|> |
736e25364a49b61b68e857bb6ceb36adbd70f78493f7542dc38ebee6e7076355 | def write_parent_eid(mapping, side, eid):
'Write a (parent_eid, name) mapping corresponding to the existing\n (parent-path, name) mapping for SIDE:EID.\n\n For each of its parent paths in SIDE that lacks an EID, up to a path\n that has an EID, allocate an EID and write a (parent-eid, name) mapping\n in BOTH sides.\n '
path_loc = mapping.path_locs_for_side(side)[eid]
(parent_path, name) = path_loc
new_loc = add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name)
print(('# converting e%d: %s -> %s' % (eid, str(path_loc), str(new_loc)))) | Write a (parent_eid, name) mapping corresponding to the existing
(parent-path, name) mapping for SIDE:EID.
For each of its parent paths in SIDE that lacks an EID, up to a path
that has an EID, allocate an EID and write a (parent-eid, name) mapping
in BOTH sides. | notes/move-tracking/path_pairs_to_eid_map.py | write_parent_eid | auycro/subversion | 3 | python | def write_parent_eid(mapping, side, eid):
'Write a (parent_eid, name) mapping corresponding to the existing\n (parent-path, name) mapping for SIDE:EID.\n\n For each of its parent paths in SIDE that lacks an EID, up to a path\n that has an EID, allocate an EID and write a (parent-eid, name) mapping\n in BOTH sides.\n '
path_loc = mapping.path_locs_for_side(side)[eid]
(parent_path, name) = path_loc
new_loc = add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name)
print(('# converting e%d: %s -> %s' % (eid, str(path_loc), str(new_loc)))) | def write_parent_eid(mapping, side, eid):
'Write a (parent_eid, name) mapping corresponding to the existing\n (parent-path, name) mapping for SIDE:EID.\n\n For each of its parent paths in SIDE that lacks an EID, up to a path\n that has an EID, allocate an EID and write a (parent-eid, name) mapping\n in BOTH sides.\n '
path_loc = mapping.path_locs_for_side(side)[eid]
(parent_path, name) = path_loc
new_loc = add_eid_mapping_and_make_parents(mapping, side, eid, parent_path, name)
print(('# converting e%d: %s -> %s' % (eid, str(path_loc), str(new_loc))))<|docstring|>Write a (parent_eid, name) mapping corresponding to the existing
(parent-path, name) mapping for SIDE:EID.
For each of its parent paths in SIDE that lacks an EID, up to a path
that has an EID, allocate an EID and write a (parent-eid, name) mapping
in BOTH sides.<|endoftext|> |
3e0aca8c33fdda76cdf78872d971fa8346ce1110b645983a7d6cf53ba8b58045 | def __setitem__(self, k, v):
'Ensure no duplicate value already exists.'
assert (v not in self.values()), (k, v)
dict.__setitem__(self, k, v) | Ensure no duplicate value already exists. | notes/move-tracking/path_pairs_to_eid_map.py | __setitem__ | auycro/subversion | 3 | python | def __setitem__(self, k, v):
assert (v not in self.values()), (k, v)
dict.__setitem__(self, k, v) | def __setitem__(self, k, v):
assert (v not in self.values()), (k, v)
dict.__setitem__(self, k, v)<|docstring|>Ensure no duplicate value already exists.<|endoftext|> |
a6dd5b0f90f492064a454bffd23ce08c6623278e1b5eb89433942cdb59f5aaf2 | def eid_from_relpath(self, relpath):
'Return the EID for RELPATH, or -1 if the EID for RELPATH is not known.\n '
if (relpath == ''):
return 0
(parent_path, name) = posixpath.split(relpath)
for (eid, loc) in self.items():
if (loc == (parent_path, name)):
return eid
return (- 1) | Return the EID for RELPATH, or -1 if the EID for RELPATH is not known. | notes/move-tracking/path_pairs_to_eid_map.py | eid_from_relpath | auycro/subversion | 3 | python | def eid_from_relpath(self, relpath):
'\n '
if (relpath == ):
return 0
(parent_path, name) = posixpath.split(relpath)
for (eid, loc) in self.items():
if (loc == (parent_path, name)):
return eid
return (- 1) | def eid_from_relpath(self, relpath):
'\n '
if (relpath == ):
return 0
(parent_path, name) = posixpath.split(relpath)
for (eid, loc) in self.items():
if (loc == (parent_path, name)):
return eid
return (- 1)<|docstring|>Return the EID for RELPATH, or -1 if the EID for RELPATH is not known.<|endoftext|> |
ac1f0fdca15fe6064da66db171573136568a748492739f39fcc04af2a9a0f150 | def eid_from_relpath(self, relpath):
'Return the EID for RELPATH, or -1 if the EID for RELPATH is not known.\n '
if (relpath == ''):
return 0
(parent_path, name) = posixpath.split(relpath)
for (eid, loc) in self.items():
if ((loc[1] == name) and (loc[0] == self.eid_from_relpath(parent_path))):
return eid
return (- 1) | Return the EID for RELPATH, or -1 if the EID for RELPATH is not known. | notes/move-tracking/path_pairs_to_eid_map.py | eid_from_relpath | auycro/subversion | 3 | python | def eid_from_relpath(self, relpath):
'\n '
if (relpath == ):
return 0
(parent_path, name) = posixpath.split(relpath)
for (eid, loc) in self.items():
if ((loc[1] == name) and (loc[0] == self.eid_from_relpath(parent_path))):
return eid
return (- 1) | def eid_from_relpath(self, relpath):
'\n '
if (relpath == ):
return 0
(parent_path, name) = posixpath.split(relpath)
for (eid, loc) in self.items():
if ((loc[1] == name) and (loc[0] == self.eid_from_relpath(parent_path))):
return eid
return (- 1)<|docstring|>Return the EID for RELPATH, or -1 if the EID for RELPATH is not known.<|endoftext|> |
c61bf7aca96382692d66e5be0c29b86d7c8ef5a2bc42c5b6047a8187fb0ed7c8 | def eid_from_loc(self, loc):
'Return the EID for LOC, or -1 if the EID for LOC is not known.\n LOC is (parent_eid, name).\n '
if (loc is None):
return 0
for (eid, this_loc) in self.items():
if (loc == this_loc):
return eid
return (- 1) | Return the EID for LOC, or -1 if the EID for LOC is not known.
LOC is (parent_eid, name). | notes/move-tracking/path_pairs_to_eid_map.py | eid_from_loc | auycro/subversion | 3 | python | def eid_from_loc(self, loc):
'Return the EID for LOC, or -1 if the EID for LOC is not known.\n LOC is (parent_eid, name).\n '
if (loc is None):
return 0
for (eid, this_loc) in self.items():
if (loc == this_loc):
return eid
return (- 1) | def eid_from_loc(self, loc):
'Return the EID for LOC, or -1 if the EID for LOC is not known.\n LOC is (parent_eid, name).\n '
if (loc is None):
return 0
for (eid, this_loc) in self.items():
if (loc == this_loc):
return eid
return (- 1)<|docstring|>Return the EID for LOC, or -1 if the EID for LOC is not known.
LOC is (parent_eid, name).<|endoftext|> |
69d189e775945b3350c2870fde432a16afd09022352be5b95795893bc3f13786 | def relpath_from_eid(self, eid):
'Return the relpath of element EID in a mapping from EID to\n (parent_eid, name).\n '
if (eid == 0):
return ''
element = self.get(eid)
if (element is None):
return None
(parent_eid, name) = element
parent_path = self.relpath_from_eid(parent_eid)
if (parent_path is None):
return None
return posixpath.join(parent_path, name) | Return the relpath of element EID in a mapping from EID to
(parent_eid, name). | notes/move-tracking/path_pairs_to_eid_map.py | relpath_from_eid | auycro/subversion | 3 | python | def relpath_from_eid(self, eid):
'Return the relpath of element EID in a mapping from EID to\n (parent_eid, name).\n '
if (eid == 0):
return
element = self.get(eid)
if (element is None):
return None
(parent_eid, name) = element
parent_path = self.relpath_from_eid(parent_eid)
if (parent_path is None):
return None
return posixpath.join(parent_path, name) | def relpath_from_eid(self, eid):
'Return the relpath of element EID in a mapping from EID to\n (parent_eid, name).\n '
if (eid == 0):
return
element = self.get(eid)
if (element is None):
return None
(parent_eid, name) = element
parent_path = self.relpath_from_eid(parent_eid)
if (parent_path is None):
return None
return posixpath.join(parent_path, name)<|docstring|>Return the relpath of element EID in a mapping from EID to
(parent_eid, name).<|endoftext|> |
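The recursion bottoms out at eid 0, the root with path ''; a dict-based sketch of the same lookup, using made-up element ids:

import posixpath

peid_map = {1: (0, 'trunk'), 2: (1, 'src'), 3: (2, 'main.c')}   # eid -> (parent_eid, name)

def relpath_from_eid(mapping, eid):
    if eid == 0:
        return ''
    parent_eid, name = mapping[eid]
    return posixpath.join(relpath_from_eid(mapping, parent_eid), name)

print(relpath_from_eid(peid_map, 3))    # trunk/src/main.c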
3b18bfa128a0cff212d1e31476613035476ce3a90993fef398ea88be64acd142 | def set_peid_loc(self, side, eid, loc):
'Set the mapping for SIDE:EID to LOC. (If no mapping for EID already\n exists, implicitly set the other side to None.)\n LOC is (parent-eid, name).\n '
assert (type(loc[0]) is int)
self.peid_maps[side][eid] = loc | Set the mapping for SIDE:EID to LOC. (If no mapping for EID already
exists, implicitly set the other side to None.)
LOC is (parent-eid, name). | notes/move-tracking/path_pairs_to_eid_map.py | set_peid_loc | auycro/subversion | 3 | python | def set_peid_loc(self, side, eid, loc):
'Set the mapping for SIDE:EID to LOC. (If no mapping for EID already\n exists, implicitly set the other side to None.)\n LOC is (parent-eid, name).\n '
assert (type(loc[0]) is int)
self.peid_maps[side][eid] = loc | def set_peid_loc(self, side, eid, loc):
'Set the mapping for SIDE:EID to LOC. (If no mapping for EID already\n exists, implicitly set the other side to None.)\n LOC is (parent-eid, name).\n '
assert (type(loc[0]) is int)
self.peid_maps[side][eid] = loc<|docstring|>Set the mapping for SIDE:EID to LOC. (If no mapping for EID already
exists, implicitly set the other side to None.)
LOC is (parent-eid, name).<|endoftext|> |
bde5193bd7d6aee0719fff1d8f63ce9385edbe143af942a94d9e48c8d1cf5650 | def find_eid_from_relpath(self, side, relpath):
'Return the EID for SIDE:RELPATH, or -1 if not found.\n '
eid = self.path_locs_for_side(side).eid_from_relpath(relpath)
if (eid < 0):
eid = self.peid_locs_for_side(side).eid_from_relpath(relpath)
if (eid < 0):
pass
return eid | Return the EID for SIDE:RELPATH, or -1 if not found. | notes/move-tracking/path_pairs_to_eid_map.py | find_eid_from_relpath | auycro/subversion | 3 | python | def find_eid_from_relpath(self, side, relpath):
'\n '
eid = self.path_locs_for_side(side).eid_from_relpath(relpath)
if (eid < 0):
eid = self.peid_locs_for_side(side).eid_from_relpath(relpath)
if (eid < 0):
pass
return eid | def find_eid_from_relpath(self, side, relpath):
'\n '
eid = self.path_locs_for_side(side).eid_from_relpath(relpath)
if (eid < 0):
eid = self.peid_locs_for_side(side).eid_from_relpath(relpath)
if (eid < 0):
pass
return eid<|docstring|>Return the EID for SIDE:RELPATH, or -1 if not found.<|endoftext|> |
45fffffb91f00357ba8272b6e9f58ca6a8447d9353e616a81750235617323920 | def test(did_pass):
' Print the result of a test. '
linenum = sys._getframe(1).f_lineno
if did_pass:
msg = 'Test at line {0} ok.'.format(linenum)
else:
msg = 'Test at line {0} FAILED.'.format(linenum)
print(msg) | Print the result of a test. | Chapter7/Exercise15.py | test | NoahNacho/Python-project-tests | 2 | python | def test(did_pass):
' '
linenum = sys._getframe(1).f_lineno
if did_pass:
msg = 'Test at line {0} ok.'.format(linenum)
else:
msg = 'Test at line {0} FAILED.'.format(linenum)
print(msg) | def test(did_pass):
' '
linenum = sys._getframe(1).f_lineno
if did_pass:
msg = 'Test at line {0} ok.'.format(linenum)
else:
msg = 'Test at line {0} FAILED.'.format(linenum)
print(msg)<|docstring|>Print the result of a test.<|endoftext|> |
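Typical use of the helper: pass it a boolean expression, so a failing check reports the caller's line number. For example, with the test function above (and its import sys) in scope:

def absolute_value(n):
    return n if n >= 0 else -n

test(absolute_value(17) == 17)
test(absolute_value(-17) == 17)
test(absolute_value(0) == 0)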
9ac4993dc114358527b3be52bcb3dd76b8d7881cc4d874218a6c58a1b45182a1 | def inst_variable(x, y, z):
'\n Instrumental variable method\n Args:\n x: the input matrix [T n]\n y: the output matrix [T]\n z: the instrument [T n]\n Returns:\n the estimation of theta in y = x theta + n by instrumental variable\n '
(T, n) = x.shape
A = np.zeros((n, n))
B = np.zeros((n, 1))
epsI = (1e-05 * np.eye(n))
for t in range(T):
A += (np.outer(z[(t, :)], x[(t, :)]) / T)
B += np.dot(z[(t, :)], (y[t] / T)).reshape((n, 1))
return (LA.inv((A + epsI)) @ B) | Instrumental variable method
Args:
x: the input matrix [T n]
y: the output matrix [T]
z: the instrument [T n]
Returns:
the estimation of theta in y = x theta + n by instrumental variable | lq/funlib.py | inst_variable | FarnazAdib/Crash_course_on_RL | 53 | python | def inst_variable(x, y, z):
'\n Instrumental variable method\n Args:\n x: the input matrix [T n]\n y: the output matrix [T]\n z: the instrument [T n]\n Returns:\n the estimation of theta in y = x theta + n by instrumental variable\n '
(T, n) = x.shape
A = np.zeros((n, n))
B = np.zeros((n, 1))
epsI = (1e-05 * np.eye(n))
for t in range(T):
A += (np.outer(z[(t, :)], x[(t, :)]) / T)
B += np.dot(z[(t, :)], (y[t] / T)).reshape((n, 1))
return (LA.inv((A + epsI)) @ B) | def inst_variable(x, y, z):
'\n Instrumental variable method\n Args:\n x: the input matrix [T n]\n y: the output matrix [T]\n z: the instrument [T n]\n Returns:\n the estimation of theta in y = x theta + n by instrumental variable\n '
(T, n) = x.shape
A = np.zeros((n, n))
B = np.zeros((n, 1))
epsI = (1e-05 * np.eye(n))
for t in range(T):
A += (np.outer(z[(t, :)], x[(t, :)]) / T)
B += np.dot(z[(t, :)], (y[t] / T)).reshape((n, 1))
return (LA.inv((A + epsI)) @ B)<|docstring|>Instrumental variable method
Args:
x: the input matrix [T n]
y: the output matrix [T]
z: the instrument [T n]
Returns:
the estimation of theta in y = x theta + n by instrumental variable<|endoftext|> |
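A small worked example of the estimator, assuming numpy is imported as np and numpy.linalg as LA (as the function body implies) and inst_variable is defined as above; the instrument z is correlated with the regressor x but not with the noise, so theta_hat should land close to the true value:

import numpy as np
import numpy.linalg as LA

rng = np.random.default_rng(0)
T, n = 2000, 2
theta_true = np.array([[1.5], [-0.7]])
z = rng.normal(size=(T, n))                  # instrument, independent of the noise
x = z + 0.1 * rng.normal(size=(T, n))        # regressor correlated with the instrument
y = (x @ theta_true).ravel() + 0.05 * rng.normal(size=T)

theta_hat = inst_variable(x, y, z)
print(theta_hat.ravel())                     # approximately [1.5, -0.7]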
608541fa0fe28bf70df4dba1abdf8b3256f2dccbba4e2850ba9dd58218a92d0e | def GtoP(G, K):
'\n :param G: The kernel of Q function\n :param K: The gain\n :return: The P associated with G and K\n '
(_, n) = K.shape
M = np.concatenate((np.eye(n), K.T), axis=1)
return ((M @ G) @ M.T) | :param G: The kernel of Q function
:param K: The gain
:return: The P associated with G and K | lq/funlib.py | GtoP | FarnazAdib/Crash_course_on_RL | 53 | python | def GtoP(G, K):
'\n :param G: The kernel of Q function\n :param K: The gain\n :return: The P associated with G and K\n '
(_, n) = K.shape
M = np.concatenate((np.eye(n), K.T), axis=1)
return ((M @ G) @ M.T) | def GtoP(G, K):
'\n :param G: The kernel of Q function\n :param K: The gain\n :return: The P associated with G and K\n '
(_, n) = K.shape
M = np.concatenate((np.eye(n), K.T), axis=1)
return ((M @ G) @ M.T)<|docstring|>:param G: The kernel of Q function
:param K: The gain
:return: The P associated with G and K<|endoftext|> |
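GtoP evaluates the quadratic form of the Q-kernel G along the feedback u = K x, i.e. P = [I K.T] G [I K.T].T; a toy check with n = 2 states and one input, mirroring the function body:

import numpy as np

n, m = 2, 1
G = np.diag([1.0, 2.0, 0.5])            # Q-function kernel on the joint (x, u) space
K = np.array([[0.3, -0.4]])             # feedback gain, u = K x

M = np.concatenate((np.eye(n), K.T), axis=1)
P = M @ G @ M.T                          # same computation as GtoP(G, K)
print(P)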
ef797bdc7fdc7c5e4822702d0f060cfb56a22af3bbbfaf8613b8abe02b63a1a2 | def vecv(x):
'\n :param x: input vector of shape [T , n]\n :return: vector of x^2 of shape [T, n(n+1)/2]\n '
(T, n) = x.shape
N = int(((n * (n + 1)) / 2))
y = np.zeros((T, N))
for t in range(T):
yt = []
for i in range(n):
for j in range(i, n):
if (j == i):
yt.append((x[(t, i)] ** 2))
else:
yt.append(((2 * x[(t, i)]) * x[(t, j)]))
y[(t, :)] = yt
return y | :param x: input vector of shape [T , n]
:return: vector of x^2 of shape [T, n(n+1)/2] | lq/funlib.py | vecv | FarnazAdib/Crash_course_on_RL | 53 | python | def vecv(x):
'\n :param x: input vector of shape [T , n]\n :return: vector of x^2 of shape [T, n(n+1)/2]\n '
(T, n) = x.shape
N = int(((n * (n + 1)) / 2))
y = np.zeros((T, N))
for t in range(T):
yt = []
for i in range(n):
for j in range(i, n):
if (j == i):
yt.append((x[(t, i)] ** 2))
else:
yt.append(((2 * x[(t, i)]) * x[(t, j)]))
y[(t, :)] = yt
return y | def vecv(x):
'\n :param x: input vector of shape [T , n]\n :return: vector of x^2 of shape [T, n(n+1)/2]\n '
(T, n) = x.shape
N = int(((n * (n + 1)) / 2))
y = np.zeros((T, N))
for t in range(T):
yt = []
for i in range(n):
for j in range(i, n):
if (j == i):
yt.append((x[(t, i)] ** 2))
else:
yt.append(((2 * x[(t, i)]) * x[(t, j)]))
y[(t, :)] = yt
return y<|docstring|>:param x: input vector of shape [T , n]
:return: vector of x^2 of shape [T, n(n+1)/2]<|endoftext|> |
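vecv stacks x_i*x_j for i <= j with off-diagonal terms doubled, so a quadratic form x P x.T becomes a plain dot product with the upper triangle of P; a small consistency check using the function above:

import numpy as np

x = np.array([[1.0, 2.0, -1.0]])                 # one sample, n = 3
P = np.array([[2.0, 0.5, 0.0],
              [0.5, 1.0, -0.3],
              [0.0, -0.3, 4.0]])                 # symmetric kernel
p_upper = P[np.triu_indices(3)]                  # upper-triangular entries, row by row
quad_direct = (x @ P @ x.T)[0, 0]
quad_vecv = (vecv(x) @ p_upper)[0]
print(quad_direct, quad_vecv)                    # the two numbers agree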
6793f7973c3dcfe9f42ecf5af40ae722b86830bccc3431a635f698363202cc13 | def SquareMat(v, n):
'\n :param v: a vector\n :param n: dimension of the symmetric square matrix\n :return: a symmetric square matrix using v\n '
P = np.zeros((n, n))
s = 0
for i in range(n):
e = ((s + n) - i)
m = v[s:e].T
P[(i, i:)] = m
P[(i:, i)] = m
s = e
return P | :param v: a vector
:param n: dimension of the symmetric square matrix
:return: a symmetric square matrix using v | lq/funlib.py | SquareMat | FarnazAdib/Crash_course_on_RL | 53 | python | def SquareMat(v, n):
'\n :param v: a vector\n :param n: dimension of the symmetric square matrix\n :return: a symmetric square matrix using v\n '
P = np.zeros((n, n))
s = 0
for i in range(n):
e = ((s + n) - i)
m = v[s:e].T
P[(i, i:)] = m
P[(i:, i)] = m
s = e
return P | def SquareMat(v, n):
'\n :param v: a vector\n :param n: dimension of the symmetric square matrix\n :return: a symmetric square matrix using v\n '
P = np.zeros((n, n))
s = 0
for i in range(n):
e = ((s + n) - i)
m = v[s:e].T
P[(i, i:)] = m
P[(i:, i)] = m
s = e
return P<|docstring|>:param v: a vector
:param n: dimension of the symmetric square matrix
:return: a symmetric square matrix using v<|endoftext|> |
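SquareMat is the inverse packing of the upper-triangular stacking used by vecv: given the n(n+1)/2 upper-triangle entries in row order, it rebuilds the symmetric matrix. A round-trip sketch with the function above:

import numpy as np

P = np.array([[2.0, 0.5, 0.0],
              [0.5, 1.0, -0.3],
              [0.0, -0.3, 4.0]])
v = P[np.triu_indices(3)]          # length-6 vector: rows of the upper triangle
P_back = SquareMat(v, 3)
print(np.allclose(P, P_back))      # True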
ac32a1bc33a5955c689e28c4edff143fd1f83864032c1f3a41552d20790da98a | def opt_onestep(self, g):
'\n This function calculates one iteration of adam optimization. It takes the gradient of the cost function with respect to\n parameter theta and returns dtheta. Note that you should use +dtheta when you are maximizing and -dtheta when\n minimizing.\n return the changes for the learning parameter\n :param g: Assume as gradient of loss with respect to the parameter theta\n :return: dtheta\n '
self.adam_M = ((self.beta1 * self.adam_M) + ((1 - self.beta1) * g))
self.adam_V = ((self.beta2 * self.adam_V) + ((1 - self.beta2) * (g * g)))
mhat = (copy.copy(self.adam_M) / (1 - (self.beta1 ** (self.it_index + 1))))
vhat = (copy.copy(self.adam_V) / (1 - (self.beta2 ** (self.it_index + 1))))
self.it_index = (self.it_index + 1)
return ((self.step_size * mhat) / (np.sqrt(vhat) + self.epsilon)) | This function calculate one iteration of adam optimization. It takes the gradient of cost functin with repect to
parameter thetha and return dtheta. Note that you should use +dtheta when you are maximizing and -dtheta when
minimizing.
return the changes for the learning parameter
:param g: Assume as gradient of loss with respect to the parameter theta
:return: dtheta | lq/funlib.py | opt_onestep | FarnazAdib/Crash_course_on_RL | 53 | python | def opt_onestep(self, g):
'\n This function calculate one iteration of adam optimization. It takes the gradient of cost functin with repect to\n parameter thetha and return dtheta. Note that you should use +dtheta when you are maximizing and -dtheta when\n minimizing.\n return the changes for the learning parameter\n :param g: Assume as gradient of loss with respect to the parameter theta\n :return: dtheta\n '
self.adam_M = ((self.beta1 * self.adam_M) + ((1 - self.beta1) * g))
self.adam_V = ((self.beta2 * self.adam_V) + ((1 - self.beta2) * (g * g)))
mhat = (copy.copy(self.adam_M) / (1 - (self.beta1 ** (self.it_index + 1))))
vhat = (copy.copy(self.adam_V) / (1 - (self.beta2 ** (self.it_index + 1))))
self.it_index = (self.it_index + 1)
return ((self.step_size * mhat) / (np.sqrt(vhat) + self.epsilon)) | def opt_onestep(self, g):
'\n This function calculate one iteration of adam optimization. It takes the gradient of cost functin with repect to\n parameter thetha and return dtheta. Note that you should use +dtheta when you are maximizing and -dtheta when\n minimizing.\n return the changes for the learning parameter\n :param g: Assume as gradient of loss with respect to the parameter theta\n :return: dtheta\n '
self.adam_M = ((self.beta1 * self.adam_M) + ((1 - self.beta1) * g))
self.adam_V = ((self.beta2 * self.adam_V) + ((1 - self.beta2) * (g * g)))
mhat = (copy.copy(self.adam_M) / (1 - (self.beta1 ** (self.it_index + 1))))
vhat = (copy.copy(self.adam_V) / (1 - (self.beta2 ** (self.it_index + 1))))
self.it_index = (self.it_index + 1)
return ((self.step_size * mhat) / (np.sqrt(vhat) + self.epsilon))<|docstring|>This function calculates one iteration of adam optimization. It takes the gradient of the cost function with respect to
parameter theta and returns dtheta. Note that you should use +dtheta when you are maximizing and -dtheta when
minimizing.
return the changes for the learning parameter
:param g: Assume as gradient of loss with respect to the parameter theta
:return: dtheta<|endoftext|> |
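A self-contained sketch of the same Adam bookkeeping on a scalar problem (minimising f(theta) = (theta - 3)^2, so dtheta is subtracted as the docstring prescribes); the hyper-parameters are typical Adam defaults, not necessarily those used in the class:

import numpy as np

theta, M, V = 0.0, 0.0, 0.0
step_size, beta1, beta2, eps = 0.1, 0.9, 0.999, 1e-8

for t in range(500):
    g = 2.0 * (theta - 3.0)                      # gradient of the loss at theta
    M = beta1 * M + (1 - beta1) * g
    V = beta2 * V + (1 - beta2) * g * g
    mhat = M / (1 - beta1 ** (t + 1))            # bias-corrected first moment
    vhat = V / (1 - beta2 ** (t + 1))            # bias-corrected second moment
    theta -= step_size * mhat / (np.sqrt(vhat) + eps)

print(theta)                                     # close to 3.0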
db503034a0b394fdfbfd38f43bf369cf35ce71f460bf62ceb80f0e3ac4697f94 | @cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type) | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded | intersight/model/boot_san.py | additional_properties_type | CiscoDevNet/intersight-python | 5 | python | @cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type) | @cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type)<|docstring|>This must be a method because a model may have properties that are
of type self, this must run after the class is loaded<|endoftext|> |
ce20c930b8a8913807427169502712e588195e5348ae3255ce295d958c22ce8b | @cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'class_id': (str,), 'object_type': (str,), 'bootloader': (BootBootloader,), 'interface_name': (str,), 'lun': (int,), 'slot': (str,), 'wwpn': (str,), 'enabled': (bool,), 'name': (str,)} | This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type. | intersight/model/boot_san.py | openapi_types | CiscoDevNet/intersight-python | 5 | python | @cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'class_id': (str,), 'object_type': (str,), 'bootloader': (BootBootloader,), 'interface_name': (str,), 'lun': (int,), 'slot': (str,), 'wwpn': (str,), 'enabled': (bool,), 'name': (str,)} | @cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'class_id': (str,), 'object_type': (str,), 'bootloader': (BootBootloader,), 'interface_name': (str,), 'lun': (int,), 'slot': (str,), 'wwpn': (str,), 'enabled': (bool,), 'name': (str,)}<|docstring|>This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.<|endoftext|> |
42cff2cb9ec50dc66236bda3e41900c8499a159d4658edd482490f4b27d5d8cd | @convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'BootSan - a model defined in OpenAPI\n\n Args:\n\n Keyword Args:\n class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501\n object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the \'ClassId\' property.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n bootloader (BootBootloader): [optional] # noqa: E501\n interface_name (str): The name of the underlying vHBA interface to be used by the SAN boot device.. [optional] # noqa: E501\n lun (int): The Logical Unit Number (LUN) of the device.. [optional] if omitted the server will use the default value of 0 # noqa: E501\n slot (str): Slot ID of the device. Supported values are ( 1 - 255, "MLOM", "L1", "L2" ).. [optional] # noqa: E501\n wwpn (str): The WWPN Address of the underlying fiber channel interface used by the SAN boot device. Value must be in hexadecimal format xx:xx:xx:xx:xx:xx:xx:xx.. [optional] # noqa: E501\n enabled (bool): Specifies if the boot device is enabled or disabled.. [optional] if omitted the server will use the default value of False # noqa: E501\n name (str): A name that helps identify a boot device. It can be any string that adheres to the following constraints. It should start and end with an alphanumeric character. It can have underscores and hyphens. It cannot be more than 30 characters.. [optional] # noqa: E501\n '
class_id = kwargs.get('class_id', 'boot.San')
object_type = kwargs.get('object_type', 'boot.San')
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
required_args = {'class_id': class_id, 'object_type': object_type}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for (var_name, var_value) in required_args.items():
setattr(self, var_name, var_value)
for (var_name, var_value) in kwargs.items():
if ((var_name in unused_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (not self._additional_properties_model_instances)):
continue
setattr(self, var_name, var_value) | BootSan - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bootloader (BootBootloader): [optional] # noqa: E501
interface_name (str): The name of the underlying vHBA interface to be used by the SAN boot device.. [optional] # noqa: E501
lun (int): The Logical Unit Number (LUN) of the device.. [optional] if omitted the server will use the default value of 0 # noqa: E501
slot (str): Slot ID of the device. Supported values are ( 1 - 255, "MLOM", "L1", "L2" ).. [optional] # noqa: E501
wwpn (str): The WWPN Address of the underlying fiber channel interface used by the SAN boot device. Value must be in hexadecimal format xx:xx:xx:xx:xx:xx:xx:xx.. [optional] # noqa: E501
enabled (bool): Specifies if the boot device is enabled or disabled.. [optional] if omitted the server will use the default value of False # noqa: E501
name (str): A name that helps identify a boot device. It can be any string that adheres to the following constraints. It should start and end with an alphanumeric character. It can have underscores and hyphens. It cannot be more than 30 characters.. [optional] # noqa: E501 | intersight/model/boot_san.py | __init__ | CiscoDevNet/intersight-python | 5 | python | @convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'BootSan - a model defined in OpenAPI\n\n Args:\n\n Keyword Args:\n class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501\n object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the \'ClassId\' property.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n bootloader (BootBootloader): [optional] # noqa: E501\n interface_name (str): The name of the underlying vHBA interface to be used by the SAN boot device.. [optional] # noqa: E501\n lun (int): The Logical Unit Number (LUN) of the device.. [optional] if omitted the server will use the default value of 0 # noqa: E501\n slot (str): Slot ID of the device. Supported values are ( 1 - 255, "MLOM", "L1", "L2" ).. [optional] # noqa: E501\n wwpn (str): The WWPN Address of the underlying fiber channel interface used by the SAN boot device. Value must be in hexadecimal format xx:xx:xx:xx:xx:xx:xx:xx.. [optional] # noqa: E501\n enabled (bool): Specifies if the boot device is enabled or disabled.. [optional] if omitted the server will use the default value of False # noqa: E501\n name (str): A name that helps identify a boot device. It can be any string that adheres to the following constraints. It should start and end with an alphanumeric character. It can have underscores and hyphens. It cannot be more than 30 characters.. [optional] # noqa: E501\n '
class_id = kwargs.get('class_id', 'boot.San')
object_type = kwargs.get('object_type', 'boot.San')
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
required_args = {'class_id': class_id, 'object_type': object_type}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for (var_name, var_value) in required_args.items():
setattr(self, var_name, var_value)
for (var_name, var_value) in kwargs.items():
if ((var_name in unused_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (not self._additional_properties_model_instances)):
continue
setattr(self, var_name, var_value) | @convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
'BootSan - a model defined in OpenAPI\n\n Args:\n\n Keyword Args:\n class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501\n object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the \'ClassId\' property.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n bootloader (BootBootloader): [optional] # noqa: E501\n interface_name (str): The name of the underlying vHBA interface to be used by the SAN boot device.. [optional] # noqa: E501\n lun (int): The Logical Unit Number (LUN) of the device.. [optional] if omitted the server will use the default value of 0 # noqa: E501\n slot (str): Slot ID of the device. Supported values are ( 1 - 255, "MLOM", "L1", "L2" ).. [optional] # noqa: E501\n wwpn (str): The WWPN Address of the underlying fiber channel interface used by the SAN boot device. Value must be in hexadecimal format xx:xx:xx:xx:xx:xx:xx:xx.. [optional] # noqa: E501\n enabled (bool): Specifies if the boot device is enabled or disabled.. [optional] if omitted the server will use the default value of False # noqa: E501\n name (str): A name that helps identify a boot device. It can be any string that adheres to the following constraints. It should start and end with an alphanumeric character. It can have underscores and hyphens. It cannot be more than 30 characters.. [optional] # noqa: E501\n '
class_id = kwargs.get('class_id', 'boot.San')
object_type = kwargs.get('object_type', 'boot.San')
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
required_args = {'class_id': class_id, 'object_type': object_type}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for (var_name, var_value) in required_args.items():
setattr(self, var_name, var_value)
for (var_name, var_value) in kwargs.items():
if ((var_name in unused_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (not self._additional_properties_model_instances)):
continue
setattr(self, var_name, var_value)<|docstring|>BootSan - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "boot.San", must be one of ["boot.San", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bootloader (BootBootloader): [optional] # noqa: E501
interface_name (str): The name of the underlying vHBA interface to be used by the SAN boot device.. [optional] # noqa: E501
lun (int): The Logical Unit Number (LUN) of the device.. [optional] if omitted the server will use the default value of 0 # noqa: E501
slot (str): Slot ID of the device. Supported values are ( 1 - 255, "MLOM", "L1", "L2" ).. [optional] # noqa: E501
wwpn (str): The WWPN Address of the underlying fiber channel interface used by the SAN boot device. Value must be in hexadecimal format xx:xx:xx:xx:xx:xx:xx:xx.. [optional] # noqa: E501
enabled (bool): Specifies if the boot device is enabled or disabled.. [optional] if omitted the server will use the default value of False # noqa: E501
name (str): A name that helps identify a boot device. It can be any string that adheres to the following constraints. It should start and end with an alphanumeric character. It can have underscores and hyphens. It cannot be more than 30 characters.. [optional] # noqa: E501<|endoftext|> |
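A minimal construction sketch for the model above, assuming the intersight Python SDK from the listed repository is installed; every value is illustrative, and the final print relies on the generated model exposing its fields as attributes.

from intersight.model.boot_san import BootSan

san_device = BootSan(
    name='san-boot-0',                      # <= 30 chars, alphanumeric at both ends
    slot='MLOM',                            # supported: 1-255, "MLOM", "L1", "L2"
    lun=0,
    wwpn='20:00:00:25:b5:aa:00:01',         # hex xx:xx:xx:xx:xx:xx:xx:xx format
    interface_name='vhba0',
    enabled=True,
)
print(san_device.class_id, san_device.object_type)   # both default to "boot.San"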
b34409f988bcc424201d148b010d9934c0f15caabbce3f9cfa440f2add9afbda | def iBEAt_test_DTI(Elastix_Parameter_file_PATH, output_dir, sorted_slice_files, ArrayDicomiBEAt, image_parameters, filenameDCM, lstFilesDCM):
' Example application of MDR in renal DTI (iBEAt data) \n\n Description\n -----------\n This function performs model driven registration for selected DTI sequence on a single selected slice \n and returns as output the MDR registered images, signal model fit, deformation field x, deformation field y, \n fitted parameters FA and ADC, and the final diagnostics.\n \n Args\n ----\n Elastix_Parameter_file_PATH (string): complete path to the elastix parameter file to be used. \n output_dir (string): directory where results are saved. \n slice_sorted_files (list): selected slices to process using MDR: sorted according to acquisition time. \n ArrayDicomiBEAt (numpy.ndarray): input DICOM to numpy array (unsorted). \n image_parameters (SITK input): image pixel spacing. \n filenameDCM (pathlib.PosixPath): dicom filenames to process. \n lstFilesDCM (list): list of dicom files to process. \n '
start_computation_time = time.time()
image_shape = np.shape(ArrayDicomiBEAt)
original_images = np.zeros(image_shape)
for (i, s) in enumerate(sorted_slice_files):
img2d = s.pixel_array
original_images[(:, :, i)] = img2d
full_module_name = 'models.DTI'
model = importlib.import_module(full_module_name)
signal_model_parameters = read_signal_model_parameters(filenameDCM, lstFilesDCM)
elastix_model_parameters = read_elastix_model_parameters(Elastix_Parameter_file_PATH, ['MaximumNumberOfIterations', 1024])
MDR_output = model_driven_registration(original_images, image_parameters, model, signal_model_parameters, elastix_model_parameters, precision=1, function='main')
export_images(MDR_output[0], (output_dir + '/coregistered/MDR-registered_DTI_'))
export_images(MDR_output[1], (output_dir + '/fit/fit_image_'))
export_images(MDR_output[2][(:, :, 0, :)], (output_dir + '/deformation_field/final_deformation_x_'))
export_images(MDR_output[2][(:, :, 1, :)], (output_dir + '/deformation_field/final_deformation_y_'))
export_maps(MDR_output[3][(0, :)], (output_dir + '/fitted_parameters/FA'), np.shape(original_images))
export_maps(MDR_output[3][(1, :)], (output_dir + '/fitted_parameters/ADC'), np.shape(original_images))
MDR_output[4].to_csv((output_dir + 'DTI_largest_deformations.csv'))
end_computation_time = time.time()
print('total computation time for MDR (minutes taken:)...')
print((0.0166667 * (end_computation_time - start_computation_time)))
print('completed MDR registration!')
print('Finished processing Model Driven Registration case for iBEAt study DTI sequence!') | Example application of MDR in renal DTI (iBEAt data)
Description
-----------
This function performs model driven registration for selected DTI sequence on a single selected slice
and returns as output the MDR registered images, signal model fit, deformation field x, deformation field y,
fitted parameters FA and ADC, and the final diagnostics.
Args
----
Elastix_Parameter_file_PATH (string): complete path to the elastix parameter file to be used.
output_dir (string): directory where results are saved.
sorted_slice_files (list): selected slices to process using MDR, sorted according to acquisition time.
ArrayDicomiBEAt (numpy.ndarray): input DICOM to numpy array (unsorted).
image_parameters (SITK input): image pixel spacing.
filenameDCM (pathlib.PosixPath): dicom filenames to process.
lstFilesDCM (list): list of dicom files to process. | tests/MDR_test_DTI.py | iBEAt_test_DTI | QIB-Sheffield/MDR-Library | 0 | python | def iBEAt_test_DTI(Elastix_Parameter_file_PATH, output_dir, sorted_slice_files, ArrayDicomiBEAt, image_parameters, filenameDCM, lstFilesDCM):
' Example application of MDR in renal DTI (iBEAt data) \n\n Description\n -----------\n This function performs model driven registration for selected DTI sequence on a single selected slice \n and returns as output the MDR registered images, signal model fit, deformation field x, deformation field y, \n fitted parameters FA and ADC, and the final diagnostics.\n \n Args\n ----\n Elastix_Parameter_file_PATH (string): complete path to the elastix parameter file to be used. \n output_dir (string): directory where results are saved. \n slice_sorted_files (list): selected slices to process using MDR: sorted according to acquisition time. \n ArrayDicomiBEAt (numpy.ndarray): input DICOM to numpy array (unsorted). \n image_parameters (SITK input): image pixel spacing. \n filenameDCM (pathlib.PosixPath): dicom filenames to process. \n lstFilesDCM (list): list of dicom files to process. \n '
start_computation_time = time.time()
image_shape = np.shape(ArrayDicomiBEAt)
original_images = np.zeros(image_shape)
for (i, s) in enumerate(sorted_slice_files):
img2d = s.pixel_array
original_images[(:, :, i)] = img2d
full_module_name = 'models.DTI'
model = importlib.import_module(full_module_name)
signal_model_parameters = read_signal_model_parameters(filenameDCM, lstFilesDCM)
elastix_model_parameters = read_elastix_model_parameters(Elastix_Parameter_file_PATH, ['MaximumNumberOfIterations', 1024])
MDR_output = model_driven_registration(original_images, image_parameters, model, signal_model_parameters, elastix_model_parameters, precision=1, function='main')
export_images(MDR_output[0], (output_dir + '/coregistered/MDR-registered_DTI_'))
export_images(MDR_output[1], (output_dir + '/fit/fit_image_'))
export_images(MDR_output[2][(:, :, 0, :)], (output_dir + '/deformation_field/final_deformation_x_'))
export_images(MDR_output[2][(:, :, 1, :)], (output_dir + '/deformation_field/final_deformation_y_'))
export_maps(MDR_output[3][(0, :)], (output_dir + '/fitted_parameters/FA'), np.shape(original_images))
export_maps(MDR_output[3][(1, :)], (output_dir + '/fitted_parameters/ADC'), np.shape(original_images))
MDR_output[4].to_csv((output_dir + 'DTI_largest_deformations.csv'))
end_computation_time = time.time()
print('total computation time for MDR (minutes taken:)...')
print((0.0166667 * (end_computation_time - start_computation_time)))
print('completed MDR registration!')
print('Finished processing Model Driven Registration case for iBEAt study DTI sequence!') | def iBEAt_test_DTI(Elastix_Parameter_file_PATH, output_dir, sorted_slice_files, ArrayDicomiBEAt, image_parameters, filenameDCM, lstFilesDCM):
' Example application of MDR in renal DTI (iBEAt data) \n\n Description\n -----------\n This function performs model driven registration for selected DTI sequence on a single selected slice \n and returns as output the MDR registered images, signal model fit, deformation field x, deformation field y, \n fitted parameters FA and ADC, and the final diagnostics.\n \n Args\n ----\n Elastix_Parameter_file_PATH (string): complete path to the elastix parameter file to be used. \n output_dir (string): directory where results are saved. \n slice_sorted_files (list): selected slices to process using MDR: sorted according to acquisition time. \n ArrayDicomiBEAt (numpy.ndarray): input DICOM to numpy array (unsorted). \n image_parameters (SITK input): image pixel spacing. \n filenameDCM (pathlib.PosixPath): dicom filenames to process. \n lstFilesDCM (list): list of dicom files to process. \n '
start_computation_time = time.time()
image_shape = np.shape(ArrayDicomiBEAt)
original_images = np.zeros(image_shape)
for (i, s) in enumerate(sorted_slice_files):
img2d = s.pixel_array
original_images[(:, :, i)] = img2d
full_module_name = 'models.DTI'
model = importlib.import_module(full_module_name)
signal_model_parameters = read_signal_model_parameters(filenameDCM, lstFilesDCM)
elastix_model_parameters = read_elastix_model_parameters(Elastix_Parameter_file_PATH, ['MaximumNumberOfIterations', 1024])
MDR_output = model_driven_registration(original_images, image_parameters, model, signal_model_parameters, elastix_model_parameters, precision=1, function='main')
export_images(MDR_output[0], (output_dir + '/coregistered/MDR-registered_DTI_'))
export_images(MDR_output[1], (output_dir + '/fit/fit_image_'))
export_images(MDR_output[2][(:, :, 0, :)], (output_dir + '/deformation_field/final_deformation_x_'))
export_images(MDR_output[2][(:, :, 1, :)], (output_dir + '/deformation_field/final_deformation_y_'))
export_maps(MDR_output[3][(0, :)], (output_dir + '/fitted_parameters/FA'), np.shape(original_images))
export_maps(MDR_output[3][(1, :)], (output_dir + '/fitted_parameters/ADC'), np.shape(original_images))
MDR_output[4].to_csv((output_dir + 'DTI_largest_deformations.csv'))
end_computation_time = time.time()
print('total computation time for MDR (minutes taken:)...')
print((0.0166667 * (end_computation_time - start_computation_time)))
print('completed MDR registration!')
print('Finished processing Model Driven Registration case for iBEAt study DTI sequence!')<|docstring|>Example application of MDR in renal DTI (iBEAt data)
Description
-----------
This function performs model driven registration for selected DTI sequence on a single selected slice
and returns as output the MDR registered images, signal model fit, deformation field x, deformation field y,
fitted parameters FA and ADC, and the final diagnostics.
Args
----
Elastix_Parameter_file_PATH (string): complete path to the elastix parameter file to be used.
output_dir (string): directory where results are saved.
slice_sorted_files (list): selected slices to process using MDR: sorted according to acquisition time.
ArrayDicomiBEAt (numpy.ndarray): input DICOM to numpy array (unsorted).
image_parameters (SITK input): image pixel spacing.
filenameDCM (pathlib.PosixPath): dicom filenames to process.
lstFilesDCM (list): list of dicom files to process.<|endoftext|> |
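The first loop in the function above stacks sorted 2D DICOM frames into a 3D numpy volume before model fitting. A self-contained sketch of that step on synthetic frames (the shape, dtype, and frame count are assumptions for illustration, not iBEAt values):

import numpy as np

frames = [np.random.randint(0, 4096, size=(172, 172), dtype=np.uint16) for _ in range(30)]  # fake slices
volume = np.zeros((172, 172, len(frames)))
for i, frame in enumerate(frames):          # frames are assumed already sorted by acquisition time
    volume[:, :, i] = frame
print(volume.shape)                         # (172, 172, 30)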
94f15fd820c1fceab101c975a603ce08351cbf64c13d90d88606b22fe67e075a | def read_dicom_tags_DTI(fname, lstFilesDCM):
' This function reads the DICOM header from the DTI sequence and returns the corresponding DTI tags.\n\n Args\n ----\n fname (pathlib.PosixPath): dicom filenames to process. \n lstFilesDCM (list): list of dicom files to process. \n\n Returns\n -------\n b-values (list): list of DTI b-values (s/mm2). \n b_Vec_original (list): original b-vectors as list. \n image_orientation_patient (list): patient orientation as list. \n '
b_values = []
b_Vec_original = []
image_orientation_patient = []
for fname in lstFilesDCM:
dataset = pydicom.dcmread(fname)
b_values.append(dataset[(25, 4108)].value)
b_Vec_original.append(dataset[(25, 4110)].value)
image_orientation_patient.append(dataset.ImageOrientationPatient)
return (b_values, b_Vec_original, image_orientation_patient) | This function reads the DICOM header from the DTI sequence and returns the corresponding DTI tags.
Args
----
fname (pathlib.PosixPath): dicom filenames to process.
lstFilesDCM (list): list of dicom files to process.
Returns
-------
b-values (list): list of DTI b-values (s/mm2).
b_Vec_original (list): original b-vectors as list.
image_orientation_patient (list): patient orientation as list. | tests/MDR_test_DTI.py | read_dicom_tags_DTI | QIB-Sheffield/MDR-Library | 0 | python | def read_dicom_tags_DTI(fname, lstFilesDCM):
' This function reads the DICOM header from the DTI sequence and returns the corresponding DTI tags.\n\n Args\n ----\n fname (pathlib.PosixPath): dicom filenames to process. \n lstFilesDCM (list): list of dicom files to process. \n\n Returns\n -------\n b-values (list): list of DTI b-values (s/mm2). \n b_Vec_original (list): original b-vectors as list. \n image_orientation_patient (list): patient orientation as list. \n '
b_values = []
b_Vec_original = []
image_orientation_patient = []
for fname in lstFilesDCM:
dataset = pydicom.dcmread(fname)
b_values.append(dataset[(25, 4108)].value)
b_Vec_original.append(dataset[(25, 4110)].value)
image_orientation_patient.append(dataset.ImageOrientationPatient)
return (b_values, b_Vec_original, image_orientation_patient) | def read_dicom_tags_DTI(fname, lstFilesDCM):
' This function reads the DICOM header from the DTI sequence and returns the corresponding DTI tags.\n\n Args\n ----\n fname (pathlib.PosixPath): dicom filenames to process. \n lstFilesDCM (list): list of dicom files to process. \n\n Returns\n -------\n b-values (list): list of DTI b-values (s/mm2). \n b_Vec_original (list): original b-vectors as list. \n image_orientation_patient (list): patient orientation as list. \n '
b_values = []
b_Vec_original = []
image_orientation_patient = []
for fname in lstFilesDCM:
dataset = pydicom.dcmread(fname)
b_values.append(dataset[(25, 4108)].value)
b_Vec_original.append(dataset[(25, 4110)].value)
image_orientation_patient.append(dataset.ImageOrientationPatient)
return (b_values, b_Vec_original, image_orientation_patient)<|docstring|>This function reads the DICOM header from the DTI sequence and returns the corresponding DTI tags.
Args
----
fname (pathlib.PosixPath): dicom filenames to process.
lstFilesDCM (list): list of dicom files to process.
Returns
-------
b-values (list): list of DTI b-values (s/mm2).
b_Vec_original (list): original b-vectors as list.
image_orientation_patient (list): patient orientation as list.<|endoftext|> |
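The numeric tags used above are the decimal form of the Siemens private diffusion elements: dataset[(25, 4108)] is (0x0019, 0x100C), the b-value, and dataset[(25, 4110)] is (0x0019, 0x100E), the diffusion gradient direction. A hedged single-file sketch; the path is a placeholder and the tags are vendor-specific, so they may be absent on other scanners.

import pydicom

ds = pydicom.dcmread('dti_frame.dcm')              # placeholder path to one DTI frame
b_value = ds[(0x0019, 0x100C)].value               # same element as dataset[(25, 4108)]
b_vector = ds[(0x0019, 0x100E)].value              # same element as dataset[(25, 4110)]
orientation = ds.ImageOrientationPatient
print(b_value, list(b_vector), list(orientation))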
d0bdfc1c168c71660383d952cc65eede72ebe681cacdb06835260eecb34859cd | def detect_card(image_path: str, output_dir: str='output/', unwarp: bool=True, model_name: str='maskrcnn_resnet50', color: tuple=(0, 0, 0)):
'\n Arguments:\n image_path: path to the image to be processed\n output_dir: path to the results to be exported\n unwarp: unwarp detected id card to rectangle\n model_name: model to be used in the inference\n color: color to be used in the mask/bbox/quad visualizations\n '
image = read_image(image_path)
(masks, boxes, classes, scores) = get_prediction(image=image, model_name=model_name, threshold=0.75)
prediction_visual = visualize_prediction(image, masks, boxes, classes, rect_th=2, text_size=0.85, text_th=2, color=color, output_dir=output_dir)
if (not unwarp):
export_predicted_bboxes(image=image, boxes=boxes, output_dir=output_dir)
quads = []
unwarped_quads = []
else:
quads = fit_quads_over_masks(image, masks)
quad_visual = visualize_quads(image=image, quads=quads, output_dir=output_dir, color=color)
unwarped_quads = unwarp_quads(image, quads)
export_unwarped_quads(unwarped_quads, output_dir=output_dir)
return (masks, boxes, classes, scores, quads) | Arguments:
image_path: path to the image to be processed
output_dir: path to the results to be exported
unwarp: unwarp detected id card to rectangle
model_name: model to be used in the inference
color: color to be used in the mask/bbox/quad visualizations | id_card_detector/__init__.py | detect_card | SaddamBInSyed/id-card-detector | 3 | python | def detect_card(image_path: str, output_dir: str='output/', unwarp: bool=True, model_name: str='maskrcnn_resnet50', color: tuple=(0, 0, 0)):
'\n Arguments:\n image_path: path to the image to be processed\n output_dir: path to the results to be exported\n unwarp: unwarp detected id card to rectangle\n model_name: model to be used in the inference\n color: color to be used in the mask/bbox/quad visualizations\n '
image = read_image(image_path)
(masks, boxes, classes, scores) = get_prediction(image=image, model_name=model_name, threshold=0.75)
prediction_visual = visualize_prediction(image, masks, boxes, classes, rect_th=2, text_size=0.85, text_th=2, color=color, output_dir=output_dir)
if (not unwarp):
export_predicted_bboxes(image=image, boxes=boxes, output_dir=output_dir)
quads = []
unwarped_quads = []
else:
quads = fit_quads_over_masks(image, masks)
quad_visual = visualize_quads(image=image, quads=quads, output_dir=output_dir, color=color)
unwarped_quads = unwarp_quads(image, quads)
export_unwarped_quads(unwarped_quads, output_dir=output_dir)
return (masks, boxes, classes, scores, quads) | def detect_card(image_path: str, output_dir: str='output/', unwarp: bool=True, model_name: str='maskrcnn_resnet50', color: tuple=(0, 0, 0)):
'\n Arguments:\n image_path: path to the image to be processed\n output_dir: path to the results to be exported\n unwarp: unwarp detected id card to rectangle\n model_name: model to be used in the inference\n color: color to be used in the mask/bbox/quad visualizations\n '
image = read_image(image_path)
(masks, boxes, classes, scores) = get_prediction(image=image, model_name=model_name, threshold=0.75)
prediction_visual = visualize_prediction(image, masks, boxes, classes, rect_th=2, text_size=0.85, text_th=2, color=color, output_dir=output_dir)
if (not unwarp):
export_predicted_bboxes(image=image, boxes=boxes, output_dir=output_dir)
quads = []
unwarped_quads = []
else:
quads = fit_quads_over_masks(image, masks)
quad_visual = visualize_quads(image=image, quads=quads, output_dir=output_dir, color=color)
unwarped_quads = unwarp_quads(image, quads)
export_unwarped_quads(unwarped_quads, output_dir=output_dir)
return (masks, boxes, classes, scores, quads)<|docstring|>Arguments:
image_path: path to the image to be processed
output_dir: path to the results to be exported
unwarp: unwarp detected id card to rectangle
model_name: model to be used in the inference
color: color to be used in the mask/bbox/quad visualizations<|endoftext|> |
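A minimal usage sketch for the entry point above, assuming the id_card_detector package is installed and that a sample image exists at the placeholder path:

from id_card_detector import detect_card

masks, boxes, classes, scores, quads = detect_card(
    image_path='samples/id_card.jpg',   # placeholder input image
    output_dir='output/',
    unwarp=True,                        # also fits quads and exports unwarped card crops
)
print('detected {} card(s), scores: {}'.format(len(boxes), scores))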
9d19a07b1ce1c3fe46cfd55be73b390e720553ee601dfe88a5d35c87c8d5c68b | def validate_columns(columns):
'\n Validates the columns based on their validity, returning a set\n :param columns: \n :return: a set of columns, constructed from the iterable passed in as the param\n '
columns = tuple(columns)
if (len(columns) == 0):
raise Exception('Pipeline must read >0 columns')
cols = []
for c in columns:
cols.append(''.join([i for i in c if (not i.isdigit())]))
new_columns = set(cols)
invalid_columns = (new_columns - valid_columns)
if (len(invalid_columns) != 0):
raise Exception("Can't instantiate Pipeline with invalid columns: {}".format(invalid_columns))
return columns | Validates the requested columns against the set of known valid column names, returning them as a tuple
:param columns: an iterable of column names to validate
:return: a tuple of columns, constructed from the iterable passed in as the param | tensorflow/contrib/persona/python/ops/io_pipe.py | validate_columns | epfl-dcsl/ptf-system | 0 | python | def validate_columns(columns):
'\n Validates the columns based on their validity, returning a set\n :param columns: \n :return: a set of columns, constructed from the iterable passed in as the param\n '
columns = tuple(columns)
if (len(columns) == 0):
raise Exception('Pipeline must read >0 columns')
cols = []
for c in columns:
cols.append(''.join([i for i in c if (not i.isdigit())]))
new_columns = set(cols)
invalid_columns = (new_columns - valid_columns)
if (len(invalid_columns) != 0):
raise Exception("Can't instantiate Pipeline with invalid columns: {}".format(invalid_columns))
return columns | def validate_columns(columns):
'\n Validates the columns based on their validity, returning a set\n :param columns: \n :return: a set of columns, constructed from the iterable passed in as the param\n '
columns = tuple(columns)
if (len(columns) == 0):
raise Exception('Pipeline must read >0 columns')
cols = []
for c in columns:
cols.append(''.join([i for i in c if (not i.isdigit())]))
new_columns = set(cols)
invalid_columns = (new_columns - valid_columns)
if (len(invalid_columns) != 0):
raise Exception("Can't instantiate Pipeline with invalid columns: {}".format(invalid_columns))
return columns<|docstring|>Validates the requested columns against the set of known valid column names, returning them as a tuple
:param columns: an iterable of column names to validate
:return: a tuple of columns, constructed from the iterable passed in as the param<|endoftext|>
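The digit-stripping in the function above lets numbered column variants (e.g. 'results0', 'results1') validate against their base column name. A self-contained re-implementation for illustration; the valid_columns set here is an assumption, since the real module-level set is not shown in this excerpt.

valid_columns = {'base', 'qual', 'metadata', 'results'}      # assumed for illustration

def check_columns(columns):
    columns = tuple(columns)
    if not columns:
        raise ValueError('Pipeline must read >0 columns')
    stripped = {''.join(ch for ch in name if not ch.isdigit()) for name in columns}
    invalid = stripped - valid_columns
    if invalid:
        raise ValueError('invalid columns: {}'.format(invalid))
    return columns

print(check_columns(('base', 'qual', 'results0', 'results1')))   # passes, trailing digits are ignored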
a726ea82effec8d5a28b9c0f187a7fdb3cd27ee40dd6c178cc2b3a1039e00198 | def expand_column_extensions(key, columns):
'\n Expands a given AGD key into the full extensions, based on the columns\n :param keys: an iterator of scalar strings, representing the keys for a given parallelism level\n :param columns: assumed to have been validated previously be the caller\n :yield: a generator for keys\n '
for c in columns:
(yield string_ops.string_join(inputs=[key, c], separator='.', name='AGD_column_expansion')) | Expands a given AGD key into the full extensions, based on the columns
:param keys: an iterator of scalar strings, representing the keys for a given parallelism level
:param columns: assumed to have been validated previously be the caller
:yield: a generator for keys | tensorflow/contrib/persona/python/ops/io_pipe.py | expand_column_extensions | epfl-dcsl/ptf-system | 0 | python | def expand_column_extensions(key, columns):
'\n Expands a given AGD key into the full extensions, based on the columns\n :param keys: an iterator of scalar strings, representing the keys for a given parallelism level\n :param columns: assumed to have been validated previously be the caller\n :yield: a generator for keys\n '
for c in columns:
(yield string_ops.string_join(inputs=[key, c], separator='.', name='AGD_column_expansion')) | def expand_column_extensions(key, columns):
'\n Expands a given AGD key into the full extensions, based on the columns\n :param keys: an iterator of scalar strings, representing the keys for a given parallelism level\n :param columns: assumed to have been validated previously be the caller\n :yield: a generator for keys\n '
for c in columns:
(yield string_ops.string_join(inputs=[key, c], separator='.', name='AGD_column_expansion'))<|docstring|>Expands a given AGD key into the full extensions, based on the columns
:param keys: an iterator of scalar strings, representing the keys for a given parallelism level
:param columns: assumed to have been validated previously be the caller
:yield: a generator for keys<|endoftext|> |
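In plain Python terms, the generator above appends each column name to the chunk key with a '.' separator; the real implementation does the same thing with TensorFlow string_join ops so the expansion stays inside the graph. A stdlib-only sketch with made-up key and column names:

def expand_key(key, columns):
    for column in columns:
        yield '{}.{}'.format(key, column)

print(list(expand_key('dataset_chunk_000', ('base', 'qual', 'results'))))
# ['dataset_chunk_000.base', 'dataset_chunk_000.qual', 'dataset_chunk_000.results']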
e3a1bd5fdd1610b6fae0bdb0dcc335a80c430900a1ba9b7084fc48623545a851 | def ceph_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, columns, pool_name, ceph_read_size=(2 ** 26), buffer_pool=None, buffer_pool_args=pool_default_args, delete_after_read=False, name='ceph_read_pipeline', log_directory=None, metadata=None):
'\n Create a ceph input pipeline.\n \n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name: \n :param cluster_name: \n :param ceph_conf_path: \n :param columns: \n :param downstream_parallel: the level of parallelism to create for the downstream nodes\n :param ceph_read_size: \n :param buffer_pool: \n :param name: \n :return: a list of (key, namespace, tuple(chunk_buffers)) for every tensor in upstream tensors\n '
upstream_tensors = sanitize_generator(upstream_tensors)
with ops.name_scope(name):
columns = validate_columns(columns=columns)
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
reader = partial(persona_ops.ceph_reader, cluster_name=cluster_name, user_name=user_name, pool_name=pool_name, ceph_conf_path=ceph_conf_path, read_size=ceph_read_size, delete_after_read=delete_after_read, buffer_pool=buffer_pool)
if (metadata is None):
metadata = ((None,) * len(upstream_tensors))
else:
metadata = sanitize_generator(metadata)
if (len(metadata) != len(upstream_tensors)):
raise Exception('Only have {m} metadata items, but passed in {u} upstream tensors to Ceph Read'.format(m=len(metadata), u=len(upstream_tensors)))
for ((key, namespace), idc) in zip(upstream_tensors, metadata):
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
chunk_buffers = tuple(((column_key, reader(key=column_key, namespace=namespace)) for column_key in expand_column_extensions(key=key, columns=columns)))
def gen_file_handles(buffers):
for (column_key, cb) in buffers:
a = cb.file_handle
if (log_directory is not None):
timestamp = cb.time
read_duration = cb.duration
num_bytes = cb.bytes
log_op = gate.log_events(item_names=(('timestamp', 'key', 'duration', 'bytes') + (('id',) if (idc is not None) else ())), directory=log_directory, event_name=name, name='{}_logger'.format(name), components=((timestamp, column_key, read_duration, num_bytes) + ((idc,) if (idc is not None) else ())))
with ops.control_dependencies((log_op,)):
a = array_ops.identity(a)
(yield a)
(yield (key, namespace, tuple(gen_file_handles(buffers=chunk_buffers)))) | Create a ceph input pipeline.
:param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param columns:
:param pool_name: name of the Ceph pool that the chunk columns are read from
:param ceph_read_size:
:param buffer_pool:
:param name:
:return: a list of (key, namespace, tuple(chunk_buffers)) for every tensor in upstream tensors | tensorflow/contrib/persona/python/ops/io_pipe.py | ceph_read_pipeline | epfl-dcsl/ptf-system | 0 | python | def ceph_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, columns, pool_name, ceph_read_size=(2 ** 26), buffer_pool=None, buffer_pool_args=pool_default_args, delete_after_read=False, name='ceph_read_pipeline', log_directory=None, metadata=None):
'\n Create a ceph input pipeline.\n \n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name: \n :param cluster_name: \n :param ceph_conf_path: \n :param columns: \n :param downstream_parallel: the level of parallelism to create for the downstream nodes\n :param ceph_read_size: \n :param buffer_pool: \n :param name: \n :return: a list of (key, namespace, tuple(chunk_buffers)) for every tensor in upstream tensors\n '
upstream_tensors = sanitize_generator(upstream_tensors)
with ops.name_scope(name):
columns = validate_columns(columns=columns)
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
reader = partial(persona_ops.ceph_reader, cluster_name=cluster_name, user_name=user_name, pool_name=pool_name, ceph_conf_path=ceph_conf_path, read_size=ceph_read_size, delete_after_read=delete_after_read, buffer_pool=buffer_pool)
if (metadata is None):
metadata = ((None,) * len(upstream_tensors))
else:
metadata = sanitize_generator(metadata)
if (len(metadata) != len(upstream_tensors)):
raise Exception('Only have {m} metadata items, but passed in {u} upstream tensors to Ceph Read'.format(m=len(metadata), u=len(upstream_tensors)))
for ((key, namespace), idc) in zip(upstream_tensors, metadata):
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
chunk_buffers = tuple(((column_key, reader(key=column_key, namespace=namespace)) for column_key in expand_column_extensions(key=key, columns=columns)))
def gen_file_handles(buffers):
for (column_key, cb) in buffers:
a = cb.file_handle
if (log_directory is not None):
timestamp = cb.time
read_duration = cb.duration
num_bytes = cb.bytes
log_op = gate.log_events(item_names=(('timestamp', 'key', 'duration', 'bytes') + (('id',) if (idc is not None) else ())), directory=log_directory, event_name=name, name='{}_logger'.format(name), components=((timestamp, column_key, read_duration, num_bytes) + ((idc,) if (idc is not None) else ())))
with ops.control_dependencies((log_op,)):
a = array_ops.identity(a)
(yield a)
(yield (key, namespace, tuple(gen_file_handles(buffers=chunk_buffers)))) | def ceph_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, columns, pool_name, ceph_read_size=(2 ** 26), buffer_pool=None, buffer_pool_args=pool_default_args, delete_after_read=False, name='ceph_read_pipeline', log_directory=None, metadata=None):
'\n Create a ceph input pipeline.\n \n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name: \n :param cluster_name: \n :param ceph_conf_path: \n :param columns: \n :param downstream_parallel: the level of parallelism to create for the downstream nodes\n :param ceph_read_size: \n :param buffer_pool: \n :param name: \n :return: a list of (key, namespace, tuple(chunk_buffers)) for every tensor in upstream tensors\n '
upstream_tensors = sanitize_generator(upstream_tensors)
with ops.name_scope(name):
columns = validate_columns(columns=columns)
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
reader = partial(persona_ops.ceph_reader, cluster_name=cluster_name, user_name=user_name, pool_name=pool_name, ceph_conf_path=ceph_conf_path, read_size=ceph_read_size, delete_after_read=delete_after_read, buffer_pool=buffer_pool)
if (metadata is None):
metadata = ((None,) * len(upstream_tensors))
else:
metadata = sanitize_generator(metadata)
if (len(metadata) != len(upstream_tensors)):
raise Exception('Only have {m} metadata items, but passed in {u} upstream tensors to Ceph Read'.format(m=len(metadata), u=len(upstream_tensors)))
for ((key, namespace), idc) in zip(upstream_tensors, metadata):
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
chunk_buffers = tuple(((column_key, reader(key=column_key, namespace=namespace)) for column_key in expand_column_extensions(key=key, columns=columns)))
def gen_file_handles(buffers):
for (column_key, cb) in buffers:
a = cb.file_handle
if (log_directory is not None):
timestamp = cb.time
read_duration = cb.duration
num_bytes = cb.bytes
log_op = gate.log_events(item_names=(('timestamp', 'key', 'duration', 'bytes') + (('id',) if (idc is not None) else ())), directory=log_directory, event_name=name, name='{}_logger'.format(name), components=((timestamp, column_key, read_duration, num_bytes) + ((idc,) if (idc is not None) else ())))
with ops.control_dependencies((log_op,)):
a = array_ops.identity(a)
(yield a)
(yield (key, namespace, tuple(gen_file_handles(buffers=chunk_buffers))))<|docstring|>Create a ceph input pipeline.
:param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param columns:
:param pool_name: name of the Ceph pool that the chunk columns are read from
:param ceph_read_size:
:param buffer_pool:
:param name:
:return: a list of (key, namespace, tuple(chunk_buffers)) for every tensor in upstream tensors<|endoftext|> |
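A wiring sketch only, assuming a TensorFlow build that includes the Persona contrib ops and that the AGD chunk exists in the named Ceph pool; the import path, credentials, pool, key, and column choices are placeholders rather than a tested configuration.

import tensorflow as tf
from tensorflow.contrib.persona.python.ops.io_pipe import ceph_read_pipeline   # assumed import path

upstream = [(tf.constant('dataset_chunk_000'), tf.constant('experiment_ns'))]  # (key, namespace) scalar strings
read_stages = list(ceph_read_pipeline(
    upstream_tensors=upstream,
    user_name='client.admin',              # placeholder Ceph credentials
    cluster_name='ceph',
    ceph_conf_path='/etc/ceph/ceph.conf',
    pool_name='agd_pool',
    columns=('base', 'qual'),
))
key, namespace, chunk_buffer_handles = read_stages[0]   # one tuple per upstream (key, namespace) pair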
a671e0af1524b1a18aab1dfdcf0e0f8ae617acdb35623c471c65ef6ba2311caa | def ceph_lazy_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, columns, pool_name, records_per_segment, segments_to_buffer, delete_after_read=False, name='ceph_lazy_read_pipeline'):
'\n Create a lazy ceph input pipeline.\n\n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name:\n :param cluster_name:\n :param ceph_conf_path:\n :param columns:\n :param pool_name:\n :param records_per_segment:\n :param segments_to_buffer:\n :param delete_after_read:\n :param name:\n :return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors\n Note that it is assumed that the record_id is the same for all column chunks (it should be)\n '
with ops.name_scope(name):
columns = validate_columns(columns=columns)
pool = persona_ops.ceph_lazy_column_pool(bound=False, size=0, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=str(ceph_conf_path), pool_name=pool_name, records_per_segment=records_per_segment, num_segments=segments_to_buffer)
reader = partial(persona_ops.lazy_ceph_reader, column_pool=pool, delete_after_read=delete_after_read)
for (key, namespace) in upstream_tensors:
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
(chunk_buffers, record_ids) = zip(*(reader(key=column_key, namespace=namespace) for column_key in expand_column_extensions(key=key, columns=columns)))
(yield (key, namespace, chunk_buffers, record_ids[0])) | Create a lazy ceph input pipeline.
:param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param columns:
:param pool_name:
:param records_per_segment:
:param segments_to_buffer:
:param delete_after_read:
:param name:
:return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors
Note that it is assumed that the record_id is the same for all column chunks (it should be) | tensorflow/contrib/persona/python/ops/io_pipe.py | ceph_lazy_read_pipeline | epfl-dcsl/ptf-system | 0 | python | def ceph_lazy_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, columns, pool_name, records_per_segment, segments_to_buffer, delete_after_read=False, name='ceph_lazy_read_pipeline'):
'\n Create a lazy ceph input pipeline.\n\n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name:\n :param cluster_name:\n :param ceph_conf_path:\n :param columns:\n :param pool_name:\n :param records_per_segment:\n :param segments_to_buffer:\n :param delete_after_read:\n :param name:\n :return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors\n Note that it is assumed that the record_id is the same for all column chunks (it should be)\n '
with ops.name_scope(name):
columns = validate_columns(columns=columns)
pool = persona_ops.ceph_lazy_column_pool(bound=False, size=0, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=str(ceph_conf_path), pool_name=pool_name, records_per_segment=records_per_segment, num_segments=segments_to_buffer)
reader = partial(persona_ops.lazy_ceph_reader, column_pool=pool, delete_after_read=delete_after_read)
for (key, namespace) in upstream_tensors:
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
(chunk_buffers, record_ids) = zip(*(reader(key=column_key, namespace=namespace) for column_key in expand_column_extensions(key=key, columns=columns)))
(yield (key, namespace, chunk_buffers, record_ids[0])) | def ceph_lazy_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, columns, pool_name, records_per_segment, segments_to_buffer, delete_after_read=False, name='ceph_lazy_read_pipeline'):
'\n Create a lazy ceph input pipeline.\n\n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name:\n :param cluster_name:\n :param ceph_conf_path:\n :param columns:\n :param pool_name:\n :param records_per_segment:\n :param segments_to_buffer:\n :param delete_after_read:\n :param name:\n :return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors\n Note that it is assumed that the record_id is the same for all column chunks (it should be)\n '
with ops.name_scope(name):
columns = validate_columns(columns=columns)
pool = persona_ops.ceph_lazy_column_pool(bound=False, size=0, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=str(ceph_conf_path), pool_name=pool_name, records_per_segment=records_per_segment, num_segments=segments_to_buffer)
reader = partial(persona_ops.lazy_ceph_reader, column_pool=pool, delete_after_read=delete_after_read)
for (key, namespace) in upstream_tensors:
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
(chunk_buffers, record_ids) = zip(*(reader(key=column_key, namespace=namespace) for column_key in expand_column_extensions(key=key, columns=columns)))
(yield (key, namespace, chunk_buffers, record_ids[0]))<|docstring|>Create a lazy ceph input pipeline.
:param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param columns:
:param pool_name:
:param records_per_segment:
:param segments_to_buffer:
:param delete_after_read:
:param name:
:return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors
Note that it is assumed that the record_id is the same for all column chunks (it should be)<|endoftext|> |
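The lazy variant is wired the same way but buffers fixed-size segments per column and also yields the record id; a sketch under the same assumptions (and with the same placeholder values) as the eager example above.

import tensorflow as tf
from tensorflow.contrib.persona.python.ops.io_pipe import ceph_lazy_read_pipeline   # assumed import path

upstream = [(tf.constant('dataset_chunk_000'), tf.constant('experiment_ns'))]
lazy_stages = list(ceph_lazy_read_pipeline(
    upstream_tensors=upstream,
    user_name='client.admin', cluster_name='ceph',
    ceph_conf_path='/etc/ceph/ceph.conf', pool_name='agd_pool',
    columns=('base', 'qual'),
    records_per_segment=50000,              # assumed segment sizing, not a recommended value
    segments_to_buffer=2,
))
key, namespace, chunk_buffers, record_id = lazy_stages[0]   # the lazy variant also yields the record id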
7365d4dec8551457ffeb96a576bb1abbac22181eba852444484542000442dc88 | def ceph_combo_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, pool_name, columns, records_per_segment, segments_to_buffer, ceph_read_size=(2 ** 26), buffer_pool=None, buffer_pool_args=pool_default_args, eager_column_types=(), delete_after_read=False, name='ceph_combo_read_pipeline'):
'\n Create a lazy ceph input pipeline.\n\n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name:\n :param cluster_name:\n :param ceph_conf_path:\n :param columns:\n :param pool_name:\n :param records_per_segment:\n :param segments_to_buffer:\n :param delete_after_read:\n :param name:\n :return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors\n Note that it is assumed that the record_id is the same for all column chunks (it should be)\n '
with ops.name_scope(name):
columns = validate_columns(columns=columns)
pool = persona_ops.ceph_lazy_column_pool(bound=False, size=0, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=str(ceph_conf_path), pool_name=pool_name, records_per_segment=records_per_segment, num_segments=segments_to_buffer)
lazy_reader = partial(persona_ops.lazy_ceph_reader, column_pool=pool, delete_after_read=delete_after_read)
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
eager_reader = partial(persona_ops.ceph_reader, cluster_name=cluster_name, user_name=user_name, pool_name=pool_name, ceph_conf_path=str(ceph_conf_path), read_size=ceph_read_size, delete_after_read=delete_after_read, buffer_pool=buffer_pool)
pool = persona_ops.raw_file_system_column_pool(bound=False, size=0)
convert = partial(persona_ops.raw_file_converter, column_pool=pool)
def gen_columns(key, namespace):
for (column_key, column) in zip(expand_column_extensions(key=key, columns=columns), columns):
if (column in eager_column_types):
val = eager_reader(key=column_key, namespace=namespace)
val = convert(data=val.file_handle)
else:
val = lazy_reader(key=column_key, namespace=namespace)
(yield val)
for (key, namespace) in upstream_tensors:
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
this_columns = tuple(gen_columns(key=key, namespace=namespace))
(chunk_buffers, record_ids) = zip(*this_columns)
(yield (key, namespace, chunk_buffers, record_ids[0])) | Create a lazy ceph input pipeline.
:param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param columns:
:param pool_name:
:param records_per_segment:
:param segments_to_buffer:
:param delete_after_read:
:param name:
:return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors
Note that it is assumed that the record_id is the same for all column chunks (it should be) | tensorflow/contrib/persona/python/ops/io_pipe.py | ceph_combo_read_pipeline | epfl-dcsl/ptf-system | 0 | python | def ceph_combo_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, pool_name, columns, records_per_segment, segments_to_buffer, ceph_read_size=(2 ** 26), buffer_pool=None, buffer_pool_args=pool_default_args, eager_column_types=(), delete_after_read=False, name='ceph_combo_read_pipeline'):
'\n Create a lazy ceph input pipeline.\n\n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name:\n :param cluster_name:\n :param ceph_conf_path:\n :param columns:\n :param pool_name:\n :param records_per_segment:\n :param segments_to_buffer:\n :param delete_after_read:\n :param name:\n :return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors\n Note that it is assumed that the record_id is the same for all column chunks (it should be)\n '
with ops.name_scope(name):
columns = validate_columns(columns=columns)
pool = persona_ops.ceph_lazy_column_pool(bound=False, size=0, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=str(ceph_conf_path), pool_name=pool_name, records_per_segment=records_per_segment, num_segments=segments_to_buffer)
lazy_reader = partial(persona_ops.lazy_ceph_reader, column_pool=pool, delete_after_read=delete_after_read)
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
eager_reader = partial(persona_ops.ceph_reader, cluster_name=cluster_name, user_name=user_name, pool_name=pool_name, ceph_conf_path=str(ceph_conf_path), read_size=ceph_read_size, delete_after_read=delete_after_read, buffer_pool=buffer_pool)
pool = persona_ops.raw_file_system_column_pool(bound=False, size=0)
convert = partial(persona_ops.raw_file_converter, column_pool=pool)
def gen_columns(key, namespace):
for (column_key, column) in zip(expand_column_extensions(key=key, columns=columns), columns):
if (column in eager_column_types):
val = eager_reader(key=column_key, namespace=namespace)
val = convert(data=val.file_handle)
else:
val = lazy_reader(key=column_key, namespace=namespace)
(yield val)
for (key, namespace) in upstream_tensors:
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
this_columns = tuple(gen_columns(key=key, namespace=namespace))
(chunk_buffers, record_ids) = zip(*this_columns)
(yield (key, namespace, chunk_buffers, record_ids[0])) | def ceph_combo_read_pipeline(upstream_tensors, user_name, cluster_name, ceph_conf_path, pool_name, columns, records_per_segment, segments_to_buffer, ceph_read_size=(2 ** 26), buffer_pool=None, buffer_pool_args=pool_default_args, eager_column_types=(), delete_after_read=False, name='ceph_combo_read_pipeline'):
'\n Create a lazy ceph input pipeline.\n\n :param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism\n :param user_name:\n :param cluster_name:\n :param ceph_conf_path:\n :param columns:\n :param pool_name:\n :param records_per_segment:\n :param segments_to_buffer:\n :param delete_after_read:\n :param name:\n :return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors\n Note that it is assumed that the record_id is the same for all column chunks (it should be)\n '
with ops.name_scope(name):
columns = validate_columns(columns=columns)
pool = persona_ops.ceph_lazy_column_pool(bound=False, size=0, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=str(ceph_conf_path), pool_name=pool_name, records_per_segment=records_per_segment, num_segments=segments_to_buffer)
lazy_reader = partial(persona_ops.lazy_ceph_reader, column_pool=pool, delete_after_read=delete_after_read)
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
eager_reader = partial(persona_ops.ceph_reader, cluster_name=cluster_name, user_name=user_name, pool_name=pool_name, ceph_conf_path=str(ceph_conf_path), read_size=ceph_read_size, delete_after_read=delete_after_read, buffer_pool=buffer_pool)
pool = persona_ops.raw_file_system_column_pool(bound=False, size=0)
convert = partial(persona_ops.raw_file_converter, column_pool=pool)
def gen_columns(key, namespace):
for (column_key, column) in zip(expand_column_extensions(key=key, columns=columns), columns):
if (column in eager_column_types):
val = eager_reader(key=column_key, namespace=namespace)
val = convert(data=val.file_handle)
else:
val = lazy_reader(key=column_key, namespace=namespace)
(yield val)
for (key, namespace) in upstream_tensors:
validate_shape_and_dtype(tensor=key, expected_shape=scalar_shape, expected_dtype=dtypes.string)
validate_shape_and_dtype(tensor=namespace, expected_shape=scalar_shape, expected_dtype=dtypes.string)
this_columns = tuple(gen_columns(key=key, namespace=namespace))
(chunk_buffers, record_ids) = zip(*this_columns)
(yield (key, namespace, chunk_buffers, record_ids[0]))<|docstring|>Create a lazy ceph input pipeline.
:param upstream_tensors: a tuple of tensors (key, namespace), which are typically found in the metadata file. This controls the parallelism
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param columns:
:param pool_name:
:param records_per_segment:
:param segments_to_buffer:
:param delete_after_read:
:param name:
:return: yield a list of (key, namespace, tuple(chunk_buffers), record_id) for every tensor in upstream tensors
Note that it is assumed that the record_id is the same for all column chunks (it should be)<|endoftext|> |
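A minimal usage sketch for ceph_combo_read_pipeline, assuming the persona ops build of TensorFlow is available and that metadata_tensors, the Ceph credentials, the pool name and the 'base'/'qual' column names are illustrative placeholders rather than values taken from the row above:
# metadata_tensors is assumed to be an iterable of (key, namespace) scalar string tensor pairs.
read_groups = ceph_combo_read_pipeline(upstream_tensors=metadata_tensors,
                                        user_name='client.persona',            # assumed Ceph user
                                        cluster_name='ceph',                   # assumed cluster
                                        ceph_conf_path='/etc/ceph/ceph.conf',  # assumed conf path
                                        pool_name='agd_data',                  # assumed pool
                                        columns=('base', 'qual'),              # assumed valid columns
                                        records_per_segment=100000,            # assumed segment size
                                        segments_to_buffer=2,
                                        eager_column_types=('base',))          # 'base' read eagerly, 'qual' lazily
for key, namespace, chunk_buffers, record_id in read_groups:
    pass  # wire chunk_buffers into a downstream decode stage such as agd_reader_multi_column_pipeline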
623db8a3b8f01540218b569a3b0b5641a02f1e5516c025e8f3f21cc2bf7c267b | def aligner_compress_pipeline(upstream_tensors, buffer_pool=None, buffer_pool_args=pool_default_args, name='aligner_compress_pipeline'):
'\n Compresses a list of upstream tensors of buffer list (via handles) into buffers\n :param upstream_tensors: \n :param name: \n :return: a stacked matrix of compressed buffers\n '
with ops.name_scope(name):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
compress_buffer_list = partial(persona_ops.buffer_list_compressor, buffer_pool=buffer_pool)
for buffer_lists in upstream_tensors:
bls_unstacked = array_ops.unstack(buffer_lists)
compressed_buffers = tuple((compress_buffer_list(buffer_list=a) for a in bls_unstacked))
(yield array_ops.stack(compressed_buffers)) | Compresses a list of upstream tensors of buffer list (via handles) into buffers
:param upstream_tensors:
:param name:
:return: a stacked matrix of compressed buffers | tensorflow/contrib/persona/python/ops/io_pipe.py | aligner_compress_pipeline | epfl-dcsl/ptf-system | 0 | python | def aligner_compress_pipeline(upstream_tensors, buffer_pool=None, buffer_pool_args=pool_default_args, name='aligner_compress_pipeline'):
'\n Compresses a list of upstream tensors of buffer list (via handles) into buffers\n :param upstream_tensors: \n :param name: \n :return: a stacked matrix of compressed buffers\n '
with ops.name_scope(name):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
compress_buffer_list = partial(persona_ops.buffer_list_compressor, buffer_pool=buffer_pool)
for buffer_lists in upstream_tensors:
bls_unstacked = array_ops.unstack(buffer_lists)
compressed_buffers = tuple((compress_buffer_list(buffer_list=a) for a in bls_unstacked))
(yield array_ops.stack(compressed_buffers)) | def aligner_compress_pipeline(upstream_tensors, buffer_pool=None, buffer_pool_args=pool_default_args, name='aligner_compress_pipeline'):
'\n Compresses a list of upstream tensors of buffer list (via handles) into buffers\n :param upstream_tensors: \n :param name: \n :return: a stacked matrix of compressed buffers\n '
with ops.name_scope(name):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
compress_buffer_list = partial(persona_ops.buffer_list_compressor, buffer_pool=buffer_pool)
for buffer_lists in upstream_tensors:
bls_unstacked = array_ops.unstack(buffer_lists)
compressed_buffers = tuple((compress_buffer_list(buffer_list=a) for a in bls_unstacked))
(yield array_ops.stack(compressed_buffers))<|docstring|>Compresses a list of upstream tensors of buffer list (via handles) into buffers
:param upstream_tensors:
:param name:
:return: a stacked matrix of compressed buffers<|endoftext|> |
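A short usage sketch for aligner_compress_pipeline, assuming aligned_buffer_lists (an assumed name, not from the row above) is an iterable of stacked buffer-list handle tensors produced by an aligner stage:
# Each yielded tensor stacks one compressed buffer handle per buffer list in its input group.
compressed_columns = list(aligner_compress_pipeline(upstream_tensors=aligned_buffer_lists))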
1d19b338993e8ce235a508c81668742bc1fe8254041b44fa7f1fec40f6fd3bb4 | def sorter_compress_pipeline(upstream_tensors, buffer_pool=None, buffer_pool_args=pool_default_args, name='sorter_compress_pipeline'):
'\n :param upstream_tensors: a generator of stacked (i.e. matrix of (N,2) references to buffer pairs) to compress\n :param buffer_pool:\n :param buffer_pool_args:\n :param name:\n :return: a generator of stacked references to buffers, in the same shape as upstream_tensors for each item\n '
with ops.name_scope('compress_merge_results'):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
compressor = partial(persona_ops.buffer_pair_compressor, buffer_pool=buffer_pool)
for buffer_pairs in upstream_tensors:
bps_unstacked = array_ops.unstack(buffer_pairs)
compressed_buffers = tuple((compressor(buffer_pair=a) for a in bps_unstacked))
(yield array_ops.stack(compressed_buffers)) | :param upstream_tensors: a generator of stacked (i.e. matrix of (N,2) references to buffer pairs) to compress
:param buffer_pool:
:param buffer_pool_args:
:param name:
:return: a generator of stacked references to buffers, in the same shape as upstream_tensors for each item | tensorflow/contrib/persona/python/ops/io_pipe.py | sorter_compress_pipeline | epfl-dcsl/ptf-system | 0 | python | def sorter_compress_pipeline(upstream_tensors, buffer_pool=None, buffer_pool_args=pool_default_args, name='sorter_compress_pipeline'):
'\n :param upstream_tensors: a generator of stacked (i.e. matrix of (N,2) references to buffer pairs) to compress\n :param buffer_pool:\n :param buffer_pool_args:\n :param name:\n :return: a generator of stacked references to buffers, in the same shape as upstream_tensors for each item\n '
with ops.name_scope('compress_merge_results'):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
compressor = partial(persona_ops.buffer_pair_compressor, buffer_pool=buffer_pool)
for buffer_pairs in upstream_tensors:
bps_unstacked = array_ops.unstack(buffer_pairs)
compressed_buffers = tuple((compressor(buffer_pair=a) for a in bps_unstacked))
(yield array_ops.stack(compressed_buffers)) | def sorter_compress_pipeline(upstream_tensors, buffer_pool=None, buffer_pool_args=pool_default_args, name='sorter_compress_pipeline'):
'\n :param upstream_tensors: a generator of stacked (i.e. matrix of (N,2) references to buffer pairs) to compress\n :param buffer_pool:\n :param buffer_pool_args:\n :param name:\n :return: a generator of stacked references to buffers, in the same shape as upstream_tensors for each item\n '
with ops.name_scope('compress_merge_results'):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args)
compressor = partial(persona_ops.buffer_pair_compressor, buffer_pool=buffer_pool)
for buffer_pairs in upstream_tensors:
bps_unstacked = array_ops.unstack(buffer_pairs)
compressed_buffers = tuple((compressor(buffer_pair=a) for a in bps_unstacked))
(yield array_ops.stack(compressed_buffers))<|docstring|>:param upstream_tensors: a generator of stacked (i.e. matrix of (N,2) references to buffer pairs) to compress
:param buffer_pool:
:param buffer_pool_args:
:param name:
:return: a generator of stacked references to buffers, in the same shape as upstream_tensors for each item<|endoftext|> |
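A short usage sketch for sorter_compress_pipeline, assuming merged_buffer_pairs (an assumed name) is an iterable of stacked (N, 2) buffer-pair handle tensors coming out of a merge/sort stage:
# Compress each stacked group of buffer pairs; the output stacking mirrors the input stacking.
compressed_pairs = list(sorter_compress_pipeline(upstream_tensors=merged_buffer_pairs))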
56228b6abe7f4d2c0cc6d883afe1bcf45e20d8c5541e6269f322c5d598fae1d3 | def ceph_write_pipeline(upstream_tensors, user_name, cluster_name, pool_name, ceph_conf_path, compressed, record_types=default_records_type, name='ceph_write_pipeline', log_directory=None, metadata=None):
'\n :param upstream_tensors: a list of aligner output tensors of type (key, namespace, num_records, first ordinal, record id, column handle)\n :param user_name: \n :param cluster_name: \n :param ceph_conf_path: \n :param name: \n :return: yields the output of ceph write columns\n '
writer_op = partial(persona_ops.agd_ceph_buffer_writer, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=ceph_conf_path, pool_name=pool_name, compressed=compressed)
record_types = sanitize_generator(record_types)
upstream_tensors = sanitize_generator(upstream_tensors)
def make_ceph_writer(key, first_ordinal, num_records, column_handle, namespace, record_id, idc):
column_handles = array_ops.unstack(column_handle)
if (not (len(column_handles) == len(record_types))):
raise Exception('number of record types ({r}) must be equal to number of columns ({c})'.format(r=len(record_types), c=len(column_handles)))
custom_writer_op = partial(writer_op, record_id=record_id, num_records=num_records, first_ordinal=first_ordinal, namespace=namespace)
for (handle, record_type) in zip(column_handles, record_types):
check_valid_record_type(record_type=record_type)
full_key = string_ops.string_join([key, suffix_separator, record_type['extension']])
rtype = record_type['type']
a = custom_writer_op(record_type=rtype, path=full_key, resource_handle=handle, name='_'.join((name, rtype)))
res_val = a.output_path
if (log_directory is not None):
timestamp = a.time
write_duration = a.duration
num_bytes = a.bytes
log_op = gate.log_events(item_names=(('timestamp', 'key', 'duration', 'bytes') + (('id',) if (idc is not None) else ())), directory=log_directory, event_name=name, name='{}_logger'.format(name), components=((timestamp, res_val, write_duration, num_bytes) + ((idc,) if (idc is not None) else ())))
with ops.control_dependencies((log_op,)):
res_val = array_ops.identity(res_val)
(yield res_val)
if (metadata is None):
metadata = ((None,) * len(upstream_tensors))
else:
metadata = sanitize_generator(metadata)
if (len(metadata) != len(upstream_tensors)):
raise Exception('Have {m} metadata tensors and {u} upstream buffers. Must be equal!'.format(m=len(metadata), u=len(upstream_tensors)))
for ((key, namespace, num_records, first_ordinal, record_id, column_handle), idc) in zip(upstream_tensors, metadata):
(yield make_ceph_writer(key=key, first_ordinal=first_ordinal, num_records=num_records, record_id=record_id, namespace=namespace, column_handle=column_handle, idc=idc)) | :param upstream_tensors: a list of aligner output tensors of type (key, namespace, num_records, first ordinal, record id, column handle)
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param name:
:return: yields the output of ceph write columns | tensorflow/contrib/persona/python/ops/io_pipe.py | ceph_write_pipeline | epfl-dcsl/ptf-system | 0 | python | def ceph_write_pipeline(upstream_tensors, user_name, cluster_name, pool_name, ceph_conf_path, compressed, record_types=default_records_type, name='ceph_write_pipeline', log_directory=None, metadata=None):
'\n :param upstream_tensors: a list of aligner output tensors of type (key, namespace, num_records, first ordinal, record id, column handle)\n :param user_name: \n :param cluster_name: \n :param ceph_conf_path: \n :param name: \n :return: yields the output of ceph write columns\n '
writer_op = partial(persona_ops.agd_ceph_buffer_writer, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=ceph_conf_path, pool_name=pool_name, compressed=compressed)
record_types = sanitize_generator(record_types)
upstream_tensors = sanitize_generator(upstream_tensors)
def make_ceph_writer(key, first_ordinal, num_records, column_handle, namespace, record_id, idc):
column_handles = array_ops.unstack(column_handle)
if (not (len(column_handles) == len(record_types))):
raise Exception('number of record types ({r}) must be equal to number of columns ({c})'.format(r=len(record_types), c=len(column_handles)))
custom_writer_op = partial(writer_op, record_id=record_id, num_records=num_records, first_ordinal=first_ordinal, namespace=namespace)
for (handle, record_type) in zip(column_handles, record_types):
check_valid_record_type(record_type=record_type)
full_key = string_ops.string_join([key, suffix_separator, record_type['extension']])
rtype = record_type['type']
a = custom_writer_op(record_type=rtype, path=full_key, resource_handle=handle, name='_'.join((name, rtype)))
res_val = a.output_path
if (log_directory is not None):
timestamp = a.time
write_duration = a.duration
num_bytes = a.bytes
log_op = gate.log_events(item_names=(('timestamp', 'key', 'duration', 'bytes') + (('id',) if (idc is not None) else ())), directory=log_directory, event_name=name, name='{}_logger'.format(name), components=((timestamp, res_val, write_duration, num_bytes) + ((idc,) if (idc is not None) else ())))
with ops.control_dependencies((log_op,)):
res_val = array_ops.identity(res_val)
(yield res_val)
if (metadata is None):
metadata = ((None,) * len(upstream_tensors))
else:
metadata = sanitize_generator(metadata)
if (len(metadata) != len(upstream_tensors)):
raise Exception('Have {m} metadata tensors and {u} upstream buffers. Must be equal!'.format(m=len(metadata), u=len(upstream_tensors)))
for ((key, namespace, num_records, first_ordinal, record_id, column_handle), idc) in zip(upstream_tensors, metadata):
(yield make_ceph_writer(key=key, first_ordinal=first_ordinal, num_records=num_records, record_id=record_id, namespace=namespace, column_handle=column_handle, idc=idc)) | def ceph_write_pipeline(upstream_tensors, user_name, cluster_name, pool_name, ceph_conf_path, compressed, record_types=default_records_type, name='ceph_write_pipeline', log_directory=None, metadata=None):
'\n :param upstream_tensors: a list of aligner output tensors of type (key, namespace, num_records, first ordinal, record id, column handle)\n :param user_name: \n :param cluster_name: \n :param ceph_conf_path: \n :param name: \n :return: yields the output of ceph write columns\n '
writer_op = partial(persona_ops.agd_ceph_buffer_writer, user_name=user_name, cluster_name=cluster_name, ceph_conf_path=ceph_conf_path, pool_name=pool_name, compressed=compressed)
record_types = sanitize_generator(record_types)
upstream_tensors = sanitize_generator(upstream_tensors)
def make_ceph_writer(key, first_ordinal, num_records, column_handle, namespace, record_id, idc):
column_handles = array_ops.unstack(column_handle)
if (not (len(column_handles) == len(record_types))):
raise Exception('number of record types ({r}) must be equal to number of columns ({c})'.format(r=len(record_types), c=len(column_handles)))
custom_writer_op = partial(writer_op, record_id=record_id, num_records=num_records, first_ordinal=first_ordinal, namespace=namespace)
for (handle, record_type) in zip(column_handles, record_types):
check_valid_record_type(record_type=record_type)
full_key = string_ops.string_join([key, suffix_separator, record_type['extension']])
rtype = record_type['type']
a = custom_writer_op(record_type=rtype, path=full_key, resource_handle=handle, name='_'.join((name, rtype)))
res_val = a.output_path
if (log_directory is not None):
timestamp = a.time
write_duration = a.duration
num_bytes = a.bytes
log_op = gate.log_events(item_names=(('timestamp', 'key', 'duration', 'bytes') + (('id',) if (idc is not None) else ())), directory=log_directory, event_name=name, name='{}_logger'.format(name), components=((timestamp, res_val, write_duration, num_bytes) + ((idc,) if (idc is not None) else ())))
with ops.control_dependencies((log_op,)):
res_val = array_ops.identity(res_val)
(yield res_val)
if (metadata is None):
metadata = ((None,) * len(upstream_tensors))
else:
metadata = sanitize_generator(metadata)
if (len(metadata) != len(upstream_tensors)):
raise Exception('Have {m} metadata tensors and {u} upstream buffers. Must be equal!'.format(m=len(metadata), u=len(upstream_tensors)))
for ((key, namespace, num_records, first_ordinal, record_id, column_handle), idc) in zip(upstream_tensors, metadata):
(yield make_ceph_writer(key=key, first_ordinal=first_ordinal, num_records=num_records, record_id=record_id, namespace=namespace, column_handle=column_handle, idc=idc))<|docstring|>:param upstream_tensors: a list of aligner output tensors of type (key, namespace, num_records, first ordinal, record id, column handle)
:param user_name:
:param cluster_name:
:param ceph_conf_path:
:param name:
:return: yields the output of ceph write columns<|endoftext|> |
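A usage sketch for ceph_write_pipeline, assuming write_ready is a list of (key, namespace, num_records, first_ordinal, record_id, column_handle) tuples and that the Ceph settings are placeholders; note that each yielded item is itself a generator of per-column writers, so it is materialized with tuple():
# write_ready and the Ceph settings below are assumptions, not values from this row.
write_paths = [tuple(column_writers)
               for column_writers in ceph_write_pipeline(upstream_tensors=write_ready,
                                                         user_name='client.persona',           # assumed
                                                         cluster_name='ceph',                  # assumed
                                                         pool_name='agd_data',                 # assumed
                                                         ceph_conf_path='/etc/ceph/ceph.conf', # assumed
                                                         compressed=True)]
# write_paths[i] holds one output-path tensor per record type written for chunk i.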
1f0c0d9a3abc077a7a3211656d9b40cc15b4427c35ffa713368de1e5f52089dc | def local_read_group_pipeline(upstream_tensors, sync=True, mmap_pool=None, mmap_pool_args=pool_default_args, name='local_read_group_pipeline'):
"\n Takes a bunch of groups of files and makes synchronous filemmap groups from them\n :param upstream_tensors: a generator of either a vector tensor or a list of scalar tensors of filenames to read. each of these constitutes a grop which will have control dependencies\n :param sync: whether or not to synchronously map the files\n :param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method\n :param mmap_pool_args:\n :return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors\n "
if (mmap_pool is None):
mmap_pool = persona_ops.m_map_pool(name=name, **mmap_pool_args)
assert (len(upstream_tensors) > 0)
for file_paths in upstream_tensors:
if isinstance(file_paths, ops.Tensor):
file_paths = array_ops.unstack(file_paths, axis=0)
try:
prev = []
for file_path in file_paths:
with ops.control_dependencies(prev):
mmap_op = persona_ops.file_m_map(filename=file_path, pool_handle=mmap_pool, synchronous=sync, name=name)
prev.append(mmap_op)
(yield prev)
except TypeError:
raise Exception('file paths {fp} is not an iterable or Tensor'.format(fp=file_paths)) | Takes a bunch of groups of files and makes synchronous filemmap groups from them
:param upstream_tensors: a generator of either a vector tensor or a list of scalar tensors of filenames to read. Each of these constitutes a group which will have control dependencies
:param sync: whether or not to synchronously map the files
:param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method
:param mmap_pool_args:
:return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors | tensorflow/contrib/persona/python/ops/io_pipe.py | local_read_group_pipeline | epfl-dcsl/ptf-system | 0 | python | def local_read_group_pipeline(upstream_tensors, sync=True, mmap_pool=None, mmap_pool_args=pool_default_args, name='local_read_group_pipeline'):
"\n Takes a bunch of groups of files and makes synchronous filemmap groups from them\n :param upstream_tensors: a generator of either a vector tensor or a list of scalar tensors of filenames to read. each of these constitutes a grop which will have control dependencies\n :param sync: whether or not to synchronously map the files\n :param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method\n :param mmap_pool_args:\n :return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors\n "
if (mmap_pool is None):
mmap_pool = persona_ops.m_map_pool(name=name, **mmap_pool_args)
assert (len(upstream_tensors) > 0)
for file_paths in upstream_tensors:
if isinstance(file_paths, ops.Tensor):
file_paths = array_ops.unstack(file_paths, axis=0)
try:
prev = []
for file_path in file_paths:
with ops.control_dependencies(prev):
mmap_op = persona_ops.file_m_map(filename=file_path, pool_handle=mmap_pool, synchronous=sync, name=name)
prev.append(mmap_op)
(yield prev)
except TypeError:
raise Exception('file paths {fp} is not an iterable or Tensor'.format(fp=file_paths)) | def local_read_group_pipeline(upstream_tensors, sync=True, mmap_pool=None, mmap_pool_args=pool_default_args, name='local_read_group_pipeline'):
"\n Takes a bunch of groups of files and makes synchronous filemmap groups from them\n :param upstream_tensors: a generator of either a vector tensor or a list of scalar tensors of filenames to read. each of these constitutes a grop which will have control dependencies\n :param sync: whether or not to synchronously map the files\n :param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method\n :param mmap_pool_args:\n :return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors\n "
if (mmap_pool is None):
mmap_pool = persona_ops.m_map_pool(name=name, **mmap_pool_args)
assert (len(upstream_tensors) > 0)
for file_paths in upstream_tensors:
if isinstance(file_paths, ops.Tensor):
file_paths = array_ops.unstack(file_paths, axis=0)
try:
prev = []
for file_path in file_paths:
with ops.control_dependencies(prev):
mmap_op = persona_ops.file_m_map(filename=file_path, pool_handle=mmap_pool, synchronous=sync, name=name)
prev.append(mmap_op)
(yield prev)
except TypeError:
raise Exception('file paths {fp} is not an iterable or Tensor'.format(fp=file_paths))<|docstring|>Takes a bunch of groups of files and makes synchronous filemmap groups from them
:param upstream_tensors: a generator of either a vector tensor or a list of scalar tensors of filenames to read. Each of these constitutes a group which will have control dependencies
:param sync: whether or not to synchronously map the files
:param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method
:param mmap_pool_args:
:return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors<|endoftext|> |
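A usage sketch for local_read_group_pipeline, assuming filename_groups (an assumed name) is a non-empty list whose items are 1-D string tensors, each holding the filenames of one group:
for mmap_group in local_read_group_pipeline(upstream_tensors=filename_groups, sync=True):
    # mmap_group is a list of file_m_map ops for this group, chained via control dependencies
    pass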
c50ce0101a519ec5bcdd4db978167501b29c6fef1f86f5a155ce597673fdb0fb | def local_read_pipeline(upstream_tensors, columns, sync=True, delete_after_use=False, mmap_pool=None, mmap_pool_args=pool_default_args, name='local_read_pipeline'):
"\n Create a read pipeline to read from the filesystem\n :param upstream_tensors: a list of file keys, as extracted from the metadata file\n :param columns: a list of columns to extract. See `valid_columns` for the set of valid columns\n :param sync: whether or not to synchronously map the files\n :param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method\n :param mmap_pool_args:\n :param name: \n :return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors\n "
def make_readers(input_file_basename):
prev = []
for full_filename in expand_column_extensions(key=input_file_basename, columns=columns):
with ops.control_dependencies(prev):
mmap_op = reader(filename=full_filename)
(yield mmap_op)
prev.append(mmap_op)
columns = validate_columns(columns=columns)
if (mmap_pool is None):
mmap_pool = persona_ops.m_map_pool(name=name, **mmap_pool_args)
reader = partial(persona_ops.file_m_map, synchronous=sync, pool_handle=mmap_pool, delete_after_use=delete_after_use)
has_tensors = False
for file_path in upstream_tensors:
(yield make_readers(input_file_basename=file_path))
has_tensors = True
assert has_tensors | Create a read pipeline to read from the filesystem
:param upstream_tensors: a list of file keys, as extracted from the metadata file
:param columns: a list of columns to extract. See `valid_columns` for the set of valid columns
:param sync: whether or not to synchronously map the files
:param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method
:param mmap_pool_args:
:param name:
:return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors | tensorflow/contrib/persona/python/ops/io_pipe.py | local_read_pipeline | epfl-dcsl/ptf-system | 0 | python | def local_read_pipeline(upstream_tensors, columns, sync=True, delete_after_use=False, mmap_pool=None, mmap_pool_args=pool_default_args, name='local_read_pipeline'):
"\n Create a read pipeline to read from the filesystem\n :param upstream_tensors: a list of file keys, as extracted from the metadata file\n :param columns: a list of columns to extract. See `valid_columns` for the set of valid columns\n :param sync: whether or not to synchronously map the files\n :param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method\n :param mmap_pool_args:\n :param name: \n :return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors\n "
def make_readers(input_file_basename):
prev = []
for full_filename in expand_column_extensions(key=input_file_basename, columns=columns):
with ops.control_dependencies(prev):
mmap_op = reader(filename=full_filename)
(yield mmap_op)
prev.append(mmap_op)
columns = validate_columns(columns=columns)
if (mmap_pool is None):
mmap_pool = persona_ops.m_map_pool(name=name, **mmap_pool_args)
reader = partial(persona_ops.file_m_map, synchronous=sync, pool_handle=mmap_pool, delete_after_use=delete_after_use)
has_tensors = False
for file_path in upstream_tensors:
(yield make_readers(input_file_basename=file_path))
has_tensors = True
assert has_tensors | def local_read_pipeline(upstream_tensors, columns, sync=True, delete_after_use=False, mmap_pool=None, mmap_pool_args=pool_default_args, name='local_read_pipeline'):
"\n Create a read pipeline to read from the filesystem\n :param upstream_tensors: a list of file keys, as extracted from the metadata file\n :param columns: a list of columns to extract. See `valid_columns` for the set of valid columns\n :param sync: whether or not to synchronously map the files\n :param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method\n :param mmap_pool_args:\n :param name: \n :return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors\n "
def make_readers(input_file_basename):
prev = []
for full_filename in expand_column_extensions(key=input_file_basename, columns=columns):
with ops.control_dependencies(prev):
mmap_op = reader(filename=full_filename)
(yield mmap_op)
prev.append(mmap_op)
columns = validate_columns(columns=columns)
if (mmap_pool is None):
mmap_pool = persona_ops.m_map_pool(name=name, **mmap_pool_args)
reader = partial(persona_ops.file_m_map, synchronous=sync, pool_handle=mmap_pool, delete_after_use=delete_after_use)
has_tensors = False
for file_path in upstream_tensors:
(yield make_readers(input_file_basename=file_path))
has_tensors = True
assert has_tensors<|docstring|>Create a read pipeline to read from the filesystem
:param upstream_tensors: a list of file keys, as extracted from the metadata file
:param columns: a list of columns to extract. See `valid_columns` for the set of valid columns
:param sync: whether or not to synchronously map the files
:param mmap_pool: if not None, provide a persona_ops.file_m_map pool to this method
:param mmap_pool_args:
:param name:
:return: yield a tuple of '(persona_ops.file_m_map for every column file, a generator)' for every tensor in upstream_tensors<|endoftext|> |
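A usage sketch for local_read_pipeline, assuming chunk_keys (an assumed name) is an iterable of scalar string tensors holding AGD chunk basenames and that 'base' and 'qual' are valid column names in this build; each yielded item is a generator of per-column mmap ops, so it is materialized with tuple():
# Materialize the per-column mmap handles for every chunk key.
mapped_columns = [tuple(column_maps)
                  for column_maps in local_read_pipeline(upstream_tensors=chunk_keys,
                                                         columns=('base', 'qual'))]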
0b648d132718799c8ba8f92325e11271e11a5a4e01548765151dd63f23330548 | def local_write_pipeline(upstream_tensors, compressed, record_types=default_records_type, record_suffix='', name='local_write_pipeline'):
'\n Create a local write pipeline, based on the number of upstream tensors received.\n :param upstream_tensors: a list of tensor tuples of type: buffer_list_handle, record_id, first_ordinal, num_records, file_path\n :param record_type: the type of results to write. See persona_ops.cc for valid types\n :param name: \n :return: yield a writer for each record to be written in upstream tensors. Each writer op returns the full path where it was written\n '
if compressed:
writer_op = partial(persona_ops.agd_file_system_buffer_writer, compressed=True)
else:
writer_op = persona_ops.agd_file_system_buffer_list_writer
record_types = sanitize_generator(record_types)
suffix = constant_op.constant(record_suffix)
def make_writer(record_id, file_path, first_ordinal, num_records, bl_handle):
bl_handle = array_ops.unstack(bl_handle)
if (len(bl_handle) != len(record_types)):
raise Exception('number of record types must equal number of buffer list handles')
for (handle, record_type) in zip(bl_handle, record_types):
check_valid_record_type(record_type=record_type)
full_filepath = string_ops.string_join([file_path, suffix, suffix_separator, record_type['extension']])
rtype = record_type['type']
(yield writer_op(record_id=record_id, record_type=rtype, resource_handle=handle, first_ordinal=first_ordinal, num_records=num_records, path=full_filepath, name='{name}_{rtype}'.format(name=name, rtype=rtype)))
upstream_tensors = sanitize_generator(upstream_tensors)
assert (len(upstream_tensors) > 0)
for (buffer_list_handle, record_id, first_ordinal, num_records, file_path) in upstream_tensors:
(yield make_writer(record_id=record_id, file_path=file_path, num_records=num_records, first_ordinal=first_ordinal, bl_handle=buffer_list_handle)) | Create a local write pipeline, based on the number of upstream tensors received.
:param upstream_tensors: a list of tensor tuples of type: buffer_list_handle, record_id, first_ordinal, num_records, file_path
:param record_types: the types of results to write. See persona_ops.cc for valid types
:param name:
:return: yield a writer for each record to be written in upstream tensors. Each writer op returns the full path where it was written | tensorflow/contrib/persona/python/ops/io_pipe.py | local_write_pipeline | epfl-dcsl/ptf-system | 0 | python | def local_write_pipeline(upstream_tensors, compressed, record_types=default_records_type, record_suffix='', name='local_write_pipeline'):
'\n Create a local write pipeline, based on the number of upstream tensors received.\n :param upstream_tensors: a list of tensor tuples of type: buffer_list_handle, record_id, first_ordinal, num_records, file_path\n :param record_type: the type of results to write. See persona_ops.cc for valid types\n :param name: \n :return: yield a writer for each record to be written in upstream tensors. Each writer op returns the full path where it was written\n '
if compressed:
writer_op = partial(persona_ops.agd_file_system_buffer_writer, compressed=True)
else:
writer_op = persona_ops.agd_file_system_buffer_list_writer
record_types = sanitize_generator(record_types)
suffix = constant_op.constant(record_suffix)
def make_writer(record_id, file_path, first_ordinal, num_records, bl_handle):
bl_handle = array_ops.unstack(bl_handle)
if (len(bl_handle) != len(record_types)):
raise Exception('number of record types must equal number of buffer list handles')
for (handle, record_type) in zip(bl_handle, record_types):
check_valid_record_type(record_type=record_type)
full_filepath = string_ops.string_join([file_path, suffix, suffix_separator, record_type['extension']])
rtype = record_type['type']
(yield writer_op(record_id=record_id, record_type=rtype, resource_handle=handle, first_ordinal=first_ordinal, num_records=num_records, path=full_filepath, name='{name}_{rtype}'.format(name=name, rtype=rtype)))
upstream_tensors = sanitize_generator(upstream_tensors)
assert (len(upstream_tensors) > 0)
for (buffer_list_handle, record_id, first_ordinal, num_records, file_path) in upstream_tensors:
(yield make_writer(record_id=record_id, file_path=file_path, num_records=num_records, first_ordinal=first_ordinal, bl_handle=buffer_list_handle)) | def local_write_pipeline(upstream_tensors, compressed, record_types=default_records_type, record_suffix='', name='local_write_pipeline'):
'\n Create a local write pipeline, based on the number of upstream tensors received.\n :param upstream_tensors: a list of tensor tuples of type: buffer_list_handle, record_id, first_ordinal, num_records, file_path\n :param record_type: the type of results to write. See persona_ops.cc for valid types\n :param name: \n :return: yield a writer for each record to be written in upstream tensors. Each writer op returns the full path where it was written\n '
if compressed:
writer_op = partial(persona_ops.agd_file_system_buffer_writer, compressed=True)
else:
writer_op = persona_ops.agd_file_system_buffer_list_writer
record_types = sanitize_generator(record_types)
suffix = constant_op.constant(record_suffix)
def make_writer(record_id, file_path, first_ordinal, num_records, bl_handle):
bl_handle = array_ops.unstack(bl_handle)
if (len(bl_handle) != len(record_types)):
raise Exception('number of record types must equal number of buffer list handles')
for (handle, record_type) in zip(bl_handle, record_types):
check_valid_record_type(record_type=record_type)
full_filepath = string_ops.string_join([file_path, suffix, suffix_separator, record_type['extension']])
rtype = record_type['type']
(yield writer_op(record_id=record_id, record_type=rtype, resource_handle=handle, first_ordinal=first_ordinal, num_records=num_records, path=full_filepath, name='{name}_{rtype}'.format(name=name, rtype=rtype)))
upstream_tensors = sanitize_generator(upstream_tensors)
assert (len(upstream_tensors) > 0)
for (buffer_list_handle, record_id, first_ordinal, num_records, file_path) in upstream_tensors:
(yield make_writer(record_id=record_id, file_path=file_path, num_records=num_records, first_ordinal=first_ordinal, bl_handle=buffer_list_handle))<|docstring|>Create a local write pipeline, based on the number of upstream tensors received.
:param upstream_tensors: a list of tensor tuples of type: buffer_list_handle, record_id, first_ordinal, num_records, file_path
:param record_types: the types of results to write. See persona_ops.cc for valid types
:param name:
:return: yield a writer for each record to be written in upstream tensors. Each writer op returns the full path where it was written<|endoftext|> |
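A usage sketch for local_write_pipeline, assuming to_write (an assumed name) is a list of (buffer_list_handle, record_id, first_ordinal, num_records, file_path) tuples; each yielded item is a generator of per-record-type writers, so it is materialized with tuple():
# to_write is an assumed upstream stage; record_types defaults to the module's default_records_type.
written_paths = [tuple(writers)
                 for writers in local_write_pipeline(upstream_tensors=to_write,
                                                     compressed=False)]
# written_paths[i] holds the output path tensor of every record type written for item i.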
4887c7faf9b91bb3544bba63b557603843831aa810d280b645ba0546aa76a3d7 | def agd_reader_pipeline(upstream_tensors, verify=False, buffer_pool=None, buffer_pool_args=pool_default_args, repack=None, name='agd_reader_pipeline'):
"\n Yield a pipeline of input buffers processed by AGDReader.\n \n This processes ONLY A SINGLE COLUMN. Use agd_reader_multi_column_pipeline to do multiple columns in parallel.\n \n :param upstream_tensors: a tensor of handles to resources of type Data (in C++ persona code)\n :param verify: if True, enable format verification by AGDReader. Will fail if shape doesn't conform, but causes performance impact\n :param buffer_pool: if not None, use this as the buffer_pool, else create buffer_pool\n :param buffer_pool_default_args: the arguments to make the buffer_pool, if it is None\n :param name: \n :return: yields a tuple of output_buffer, num_records, first_ordinal, record_id\n "
if (repack is None):
repack = ((False,) * len(upstream_tensors))
with ops.name_scope('agd_reader'):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args, name='agd_reader_buffer_pool')
if isinstance(upstream_tensors, ops.Tensor):
upstream_tensors = array_ops.unstack(upstream_tensors)
assert (len(upstream_tensors) > 0)
if (len(repack) != len(upstream_tensors)):
raise Exception('Repack vector not equal to the number of tensors')
reader_op = partial(persona_ops.agd_reader, buffer_pool=buffer_pool, name=name, verify=verify)
for (upstream_tensor, repack_column) in zip(upstream_tensors, repack):
assert isinstance(repack_column, bool), 'repack is not a bool! got {}'.format(repack_column)
ut_shape = upstream_tensor.get_shape()
if (ut_shape != resource_shape):
raise Exception('AGD_Reader pipeline encounter Tensor with shape {actual}, but expected {expected}'.format(actual=ut_shape, expected=resource_shape))
(output_buffer, num_records, first_ordinal, record_id) = reader_op(file_handle=upstream_tensor, unpack=(not repack_column), repack=repack_column)
(yield (output_buffer, num_records, first_ordinal, record_id)) | Yield a pipeline of input buffers processed by AGDReader.
This processes ONLY A SINGLE COLUMN. Use agd_reader_multi_column_pipeline to do multiple columns in parallel.
:param upstream_tensors: a tensor of handles to resources of type Data (in C++ persona code)
:param verify: if True, enable format verification by AGDReader. Will fail if shape doesn't conform, but causes performance impact
:param buffer_pool: if not None, use this as the buffer_pool, else create buffer_pool
:param buffer_pool_args: the arguments to make the buffer_pool, if it is None
:param name:
:return: yields a tuple of output_buffer, num_records, first_ordinal, record_id | tensorflow/contrib/persona/python/ops/io_pipe.py | agd_reader_pipeline | epfl-dcsl/ptf-system | 0 | python | def agd_reader_pipeline(upstream_tensors, verify=False, buffer_pool=None, buffer_pool_args=pool_default_args, repack=None, name='agd_reader_pipeline'):
"\n Yield a pipeline of input buffers processed by AGDReader.\n \n This processes ONLY A SINGLE COLUMN. Use agd_reader_multi_column_pipeline to do multiple columns in parallel.\n \n :param upstream_tensors: a tensor of handles to resources of type Data (in C++ persona code)\n :param verify: if True, enable format verification by AGDReader. Will fail if shape doesn't conform, but causes performance impact\n :param buffer_pool: if not None, use this as the buffer_pool, else create buffer_pool\n :param buffer_pool_default_args: the arguments to make the buffer_pool, if it is None\n :param name: \n :return: yields a tuple of output_buffer, num_records, first_ordinal, record_id\n "
if (repack is None):
repack = ((False,) * len(upstream_tensors))
with ops.name_scope('agd_reader'):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args, name='agd_reader_buffer_pool')
if isinstance(upstream_tensors, ops.Tensor):
upstream_tensors = array_ops.unstack(upstream_tensors)
assert (len(upstream_tensors) > 0)
if (len(repack) != len(upstream_tensors)):
raise Exception('Repack vector not equal to the number of tensors')
reader_op = partial(persona_ops.agd_reader, buffer_pool=buffer_pool, name=name, verify=verify)
for (upstream_tensor, repack_column) in zip(upstream_tensors, repack):
assert isinstance(repack_column, bool), 'repack is not a bool! got {}'.format(repack_column)
ut_shape = upstream_tensor.get_shape()
if (ut_shape != resource_shape):
raise Exception('AGD_Reader pipeline encounter Tensor with shape {actual}, but expected {expected}'.format(actual=ut_shape, expected=resource_shape))
(output_buffer, num_records, first_ordinal, record_id) = reader_op(file_handle=upstream_tensor, unpack=(not repack_column), repack=repack_column)
(yield (output_buffer, num_records, first_ordinal, record_id)) | def agd_reader_pipeline(upstream_tensors, verify=False, buffer_pool=None, buffer_pool_args=pool_default_args, repack=None, name='agd_reader_pipeline'):
"\n Yield a pipeline of input buffers processed by AGDReader.\n \n This processes ONLY A SINGLE COLUMN. Use agd_reader_multi_column_pipeline to do multiple columns in parallel.\n \n :param upstream_tensors: a tensor of handles to resources of type Data (in C++ persona code)\n :param verify: if True, enable format verification by AGDReader. Will fail if shape doesn't conform, but causes performance impact\n :param buffer_pool: if not None, use this as the buffer_pool, else create buffer_pool\n :param buffer_pool_default_args: the arguments to make the buffer_pool, if it is None\n :param name: \n :return: yields a tuple of output_buffer, num_records, first_ordinal, record_id\n "
if (repack is None):
repack = ((False,) * len(upstream_tensors))
with ops.name_scope('agd_reader'):
if (buffer_pool is None):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args, name='agd_reader_buffer_pool')
if isinstance(upstream_tensors, ops.Tensor):
upstream_tensors = array_ops.unstack(upstream_tensors)
assert (len(upstream_tensors) > 0)
if (len(repack) != len(upstream_tensors)):
raise Exception('Repack vector not equal to the number of tensors')
reader_op = partial(persona_ops.agd_reader, buffer_pool=buffer_pool, name=name, verify=verify)
for (upstream_tensor, repack_column) in zip(upstream_tensors, repack):
assert isinstance(repack_column, bool), 'repack is not a bool! got {}'.format(repack_column)
ut_shape = upstream_tensor.get_shape()
if (ut_shape != resource_shape):
raise Exception('AGD_Reader pipeline encounter Tensor with shape {actual}, but expected {expected}'.format(actual=ut_shape, expected=resource_shape))
(output_buffer, num_records, first_ordinal, record_id) = reader_op(file_handle=upstream_tensor, unpack=(not repack_column), repack=repack_column)
(yield (output_buffer, num_records, first_ordinal, record_id))<|docstring|>Yield a pipeline of input buffers processed by AGDReader.
This processes ONLY A SINGLE COLUMN. Use agd_reader_multi_column_pipeline to do multiple columns in parallel.
:param upstream_tensors: a tensor of handles to resources of type Data (in C++ persona code)
:param verify: if True, enable format verification by AGDReader. Will fail if shape doesn't conform, but causes performance impact
:param buffer_pool: if not None, use this as the buffer_pool, else create buffer_pool
:param buffer_pool_args: the arguments to make the buffer_pool, if it is None
:param name:
:return: yields a tuple of output_buffer, num_records, first_ordinal, record_id<|endoftext|> |
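A usage sketch for agd_reader_pipeline, assuming column_chunks (an assumed name) is a Python list of Data resource handles for a single column:
decoded = agd_reader_pipeline(upstream_tensors=column_chunks, verify=False)
for output_buffer, num_records, first_ordinal, record_id in decoded:
    pass  # one decoded output buffer per input chunk of this column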
6c8c958991621cd774bf49d19d0f926b501baf6aeb629f484f83190bd733cd0c | def agd_reader_multi_column_pipeline(upstream_tensorz, control_ops=None, verify=False, buffer_pool=None, share_buffer_pool=True, buffer_pool_args=pool_default_args, repack=None, name='agd_reader_multi_column_pipeline'):
"\n Create an AGDReader pipeline for an iterable of columns. Each column group is assumed to have the same first ordinal, number of records, and record id.\n :param upstream_tensorz: a list of list of tensors, each item being a column group\n :param verify: whether or not to invoke the verification for AGD columns\n :param buffer_pool: pass in a buffer_pool to reuse\n :param share_buffer_pool: if buffer_pool is not passed in, create one to share among all the AGDReader instances\n :param buffer_pool_args: special buffer pool args, if it's created\n :param name: \n :return: yield [output_buffer_handles], num_records, first_ordinal, record_id; in order, for each column group in upstream_tensorz\n "
upstream_tensorz = sanitize_generator(upstream_tensorz)
if (control_ops is not None):
control_ops = sanitize_generator(control_ops)
if (len(control_ops) != len(upstream_tensorz)):
raise Exception('Control ops needs to be the same length as upstream tensors. len(tensors) = {lt}, len(control_ops) = {lc}'.format(lt=len(upstream_tensorz), lc=len(control_ops)))
else:
control_ops = itertools.repeat([])
with ops.name_scope('agd_read_multi'):
if ((buffer_pool is None) and share_buffer_pool):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args, name='agd_reader_buffer_pool')
assert (len(upstream_tensorz) > 0)
def gen_groups():
reader = partial(agd_reader_pipeline, verify=verify, buffer_pool_args=buffer_pool_args, buffer_pool=buffer_pool, name=name, repack=repack)
for (upstream_tensors, control_dep) in zip(upstream_tensorz, control_ops):
with ops.control_dependencies(control_dep):
(yield reader(upstream_tensors=upstream_tensors))
for processed_tensors in gen_groups():
(output_buffers, num_recordss, first_ordinalss, record_ids) = zip(*processed_tensors)
(yield (output_buffers, num_recordss[0], first_ordinalss[0], record_ids[0])) | Create an AGDReader pipeline for an iterable of columns. Each column group is assumed to have the same first ordinal, number of records, and record id.
:param upstream_tensorz: a list of list of tensors, each item being a column group
:param verify: whether or not to invoke the verification for AGD columns
:param buffer_pool: pass in a buffer_pool to reuse
:param share_buffer_pool: if buffer_pool is not passed in, create one to share among all the AGDReader instances
:param buffer_pool_args: special buffer pool args, if it's created
:param name:
:return: yield [output_buffer_handles], num_records, first_ordinal, record_id; in order, for each column group in upstream_tensorz | tensorflow/contrib/persona/python/ops/io_pipe.py | agd_reader_multi_column_pipeline | epfl-dcsl/ptf-system | 0 | python | def agd_reader_multi_column_pipeline(upstream_tensorz, control_ops=None, verify=False, buffer_pool=None, share_buffer_pool=True, buffer_pool_args=pool_default_args, repack=None, name='agd_reader_multi_column_pipeline'):
"\n Create an AGDReader pipeline for an iterable of columns. Each column group is assumed to have the same first ordinal, number of records, and record id.\n :param upstream_tensorz: a list of list of tensors, each item being a column group\n :param verify: whether or not to invoke the verification for AGD columns\n :param buffer_pool: pass in a buffer_pool to reuse\n :param share_buffer_pool: if buffer_pool is not passed in, create one to share among all the AGDReader instances\n :param buffer_pool_args: special buffer pool args, if it's created\n :param name: \n :return: yield [output_buffer_handles], num_records, first_ordinal, record_id; in order, for each column group in upstream_tensorz\n "
upstream_tensorz = sanitize_generator(upstream_tensorz)
if (control_ops is not None):
control_ops = sanitize_generator(control_ops)
if (len(control_ops) != len(upstream_tensorz)):
raise Exception('Control ops needs to be the same length as upstream tensors. len(tensors) = {lt}, len(control_ops) = {lc}'.format(lt=len(upstream_tensorz), lc=len(control_ops)))
else:
control_ops = itertools.repeat([])
with ops.name_scope('agd_read_multi'):
if ((buffer_pool is None) and share_buffer_pool):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args, name='agd_reader_buffer_pool')
assert (len(upstream_tensorz) > 0)
def gen_groups():
reader = partial(agd_reader_pipeline, verify=verify, buffer_pool_args=buffer_pool_args, buffer_pool=buffer_pool, name=name, repack=repack)
for (upstream_tensors, control_dep) in zip(upstream_tensorz, control_ops):
with ops.control_dependencies(control_dep):
(yield reader(upstream_tensors=upstream_tensors))
for processed_tensors in gen_groups():
(output_buffers, num_recordss, first_ordinalss, record_ids) = zip(*processed_tensors)
(yield (output_buffers, num_recordss[0], first_ordinalss[0], record_ids[0])) | def agd_reader_multi_column_pipeline(upstream_tensorz, control_ops=None, verify=False, buffer_pool=None, share_buffer_pool=True, buffer_pool_args=pool_default_args, repack=None, name='agd_reader_multi_column_pipeline'):
"\n Create an AGDReader pipeline for an iterable of columns. Each column group is assumed to have the same first ordinal, number of records, and record id.\n :param upstream_tensorz: a list of list of tensors, each item being a column group\n :param verify: whether or not to invoke the verification for AGD columns\n :param buffer_pool: pass in a buffer_pool to reuse\n :param share_buffer_pool: if buffer_pool is not passed in, create one to share among all the AGDReader instances\n :param buffer_pool_args: special buffer pool args, if it's created\n :param name: \n :return: yield [output_buffer_handles], num_records, first_ordinal, record_id; in order, for each column group in upstream_tensorz\n "
upstream_tensorz = sanitize_generator(upstream_tensorz)
if (control_ops is not None):
control_ops = sanitize_generator(control_ops)
if (len(control_ops) != len(upstream_tensorz)):
raise Exception('Control ops needs to be the same length as upstream tensors. len(tensors) = {lt}, len(control_ops) = {lc}'.format(lt=len(upstream_tensorz), lc=len(control_ops)))
else:
control_ops = itertools.repeat([])
with ops.name_scope('agd_read_multi'):
if ((buffer_pool is None) and share_buffer_pool):
buffer_pool = persona_ops.buffer_pool(**buffer_pool_args, name='agd_reader_buffer_pool')
assert (len(upstream_tensorz) > 0)
def gen_groups():
reader = partial(agd_reader_pipeline, verify=verify, buffer_pool_args=buffer_pool_args, buffer_pool=buffer_pool, name=name, repack=repack)
for (upstream_tensors, control_dep) in zip(upstream_tensorz, control_ops):
with ops.control_dependencies(control_dep):
(yield reader(upstream_tensors=upstream_tensors))
for processed_tensors in gen_groups():
(output_buffers, num_recordss, first_ordinalss, record_ids) = zip(*processed_tensors)
(yield (output_buffers, num_recordss[0], first_ordinalss[0], record_ids[0]))<|docstring|>Create an AGDReader pipeline for an iterable of columns. Each column group is assumed to have the same first ordinal, number of records, and record id.
:param upstream_tensorz: a list of list of tensors, each item being a column group
:param verify: whether or not to invoke the verification for AGD columns
:param buffer_pool: pass in a buffer_pool to reuse
:param share_buffer_pool: if buffer_pool is not passed in, create one to share among all the AGDReader instances
:param buffer_pool_args: special buffer pool args, if it's created
:param name:
:return: yield [output_buffer_handles], num_records, first_ordinal, record_id; in order, for each column group in upstream_tensorz<|endoftext|> |
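A usage sketch for agd_reader_multi_column_pipeline, assuming column_groups (an assumed name) is a list whose items are lists of Data resource handles, one handle per column of a chunk:
multi_decoded = agd_reader_multi_column_pipeline(upstream_tensorz=column_groups)
for output_buffers, num_records, first_ordinal, record_id in multi_decoded:
    pass  # output_buffers holds one decoded buffer per column in the group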
44e98f53d9877db5475dce4510095fefe2a34fd67f18e03f8c04c4e28c23ee45 | def agd_bwa_read_assembler(upstream_tensors, agd_read_pool=None, agd_read_pool_args=pool_default_args, include_meta=False, name='agd_read_assembler'):
"\n Generate agd_bwa_read datatypes from the upstream tensors. BWA paired aligner requires specific data structures\n :param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)\n :param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share\n :param agd_read_pool_args: args for deafult construction of agd_read_pool if it's None\n :param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible\n :param name: \n :return: yield instances of a tensor with AGDRead instance as the result\n "
def make_agd_read(column_buffers, num_reads):
if isinstance(column_buffers, ops.Tensor):
column_buffers = array_ops.unstack(column_buffers)
if include_meta:
assert (len(column_buffers) == 3)
return persona_ops.bwa_assembler(bwa_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], meta_handle=column_buffers[2], num_records=num_reads)
else:
assert (len(column_buffers) == 2)
return persona_ops.no_meta_bwa_assembler(bwa_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], num_records=num_reads)
if (agd_read_pool is None):
agd_read_pool = persona_ops.bwa_read_pool(**agd_read_pool_args, name='agd_reader_bwa_read_pool')
assert (len(upstream_tensors) > 0)
for (output_buffers, num_reads) in upstream_tensors:
(yield make_agd_read(column_buffers=output_buffers, num_reads=num_reads)) | Generate agd_bwa_read datatypes from the upstream tensors. BWA paired aligner requires specific data structures
:param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)
:param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share
:param agd_read_pool_args: args for default construction of agd_read_pool if it's None
:param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible
:param name:
:return: yield instances of a tensor with AGDRead instance as the result | tensorflow/contrib/persona/python/ops/io_pipe.py | agd_bwa_read_assembler | epfl-dcsl/ptf-system | 0 | python | def agd_bwa_read_assembler(upstream_tensors, agd_read_pool=None, agd_read_pool_args=pool_default_args, include_meta=False, name='agd_read_assembler'):
"\n Generate agd_bwa_read datatypes from the upstream tensors. BWA paired aligner requires specific data structures\n :param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)\n :param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share\n :param agd_read_pool_args: args for deafult construction of agd_read_pool if it's None\n :param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible\n :param name: \n :return: yield instances of a tensor with AGDRead instance as the result\n "
def make_agd_read(column_buffers, num_reads):
if isinstance(column_buffers, ops.Tensor):
column_buffers = array_ops.unstack(column_buffers)
if include_meta:
assert (len(column_buffers) == 3)
return persona_ops.bwa_assembler(bwa_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], meta_handle=column_buffers[2], num_records=num_reads)
else:
assert (len(column_buffers) == 2)
return persona_ops.no_meta_bwa_assembler(bwa_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], num_records=num_reads)
if (agd_read_pool is None):
agd_read_pool = persona_ops.bwa_read_pool(**agd_read_pool_args, name='agd_reader_bwa_read_pool')
assert (len(upstream_tensors) > 0)
for (output_buffers, num_reads) in upstream_tensors:
(yield make_agd_read(column_buffers=output_buffers, num_reads=num_reads)) | def agd_bwa_read_assembler(upstream_tensors, agd_read_pool=None, agd_read_pool_args=pool_default_args, include_meta=False, name='agd_read_assembler'):
"\n Generate agd_bwa_read datatypes from the upstream tensors. BWA paired aligner requires specific data structures\n :param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)\n :param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share\n :param agd_read_pool_args: args for deafult construction of agd_read_pool if it's None\n :param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible\n :param name: \n :return: yield instances of a tensor with AGDRead instance as the result\n "
def make_agd_read(column_buffers, num_reads):
if isinstance(column_buffers, ops.Tensor):
column_buffers = array_ops.unstack(column_buffers)
if include_meta:
assert (len(column_buffers) == 3)
return persona_ops.bwa_assembler(bwa_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], meta_handle=column_buffers[2], num_records=num_reads)
else:
assert (len(column_buffers) == 2)
return persona_ops.no_meta_bwa_assembler(bwa_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], num_records=num_reads)
if (agd_read_pool is None):
agd_read_pool = persona_ops.bwa_read_pool(**agd_read_pool_args, name='agd_reader_bwa_read_pool')
assert (len(upstream_tensors) > 0)
for (output_buffers, num_reads) in upstream_tensors:
(yield make_agd_read(column_buffers=output_buffers, num_reads=num_reads))<|docstring|>Generate agd_bwa_read datatypes from the upstream tensors. BWA paired aligner requires specific data structures
:param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)
:param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share
:param agd_read_pool_args: args for default construction of agd_read_pool if it's None
:param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible
:param name:
:return: yield instances of a tensor with AGDRead instance as the result<|endoftext|> |
789f095a6f5719e39b10eb014465a9e011c02db01ef1cf4bad6119113760c39c | def agd_read_assembler(upstream_tensors, control_deps=None, agd_read_pool=None, agd_read_pool_args=pool_default_args, include_meta=False, name='agd_read_assembler'):
"\n Generate agd_read datatypes from the upstream tensors\n :param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)\n :param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share\n :param agd_read_pool_args: args for deafult construction of agd_read_pool if it's None\n :param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible\n :param name: \n :return: yield instances of a tensor with AGDRead instance as the result\n "
upstream_tensors = sanitize_generator(upstream_tensors)
if (control_deps is None):
control_deps = itertools.repeat([], times=len(upstream_tensors))
else:
control_deps = sanitize_generator(control_deps)
if (len(control_deps) != len(upstream_tensors)):
raise Exception('Got {ut} upstream tensor groups, but only {cd} control dependencies. Must be equal!'.format(ut=len(upstream_tensors), cd=len(control_deps)))
with ops.name_scope('agd_read_assembler'):
def make_agd_read(column_buffers, num_reads):
if isinstance(column_buffers, ops.Tensor):
column_buffers = array_ops.unstack(column_buffers)
if include_meta:
assert (len(column_buffers) == 3)
return persona_ops.agd_assembler(agd_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], meta_handle=column_buffers[2], num_records=num_reads)
else:
assert (len(column_buffers) == 2)
return persona_ops.no_meta_agd_assembler(agd_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], num_records=num_reads)
if (agd_read_pool is None):
agd_read_pool = persona_ops.agd_read_pool(**agd_read_pool_args, name='agd_reader_agd_read_pool')
assert (len(upstream_tensors) > 0)
for ((output_buffers, num_reads), control_dep) in zip(upstream_tensors, control_deps):
with ops.control_dependencies(control_dep):
(yield make_agd_read(column_buffers=output_buffers, num_reads=num_reads)) | Generate agd_read datatypes from the upstream tensors
:param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)
:param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share
:param agd_read_pool_args: args for default construction of agd_read_pool if it's None
:param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible
:param name:
:return: yield instances of a tensor with AGDRead instance as the result | tensorflow/contrib/persona/python/ops/io_pipe.py | agd_read_assembler | epfl-dcsl/ptf-system | 0 | python | def agd_read_assembler(upstream_tensors, control_deps=None, agd_read_pool=None, agd_read_pool_args=pool_default_args, include_meta=False, name='agd_read_assembler'):
"\n Generate agd_read datatypes from the upstream tensors\n :param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)\n :param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share\n :param agd_read_pool_args: args for deafult construction of agd_read_pool if it's None\n :param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible\n :param name: \n :return: yield instances of a tensor with AGDRead instance as the result\n "
upstream_tensors = sanitize_generator(upstream_tensors)
if (control_deps is None):
control_deps = itertools.repeat([], times=len(upstream_tensors))
else:
control_deps = sanitize_generator(control_deps)
if (len(control_deps) != len(upstream_tensors)):
raise Exception('Got {ut} upstream tensor groups, but only {cd} control dependencies. Must be equal!'.format(ut=len(upstream_tensors), cd=len(control_deps)))
with ops.name_scope('agd_read_assembler'):
def make_agd_read(column_buffers, num_reads):
if isinstance(column_buffers, ops.Tensor):
column_buffers = array_ops.unstack(column_buffers)
if include_meta:
assert (len(column_buffers) == 3)
return persona_ops.agd_assembler(agd_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], meta_handle=column_buffers[2], num_records=num_reads)
else:
assert (len(column_buffers) == 2)
return persona_ops.no_meta_agd_assembler(agd_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], num_records=num_reads)
if (agd_read_pool is None):
agd_read_pool = persona_ops.agd_read_pool(**agd_read_pool_args, name='agd_reader_agd_read_pool')
assert (len(upstream_tensors) > 0)
for ((output_buffers, num_reads), control_dep) in zip(upstream_tensors, control_deps):
with ops.control_dependencies(control_dep):
(yield make_agd_read(column_buffers=output_buffers, num_reads=num_reads)) | def agd_read_assembler(upstream_tensors, control_deps=None, agd_read_pool=None, agd_read_pool_args=pool_default_args, include_meta=False, name='agd_read_assembler'):
"\n Generate agd_read datatypes from the upstream tensors\n :param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)\n :param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share\n :param agd_read_pool_args: args for deafult construction of agd_read_pool if it's None\n :param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible\n :param name: \n :return: yield instances of a tensor with AGDRead instance as the result\n "
upstream_tensors = sanitize_generator(upstream_tensors)
if (control_deps is None):
control_deps = itertools.repeat([], times=len(upstream_tensors))
else:
control_deps = sanitize_generator(control_deps)
if (len(control_deps) != len(upstream_tensors)):
raise Exception('Got {ut} upstream tensor groups, but only {cd} control dependencies. Must be equal!'.format(ut=len(upstream_tensors), cd=len(control_deps)))
with ops.name_scope('agd_read_assembler'):
def make_agd_read(column_buffers, num_reads):
if isinstance(column_buffers, ops.Tensor):
column_buffers = array_ops.unstack(column_buffers)
if include_meta:
assert (len(column_buffers) == 3)
return persona_ops.agd_assembler(agd_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], meta_handle=column_buffers[2], num_records=num_reads)
else:
assert (len(column_buffers) == 2)
return persona_ops.no_meta_agd_assembler(agd_read_pool=agd_read_pool, base_handle=column_buffers[0], qual_handle=column_buffers[1], num_records=num_reads)
if (agd_read_pool is None):
agd_read_pool = persona_ops.agd_read_pool(**agd_read_pool_args, name='agd_reader_agd_read_pool')
assert (len(upstream_tensors) > 0)
for ((output_buffers, num_reads), control_dep) in zip(upstream_tensors, control_deps):
with ops.control_dependencies(control_dep):
(yield make_agd_read(column_buffers=output_buffers, num_reads=num_reads))<|docstring|>Generate agd_read datatypes from the upstream tensors
:param upstream_tensors: a list of tuples of tensors with type: (column_buffers, num_reads)
:param agd_read_pool: if not None, pass in an instance of persona_ops.agd_read_pool to share
:param agd_read_pool_args: args for default construction of agd_read_pool if it's None
:param include_meta: create a meta read assembler if passed. The shape of upstream_tensors must be compatible
:param name:
:return: yield instances of a tensor with AGDRead instance as the result<|endoftext|> |
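One subtlety in the default branch of agd_read_assembler above, noted as an observation rather than a definitive fix: itertools.repeat objects do not implement len(), so the length check that follows would raise TypeError whenever control_deps is None. The sketch below uses plain Python lists in place of tensor groups (all names are illustrative only) to show a default that keeps the intent of pairing every tensor group with an empty dependency list.
# Minimal sketch with plain lists standing in for tensor tuples.
upstream_tensors = [("buffers_a", 10), ("buffers_b", 20)]

def default_control_deps(groups):
    # one independent, len()-compatible empty dependency list per tensor group
    return [[] for _ in groups]

control_deps = default_control_deps(upstream_tensors)
assert len(control_deps) == len(upstream_tensors)
for (output_buffers, num_reads), control_dep in zip(upstream_tensors, control_deps):
    print(output_buffers, num_reads, control_dep)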
d1e4746de554a42482775dc8f8af0c76f9d8cf10e4b06028ed958969187413da | @tf.function
def call(self, inputs):
'\n :param inputs: batched ids corresponding to text\n :return the probabilities as a tensor, [batch_size x num_classes]\n '
embedding = tf.nn.embedding_lookup(self.E, inputs)
embedding = self.pos_embed(embedding)
x = self.transformer(embedding)
x = tf.keras.layers.GlobalAveragePooling1D()(x)
probs = self.dense1(x)
return probs | :param inputs: batched ids corresponding to text
:return the probabilities as a tensor, [batch_size x num_classes] | transformer_model/code/transformer_model.py | call | nate-gillman/alzheimers-DL-final | 0 | python | @tf.function
def call(self, inputs):
'\n :param inputs: batched ids corresponding to text\n :return the probabilities as a tensor, [batch_size x num_classes]\n '
embedding = tf.nn.embedding_lookup(self.E, inputs)
embedding = self.pos_embed(embedding)
x = self.transformer(embedding)
x = tf.keras.layers.GlobalAveragePooling1D()(x)
probs = self.dense1(x)
return probs | @tf.function
def call(self, inputs):
'\n :param inputs: batched ids corresponding to text\n :return the probabilities as a tensor, [batch_size x num_classes]\n '
embedding = tf.nn.embedding_lookup(self.E, inputs)
embedding = self.pos_embed(embedding)
x = self.transformer(embedding)
x = tf.keras.layers.GlobalAveragePooling1D()(x)
probs = self.dense1(x)
return probs<|docstring|>:param inputs: batched ids corresponding to text
:return the probabilities as a tensor, [batch_size x num_classes]<|endoftext|> |
d943cb1ccda179e93309de0e0b5171ca0ad59285b1b10fdfb0e869dbf510a2ee | def accuracy(self, logits, labels):
"\n Calculates the model's prediction accuracy by comparing\n logits to correct labels – no need to modify this.\n \n :param logits: a matrix of size (num_inputs, self.num_classes); during training, this will be (batch_size, self.num_classes)\n containing the result of multiple convolution and feed forward layers\n :param labels: matrix of size (num_labels, self.num_classes) containing the answers, during training, this will be (batch_size, self.num_classes)\n\n NOTE: DO NOT EDIT\n \n :return: the accuracy of the model as a Tensor\n "
correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
return tf.reduce_mean(tf.cast(correct_predictions, tf.float32)) | Calculates the model's prediction accuracy by comparing
logits to correct labels – no need to modify this.
:param logits: a matrix of size (num_inputs, self.num_classes); during training, this will be (batch_size, self.num_classes)
containing the result of multiple convolution and feed forward layers
:param labels: matrix of size (num_labels, self.num_classes) containing the answers, during training, this will be (batch_size, self.num_classes)
NOTE: DO NOT EDIT
:return: the accuracy of the model as a Tensor | transformer_model/code/transformer_model.py | accuracy | nate-gillman/alzheimers-DL-final | 0 | python | def accuracy(self, logits, labels):
"\n Calculates the model's prediction accuracy by comparing\n logits to correct labels – no need to modify this.\n \n :param logits: a matrix of size (num_inputs, self.num_classes); during training, this will be (batch_size, self.num_classes)\n containing the result of multiple convolution and feed forward layers\n :param labels: matrix of size (num_labels, self.num_classes) containing the answers, during training, this will be (batch_size, self.num_classes)\n\n NOTE: DO NOT EDIT\n \n :return: the accuracy of the model as a Tensor\n "
correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
return tf.reduce_mean(tf.cast(correct_predictions, tf.float32)) | def accuracy(self, logits, labels):
"\n Calculates the model's prediction accuracy by comparing\n logits to correct labels – no need to modify this.\n \n :param logits: a matrix of size (num_inputs, self.num_classes); during training, this will be (batch_size, self.num_classes)\n containing the result of multiple convolution and feed forward layers\n :param labels: matrix of size (num_labels, self.num_classes) containing the answers, during training, this will be (batch_size, self.num_classes)\n\n NOTE: DO NOT EDIT\n \n :return: the accuracy of the model as a Tensor\n "
correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
return tf.reduce_mean(tf.cast(correct_predictions, tf.float32))<|docstring|>Calculates the model's prediction accuracy by comparing
logits to correct labels – no need to modify this.
:param logits: a matrix of size (num_inputs, self.num_classes); during training, this will be (batch_size, self.num_classes)
containing the result of multiple convolution and feed forward layers
:param labels: matrix of size (num_labels, self.num_classes) containing the answers, during training, this will be (batch_size, self.num_classes)
NOTE: DO NOT EDIT
:return: the accuracy of the model as a Tensor<|endoftext|> |
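A small self-contained check of the accuracy computation above, assuming TensorFlow 2.x is installed; the logits and one-hot labels are made up for illustration.
import tensorflow as tf

logits = tf.constant([[2.0, 0.5], [0.1, 1.3], [0.9, 0.2]])  # 3 examples, 2 classes
labels = tf.constant([[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]])  # one-hot ground truth

# same computation as the method body above
correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
print(float(accuracy))  # 2 of 3 predictions match the labels -> ~0.667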
4ab77a2e02628b424fc2ad60f13d9fa55ddef53ce3309e426206ea23c0f8af54 | def loss_function(self, prbs, labels, mask):
'\n\t\tCalculates the model cross-entropy loss after one forward pass\n\t\tPlease use reduce sum here instead of reduce mean to make things easier in calculating per symbol accuracy.\n\n\t\t:param prbs: float tensor, word prediction probabilities [batch_size x window_size x english_vocab_size]\n\t\t:param labels: integer tensor, word prediction labels [batch_size x window_size]\n\t\t:param mask: tensor that acts as a padding mask [batch_size x window_size]\n\t\t:return: the loss of the model as a tensor\n\t\t'
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels, prbs)) | Calculates the model cross-entropy loss after one forward pass
Please use reduce sum here instead of reduce mean to make things easier in calculating per symbol accuracy.
:param prbs: float tensor, word prediction probabilities [batch_size x window_size x english_vocab_size]
:param labels: integer tensor, word prediction labels [batch_size x window_size]
:param mask: tensor that acts as a padding mask [batch_size x window_size]
:return: the loss of the model as a tensor | transformer_model/code/transformer_model.py | loss_function | nate-gillman/alzheimers-DL-final | 0 | python | def loss_function(self, prbs, labels, mask):
'\n\t\tCalculates the model cross-entropy loss after one forward pass\n\t\tPlease use reduce sum here instead of reduce mean to make things easier in calculating per symbol accuracy.\n\n\t\t:param prbs: float tensor, word prediction probabilities [batch_size x window_size x english_vocab_size]\n\t\t:param labels: integer tensor, word prediction labels [batch_size x window_size]\n\t\t:param mask: tensor that acts as a padding mask [batch_size x window_size]\n\t\t:return: the loss of the model as a tensor\n\t\t'
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels, prbs)) | def loss_function(self, prbs, labels, mask):
'\n\t\tCalculates the model cross-entropy loss after one forward pass\n\t\tPlease use reduce sum here instead of reduce mean to make things easier in calculating per symbol accuracy.\n\n\t\t:param prbs: float tensor, word prediction probabilities [batch_size x window_size x english_vocab_size]\n\t\t:param labels: integer tensor, word prediction labels [batch_size x window_size]\n\t\t:param mask: tensor that acts as a padding mask [batch_size x window_size]\n\t\t:return: the loss of the model as a tensor\n\t\t'
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels, prbs))<|docstring|>Calculates the model cross-entropy loss after one forward pass
Please use reduce sum here instead of reduce mean to make things easier in calculating per symbol accuracy.
:param prbs: float tensor, word prediction probabilities [batch_size x window_size x english_vocab_size]
:param labels: integer tensor, word prediction labels [batch_size x window_size]
:param mask: tensor that acts as a padding mask [batch_size x window_size]
:return: the loss of the model as a tensor<|endoftext|> |
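The docstring above asks for a summed loss while the body returns a mean. The sketch below (toy values, TensorFlow 2.x assumed) shows how the two aggregations of the per-example cross-entropy relate, without asserting which one was intended.
import tensorflow as tf

labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # one-hot labels for 2 examples
logits = tf.constant([[2.0, 0.5], [0.3, 1.8]])  # unnormalized prediction scores

per_example = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(float(tf.reduce_sum(per_example)))   # summed loss, as the docstring requests
print(float(tf.reduce_mean(per_example)))  # mean loss, as the body actually returns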
c26d7ec7296685ef4962d4d5316996e82179196890578dfd579ba100d362381a | @staticmethod
def maxArea(height: List[int]) -> int:
'\n (The following is excerpted from the comments section.)\n Clearly, two key factors determine the answer: the shorter of the two boards and the width between them.\n If the width shrinks, the only way the area can be larger than before is for the shorter board to be taller than before.\n So there is no need to move the taller board at all; moving it achieves nothing. To get a larger area at a smaller width, the only option is to move the shorter board and hope it becomes taller. The weakest-link principle, friends: grind enough problems and you suddenly grasp a life lesson.\n So stop wondering whether this problem should be solved with two pointers; once you see its essence, it becomes obvious that two pointers is the better approach.\n '
(i, j, res) = (0, (len(height) - 1), 0)
while (i < j):
if (height[i] < height[j]):
res = max(res, (height[i] * (j - i)))
i += 1
else:
res = max(res, (height[j] * (j - i)))
j -= 1
return res | (The following is excerpted from the comments section.)
Clearly, two key factors determine the answer: the shorter of the two boards and the width between them.
If the width shrinks, the only way the area can be larger than before is for the shorter board to be taller than before.
So there is no need to move the taller board at all; moving it achieves nothing. To get a larger area at a smaller width, the only option is to move the shorter board and hope it becomes taller. The weakest-link principle, friends: grind enough problems and you suddenly grasp a life lesson.
So stop wondering whether this problem should be solved with two pointers; once you see its essence, it becomes obvious that two pointers is the better approach. | problems/11_most_water/good_solution.py | maxArea | TanyeeZhang/leet-note-code | 0 | python | @staticmethod
def maxArea(height: List[int]) -> int:
'\n (The following is excerpted from the comments section.)\n Clearly, two key factors determine the answer: the shorter of the two boards and the width between them.\n If the width shrinks, the only way the area can be larger than before is for the shorter board to be taller than before.\n So there is no need to move the taller board at all; moving it achieves nothing. To get a larger area at a smaller width, the only option is to move the shorter board and hope it becomes taller. The weakest-link principle, friends: grind enough problems and you suddenly grasp a life lesson.\n So stop wondering whether this problem should be solved with two pointers; once you see its essence, it becomes obvious that two pointers is the better approach.\n '
(i, j, res) = (0, (len(height) - 1), 0)
while (i < j):
if (height[i] < height[j]):
res = max(res, (height[i] * (j - i)))
i += 1
else:
res = max(res, (height[j] * (j - i)))
j -= 1
return res | @staticmethod
def maxArea(height: List[int]) -> int:
'\n (The following is excerpted from the comments section.)\n Clearly, two key factors determine the answer: the shorter of the two boards and the width between them.\n If the width shrinks, the only way the area can be larger than before is for the shorter board to be taller than before.\n So there is no need to move the taller board at all; moving it achieves nothing. To get a larger area at a smaller width, the only option is to move the shorter board and hope it becomes taller. The weakest-link principle, friends: grind enough problems and you suddenly grasp a life lesson.\n So stop wondering whether this problem should be solved with two pointers; once you see its essence, it becomes obvious that two pointers is the better approach.\n '
(i, j, res) = (0, (len(height) - 1), 0)
while (i < j):
if (height[i] < height[j]):
res = max(res, (height[i] * (j - i)))
i += 1
else:
res = max(res, (height[j] * (j - i)))
j -= 1
return res<|docstring|>(The following is excerpted from the comments section.)
Clearly, two key factors determine the answer: the shorter of the two boards and the width between them.
If the width shrinks, the only way the area can be larger than before is for the shorter board to be taller than before.
So there is no need to move the taller board at all; moving it achieves nothing. To get a larger area at a smaller width, the only option is to move the shorter board and hope it becomes taller. The weakest-link principle, friends: grind enough problems and you suddenly grasp a life lesson.
So stop wondering whether this problem should be solved with two pointers; once you see its essence, it becomes obvious that two pointers is the better approach.<|endoftext|>
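A quick standalone check of the two-pointer idea above using the classic example input; the helper below mirrors the method so the snippet runs on its own.
def max_area(height):
    i, j, res = 0, len(height) - 1, 0
    while i < j:
        if height[i] < height[j]:
            res = max(res, height[i] * (j - i))  # shorter left board limits the area
            i += 1
        else:
            res = max(res, height[j] * (j - i))  # shorter right board limits the area
            j -= 1
    return res

print(max_area([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # 49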
2110a4e980c233cbc87bcc4252708ccd1535e5255a0fa1e7d34a9aae26b04e84 | def random_date(start, end):
'\n This function will return a random datetime between two datetime\n objects.\n '
delta = (end - start)
int_delta = ((((delta.days * 24) * 60) * 60) + delta.seconds)
random_second = randrange(int_delta)
return (start + timedelta(seconds=random_second)) | This function will return a random datetime between two datetime
objects. | src/tuberlin/inventory/management/commands/random_objekts.py | random_date | CircularBerlin/gmit | 0 | python | def random_date(start, end):
'\n This function will return a random datetime between two datetime\n objects.\n '
delta = (end - start)
int_delta = ((((delta.days * 24) * 60) * 60) + delta.seconds)
random_second = randrange(int_delta)
return (start + timedelta(seconds=random_second)) | def random_date(start, end):
'\n This function will return a random datetime between two datetime\n objects.\n '
delta = (end - start)
int_delta = ((((delta.days * 24) * 60) * 60) + delta.seconds)
random_second = randrange(int_delta)
return (start + timedelta(seconds=random_second))<|docstring|>This function will return a random datetime between two datetime
objects.<|endoftext|> |
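A minimal usage sketch for random_date, assuming the function defined above is in scope; the date range is arbitrary.
from datetime import datetime

start = datetime(2020, 1, 1)
end = datetime(2020, 12, 31)
picked = random_date(start, end)  # assumes random_date as defined above
assert start <= picked <= end     # the result always falls inside the given range
print(picked)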
dc36bb4e4e6d6d904a1f62da78c769858183c37827e92be6951cd9713f1daab6 | def run_network(pgm):
'Run the intcode network'
computers = []
for c in range(0, 50):
computers.append(intcode.Program('network', copy.copy(pgm), [c]))
while True:
for c in range(0, 50):
if (computers[c].state['ptr'] != (- 1)):
computers[c].intcode()
if (len(computers[c].state['outputs']) >= 3):
dst = computers[c].state['outputs'].pop(0)
x = computers[c].state['outputs'].pop(0)
y = computers[c].state['outputs'].pop(0)
if (dst == 255):
return y
computers[dst].state['inputs'].append(x)
computers[dst].state['inputs'].append(y) | Run the intcode network | aoc2019/day23.py | run_network | zoeimogen/AoC2019 | 0 | python | def run_network(pgm):
computers = []
for c in range(0, 50):
computers.append(intcode.Program('network', copy.copy(pgm), [c]))
while True:
for c in range(0, 50):
if (computers[c].state['ptr'] != (- 1)):
computers[c].intcode()
if (len(computers[c].state['outputs']) >= 3):
dst = computers[c].state['outputs'].pop(0)
x = computers[c].state['outputs'].pop(0)
y = computers[c].state['outputs'].pop(0)
if (dst == 255):
return y
computers[dst].state['inputs'].append(x)
computers[dst].state['inputs'].append(y) | def run_network(pgm):
computers = []
for c in range(0, 50):
computers.append(intcode.Program('network', copy.copy(pgm), [c]))
while True:
for c in range(0, 50):
if (computers[c].state['ptr'] != (- 1)):
computers[c].intcode()
if (len(computers[c].state['outputs']) >= 3):
dst = computers[c].state['outputs'].pop(0)
x = computers[c].state['outputs'].pop(0)
y = computers[c].state['outputs'].pop(0)
if (dst == 255):
return y
computers[dst].state['inputs'].append(x)
computers[dst].state['inputs'].append(y)<|docstring|>Run the intcode network<|endoftext|> |
193efc06f56f550f4c7309ce113644e509f44dee5e8b216e0fffa3a6aa14ab49 | def run_network_p2(pgm):
'Run the intcode network with NAT'
computers = []
for c in range(0, 50):
computers.append(intcode.Program('network', copy.copy(pgm), [c]))
nat = (0, 0)
while True:
for c in range(0, 50):
if (computers[c].state['ptr'] != (- 1)):
computers[c].intcode()
if (len(computers[c].state['outputs']) >= 3):
dst = computers[c].state['outputs'].pop(0)
x = computers[c].state['outputs'].pop(0)
y = computers[c].state['outputs'].pop(0)
if (dst == 255):
if (y == nat[1]):
return y
nat = (x, y)
else:
computers[dst].state['inputs'].append(x)
computers[dst].state['inputs'].append(y)
if (len([i for i in computers if (i.state['idle'] < 2)]) == 0):
computers[0].state['idle'] = 0
computers[0].state['inputs'].append(nat[0])
computers[0].state['inputs'].append(nat[1]) | Run the intcode network with NAT | aoc2019/day23.py | run_network_p2 | zoeimogen/AoC2019 | 0 | python | def run_network_p2(pgm):
computers = []
for c in range(0, 50):
computers.append(intcode.Program('network', copy.copy(pgm), [c]))
nat = (0, 0)
while True:
for c in range(0, 50):
if (computers[c].state['ptr'] != (- 1)):
computers[c].intcode()
if (len(computers[c].state['outputs']) >= 3):
dst = computers[c].state['outputs'].pop(0)
x = computers[c].state['outputs'].pop(0)
y = computers[c].state['outputs'].pop(0)
if (dst == 255):
if (y == nat[1]):
return y
nat = (x, y)
else:
computers[dst].state['inputs'].append(x)
computers[dst].state['inputs'].append(y)
if (len([i for i in computers if (i.state['idle'] < 2)]) == 0):
computers[0].state['idle'] = 0
computers[0].state['inputs'].append(nat[0])
computers[0].state['inputs'].append(nat[1]) | def run_network_p2(pgm):
computers = []
for c in range(0, 50):
computers.append(intcode.Program('network', copy.copy(pgm), [c]))
nat = (0, 0)
while True:
for c in range(0, 50):
if (computers[c].state['ptr'] != (- 1)):
computers[c].intcode()
if (len(computers[c].state['outputs']) >= 3):
dst = computers[c].state['outputs'].pop(0)
x = computers[c].state['outputs'].pop(0)
y = computers[c].state['outputs'].pop(0)
if (dst == 255):
if (y == nat[1]):
return y
nat = (x, y)
else:
computers[dst].state['inputs'].append(x)
computers[dst].state['inputs'].append(y)
if (len([i for i in computers if (i.state['idle'] < 2)]) == 0):
computers[0].state['idle'] = 0
computers[0].state['inputs'].append(nat[0])
computers[0].state['inputs'].append(nat[1])<|docstring|>Run the intcode network with NAT<|endoftext|> |
cc9938fcc0b25533a32636449b49c6e3339aa6db4f7f3b3d60d8ad46a882ca71 | def run() -> Tuple[(int, int)]:
'Main'
with open('inputs/day23.txt') as f:
data = list(map(int, f.readline().split(',')))
part1 = run_network(data)
part2 = run_network_p2(data)
return (part1, part2) | Main | aoc2019/day23.py | run | zoeimogen/AoC2019 | 0 | python | def run() -> Tuple[(int, int)]:
with open('inputs/day23.txt') as f:
data = list(map(int, f.readline().split(',')))
part1 = run_network(data)
part2 = run_network_p2(data)
return (part1, part2) | def run() -> Tuple[(int, int)]:
with open('inputs/day23.txt') as f:
data = list(map(int, f.readline().split(',')))
part1 = run_network(data)
part2 = run_network_p2(data)
return (part1, part2)<|docstring|>Main<|endoftext|> |
10c9f699fffc1cbcf3ef2f8089e22e31bc66714bc61df7a9d49bb8028f6af31b | @staticmethod
def store_result(result: dict, filepath: str):
'Store the given result at the specified location'
ScanResultProcessor.store_json_convertible_result(result, filepath) | Store the given result at the specified location | core/scan_result_processor.py | store_result | RE4CT10N/avain | 51 | python | @staticmethod
def store_result(result: dict, filepath: str):
ScanResultProcessor.store_json_convertible_result(result, filepath) | @staticmethod
def store_result(result: dict, filepath: str):
ScanResultProcessor.store_json_convertible_result(result, filepath)<|docstring|>Store the given result at the specified location<|endoftext|> |
113c1b0d55e7ed547e0cbb64907791f9843e631372b31b3fc2acd27fdb70f9a3 | @staticmethod
def store_aggregated_result(aggr_result, filepath: str):
'Store the given aggregated result at the specified location'
ScanResultProcessor.store_json_convertible_result(aggr_result, filepath) | Store the given aggregated result at the specified location | core/scan_result_processor.py | store_aggregated_result | RE4CT10N/avain | 51 | python | @staticmethod
def store_aggregated_result(aggr_result, filepath: str):
ScanResultProcessor.store_json_convertible_result(aggr_result, filepath) | @staticmethod
def store_aggregated_result(aggr_result, filepath: str):
ScanResultProcessor.store_json_convertible_result(aggr_result, filepath)<|docstring|>Store the given aggregated result at the specified location<|endoftext|> |
438957c3c005e0f6186514b204c6154fab84c949c79ed6a5880c6276a3165d4e | def aggregate_results(self):
'\n Accumulate all retrieved scan results to one scan result.\n\n :return: a dict having host IPs as keys and their scan results as values\n '
if (not self.results):
result = {}
elif (len(self.results) == 1):
result = copy.deepcopy(self.results[list(self.results.keys())[0]])
else:
result = self._aggregate_results()
for (key, val) in result.items():
if (key != 'trust'):
if (not ('os' in val)):
val['os'] = {}
if (not ('tcp' in val)):
val['tcp'] = {}
if (not ('udp' in val)):
val['udp'] = {}
ScanResultProcessor.remove_trust_values(result)
for (_, host) in result.items():
if ('os' in host):
if (not isinstance(host['os'], list)):
host['os'] = [host['os']]
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, portinfos) in host[protocol].items():
if (not isinstance(portinfos, list)):
host[protocol][portid] = [portinfos]
return ResultProcessor.sort_result_by_ip(result) | Accumulate all retrieved scan results to one scan result.
:return: a dict having host IPs as keys and their scan results as values | core/scan_result_processor.py | aggregate_results | RE4CT10N/avain | 51 | python | def aggregate_results(self):
'\n Accumulate all retrieved scan results to one scan result.\n\n :return: a dict having host IPs as keys and their scan results as values\n '
if (not self.results):
result = {}
elif (len(self.results) == 1):
result = copy.deepcopy(self.results[list(self.results.keys())[0]])
else:
result = self._aggregate_results()
for (key, val) in result.items():
if (key != 'trust'):
if (not ('os' in val)):
val['os'] = {}
if (not ('tcp' in val)):
val['tcp'] = {}
if (not ('udp' in val)):
val['udp'] = {}
ScanResultProcessor.remove_trust_values(result)
for (_, host) in result.items():
if ('os' in host):
if (not isinstance(host['os'], list)):
host['os'] = [host['os']]
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, portinfos) in host[protocol].items():
if (not isinstance(portinfos, list)):
host[protocol][portid] = [portinfos]
return ResultProcessor.sort_result_by_ip(result) | def aggregate_results(self):
'\n Accumulate all retrieved scan results to one scan result.\n\n :return: a dict having host IPs as keys and their scan results as values\n '
if (not self.results):
result = {}
elif (len(self.results) == 1):
result = copy.deepcopy(self.results[list(self.results.keys())[0]])
else:
result = self._aggregate_results()
for (key, val) in result.items():
if (key != 'trust'):
if (not ('os' in val)):
val['os'] = {}
if (not ('tcp' in val)):
val['tcp'] = {}
if (not ('udp' in val)):
val['udp'] = {}
ScanResultProcessor.remove_trust_values(result)
for (_, host) in result.items():
if ('os' in host):
if (not isinstance(host['os'], list)):
host['os'] = [host['os']]
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, portinfos) in host[protocol].items():
if (not isinstance(portinfos, list)):
host[protocol][portid] = [portinfos]
return ResultProcessor.sort_result_by_ip(result)<|docstring|>Accumulate all retrieved scan results to one scan result.
:return: a dict having host IPs as keys and their scan results as values<|endoftext|> |
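A toy illustration of the normalization step at the end of aggregate_results, with made-up host data: single OS and port entries are wrapped into one-element lists so downstream code can always iterate.
host = {"os": {"name": "Linux"}, "tcp": {"22": {"service": "ssh"}}, "udp": {}}
if not isinstance(host["os"], list):
    host["os"] = [host["os"]]
for protocol in ("tcp", "udp"):
    for portid, portinfos in host[protocol].items():
        if not isinstance(portinfos, list):
            host[protocol][portid] = [portinfos]
print(host)  # {'os': [{'name': 'Linux'}], 'tcp': {'22': [{'service': 'ssh'}]}, 'udp': {}}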
70b14611f18c094ea25c41400714b70519060e0e353ce51b5ad0341516cff6bf | def _group_by_product(self, intermediate_results):
'\n Group the intermediate results by their CPE product value (if it exists). Two items\n are grouped if they have the same part and vendor and the cosine similarity of their\n product strings is greater than 0.45.\n\n :param intermediate_results: the intermediate results after first group and reduce\n :return: the intermediate results grouped by their CPE product\n '
def group_item_by_product(item, groups):
for group in groups:
for gitem in group:
for cpe in item.get('cpes', []):
for gcpe in gitem.get('cpes', []):
(cpe_split, gcpe_split) = (cpe[5:].split(':'), gcpe[5:].split(':'))
if ((len(cpe_split) > 2) and (len(gcpe_split) > 2)):
if ((cpe_split[0] == gcpe_split[0]) and (cpe_split[1] == gcpe_split[1])):
if (util.compute_cosine_similarity(cpe_split[2], gcpe_split[2], '[^\\W_]+') > 0.45):
group.append(item)
return True
return False
def group_protocol(protocol):
nonlocal ip, host, product_groups
if (protocol in host):
if (protocol not in product_groups):
product_groups[ip][protocol] = {}
for (portid, port_nodes) in host[protocol].items():
port_groups = []
for port_node in port_nodes:
if (not group_item_by_product(port_node, port_groups)):
port_groups.append([port_node])
product_groups[ip][protocol][portid] = port_groups
product_groups = {}
for (ip, host) in intermediate_results.items():
if (ip not in product_groups):
product_groups[ip] = {}
if ('os' in host):
os_groups = []
for os_node in host['os']:
if (not group_item_by_product(os_node, os_groups)):
os_groups.append([os_node])
product_groups[ip]['os'] = os_groups
group_protocol('tcp')
group_protocol('udp')
return product_groups | Group the intermediate results by their CPE product value (if it exists). Two items
are grouped if they have the same part and vendor and the cosine similarity of their
product strings is greater than 0.45.
:param intermediate_results: the intermediate results after first group and reduce
:return: the intermediate results grouped by their CPE product | core/scan_result_processor.py | _group_by_product | RE4CT10N/avain | 51 | python | def _group_by_product(self, intermediate_results):
'\n Group the intermediate results by their CPE product value (if it exists). Two items\n are grouped if they have the same part and vendor and the cosine similarity of their\n product strings is greater than 0.45.\n\n :param intermediate_results: the intermediate results after first group and reduce\n :return: the intermediate results grouped by their CPE product\n '
def group_item_by_product(item, groups):
for group in groups:
for gitem in group:
for cpe in item.get('cpes', []):
for gcpe in gitem.get('cpes', []):
(cpe_split, gcpe_split) = (cpe[5:].split(':'), gcpe[5:].split(':'))
if ((len(cpe_split) > 2) and (len(gcpe_split) > 2)):
if ((cpe_split[0] == gcpe_split[0]) and (cpe_split[1] == gcpe_split[1])):
if (util.compute_cosine_similarity(cpe_split[2], gcpe_split[2], '[^\\W_]+') > 0.45):
group.append(item)
return True
return False
def group_protocol(protocol):
nonlocal ip, host, product_groups
if (protocol in host):
if (protocol not in product_groups):
product_groups[ip][protocol] = {}
for (portid, port_nodes) in host[protocol].items():
port_groups = []
for port_node in port_nodes:
if (not group_item_by_product(port_node, port_groups)):
port_groups.append([port_node])
product_groups[ip][protocol][portid] = port_groups
product_groups = {}
for (ip, host) in intermediate_results.items():
if (ip not in product_groups):
product_groups[ip] = {}
if ('os' in host):
os_groups = []
for os_node in host['os']:
if (not group_item_by_product(os_node, os_groups)):
os_groups.append([os_node])
product_groups[ip]['os'] = os_groups
group_protocol('tcp')
group_protocol('udp')
return product_groups | def _group_by_product(self, intermediate_results):
'\n Group the intermediate results by their CPE product value (if it exists). Two items\n are grouped if they have the same part and vendor and the cosine similarity of their\n product strings is greater than 0.45.\n\n :param intermediate_results: the intermediate results after first group and reduce\n :return: the intermediate results grouped by their CPE product\n '
def group_item_by_product(item, groups):
for group in groups:
for gitem in group:
for cpe in item.get('cpes', []):
for gcpe in gitem.get('cpes', []):
(cpe_split, gcpe_split) = (cpe[5:].split(':'), gcpe[5:].split(':'))
if ((len(cpe_split) > 2) and (len(gcpe_split) > 2)):
if ((cpe_split[0] == gcpe_split[0]) and (cpe_split[1] == gcpe_split[1])):
if (util.compute_cosine_similarity(cpe_split[2], gcpe_split[2], '[^\\W_]+') > 0.45):
group.append(item)
return True
return False
def group_protocol(protocol):
nonlocal ip, host, product_groups
if (protocol in host):
if (protocol not in product_groups):
product_groups[ip][protocol] = {}
for (portid, port_nodes) in host[protocol].items():
port_groups = []
for port_node in port_nodes:
if (not group_item_by_product(port_node, port_groups)):
port_groups.append([port_node])
product_groups[ip][protocol][portid] = port_groups
product_groups = {}
for (ip, host) in intermediate_results.items():
if (ip not in product_groups):
product_groups[ip] = {}
if ('os' in host):
os_groups = []
for os_node in host['os']:
if (not group_item_by_product(os_node, os_groups)):
os_groups.append([os_node])
product_groups[ip]['os'] = os_groups
group_protocol('tcp')
group_protocol('udp')
return product_groups<|docstring|>Group the intermediate results by their CPE product value (if it exists). Two items
are grouped if they have the same part and vendor and the cosine similarity of their
product strings is greater than 0.45.
:param intermediate_results: the intermediate results after first group and reduce
:return: the intermediate results grouped by their CPE product<|endoftext|> |
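An illustrative re-implementation of the grouping test above: two CPEs fall into the same group when their part and vendor match and their product strings have cosine similarity greater than 0.45. The bag-of-words similarity below only stands in for util.compute_cosine_similarity, whose exact implementation is not shown here, and the sample CPE strings are made up.
import re
from collections import Counter
from math import sqrt

def cosine_similarity(a, b, pattern=r"[^\W_]+"):
    va, vb = Counter(re.findall(pattern, a)), Counter(re.findall(pattern, b))
    dot = sum(va[t] * vb[t] for t in va)
    norm = sqrt(sum(c * c for c in va.values())) * sqrt(sum(c * c for c in vb.values()))
    return dot / norm if norm else 0.0

def same_product_group(cpe, gcpe):
    cpe_split, gcpe_split = cpe[5:].split(":"), gcpe[5:].split(":")
    if len(cpe_split) > 2 and len(gcpe_split) > 2:
        if cpe_split[0] == gcpe_split[0] and cpe_split[1] == gcpe_split[1]:
            return cosine_similarity(cpe_split[2], gcpe_split[2]) > 0.45
    return False

print(same_product_group("cpe:/a:openbsd:openssh:7.4", "cpe:/a:openbsd:openssh:7.5"))     # True
print(same_product_group("cpe:/a:openbsd:openssh:7.4", "cpe:/a:apache:http_server:2.4"))  # False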
f2a28f43f1ecf0e11b1a930a321e5ccb094fb85b82eca91f9c70f77eda66fbc2 | def _aggregate_results(self):
'\n Aggregate the "grouped and reduced" results to one final result. The\n aggregation is done depending on the config value for "scan_result_aggr_scheme".\n\n Value "SINGLE" : the single result with the highest trust rating is chosen\n Value "MULTIPLE" : the results are returned without further processing\n Value "FILTER" : similar products are filtered out, i.e. out of macOS 10.12\n and macOS 10.13, only the one with the highest trust rating\n is returned\n '
processed_results = self._group_and_reduce()
if (self.config['core'].get('scan_result_aggr_scheme', '').upper() == 'MULTIPLE'):
return processed_results
if (self.config['core'].get('scan_result_aggr_scheme', '').upper() == 'SINGLE'):
for (_, host) in processed_results.items():
if ('os' in host):
host['os'] = [max(host['os'], key=(lambda entry: entry['trust']))]
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, port_entries) in host[protocol].items():
host[protocol][portid] = [max(port_entries, key=(lambda entry: entry['trust']))]
return processed_results
if (self.config['core'].get('scan_result_aggr_scheme', 'FILTER').upper() == 'FILTER'):
product_groups = self._group_by_product(processed_results)
for (_, host) in product_groups.items():
if ('os' in host):
os_items = []
for group in host['os']:
os_items.append(max(group, key=(lambda entry: entry['trust'])))
host['os'] = os_items
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, port_groups) in host[protocol].items():
port_items = []
for group in port_groups:
port_items.append(max(group, key=(lambda entry: entry['trust'])))
host[protocol][portid] = port_items
return product_groups
util.printit("Warning: unknown config value for 'scan_result_aggr_scheme'", color=util.RED)
return {} | Aggregate the "grouped and reduced" results to one final result. The
aggregation is done depending on the config value for "scan_result_aggr_scheme".
Value "SINGLE" : the single result with the highest trust rating is chosen
Value "MULTIPLE" : the results are returned without further processing
Value "FILTER" : similar products are filtered out, i.e. out of macOS 10.12
and macOS 10.13, only the one with the highest trust rating
is returned | core/scan_result_processor.py | _aggregate_results | RE4CT10N/avain | 51 | python | def _aggregate_results(self):
'\n Aggregate the "grouped and reduced" results to one final result. The\n aggregation is done depending on the config value for "scan_result_aggr_scheme".\n\n Value "SINGLE" : the single result with the highest trust rating is chosen\n Value "MULTIPLE" : the results are returned without further processing\n Value "FILTER" : similar products are filtered out, i.e. out of macOS 10.12\n and macOS 10.13, only the one with the highest trust rating\n is returned\n '
processed_results = self._group_and_reduce()
if (self.config['core'].get('scan_result_aggr_scheme', ).upper() == 'MULTIPLE'):
return processed_results
if (self.config['core'].get('scan_result_aggr_scheme', ).upper() == 'SINGLE'):
for (_, host) in processed_results.items():
if ('os' in host):
host['os'] = [max(host['os'], key=(lambda entry: entry['trust']))]
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, port_entries) in host[protocol].items():
host[protocol][portid] = [max(port_entries, key=(lambda entry: entry['trust']))]
return processed_results
if (self.config['core'].get('scan_result_aggr_scheme', 'FILTER').upper() == 'FILTER'):
product_groups = self._group_by_product(processed_results)
for (_, host) in product_groups.items():
if ('os' in host):
os_items = []
for group in host['os']:
os_items.append(max(group, key=(lambda entry: entry['trust'])))
host['os'] = os_items
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, port_groups) in host[protocol].items():
port_items = []
for group in port_groups:
port_items.append(max(group, key=(lambda entry: entry['trust'])))
host[protocol][portid] = port_items
return product_groups
util.printit("Warning: unknown config value for 'scan_result_aggr_scheme'", color=util.RED)
return {} | def _aggregate_results(self):
'\n Aggregate the "grouped and reduced" results to one final result. The\n aggregation is done depending on the config value for "scan_result_aggr_scheme".\n\n Value "SINGLE" : the single result with the highest trust rating is chosen\n Value "MULTIPLE" : the results are returned without further processing\n Value "FILTER" : similar products are filtered out, i.e. out of macOS 10.12\n and macOS 10.13, only the one with the highest trust rating\n is returned\n '
processed_results = self._group_and_reduce()
if (self.config['core'].get('scan_result_aggr_scheme', ).upper() == 'MULTIPLE'):
return processed_results
if (self.config['core'].get('scan_result_aggr_scheme', ).upper() == 'SINGLE'):
for (_, host) in processed_results.items():
if ('os' in host):
host['os'] = [max(host['os'], key=(lambda entry: entry['trust']))]
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, port_entries) in host[protocol].items():
host[protocol][portid] = [max(port_entries, key=(lambda entry: entry['trust']))]
return processed_results
if (self.config['core'].get('scan_result_aggr_scheme', 'FILTER').upper() == 'FILTER'):
product_groups = self._group_by_product(processed_results)
for (_, host) in product_groups.items():
if ('os' in host):
os_items = []
for group in host['os']:
os_items.append(max(group, key=(lambda entry: entry['trust'])))
host['os'] = os_items
for protocol in ('tcp', 'udp'):
if (protocol in host):
for (portid, port_groups) in host[protocol].items():
port_items = []
for group in port_groups:
port_items.append(max(group, key=(lambda entry: entry['trust'])))
host[protocol][portid] = port_items
return product_groups
util.printit("Warning: unknown config value for 'scan_result_aggr_scheme'", color=util.RED)
return {}<|docstring|>Aggregate the "grouped and reduced" results to one final result. The
aggregation is done depending on the config value for "scan_result_aggr_scheme".
Value "SINGLE" : the single result with the highest trust rating is chosen
Value "MULTIPLE" : the results are returned without further processing
Value "FILTER" : similar products are filtered out, i.e. out of macOS 10.12
and macOS 10.13, only the one with the highest trust rating
is returned<|endoftext|> |
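A toy illustration of the "SINGLE" scheme described above: keep only the entry with the highest trust value (the entries here are invented for the example).
entries = [
    {"product": "OpenSSH 7.4", "trust": 3.0},
    {"product": "OpenSSH 7.5", "trust": 4.5},
]
best = max(entries, key=lambda entry: entry["trust"])
print(best["product"])  # OpenSSH 7.5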
48b7ca6898c8dff73f5f2e911e3bb852fe924c95a9c4c716d89a46941eb96fb1 | def _group_and_reduce(self):
'\n First groups all the different OS and port information of every host\n retrieved from the different scanning modules into groups that contain\n similar items. For example, "OS: macOS 10.10" is grouped together with\n "macOS 10.10.4".\n Next, these groups are reduced / aggregated to one entry each. This\n can be done in several ways. Currently supported are: 1. Reducing\n to the item with the highest trust value; 2. Reducing to the most\n specific entry and giving it an aggregated trust value, based on all\n trust values in its group.\n '
def group_os():
'\n Group the OS entry of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if ('os' not in host):
return
if (not ('os' in groups[ip])):
groups[ip]['os'] = []
if isinstance(host['os'], list):
for item in host['os']:
self._group_item(ip, module, item, groups[ip]['os'], (lambda host: host['os']))
else:
self._group_item(ip, module, host['os'], groups[ip]['os'], (lambda host: host['os']))
def group_ports(protocol):
'\n Group the port entries of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if (protocol not in host):
return
if (protocol not in groups[ip]):
groups[ip][protocol] = {}
for (portid, port) in host[protocol].items():
if (not (portid in groups[ip][protocol])):
groups[ip][protocol][portid] = []
if isinstance(port, list):
for item in port:
self._group_item(ip, module, item, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
else:
self._group_item(ip, module, port, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
results = {}
groups = {}
for (module, result) in self.results.items():
if ('trust' in result):
module_trust_rating = result['trust']
del result['trust']
else:
module_trust_rating = self.default_trust
for (ip, host) in result.items():
if (ip not in groups):
groups[ip] = {}
ScanResultProcessor._add_trust(host, module_trust_rating)
if ('os' in host):
group_os()
if ('tcp' in host):
group_ports('tcp')
if ('udp' in host):
group_ports('udp')
group_out_file = os.path.join(self.output_dir, AGGR_GROUP_FILE)
with open(group_out_file, 'w') as file:
file.write(json.dumps(groups, ensure_ascii=False, indent=3))
self.logger.info('Grouped similar scan results and wrote result to %s', group_out_file)
for (ip, host) in groups.items():
results[ip] = host
if ('os' in host):
os_items = []
for os_group in host['os']:
os_items.append(self._aggregate_group(os_group))
results[ip]['os'] = os_items
for protocol in {'tcp', 'udp'}:
if (protocol in host):
for (portid, port_groups) in host[protocol].items():
port_items = []
for port_group in port_groups:
port_items.append(self._aggregate_group(port_group))
results[ip][protocol][portid] = port_items
option_out_file = os.path.join(self.output_dir, AGGR_OPTION_FILE)
with open(option_out_file, 'w') as file:
file.write(json.dumps(results, ensure_ascii=False, indent=3))
self.logger.info('Aggregated the individual groups and wrote result to %s', option_out_file)
return results | First groups all the different OS and port information of every host
retrieved from the different scanning modules into groups that contain
similar items. For example, "OS: macOS 10.10" is grouped together with
"macOS 10.10.4".
Next, these groups are reduced / aggregated to one entry each. This
can be done in several ways. Currently supported are: 1. Reducing
to the item with the highest trust value; 2. Reducing to the most
specific entry and giving it an aggregated trust value, based on all
trust values in its group. | core/scan_result_processor.py | _group_and_reduce | RE4CT10N/avain | 51 | python | def _group_and_reduce(self):
'\n First groups all the different OS and port information of every host\n retrieved from the different scanning modules into groups that contain\n similar items. For example, "OS: macOS 10.10" is grouped together with\n "macOS 10.10.4".\n Next, these groups are reduced / aggregated to one entry each. This\n can be done in several ways. Currently supported are: 1. Reducing\n to the item with the highest trust value; 2. Reducing to the most\n specific entry and giving it an aggregated trust value, based on all\n trust values in its group.\n '
def group_os():
'\n Group the OS entry of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if ('os' not in host):
return
if (not ('os' in groups[ip])):
groups[ip]['os'] = []
if isinstance(host['os'], list):
for item in host['os']:
self._group_item(ip, module, item, groups[ip]['os'], (lambda host: host['os']))
else:
self._group_item(ip, module, host['os'], groups[ip]['os'], (lambda host: host['os']))
def group_ports(protocol):
'\n Group the port entries of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if (protocol not in host):
return
if (protocol not in groups[ip]):
groups[ip][protocol] = {}
for (portid, port) in host[protocol].items():
if (not (portid in groups[ip][protocol])):
groups[ip][protocol][portid] = []
if isinstance(port, list):
for item in port:
self._group_item(ip, module, item, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
else:
self._group_item(ip, module, port, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
results = {}
groups = {}
for (module, result) in self.results.items():
if ('trust' in result):
module_trust_rating = result['trust']
del result['trust']
else:
module_trust_rating = self.default_trust
for (ip, host) in result.items():
if (ip not in groups):
groups[ip] = {}
ScanResultProcessor._add_trust(host, module_trust_rating)
if ('os' in host):
group_os()
if ('tcp' in host):
group_ports('tcp')
if ('udp' in host):
group_ports('udp')
group_out_file = os.path.join(self.output_dir, AGGR_GROUP_FILE)
with open(group_out_file, 'w') as file:
file.write(json.dumps(groups, ensure_ascii=False, indent=3))
self.logger.info('Grouped similar scan results and wrote result to %s', group_out_file)
for (ip, host) in groups.items():
results[ip] = host
if ('os' in host):
os_items = []
for os_group in host['os']:
os_items.append(self._aggregate_group(os_group))
results[ip]['os'] = os_items
for protocol in {'tcp', 'udp'}:
if (protocol in host):
for (portid, port_groups) in host[protocol].items():
port_items = []
for port_group in port_groups:
port_items.append(self._aggregate_group(port_group))
results[ip][protocol][portid] = port_items
option_out_file = os.path.join(self.output_dir, AGGR_OPTION_FILE)
with open(option_out_file, 'w') as file:
file.write(json.dumps(results, ensure_ascii=False, indent=3))
self.logger.info('Aggregated the individual groups and wrote result to %s', option_out_file)
return results | def _group_and_reduce(self):
'\n First groups all the different OS and port information of every host\n retrieved from the different scanning modules into groups that contain\n similar items. For example, "OS: macOS 10.10" is grouped together with\n "macOS 10.10.4".\n Next, these groups are reduced / aggregated to one entry each. This\n can be done in several ways. Currently supported are: 1. Reducing\n to the item with the highest trust value; 2. Reducing to the most\n specific entry and giving it an aggregated trust value, based on all\n trust values in its group.\n '
def group_os():
'\n Group the OS entry of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if ('os' not in host):
return
if (not ('os' in groups[ip])):
groups[ip]['os'] = []
if isinstance(host['os'], list):
for item in host['os']:
self._group_item(ip, module, item, groups[ip]['os'], (lambda host: host['os']))
else:
self._group_item(ip, module, host['os'], groups[ip]['os'], (lambda host: host['os']))
def group_ports(protocol):
'\n Group the port entries of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if (protocol not in host):
return
if (protocol not in groups[ip]):
groups[ip][protocol] = {}
for (portid, port) in host[protocol].items():
if (not (portid in groups[ip][protocol])):
groups[ip][protocol][portid] = []
if isinstance(port, list):
for item in port:
self._group_item(ip, module, item, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
else:
self._group_item(ip, module, port, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
results = {}
groups = {}
for (module, result) in self.results.items():
if ('trust' in result):
module_trust_rating = result['trust']
del result['trust']
else:
module_trust_rating = self.default_trust
for (ip, host) in result.items():
if (ip not in groups):
groups[ip] = {}
ScanResultProcessor._add_trust(host, module_trust_rating)
if ('os' in host):
group_os()
if ('tcp' in host):
group_ports('tcp')
if ('udp' in host):
group_ports('udp')
group_out_file = os.path.join(self.output_dir, AGGR_GROUP_FILE)
with open(group_out_file, 'w') as file:
file.write(json.dumps(groups, ensure_ascii=False, indent=3))
self.logger.info('Grouped similar scan results and wrote result to %s', group_out_file)
for (ip, host) in groups.items():
results[ip] = host
if ('os' in host):
os_items = []
for os_group in host['os']:
os_items.append(self._aggregate_group(os_group))
results[ip]['os'] = os_items
for protocol in {'tcp', 'udp'}:
if (protocol in host):
for (portid, port_groups) in host[protocol].items():
port_items = []
for port_group in port_groups:
port_items.append(self._aggregate_group(port_group))
results[ip][protocol][portid] = port_items
option_out_file = os.path.join(self.output_dir, AGGR_OPTION_FILE)
with open(option_out_file, 'w') as file:
file.write(json.dumps(results, ensure_ascii=False, indent=3))
self.logger.info('Aggregated the individual groups and wrote result to %s', option_out_file)
return results<|docstring|>First groups all the different OS and port information of every host
retrieved from the different scanning modules into groups that contain
similar items. For example, "OS: macOS 10.10" is grouped together with
"macOS 10.10.4".
Next, these groups are reduced / aggregated to one entry each. This
can be done in several ways. Currently supported are: 1. Reducing
to the item with the highest trust value; 2. Reducing to the most
specific entry and giving it an aggregated trust value, based on all
trust values in its group.<|endoftext|> |
fd2794def9651aceac8cac8b1cb75b4b10d5c77985ce647793ad1b9d0483d951 | @staticmethod
def _add_trust(host: dict, trust_value: float):
'\n Add a trust value to every OS and port entry of the current host.\n '
def add_to_ports(protocol: str):
'\n Add trust values to the ports used by the given transport protocol.\n '
if (protocol in host):
for (portid, portitems) in host[protocol].items():
if (not isinstance(portitems, list)):
portitems = [portitems]
for port in portitems:
if ('trust' not in port):
if ('trust' in host[protocol][portid]):
port['trust'] = host[protocol][portid]['trust']
if ('trust' in host[protocol]):
port['trust'] = host[protocol]['trust']
elif ('trust' in host):
port['trust'] = host['trust']
else:
port['trust'] = trust_value
if ('os' in host):
ositems = (host['os'] if isinstance(host['os'], list) else [host['os']])
for ositem in ositems:
if ('trust' not in ositem):
if ('trust' in host['os']):
ositem['trust'] = host['os']['trust']
if ('trust' in host):
ositem['trust'] = host['trust']
else:
ositem['trust'] = trust_value
add_to_ports('tcp')
add_to_ports('udp') | Add a trust value to every OS and port entry of the current host. | core/scan_result_processor.py | _add_trust | RE4CT10N/avain | 51 | python | @staticmethod
def _add_trust(host: dict, trust_value: float):
'\n \n '
def add_to_ports(protocol: str):
'\n Add trust values to the ports used by the given transport protocol.\n '
if (protocol in host):
for (portid, portitems) in host[protocol].items():
if (not isinstance(portitems, list)):
portitems = [portitems]
for port in portitems:
if ('trust' not in port):
if ('trust' in host[protocol][portid]):
port['trust'] = host[protocol][portid]['trust']
if ('trust' in host[protocol]):
port['trust'] = host[protocol]['trust']
elif ('trust' in host):
port['trust'] = host['trust']
else:
port['trust'] = trust_value
if ('os' in host):
ositems = (host['os'] if isinstance(host['os'], list) else [host['os']])
for ositem in ositems:
if ('trust' not in ositem):
if ('trust' in host['os']):
ositem['trust'] = host['os']['trust']
if ('trust' in host):
ositem['trust'] = host['trust']
else:
ositem['trust'] = trust_value
add_to_ports('tcp')
add_to_ports('udp') | @staticmethod
def _add_trust(host: dict, trust_value: float):
'\n \n '
def add_to_ports(protocol: str):
'\n Add trust values to the ports used by the given transport protocol.\n '
if (protocol in host):
for (portid, portitems) in host[protocol].items():
if (not isinstance(portitems, list)):
portitems = [portitems]
for port in portitems:
if ('trust' not in port):
if ('trust' in host[protocol][portid]):
port['trust'] = host[protocol][portid]['trust']
if ('trust' in host[protocol]):
port['trust'] = host[protocol]['trust']
elif ('trust' in host):
port['trust'] = host['trust']
else:
port['trust'] = trust_value
if ('os' in host):
ositems = (host['os'] if isinstance(host['os'], list) else [host['os']])
for ositem in ositems:
if ('trust' not in ositem):
if ('trust' in host['os']):
ositem['trust'] = host['os']['trust']
if ('trust' in host):
ositem['trust'] = host['trust']
else:
ositem['trust'] = trust_value
add_to_ports('tcp')
add_to_ports('udp')<|docstring|>Add a trust value to every OS and port entry of the current host.<|endoftext|> |
c169c550dcc8fec52df3305d8a05badef966edc9fc8fc18f3b9e89656573a08c | @staticmethod
def remove_trust_values(result: dict):
'\n Remove all potential "trust" fields stored in the given scan result\n '
def remove_in_protocol(protocol: str):
'\n Remove the trust values stored under the given transport protocol.\n '
if (protocol in host):
if ('trust' in host[protocol]):
del host[protocol]['trust']
for (_, portinfos) in host[protocol].items():
for portinfo in portinfos:
if ('trust' in portinfo):
del portinfo['trust']
if ('trust' in result):
del result['trust']
for (_, host) in result.items():
if ('trust' in host):
del host['trust']
if ('os' in host):
for osinfo in host['os']:
if ('trust' in osinfo):
del osinfo['trust']
remove_in_protocol('tcp')
remove_in_protocol('udp') | Remove all potential "trust" fields stored in the given scan result | core/scan_result_processor.py | remove_trust_values | RE4CT10N/avain | 51 | python | @staticmethod
def remove_trust_values(result: dict):
'\n \n '
def remove_in_protocol(protocol: str):
'\n Remove the trust values stored under the given transport protocol.\n '
if (protocol in host):
if ('trust' in host[protocol]):
del host[protocol]['trust']
for (_, portinfos) in host[protocol].items():
for portinfo in portinfos:
if ('trust' in portinfo):
del portinfo['trust']
if ('trust' in result):
del result['trust']
for (_, host) in result.items():
if ('trust' in host):
del host['trust']
if ('os' in host):
for osinfo in host['os']:
if ('trust' in osinfo):
del osinfo['trust']
remove_in_protocol('tcp')
remove_in_protocol('udp') | @staticmethod
def remove_trust_values(result: dict):
'\n \n '
def remove_in_protocol(protocol: str):
'\n Remove the trust values stored under the given transport protocol.\n '
if (protocol in host):
if ('trust' in host[protocol]):
del host[protocol]['trust']
for (_, portinfos) in host[protocol].items():
for portinfo in portinfos:
if ('trust' in portinfo):
del portinfo['trust']
if ('trust' in result):
del result['trust']
for (_, host) in result.items():
if ('trust' in host):
del host['trust']
if ('os' in host):
for osinfo in host['os']:
if ('trust' in osinfo):
del osinfo['trust']
remove_in_protocol('tcp')
remove_in_protocol('udp')<|docstring|>Remove all potential "trust" fields stored in the given scan result<|endoftext|> |
102c56939e7ab1ad4e4bf481dec79a27ae14b822426c7c247134735b551e184c | def _group_item(self, ip: str, module, item: dict, dest: dict, iter_access_func: Callable[([dict], dict)]):
"\n Build a group based on the given item. The group consists of all entries that are\n similar to the given item. The mentioned entries are provided by all modules'\n scan results.\n\n :param item: the base item to group other items with\n :param dest: the dictionary to store the resulting group in\n :param iter_access_func: a function defining how to access compatible entries\n from other modules.\n "
item_group = [item]
for (module_iter, result_iter) in self.results.items():
if (module_iter == module):
continue
if (ip in result_iter):
try:
items_iter = iter_access_func(result_iter[ip])
except KeyError:
continue
if (not isinstance(items_iter, list)):
items_iter = [items_iter]
for item_iter in items_iter:
addded_to_group = False
for cpe in item.get('cpes', []):
if any(((cpe_iter in cpe) for cpe_iter in item_iter.get('cpes', []))):
item_group.append(item_iter)
addded_to_group = True
break
if ((not addded_to_group) and ('name' in item) and ('name' in item_iter)):
(item_str, item_iter_str) = (item['name'], item_iter['name'])
if (('service' in item) and ('service' in item_iter)):
item_str += (' ' + item['service'])
item_iter_str += (' ' + item_iter['service'])
if (item_iter_str in item_str):
item_group.append(item_iter)
if (not ScanResultProcessor._group_in(item_group, dest)):
dest[:] = [other for other in dest if (not all(((o_item in item_group) for o_item in other)))]
dest.append(item_group) | Build a group based on the given item. The group consists of all entries that are
similar to the given item. The mentioned entries are provided by all modules'
scan results.
:param item: the base item to group other items with
:param dest: the dictionary to store the resulting group in
:param iter_access_func: a function defining how to access compatible entries
from other modules. | core/scan_result_processor.py | _group_item | RE4CT10N/avain | 51 | python | def _group_item(self, ip: str, module, item: dict, dest: dict, iter_access_func: Callable[([dict], dict)]):
"\n Build a group based on the given item. The group consists of all entries that are\n similar to the given item. The mentioned entries are provided by all modules'\n scan results.\n\n :param item: the base item to group other items with\n :param dest: the dictionary to store the resulting group in\n :param iter_access_func: a function defining how to access compatible entries\n from other modules.\n "
item_group = [item]
for (module_iter, result_iter) in self.results.items():
if (module_iter == module):
continue
if (ip in result_iter):
try:
items_iter = iter_access_func(result_iter[ip])
except KeyError:
continue
if (not isinstance(items_iter, list)):
items_iter = [items_iter]
for item_iter in items_iter:
addded_to_group = False
for cpe in item.get('cpes', []):
if any(((cpe_iter in cpe) for cpe_iter in item_iter.get('cpes', []))):
item_group.append(item_iter)
addded_to_group = True
break
if ((not addded_to_group) and ('name' in item) and ('name' in item_iter)):
(item_str, item_iter_str) = (item['name'], item_iter['name'])
if (('service' in item) and ('service' in item_iter)):
item_str += (' ' + item['service'])
item_iter_str += (' ' + item_iter['service'])
if (item_iter_str in item_str):
item_group.append(item_iter)
if (not ScanResultProcessor._group_in(item_group, dest)):
dest[:] = [other for other in dest if (not all(((o_item in item_group) for o_item in other)))]
dest.append(item_group) | def _group_item(self, ip: str, module, item: dict, dest: dict, iter_access_func: Callable[([dict], dict)]):
"\n Build a group based on the given item. The group consists of all entries that are\n similar to the given item. The mentioned entries are provided by all modules'\n scan results.\n\n :param item: the base item to group other items with\n :param dest: the dictionary to store the resulting group in\n :param iter_access_func: a function defining how to access compatible entries\n from other modules.\n "
item_group = [item]
for (module_iter, result_iter) in self.results.items():
if (module_iter == module):
continue
if (ip in result_iter):
try:
items_iter = iter_access_func(result_iter[ip])
except KeyError:
continue
if (not isinstance(items_iter, list)):
items_iter = [items_iter]
for item_iter in items_iter:
addded_to_group = False
for cpe in item.get('cpes', []):
if any(((cpe_iter in cpe) for cpe_iter in item_iter.get('cpes', []))):
item_group.append(item_iter)
addded_to_group = True
break
if ((not addded_to_group) and ('name' in item) and ('name' in item_iter)):
(item_str, item_iter_str) = (item['name'], item_iter['name'])
if (('service' in item) and ('service' in item_iter)):
item_str += (' ' + item['service'])
item_iter_str += (' ' + item_iter['service'])
if (item_iter_str in item_str):
item_group.append(item_iter)
if (not ScanResultProcessor._group_in(item_group, dest)):
dest[:] = [other for other in dest if (not all(((o_item in item_group) for o_item in other)))]
dest.append(item_group)<|docstring|>Build a group based on the given item. The group consists of all entries that are
similar to the given item. The mentioned entries are provided by all modules'
scan results.
:param item: the base item to group other items with
:param dest: the dictionary to store the resulting group in
:param iter_access_func: a function defining how to access compatible entries
from other modules.<|endoftext|> |
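A small standalone illustration of the matching rule implemented above: two entries fall into the same group when one of their CPEs is contained in the other, or, failing that, when one name string is contained in the other. The data below is made up.

item      = {'name': 'OpenSSH 7.4p1', 'cpes': ['cpe:/a:openbsd:openssh:7.4p1']}
item_iter = {'name': 'OpenSSH',       'cpes': ['cpe:/a:openbsd:openssh']}

same_group = any(cpe_iter in cpe
                 for cpe in item['cpes']
                 for cpe_iter in item_iter['cpes'])
if not same_group and 'name' in item and 'name' in item_iter:
    same_group = item_iter['name'] in item['name']

print(same_group)  # True -> both entries end up in one group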
84239209e4a87a5b9722f9caed62520217b8c33d1f37aabc1828e62ad9f015ae | @staticmethod
def _get_most_specific_group_entry(group: list):
"\n Retrieve the most specific entry contained in the given group.\n\n :param group: the group of which to find its most specific entry\n :return: the given group's most specific entry as a dict\n "
most_specific_entry = group[0]
for entry in group[1:]:
entry_cpes = entry.get('cpes', [])
for entry_cpe in entry_cpes:
mse_cpes = most_specific_entry.get('cpes', [])
if mse_cpes:
if any((util.neq_in(mse_cpe, entry_cpe) for mse_cpe in mse_cpes)):
most_specific_entry = entry
elif all(((entry_cpe == mse_cpe) for mse_cpe in mse_cpes)):
e_name = entry.get('name', '')
mse_name = most_specific_entry.get('name', '')
if util.neq_in(mse_name, e_name):
most_specific_entry = entry
elif ('name' in most_specific_entry):
(e_name, mse_name) = (entry.get('name', ''), most_specific_entry['name'])
if (mse_name in e_name):
most_specific_entry = entry
else:
most_specific_entry = entry
if ((not entry_cpes) and ('name' in entry)):
(e_name, mse_name) = (entry['name'], most_specific_entry.get('name', ''))
if util.neq_in(mse_name, e_name):
if (not ((mse_name == '') and ('cpes' in most_specific_entry))):
most_specific_entry = entry
return most_specific_entry | Retrieve the most specific entry contained in the given group.
:param group: the group of which to find its most specific entry
:return: the given group's most specific entry as a dict | core/scan_result_processor.py | _get_most_specific_group_entry | RE4CT10N/avain | 51 | python | @staticmethod
def _get_most_specific_group_entry(group: list):
"\n Retrieve the most specific entry contained in the given group.\n\n :param group: the group of which to find its most specific entry\n :return: the given group's most specific entry as a dict\n "
most_specific_entry = group[0]
for entry in group[1:]:
entry_cpes = entry.get('cpes', [])
for entry_cpe in entry_cpes:
mse_cpes = most_specific_entry.get('cpes', [])
if mse_cpes:
if any((util.neq_in(mse_cpe, entry_cpe) for mse_cpe in mse_cpes)):
most_specific_entry = entry
elif all(((entry_cpe == mse_cpe) for mse_cpe in mse_cpes)):
e_name = entry.get('name', '')
mse_name = most_specific_entry.get('name', '')
if util.neq_in(mse_name, e_name):
most_specific_entry = entry
elif ('name' in most_specific_entry):
(e_name, mse_name) = (entry.get('name', ''), most_specific_entry['name'])
if (mse_name in e_name):
most_specific_entry = entry
else:
most_specific_entry = entry
if ((not entry_cpes) and ('name' in entry)):
(e_name, mse_name) = (entry['name'], most_specific_entry.get('name', ''))
if util.neq_in(mse_name, e_name):
if (not ((mse_name == '') and ('cpes' in most_specific_entry))):
most_specific_entry = entry
return most_specific_entry | @staticmethod
def _get_most_specific_group_entry(group: list):
"\n Retrieve the most specific entry contained in the given group.\n\n :param group: the group of which to find its most specific entry\n :return: the given group's most specific entry as a dict\n "
most_specific_entry = group[0]
for entry in group[1:]:
entry_cpes = entry.get('cpes', [])
for entry_cpe in entry_cpes:
mse_cpes = most_specific_entry.get('cpes', [])
if mse_cpes:
if any((util.neq_in(mse_cpe, entry_cpe) for mse_cpe in mse_cpes)):
most_specific_entry = entry
elif all(((entry_cpe == mse_cpe) for mse_cpe in mse_cpes)):
e_name = entry.get('name', '')
mse_name = most_specific_entry.get('name', '')
if util.neq_in(mse_name, e_name):
most_specific_entry = entry
elif ('name' in most_specific_entry):
(e_name, mse_name) = (entry.get('name', ''), most_specific_entry['name'])
if (mse_name in e_name):
most_specific_entry = entry
else:
most_specific_entry = entry
if ((not entry_cpes) and ('name' in entry)):
(e_name, mse_name) = (entry['name'], most_specific_entry.get('name', ''))
if util.neq_in(mse_name, e_name):
if (not ((mse_name == '') and ('cpes' in most_specific_entry))):
most_specific_entry = entry
return most_specific_entry<|docstring|>Retrieve the most specific entry contained in the given group.
:param group: the group of which to find its most specific entry
:return: the given group's most specific entry as a dict<|endoftext|> |
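A usage sketch for the static method above; the import path is an assumption based on the record's path field (it presumes the avain repository root is on the Python path), and the expected output follows from the selection rule described in the docstring.

from core.scan_result_processor import ScanResultProcessor  # import path assumed

group = [
    {'name': 'macOS', 'cpes': ['cpe:/o:apple:mac_os_x']},
    {'name': 'macOS 10.10.4', 'cpes': ['cpe:/o:apple:mac_os_x:10.10.4']},
]
best = ScanResultProcessor._get_most_specific_group_entry(group)
print(best['name'])  # expected: 'macOS 10.10.4' (its CPE extends the first entry's CPE)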
2ab7958e3b4416e154fb7a0c426baf35710c691ad52278965161ceeaf9a20ebb | @staticmethod
def _group_in(group: list, list_groups: list):
'\n Check if there exists a group in the second list parameter\n that contains all items in the given group (first list).\n\n :param group: the group to check whether all its items are already\n in a group contained in list_groups\n :param list_groups: a list of item groups\n :return: True if there is a group in list_groups that contains all\n items in group, False otherwise\n '
for l_group in list_groups:
group_in = True
for item in group:
if (item not in l_group):
group_in = False
break
if group_in:
return True
return False | Check if there exists a group in the second list parameter
that contains all items in the given group (first list).
:param group: the group to check whether all its items are already
in a group contained in list_groups
:param list_groups: a list of item groups
:return: True if there is a group in list_groups that contains all
items in group, False otherwise | core/scan_result_processor.py | _group_in | RE4CT10N/avain | 51 | python | @staticmethod
def _group_in(group: list, list_groups: list):
'\n Check if there exists a group in the second list parameter\n that contains all items in the given group (first list).\n\n :param group: the group to check whether all its items are already\n in a group contained in list_groups\n :param list_groups: a list of item groups\n :return: True if there is a group in list_groups that contains all\n items in group, False otherwise\n '
for l_group in list_groups:
group_in = True
for item in group:
if (item not in l_group):
group_in = False
break
if group_in:
return True
return False | @staticmethod
def _group_in(group: list, list_groups: list):
'\n Check if there exists a group in the second list parameter\n that contains all items in the given group (first list).\n\n :param group: the group to check whether all its items are already\n in a group contained in list_groups\n :param list_groups: a list of item groups\n :return: True if there is a group in list_groups that contains all\n items in group, False otherwise\n '
for l_group in list_groups:
group_in = True
for item in group:
if (item not in l_group):
group_in = False
break
if group_in:
return True
return False<|docstring|>Check if there exists a group in the second list parameter
that contains all items in the given group (first list).
:param group: the group to check whether all its items are already
in a group contained in list_groups
:param list_groups: a list of item groups
:return: True if there is a group in list_groups that contains all
items in group, False otherwise<|endoftext|> |
8618d78d7e636ef999a97b53259129eb59458b0f9cc2b23f057751b458993347 | def _aggregate_group(self, group: list):
'\n Reduce the given group based on the algorithm specified by the\n respective configuration parameter.\n\n :param group: the group to reduce\n '
if (not group):
return {}
if (len(group) == 1):
return group[0]
if (not ('scan_trust_aggr_scheme' in self.config['core'])):
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group)
if (self.config['core']['scan_trust_aggr_scheme'] == 'TRUST_AGGR'):
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group)
if (self.config['core']['scan_trust_aggr_scheme'] == 'TRUST_MAX'):
return ScanResultProcessor._aggregate_group_by_trust_max(group)
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group) | Reduce the given group based on the algorithm specified by the
respective configuration parameter.
:param group: the group to reduce | core/scan_result_processor.py | _aggregate_group | RE4CT10N/avain | 51 | python | def _aggregate_group(self, group: list):
'\n Reduce the given group based on the algorithm specified by the\n respective configuration parameter.\n\n :param group: the group to reduce\n '
if (not group):
return {}
if (len(group) == 1):
return group[0]
if (not ('scan_trust_aggr_scheme' in self.config['core'])):
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group)
if (self.config['core']['scan_trust_aggr_scheme'] == 'TRUST_AGGR'):
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group)
if (self.config['core']['scan_trust_aggr_scheme'] == 'TRUST_MAX'):
return ScanResultProcessor._aggregate_group_by_trust_max(group)
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group) | def _aggregate_group(self, group: list):
'\n Reduce the given group based on the algorithm specified by the\n respective configuration parameter.\n\n :param group: the group to reduce\n '
if (not group):
return {}
if (len(group) == 1):
return group[0]
if (not ('scan_trust_aggr_scheme' in self.config['core'])):
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group)
if (self.config['core']['scan_trust_aggr_scheme'] == 'TRUST_AGGR'):
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group)
if (self.config['core']['scan_trust_aggr_scheme'] == 'TRUST_MAX'):
return ScanResultProcessor._aggregate_group_by_trust_max(group)
return ScanResultProcessor._aggregate_group_by_trust_aggregation(group)<|docstring|>Reduce the given group based on the algorithm specified by the
respective configuration parameter.
:param group: the group to reduce<|endoftext|> |
bebfcc0dc6bfd5ff18e3a4ccb8448af293683c4e8c2e82f7b803f553f5978b3a | @staticmethod
def _aggregate_group_by_trust_max(group: list):
'\n Reduce the given group to the item with the highest trust value.\n\n :param group: the group to reduce\n '
return max(group, key=(lambda member: member['trust'])) | Reduce the given group to the item with the highest trust value.
:param group: the group to reduce | core/scan_result_processor.py | _aggregate_group_by_trust_max | RE4CT10N/avain | 51 | python | @staticmethod
def _aggregate_group_by_trust_max(group: list):
'\n Reduce the given group to the item with the highest trust value.\n\n :param group: the group to reduce\n '
return max(group, key=(lambda member: member['trust'])) | @staticmethod
def _aggregate_group_by_trust_max(group: list):
'\n Reduce the given group to the item with the highest trust value.\n\n :param group: the group to reduce\n '
return max(group, key=(lambda member: member['trust']))<|docstring|>Reduce the given group to the item with the highest trust value.
:param group: the group to reduce<|endoftext|> |
0c0bd2b002a8c668415bfa37b6319a5c3659eaaca9a8c164095c5516b9344a50 | @staticmethod
def _aggregate_group_by_trust_aggregation(group: list):
'\n Reduce the given group to its most specific entry and give it a\n trust value based on all trust values contained in the group.\n\n :param group: the group to reduce\n '
grouping_strength = 0.675
most_specific_entry = copy.deepcopy(ScanResultProcessor._get_most_specific_group_entry(group))
trust_sum = sum([entry['trust'] for entry in group])
aggr_trust = (trust_sum / (len(group) ** grouping_strength))
most_specific_entry['trust'] = aggr_trust
return most_specific_entry | Reduce the given group to its most specific entry and give it a
trust value based on all trust values contained in the group.
:param group: the group to reduce | core/scan_result_processor.py | _aggregate_group_by_trust_aggregation | RE4CT10N/avain | 51 | python | @staticmethod
def _aggregate_group_by_trust_aggregation(group: list):
'\n Reduce the given group to its most specific entry and give it a\n trust value based on all trust values contained in the group.\n\n :param group: the group to reduce\n '
grouping_strength = 0.675
most_specific_entry = copy.deepcopy(ScanResultProcessor._get_most_specific_group_entry(group))
trust_sum = sum([entry['trust'] for entry in group])
aggr_trust = (trust_sum / (len(group) ** grouping_strength))
most_specific_entry['trust'] = aggr_trust
return most_specific_entry | @staticmethod
def _aggregate_group_by_trust_aggregation(group: list):
'\n Reduce the given group to its most specific entry and give it a\n trust value based on all trust values contained in the group.\n\n :param group: the group to reduce\n '
grouping_strength = 0.675
most_specific_entry = copy.deepcopy(ScanResultProcessor._get_most_specific_group_entry(group))
trust_sum = sum([entry['trust'] for entry in group])
aggr_trust = (trust_sum / (len(group) ** grouping_strength))
most_specific_entry['trust'] = aggr_trust
return most_specific_entry<|docstring|>Reduce the given group to its most specific entry and give it a
trust value based on all trust values contained in the group.
:param group: the group to reduce<|endoftext|> |
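A worked example of the aggregation formula above, trust_sum / len(group) ** 0.675, standalone and with made-up trust values; note that several agreeing sources can push the aggregated value above any single member's trust.

grouping_strength = 0.675
trusts = [3.0, 4.0, 5.0]              # trust values of one group
aggr_trust = sum(trusts) / len(trusts) ** grouping_strength
print(round(aggr_trust, 2))           # 5.72 -> exceeds the best single value of 5.0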
2ee892c01589760de760ea4f510ac18d96195bbc4f5baffc42885515ffc8967d | def group_os():
'\n Group the OS entry of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if ('os' not in host):
return
if (not ('os' in groups[ip])):
groups[ip]['os'] = []
if isinstance(host['os'], list):
for item in host['os']:
self._group_item(ip, module, item, groups[ip]['os'], (lambda host: host['os']))
else:
self._group_item(ip, module, host['os'], groups[ip]['os'], (lambda host: host['os'])) | Group the OS entry of the current host (of the current module)
with similar entries from other modules. | core/scan_result_processor.py | group_os | RE4CT10N/avain | 51 | python | def group_os():
'\n Group the OS entry of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if ('os' not in host):
return
if (not ('os' in groups[ip])):
groups[ip]['os'] = []
if isinstance(host['os'], list):
for item in host['os']:
self._group_item(ip, module, item, groups[ip]['os'], (lambda host: host['os']))
else:
self._group_item(ip, module, host['os'], groups[ip]['os'], (lambda host: host['os'])) | def group_os():
'\n Group the OS entry of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if ('os' not in host):
return
if (not ('os' in groups[ip])):
groups[ip]['os'] = []
if isinstance(host['os'], list):
for item in host['os']:
self._group_item(ip, module, item, groups[ip]['os'], (lambda host: host['os']))
else:
self._group_item(ip, module, host['os'], groups[ip]['os'], (lambda host: host['os']))<|docstring|>Group the OS entry of the current host (of the current module)
with similar entries from other modules.<|endoftext|> |
739e4da81c5a9ccc09ccf39409f69669d28e3d63a01c692f3d4820695bfe42d4 | def group_ports(protocol):
'\n Group the port entries of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if (protocol not in host):
return
if (protocol not in groups[ip]):
groups[ip][protocol] = {}
for (portid, port) in host[protocol].items():
if (not (portid in groups[ip][protocol])):
groups[ip][protocol][portid] = []
if isinstance(port, list):
for item in port:
self._group_item(ip, module, item, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
else:
self._group_item(ip, module, port, groups[ip][protocol][portid], (lambda host: host[protocol][portid])) | Group the port entries of the current host (of the current module)
with similar entries from other modules. | core/scan_result_processor.py | group_ports | RE4CT10N/avain | 51 | python | def group_ports(protocol):
'\n Group the port entries of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if (protocol not in host):
return
if (protocol not in groups[ip]):
groups[ip][protocol] = {}
for (portid, port) in host[protocol].items():
if (not (portid in groups[ip][protocol])):
groups[ip][protocol][portid] = []
if isinstance(port, list):
for item in port:
self._group_item(ip, module, item, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
else:
self._group_item(ip, module, port, groups[ip][protocol][portid], (lambda host: host[protocol][portid])) | def group_ports(protocol):
'\n Group the port entries of the current host (of the current module)\n with similar entries from other modules.\n '
nonlocal ip, host, module
if (protocol not in host):
return
if (protocol not in groups[ip]):
groups[ip][protocol] = {}
for (portid, port) in host[protocol].items():
if (not (portid in groups[ip][protocol])):
groups[ip][protocol][portid] = []
if isinstance(port, list):
for item in port:
self._group_item(ip, module, item, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))
else:
self._group_item(ip, module, port, groups[ip][protocol][portid], (lambda host: host[protocol][portid]))<|docstring|>Group the port entries of the current host (of the current module)
with similar entries from other modules.<|endoftext|> |
1c9b71ee8f66913ad95820940d5ff9be05e68662836bdb98b0a5bbdbd7d51df8 | def add_to_ports(protocol: str):
'\n Add trust values to the ports used by the given transport protocol.\n '
if (protocol in host):
for (portid, portitems) in host[protocol].items():
if (not isinstance(portitems, list)):
portitems = [portitems]
for port in portitems:
if ('trust' not in port):
if ('trust' in host[protocol][portid]):
port['trust'] = host[protocol][portid]['trust']
if ('trust' in host[protocol]):
port['trust'] = host[protocol]['trust']
elif ('trust' in host):
port['trust'] = host['trust']
else:
port['trust'] = trust_value | Add trust values to the ports used by the given transport protocol. | core/scan_result_processor.py | add_to_ports | RE4CT10N/avain | 51 | python | def add_to_ports(protocol: str):
'\n \n '
if (protocol in host):
for (portid, portitems) in host[protocol].items():
if (not isinstance(portitems, list)):
portitems = [portitems]
for port in portitems:
if ('trust' not in port):
if ('trust' in host[protocol][portid]):
port['trust'] = host[protocol][portid]['trust']
if ('trust' in host[protocol]):
port['trust'] = host[protocol]['trust']
elif ('trust' in host):
port['trust'] = host['trust']
else:
port['trust'] = trust_value | def add_to_ports(protocol: str):
'\n \n '
if (protocol in host):
for (portid, portitems) in host[protocol].items():
if (not isinstance(portitems, list)):
portitems = [portitems]
for port in portitems:
if ('trust' not in port):
if ('trust' in host[protocol][portid]):
port['trust'] = host[protocol][portid]['trust']
if ('trust' in host[protocol]):
port['trust'] = host[protocol]['trust']
elif ('trust' in host):
port['trust'] = host['trust']
else:
port['trust'] = trust_value<|docstring|>Add trust values to the ports used by the given transport protocol.<|endoftext|> |
c0fb1e09d410cfe47d751b088e32eb829b45dd85a878504ff8c533602c4e2e44 | def remove_in_protocol(protocol: str):
'\n Remove the trust values stored under the given transport protocol.\n '
if (protocol in host):
if ('trust' in host[protocol]):
del host[protocol]['trust']
for (_, portinfos) in host[protocol].items():
for portinfo in portinfos:
if ('trust' in portinfo):
del portinfo['trust'] | Remove the trust values stored under the given transport protocol. | core/scan_result_processor.py | remove_in_protocol | RE4CT10N/avain | 51 | python | def remove_in_protocol(protocol: str):
'\n \n '
if (protocol in host):
if ('trust' in host[protocol]):
del host[protocol]['trust']
for (_, portinfos) in host[protocol].items():
for portinfo in portinfos:
if ('trust' in portinfo):
del portinfo['trust'] | def remove_in_protocol(protocol: str):
'\n \n '
if (protocol in host):
if ('trust' in host[protocol]):
del host[protocol]['trust']
for (_, portinfos) in host[protocol].items():
for portinfo in portinfos:
if ('trust' in portinfo):
del portinfo['trust']<|docstring|>Remove the trust values stored under the given transport protocol.<|endoftext|> |
c9722585649f5a522e9bd0ba8ad2f99f37c32d751f204a2763ae491a4e2fd7b4 | def convert_to_unicode(text):
"Converts `text` to Unicode (if it's not already), assuming utf-8 input."
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text))) | Converts `text` to Unicode (if it's not already), assuming utf-8 input. | pretraining/openwebtext/tokenization.py | convert_to_unicode | maact-org/electra-pytorch | 122 | python | def convert_to_unicode(text):
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text))) | def convert_to_unicode(text):
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))<|docstring|>Converts `text` to Unicode (if it's not already), assuming utf-8 input.<|endoftext|> |
5d16e8339f1a2108558267007284b132a3b2ff49172ecdca48c30001b7a9ff1f | def printable_text(text):
'Returns text encoded in a way suitable for print.'
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text))) | Returns text encoded in a way suitable for print. | pretraining/openwebtext/tokenization.py | printable_text | maact-org/electra-pytorch | 122 | python | def printable_text(text):
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text))) | def printable_text(text):
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))<|docstring|>Returns text encoded in a way suitable for print.<|endoftext|> |
b6db53f87d2a8191878cd1417b9c7d31d5793ed31cb8ff0c1d754bc43604b03b | def load_vocab(vocab_file):
'Loads a vocabulary file into a dictionary.'
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, 'r') as reader:
while True:
token = convert_to_unicode(reader.readline())
if (not token):
break
token = token.strip()
vocab[token] = index
index += 1
return vocab | Loads a vocabulary file into a dictionary. | pretraining/openwebtext/tokenization.py | load_vocab | maact-org/electra-pytorch | 122 | python | def load_vocab(vocab_file):
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, 'r') as reader:
while True:
token = convert_to_unicode(reader.readline())
if (not token):
break
token = token.strip()
vocab[token] = index
index += 1
return vocab | def load_vocab(vocab_file):
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, 'r') as reader:
while True:
token = convert_to_unicode(reader.readline())
if (not token):
break
token = token.strip()
vocab[token] = index
index += 1
return vocab<|docstring|>Loads a vocabulary file into a dictionary.<|endoftext|> |
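A usage sketch for load_vocab above: the file is expected to hold one token per line, and the result is an OrderedDict mapping each token to its line index. It assumes the tokenization module from this record is importable as `tokenization`.

from tokenization import load_vocab  # import path assumed

with open('vocab.txt', 'w') as f:
    f.write('[PAD]\n[UNK]\nun\n##aff\n##able\n')

vocab = load_vocab('vocab.txt')
print(vocab['un'])   # 2
print(len(vocab))    # 5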
4a9dc032a794b0b240fd2dcd12395b9c294f5aad2f51268ee098c1a6140d9728 | def convert_by_vocab(vocab, items):
'Converts a sequence of [tokens|ids] using the vocab.'
output = []
for item in items:
output.append(vocab[item])
return output | Converts a sequence of [tokens|ids] using the vocab. | pretraining/openwebtext/tokenization.py | convert_by_vocab | maact-org/electra-pytorch | 122 | python | def convert_by_vocab(vocab, items):
output = []
for item in items:
output.append(vocab[item])
return output | def convert_by_vocab(vocab, items):
output = []
for item in items:
output.append(vocab[item])
return output<|docstring|>Converts a sequence of [tokens|ids] using the vocab.<|endoftext|> |
ca9c93e0f8264eaba166533fb04fc0ffb2ffab9b2aa3fa3f3f2e56caec09269f | def whitespace_tokenize(text):
'Runs basic whitespace cleaning and splitting on a piece of text.'
text = text.strip()
if (not text):
return []
tokens = text.split()
return tokens | Runs basic whitespace cleaning and splitting on a piece of text. | pretraining/openwebtext/tokenization.py | whitespace_tokenize | maact-org/electra-pytorch | 122 | python | def whitespace_tokenize(text):
text = text.strip()
if (not text):
return []
tokens = text.split()
return tokens | def whitespace_tokenize(text):
text = text.strip()
if (not text):
return []
tokens = text.split()
return tokens<|docstring|>Runs basic whitespace cleaning and splitting on a piece of text.<|endoftext|> |
c2af0c892229fa0e53a39bc6664a3159a9badeb37b2eec7ceaddbabd4d14707a | def _is_whitespace(char):
'Checks whether `chars` is a whitespace character.'
if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')):
return True
cat = unicodedata.category(char)
if (cat == 'Zs'):
return True
return False | Checks whether `chars` is a whitespace character. | pretraining/openwebtext/tokenization.py | _is_whitespace | maact-org/electra-pytorch | 122 | python | def _is_whitespace(char):
if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')):
return True
cat = unicodedata.category(char)
if (cat == 'Zs'):
return True
return False | def _is_whitespace(char):
if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')):
return True
cat = unicodedata.category(char)
if (cat == 'Zs'):
return True
return False<|docstring|>Checks whether `chars` is a whitespace character.<|endoftext|> |
67385be4e39d28d240dbee44ee5340c921fd5c03962386dbef535f84b0adf6df | def _is_control(char):
'Checks whether `chars` is a control character.'
if ((char == '\t') or (char == '\n') or (char == '\r')):
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False | Checks whether `chars` is a control character. | pretraining/openwebtext/tokenization.py | _is_control | maact-org/electra-pytorch | 122 | python | def _is_control(char):
if ((char == '\t') or (char == '\n') or (char == '\r')):
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False | def _is_control(char):
if ((char == '\t') or (char == '\n') or (char == '\r')):
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False<|docstring|>Checks whether `chars` is a control character.<|endoftext|> |
3036677df4a6a5fa3c51c846042ea3dd4acd704720bd66e96a0ec2dff3e7804a | def _is_punctuation(char):
'Checks whether `chars` is a punctuation character.'
cp = ord(char)
if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False | Checks whether `chars` is a punctuation character. | pretraining/openwebtext/tokenization.py | _is_punctuation | maact-org/electra-pytorch | 122 | python | def _is_punctuation(char):
cp = ord(char)
if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False | def _is_punctuation(char):
cp = ord(char)
if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False<|docstring|>Checks whether `chars` is a punctuation character.<|endoftext|> |
ba62f16d20314afeae3ede46ba056ee9eb2c4bd6ef4bfa593c6527add1c57062 | def __init__(self, do_lower_case=True):
'Constructs a BasicTokenizer.\n\n\t\tArgs:\n\t\t\tdo_lower_case: Whether to lower case the input.\n\t\t'
self.do_lower_case = do_lower_case | Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input. | pretraining/openwebtext/tokenization.py | __init__ | maact-org/electra-pytorch | 122 | python | def __init__(self, do_lower_case=True):
'Constructs a BasicTokenizer.\n\n\t\tArgs:\n\t\t\tdo_lower_case: Whether to lower case the input.\n\t\t'
self.do_lower_case = do_lower_case | def __init__(self, do_lower_case=True):
'Constructs a BasicTokenizer.\n\n\t\tArgs:\n\t\t\tdo_lower_case: Whether to lower case the input.\n\t\t'
self.do_lower_case = do_lower_case<|docstring|>Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.<|endoftext|> |
3c7946930bb61b5cc2480959ef1fd494bdc1d3f0739f606533e0f19f895c8340 | def tokenize(self, text):
'Tokenizes a piece of text.'
text = convert_to_unicode(text)
text = self._clean_text(text)
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(' '.join(split_tokens))
return output_tokens | Tokenizes a piece of text. | pretraining/openwebtext/tokenization.py | tokenize | maact-org/electra-pytorch | 122 | python | def tokenize(self, text):
text = convert_to_unicode(text)
text = self._clean_text(text)
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(' '.join(split_tokens))
return output_tokens | def tokenize(self, text):
text = convert_to_unicode(text)
text = self._clean_text(text)
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(' '.join(split_tokens))
return output_tokens<|docstring|>Tokenizes a piece of text.<|endoftext|> |
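A usage sketch for BasicTokenizer above; its constructor (shown in the preceding __init__ record) takes only do_lower_case, and the import path is assumed.

from tokenization import BasicTokenizer  # import path assumed

tokenizer = BasicTokenizer(do_lower_case=True)
print(tokenizer.tokenize('Hello, WORLD! Café'))
# ['hello', ',', 'world', '!', 'cafe'] -- lower-cased, accents stripped,
# punctuation split into separate tokens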
666f3fb2d4b7ff45415e4e3c9d59d5fb364eec7360af935fffec5d85341cd417 | def _run_strip_accents(self, text):
'Strips accents from a piece of text.'
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if (cat == 'Mn'):
continue
output.append(char)
return ''.join(output) | Strips accents from a piece of text. | pretraining/openwebtext/tokenization.py | _run_strip_accents | maact-org/electra-pytorch | 122 | python | def _run_strip_accents(self, text):
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if (cat == 'Mn'):
continue
output.append(char)
return ''.join(output)
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if (cat == 'Mn'):
continue
output.append(char)
return ''.join(output)<|docstring|>Strips accents from a piece of text.<|endoftext|>
6f77e2aee6fad165c2cc79fd43331b3689f7566954f401421fd2b3078ba07d15 | def _run_split_on_punc(self, text):
'Splits punctuation on a piece of text.'
chars = list(text)
i = 0
start_new_word = True
output = []
while (i < len(chars)):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[(- 1)].append(char)
i += 1
return [''.join(x) for x in output] | Splits punctuation on a piece of text. | pretraining/openwebtext/tokenization.py | _run_split_on_punc | maact-org/electra-pytorch | 122 | python | def _run_split_on_punc(self, text):
chars = list(text)
i = 0
start_new_word = True
output = []
while (i < len(chars)):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[(- 1)].append(char)
i += 1
return [''.join(x) for x in output]
chars = list(text)
i = 0
start_new_word = True
output = []
while (i < len(chars)):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[(- 1)].append(char)
i += 1
return [''.join(x) for x in output]<|docstring|>Splits punctuation on a piece of text.<|endoftext|>
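A short illustration of the splitting behaviour above: every punctuation character becomes its own token and also terminates the preceding word. The call goes through the private helper and assumes the module is importable.

from tokenization import BasicTokenizer  # import path assumed

print(BasicTokenizer()._run_split_on_punc("can't-stop"))
# ["can", "'", "t", "-", "stop"]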
b35514042aac23aaf27f35a348b0f9fab0be127d05ecf3a4134759ba41954fc1 | def _tokenize_chinese_chars(self, text):
'Adds whitespace around any CJK character.'
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output) | Adds whitespace around any CJK character. | pretraining/openwebtext/tokenization.py | _tokenize_chinese_chars | maact-org/electra-pytorch | 122 | python | def _tokenize_chinese_chars(self, text):
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output)
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output)<|docstring|>Adds whitespace around any CJK character.<|endoftext|>
70697919acb3c18bd2db785cf0971a446ae6062102038ca37a477129c8bda201 | def _is_chinese_char(self, cp):
'Checks whether CP is the codepoint of a CJK character.'
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False | Checks whether CP is the codepoint of a CJK character. | pretraining/openwebtext/tokenization.py | _is_chinese_char | maact-org/electra-pytorch | 122 | python | def _is_chinese_char(self, cp):
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False | def _is_chinese_char(self, cp):
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False<|docstring|>Checks whether CP is the codepoint of a CJK character.<|endoftext|> |
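A quick check of the CJK codepoint ranges used above, assuming the module is importable.

from tokenization import BasicTokenizer  # import path assumed

bt = BasicTokenizer()
print(bt._is_chinese_char(ord('中')))  # True  -- inside the main CJK Unified Ideographs block
print(bt._is_chinese_char(ord('A')))   # False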
5a6ae5e539033f597bf772d75b92c83ada8c6605fc6627b6ecdbf4cfeba8f187 | def _clean_text(self, text):
'Performs invalid character removal and whitespace cleanup on text.'
output = []
for char in text:
cp = ord(char)
if ((cp == 0) or (cp == 65533) or _is_control(char)):
continue
if _is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output) | Performs invalid character removal and whitespace cleanup on text. | pretraining/openwebtext/tokenization.py | _clean_text | maact-org/electra-pytorch | 122 | python | def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if ((cp == 0) or (cp == 65533) or _is_control(char)):
continue
if _is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output)
output = []
for char in text:
cp = ord(char)
if ((cp == 0) or (cp == 65533) or _is_control(char)):
continue
if _is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output)<|docstring|>Performs invalid character removal and whitespace cleanup on text.<|endoftext|>
589f10da2306b0e3bc593dcb459ea151773675ddfc1455b5d033d9122817ab47 | def tokenize(self, text):
'Tokenizes a piece of text into its word pieces.\n\n\t\tThis uses a greedy longest-match-first algorithm to perform tokenization\n\t\tusing the given vocabulary.\n\n\t\tFor example:\n\t\t\tinput = "unaffable"\n\t\t\toutput = ["un", "##aff", "##able"]\n\n\t\tArgs:\n\t\t\ttext: A single token or whitespace separated tokens. This should have\n\t\t\t\talready been passed through `BasicTokenizer.\n\n\t\tReturns:\n\t\t\tA list of wordpiece tokens.\n\t\t'
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if (len(chars) > self.max_input_chars_per_word):
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while (start < len(chars)):
end = len(chars)
cur_substr = None
while (start < end):
substr = ''.join(chars[start:end])
if (start > 0):
substr = ('##' + substr)
if (substr in self.vocab):
cur_substr = substr
break
end -= 1
if (cur_substr is None):
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens | Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer.
Returns:
A list of wordpiece tokens. | pretraining/openwebtext/tokenization.py | tokenize | maact-org/electra-pytorch | 122 | python | def tokenize(self, text):
'Tokenizes a piece of text into its word pieces.\n\n\t\tThis uses a greedy longest-match-first algorithm to perform tokenization\n\t\tusing the given vocabulary.\n\n\t\tFor example:\n\t\t\tinput = "unaffable"\n\t\t\toutput = ["un", "##aff", "##able"]\n\n\t\tArgs:\n\t\t\ttext: A single token or whitespace separated tokens. This should have\n\t\t\t\talready been passed through `BasicTokenizer.\n\n\t\tReturns:\n\t\t\tA list of wordpiece tokens.\n\t\t'
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if (len(chars) > self.max_input_chars_per_word):
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while (start < len(chars)):
end = len(chars)
cur_substr = None
while (start < end):
substr = ''.join(chars[start:end])
if (start > 0):
substr = ('##' + substr)
if (substr in self.vocab):
cur_substr = substr
break
end -= 1
if (cur_substr is None):
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens | def tokenize(self, text):
'Tokenizes a piece of text into its word pieces.\n\n\t\tThis uses a greedy longest-match-first algorithm to perform tokenization\n\t\tusing the given vocabulary.\n\n\t\tFor example:\n\t\t\tinput = "unaffable"\n\t\t\toutput = ["un", "##aff", "##able"]\n\n\t\tArgs:\n\t\t\ttext: A single token or whitespace separated tokens. This should have\n\t\t\t\talready been passed through `BasicTokenizer.\n\n\t\tReturns:\n\t\t\tA list of wordpiece tokens.\n\t\t'
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if (len(chars) > self.max_input_chars_per_word):
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while (start < len(chars)):
end = len(chars)
cur_substr = None
while (start < end):
substr = .join(chars[start:end])
if (start > 0):
substr = ('##' + substr)
if (substr in self.vocab):
cur_substr = substr
break
end -= 1
if (cur_substr is None):
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens<|docstring|>Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer.
Returns:
A list of wordpiece tokens.<|endoftext|> |
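A usage sketch for the greedy longest-match-first WordPiece tokenization above. The class constructor is not part of this excerpt; the keyword arguments below (vocab, unk_token) are an assumption derived from the attributes the method reads, so the exact signature may differ.

from tokenization import WordpieceTokenizer  # import path assumed

vocab = {'un': 0, '##aff': 1, '##able': 2, '[UNK]': 3}
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')  # signature assumed
print(tokenizer.tokenize('unaffable'))  # ['un', '##aff', '##able']
print(tokenizer.tokenize('xyz'))        # ['[UNK]'] -- no matching pieces in the vocab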
351a9c9150297eac4727e1d69579a3f2773b57fbb5492c8d1ef55f11e55d1987 | def xgboost_cv(max_depth: int, gamma: float, min_child_weight: float, scale_pos_weight: float, n_estimators: int, reg_alpha: float, reg_lambda: float, max_delta_step: float, subsample: float, colsample_bytree: float, learning_rate: float, data: pd.DataFrame, targets: pd.DataFrame, n_jobs: int) -> float:
'XGBoost with 5 times repeated 5 fold cross validation.\n\n Parameters\n ----------\n max_depth: int\n Maximum depth of a tree.\n gamma: float\n Minimum loss reduction required to make a further partition on a leaf node of the tree. The larger gamma is,\n the more conservative the algorithm will be.\n min_child_weight: float\n Minimum sum of instance weight (hessian) needed in a child.\n scale_pos_weight: float\n Balancing of positive and negative weights.\n n_estimators: int\n Number of gradient boosted trees. Equivalent to number of boosting rounds.\n reg_alpha: float\n L1 regularization term on weights.\n reg_lambda: float\n L2 regularization term on weights\n max_delta_step: int\n Maximum delta step we allow each leaf output to be.\n subsample: float [0,1]\n Subsample ratio of the training instances.\n colsample_bytree: float\n Subsample ratio of columns when constructing each tree.\n learning_rate: float\n Boosting learning rate (xgb’s “eta”)\n data: pd.DataFrame\n Features (input data) used to train the model.\n targets: pd.DataFrame\n Labels used for training.\n n_jobs: int\n Number of parallel threads used to run xgboost.\n\n Returns\n -------\n float\n Mean cross-validation score.\n '
random.seed(42)
estimator = XGBRegressor(objective='reg:squarederror', n_estimators=n_estimators, max_depth=max_depth, gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=max_delta_step, subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, n_jobs=n_jobs)
rkf = model_selection.RepeatedKFold(n_splits=5, n_repeats=5, random_state=1234)
cval = model_selection.cross_val_score(estimator, data, targets, cv=rkf, scoring='neg_root_mean_squared_error')
return cval.mean() | XGBoost with 5 times repeated 5 fold cross validation.
Parameters
----------
max_depth: int
Maximum depth of a tree.
gamma: float
Minimum loss reduction required to make a further partition on a leaf node of the tree. The larger gamma is,
the more conservative the algorithm will be.
min_child_weight: float
Minimum sum of instance weight (hessian) needed in a child.
scale_pos_weight: float
Balancing of positive and negative weights.
n_estimators: int
Number of gradient boosted trees. Equivalent to number of boosting rounds.
reg_alpha: float
L1 regularization term on weights.
reg_lambda: float
L2 regularization term on weights
max_delta_step: int
Maximum delta step we allow each leaf output to be.
subsample: float [0,1]
Subsample ratio of the training instances.
colsample_bytree: float
Subsample ratio of columns when constructing each tree.
learning_rate: float
Boosting learning rate (xgb’s “eta”)
data: pd.DataFrame
Features (input data) used to train the model.
targets: pd.DataFrame
Labels used for training.
n_jobs: int
Number of parallel threads used to run xgboost.
Returns
-------
float
Mean cross-validation score. | src/models.py | xgboost_cv | MoritzFeigl/Learning-from-mistakes | 0 | python | def xgboost_cv(max_depth: int, gamma: float, min_child_weight: float, scale_pos_weight: float, n_estimators: int, reg_alpha: float, reg_lambda: float, max_delta_step: float, subsample: float, colsample_bytree: float, learning_rate: float, data: pd.DataFrame, targets: pd.DataFrame, n_jobs: int) -> float:
'XGBoost with 5 times repeated 5 fold cross validation.\n\n Parameters\n ----------\n max_depth: int\n Maximum depth of a tree.\n gamma: float\n Minimum loss reduction required to make a further partition on a leaf node of the tree. The larger gamma is,\n the more conservative the algorithm will be.\n min_child_weight: float\n Minimum sum of instance weight (hessian) needed in a child.\n scale_pos_weight: float\n Balancing of positive and negative weights.\n n_estimators: int\n Number of gradient boosted trees. Equivalent to number of boosting rounds.\n reg_alpha: float\n L1 regularization term on weights.\n reg_lambda: float\n L2 regularization term on weights\n max_delta_step: int\n Maximum delta step we allow each leaf output to be.\n subsample: float [0,1]\n Subsample ratio of the training instances.\n colsample_bytree: float\n Subsample ratio of columns when constructing each tree.\n learning_rate: float\n Boosting learning rate (xgb’s “eta”)\n data: pd.DataFrame\n Features (input data) used to train the model.\n targets: pd.DataFrame\n Labels used for training.\n n_jobs: int\n Number of parallel threads used to run xgboost.\n\n Returns\n -------\n float\n Mean cross-validation score.\n '
random.seed(42)
estimator = XGBRegressor(objective='reg:squarederror', n_estimators=n_estimators, max_depth=max_depth, gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=max_delta_step, subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, n_jobs=n_jobs)
rkf = model_selection.RepeatedKFold(n_splits=5, n_repeats=5, random_state=1234)
cval = model_selection.cross_val_score(estimator, data, targets, cv=rkf, scoring='neg_root_mean_squared_error')
return cval.mean() | def xgboost_cv(max_depth: int, gamma: float, min_child_weight: float, scale_pos_weight: float, n_estimators: int, reg_alpha: float, reg_lambda: float, max_delta_step: float, subsample: float, colsample_bytree: float, learning_rate: float, data: pd.DataFrame, targets: pd.DataFrame, n_jobs: int) -> float:
'XGBoost with 5 times repeated 5 fold cross validation.\n\n Parameters\n ----------\n max_depth: int\n Maximum depth of a tree.\n gamma: float\n Minimum loss reduction required to make a further partition on a leaf node of the tree. The larger gamma is,\n the more conservative the algorithm will be.\n min_child_weight: float\n Minimum sum of instance weight (hessian) needed in a child.\n scale_pos_weight: float\n Balancing of positive and negative weights.\n n_estimators: int\n Number of gradient boosted trees. Equivalent to number of boosting rounds.\n reg_alpha: float\n L1 regularization term on weights.\n reg_lambda: float\n L2 regularization term on weights\n max_delta_step: int\n Maximum delta step we allow each leaf output to be.\n subsample: float [0,1]\n Subsample ratio of the training instances.\n colsample_bytree: float\n Subsample ratio of columns when constructing each tree.\n learning_rate: float\n Boosting learning rate (xgb’s “eta”)\n data: pd.DataFrame\n Features (input data) used to train the model.\n targets: pd.DataFrame\n Labels used for training.\n n_jobs: int\n Number of parallel threads used to run xgboost.\n\n Returns\n -------\n float\n Mean cross-validation score.\n '
random.seed(42)
estimator = XGBRegressor(objective='reg:squarederror', n_estimators=n_estimators, max_depth=max_depth, gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=max_delta_step, subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, n_jobs=n_jobs)
rkf = model_selection.RepeatedKFold(n_splits=5, n_repeats=5, random_state=1234)
cval = model_selection.cross_val_score(estimator, data, targets, cv=rkf, scoring='neg_root_mean_squared_error')
return cval.mean()<|docstring|>XGBoost with 5 times repeated 5 fold cross validation.
Parameters
----------
max_depth: int
Maximum depth of a tree.
gamma: float
Minimum loss reduction required to make a further partition on a leaf node of the tree. The larger gamma is,
the more conservative the algorithm will be.
min_child_weight: float
Minimum sum of instance weight (hessian) needed in a child.
scale_pos_weight: float
Balancing of positive and negative weights.
n_estimators: int
Number of gradient boosted trees. Equivalent to number of boosting rounds.
reg_alpha: float
L1 regularization term on weights.
reg_lambda: float
L2 regularization term on weights
max_delta_step: int
Maximum delta step we allow each leaf output to be.
subsample: float [0,1]
Subsample ratio of the training instances.
colsample_bytree: float
Subsample ratio of columns when constructing each tree.
learning_rate: float
Boosting learning rate (xgb’s “eta”)
data: pd.DataFrame
Features (input data) used to train the model.
targets: pd.DataFrame
Labels used for training.
n_jobs: int
Number of parallel threads used to run xgboost.
Returns
-------
float
Mean cross-validation score.<|endoftext|> |
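The record above gives only the scorer's definition. Below is a minimal usage sketch, assuming xgboost_cv (together with its pandas, xgboost and sklearn.model_selection imports) can be imported from src/models.py of the repository; the feature names, hyperparameter values and synthetic data are illustrative assumptions, not taken from the source.

import numpy as np
import pandas as pd
from src.models import xgboost_cv  # assumed import path, based on the record's path field

rng = np.random.default_rng(42)
X_demo = pd.DataFrame(rng.random((100, 3)), columns=["air_temp", "radiation", "flow"])  # hypothetical features
y_demo = pd.Series(rng.random(100), name="wt_residual")  # hypothetical target

# Score one hyperparameter combination with 5x5 repeated cross-validation;
# the return value is the mean negative RMSE over the 25 folds (closer to 0 is better).
score = xgboost_cv(
    max_depth=6, gamma=0.1, min_child_weight=1.0, scale_pos_weight=1.2,
    n_estimators=200, reg_alpha=5.0, reg_lambda=2.0, max_delta_step=1,
    subsample=0.8, colsample_bytree=0.7, learning_rate=0.1,
    data=X_demo, targets=y_demo, n_jobs=2,
)
print(score)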
9a623a4fe5de9aac86a680df0ad4f00a59327d2e44d7615edd89d43523882970 | def optimize_xgboost(data: pd.DataFrame, targets: pd.DataFrame, init_points: int, n_iter: int, n_jobs: int) -> bayes_opt.bayesian_optimization.BayesianOptimization:
' Bayesian Optimization of XGBoost parameters\n\n Parameters\n ----------\n data: pd.DataFrame\n Features (input data) used to train the model.\n targets: pd.DataFrame\n Labels used for training.\n init_points: int\n Number of randomly chosen points at the beginning of the optimization.\n n_iter: int\n Number of iterations.\n n_jobs: int\n Number of parallel threads used to run xgboost.\n\n Returns\n -------\n bayes_opt.bayesian_optimization.BayesianOptimization\n The optimizer object.\n '
def xgboost_crossval(max_depth, gamma, n_estimators, min_child_weight, scale_pos_weight, reg_alpha, reg_lambda, max_delta_step, subsample, colsample_bytree, learning_rate):
'Wrapper of XGBoost cross validation.'
return xgboost_cv(n_estimators=int(n_estimators), max_depth=int(max_depth), gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=int(max_delta_step), subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, data=data, targets=targets, n_jobs=n_jobs)
random.seed(42)
optimizer = bayes_opt.BayesianOptimization(f=xgboost_crossval, pbounds=dict(n_estimators=(50, 5000), max_depth=(3, 20), gamma=(0.01, 5), min_child_weight=(0, 10), scale_pos_weight=(1.2, 5), reg_alpha=(4.0, 10.0), reg_lambda=(1.0, 10.0), max_delta_step=(0, 5), subsample=(0.5, 1.0), colsample_bytree=(0.3, 1.0), learning_rate=(0.0, 1.0)), random_state=1234, verbose=2)
random.seed(42)
optimizer.maximize(n_iter=n_iter, init_points=init_points, acq='ucb', kappa=5)
print('Maximum Value: {}'.format(optimizer.max['target']))
print('Best Parameters:')
print(optimizer.max['params'])
return optimizer | Bayesian Optimization of XGBoost parameters
Parameters
----------
data: pd.DataFrame
Features (input data) used to train the model.
targets: pd.DataFrame
Labels used for training.
init_points: int
Number of randomly chosen points at the beginning of the optimization.
n_iter: int
Number of iterations.
n_jobs: int
Number of parallel threads used to run xgboost.
Returns
-------
bayes_opt.bayesian_optimization.BayesianOptimization
The optimizer object. | src/models.py | optimize_xgboost | MoritzFeigl/Learning-from-mistakes | 0 | python | def optimize_xgboost(data: pd.DataFrame, targets: pd.DataFrame, init_points: int, n_iter: int, n_jobs: int) -> bayes_opt.bayesian_optimization.BayesianOptimization:
' Bayesian Optimization of XGBoost parameters\n\n Parameters\n ----------\n data: pd.DataFrame\n Features (input data) used to train the model.\n targets: pd.DataFrame\n Labels used for training.\n init_points: int\n Number of randomly chosen points at the beginning of the optimization.\n n_iter: int\n Number of iterations.\n n_jobs: int\n Number of parallel threads used to run xgboost.\n\n Returns\n -------\n bayes_opt.bayesian_optimization.BayesianOptimization\n The optimizer object.\n '
def xgboost_crossval(max_depth, gamma, n_estimators, min_child_weight, scale_pos_weight, reg_alpha, reg_lambda, max_delta_step, subsample, colsample_bytree, learning_rate):
'Wrapper of XGBoost cross validation.'
return xgboost_cv(n_estimators=int(n_estimators), max_depth=int(max_depth), gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=int(max_delta_step), subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, data=data, targets=targets, n_jobs=n_jobs)
random.seed(42)
optimizer = bayes_opt.BayesianOptimization(f=xgboost_crossval, pbounds=dict(n_estimators=(50, 5000), max_depth=(3, 20), gamma=(0.01, 5), min_child_weight=(0, 10), scale_pos_weight=(1.2, 5), reg_alpha=(4.0, 10.0), reg_lambda=(1.0, 10.0), max_delta_step=(0, 5), subsample=(0.5, 1.0), colsample_bytree=(0.3, 1.0), learning_rate=(0.0, 1.0)), random_state=1234, verbose=2)
random.seed(42)
optimizer.maximize(n_iter=n_iter, init_points=init_points, acq='ucb', kappa=5)
print('Maximum Value: {}'.format(optimizer.max['target']))
print('Best Parameters:')
print(optimizer.max['params'])
return optimizer | def optimize_xgboost(data: pd.DataFrame, targets: pd.DataFrame, init_points: int, n_iter: int, n_jobs: int) -> bayes_opt.bayesian_optimization.BayesianOptimization:
' Bayesian Optimization of XGBoost parameters\n\n Parameters\n ----------\n data: pd.DataFrame\n Features (input data) used to train the model.\n targets: pd.DataFrame\n Labels used for training.\n init_points: int\n Number of randomly chosen points at the beginning of the optimization.\n n_iter: int\n Number of iterations.\n n_jobs: int\n Number of parallel threads used to run xgboost.\n\n Returns\n -------\n bayes_opt.bayesian_optimization.BayesianOptimization\n The optimizer object.\n '
def xgboost_crossval(max_depth, gamma, n_estimators, min_child_weight, scale_pos_weight, reg_alpha, reg_lambda, max_delta_step, subsample, colsample_bytree, learning_rate):
'Wrapper of XGBoost cross validation.'
return xgboost_cv(n_estimators=int(n_estimators), max_depth=int(max_depth), gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=int(max_delta_step), subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, data=data, targets=targets, n_jobs=n_jobs)
random.seed(42)
optimizer = bayes_opt.BayesianOptimization(f=xgboost_crossval, pbounds=dict(n_estimators=(50, 5000), max_depth=(3, 20), gamma=(0.01, 5), min_child_weight=(0, 10), scale_pos_weight=(1.2, 5), reg_alpha=(4.0, 10.0), reg_lambda=(1.0, 10.0), max_delta_step=(0, 5), subsample=(0.5, 1.0), colsample_bytree=(0.3, 1.0), learning_rate=(0.0, 1.0)), random_state=1234, verbose=2)
random.seed(42)
optimizer.maximize(n_iter=n_iter, init_points=init_points, acq='ucb', kappa=5)
print('Maximum Value: {}'.format(optimizer.max['target']))
print('Best Parameters:')
print(optimizer.max['params'])
return optimizer<|docstring|>Bayesian Optimization of XGBoost parameters
Parameters
----------
data: pd.DataFrame
Features (input data) used to train the model.
targets: pd.DataFrame
Labels used for training.
init_points: int
Number of randomly chosen points at the beginning of the optimization.
n_iter: int
Number of iterations.
n_jobs: int
Number of parallel threads used to run xgboost.
Returns
-------
bayes_opt.bayesian_optimization.BayesianOptimization
The optimizer object.<|endoftext|> |
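A hedged sketch of how the optimizer above might be driven and its result read back; the import path, synthetic data and iteration counts are assumptions, and the integer casts mirror the ones the nested xgboost_crossval wrapper applies, since bayes_opt proposes every parameter as a float.

import numpy as np
import pandas as pd
from src.models import optimize_xgboost  # assumed import path

rng = np.random.default_rng(0)
X_demo = pd.DataFrame(rng.random((100, 3)), columns=["air_temp", "radiation", "flow"])  # hypothetical features
y_demo = pd.Series(rng.random(100), name="wt_residual")  # hypothetical target

# Short Bayesian search: 5 random starting points, then 20 guided iterations.
optimizer = optimize_xgboost(data=X_demo, targets=y_demo, init_points=5, n_iter=20, n_jobs=2)

# Cast the integer-valued parameters back before handing them to a final XGBRegressor fit.
best = dict(optimizer.max["params"])
for key in ("n_estimators", "max_depth", "max_delta_step"):
    best[key] = int(best[key])
print(best)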
5910dbb38c9796577e48feefd99881745fee08d48c98e719d88bb577fa96f148 | def variance_inflation(self):
'Variance inflation factor for regressors of a linear model\n Computes variance inflation factor for all regressors.\n '
vif = pd.DataFrame({'variables': self.x.columns, 'VIF': [out.variance_inflation_factor(self.x.values, i) for i in range(self.x.shape[1])]})
print(vif) | Variance inflation factor for regressors of a linear model
Computes variance inflation factor for all regressors. | src/models.py | variance_inflation | MoritzFeigl/Learning-from-mistakes | 0 | python | def variance_inflation(self):
'Variance inflation factor for regressors of a linear model\n Computes variance inflation factor for all regressors.\n '
vif = pd.DataFrame({'variables': self.x.columns, 'VIF': [out.variance_inflation_factor(self.x.values, i) for i in range(self.x.shape[1])]})
print(vif) | def variance_inflation(self):
'Variance inflation factor for regressors of a linear model\n Computes variance inflation factor for all regressors.\n '
vif = pd.DataFrame({'variables': self.x.columns, 'VIF': [out.variance_inflation_factor(self.x.values, i) for i in range(self.x.shape[1])]})
print(vif)<|docstring|>Variance inflation factor for regressors of a linear model
Computes variance inflation factor for all regressors.<|endoftext|> |
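The method above works on the class attribute self.x and on what appears to be statsmodels' variance_inflation_factor, referenced through the module alias out. A self-contained equivalent, with made-up column names, might look like this.

import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools import tools

rng = np.random.default_rng(1)
X = pd.DataFrame(rng.normal(size=(50, 3)), columns=["air_temp", "radiation", "flow"])
X = tools.add_constant(X)  # intercept column, matching the const column the class expects

# One VIF value per regressor; values well above roughly 5-10 usually signal multicollinearity.
vif = pd.DataFrame({
    "variables": X.columns,
    "VIF": [variance_inflation_factor(X.values, i) for i in range(X.shape[1])],
})
print(vif)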
2825ee512f457844ec2b6be1d96fe66180344d3c67f3bf61499906d171f1adfe | def center_data(self):
' Data centering\n Centers data to reduce influence of multicollinearity.\n '
self.x = self.x.drop(columns='const')
data_centered = pd.DataFrame(preprocessing.scale(self.x, with_mean='True', with_std='False'))
data_centered.columns = self.x.columns
data_centered.index = self.x.index
self.x = data_centered
self.x = tools.tools.add_constant(self.x)
print('All columns successfully centered!') | Data centering
Centers data to reduce influence of multicollinearity. | src/models.py | center_data | MoritzFeigl/Learning-from-mistakes | 0 | python | def center_data(self):
' Data centering\n Centers data to reduce influence of multicollinearity.\n '
self.x = self.x.drop(columns='const')
data_centered = pd.DataFrame(preprocessing.scale(self.x, with_mean='True', with_std='False'))
data_centered.columns = self.x.columns
data_centered.index = self.x.index
self.x = data_centered
self.x = tools.tools.add_constant(self.x)
print('All columns successfully centered!') | def center_data(self):
' Data centering\n Centers data to reduce influence of multicollinearity.\n '
self.x = self.x.drop(columns='const')
data_centered = pd.DataFrame(preprocessing.scale(self.x, with_mean='True', with_std='False'))
data_centered.columns = self.x.columns
data_centered.index = self.x.index
self.x = data_centered
self.x = tools.tools.add_constant(self.x)
print('All columns successfully centered!')<|docstring|>Data centering
Centers data to reduce influence of multicollinearity.<|endoftext|> |
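One detail worth flagging in the record above: with_mean='True' and with_std='False' are passed as strings rather than booleans. Depending on the scikit-learn release, non-empty strings are either treated as truthy (so the data would be scaled as well as centered) or rejected outright by parameter validation; the sketch below, on hypothetical data, uses explicit booleans to keep the stated intent of centering only.

import numpy as np
import pandas as pd
from sklearn import preprocessing
from statsmodels.tools import tools

rng = np.random.default_rng(2)
X = pd.DataFrame(rng.normal(loc=5.0, scale=2.0, size=(50, 3)), columns=["air_temp", "radiation", "flow"])

# Subtract each column's mean but leave the variance untouched.
X_centered = pd.DataFrame(
    preprocessing.scale(X, with_mean=True, with_std=False),
    columns=X.columns,
    index=X.index,
)
X_centered = tools.add_constant(X_centered)  # put the intercept column back, as the method does
print(X_centered.mean().round(6))  # feature means are now ~0 (the added const column stays 1)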
abc4644a30b7271284b6db71fade71d19d2e153b95b98c6b1c1ce328ba5f1f25 | def fit(self):
' Fit OLS regression model\n Fits a OLS regression model of the form y ~ x + intercept\n '
model = api.OLS(self.y, self.x)
results = model.fit()
print(results.summary())
with open('results/tables/regression_model.csv', 'w') as fh:
fh.write(results.summary().as_csv())
print('Saved model summary in results/tables/regression_model.csv') | Fit OLS regression model
Fits a OLS regression model of the form y ~ x + intercept | src/models.py | fit | MoritzFeigl/Learning-from-mistakes | 0 | python | def fit(self):
' Fit OLS regression model\n Fits a OLS regression model of the form y ~ x + intercept\n '
model = api.OLS(self.y, self.x)
results = model.fit()
print(results.summary())
with open('results/tables/regression_model.csv', 'w') as fh:
fh.write(results.summary().as_csv())
print('Saved model summary in results/tables/regression_model.csv') | def fit(self):
' Fit OLS regression model\n Fits a OLS regression model of the form y ~ x + intercept\n '
model = api.OLS(self.y, self.x)
results = model.fit()
print(results.summary())
with open('results/tables/regression_model.csv', 'w') as fh:
fh.write(results.summary().as_csv())
print('Saved model summary in results/tables/regression_model.csv')<|docstring|>Fit OLS regression model
Fits a OLS regression model of the form y ~ x + intercept<|endoftext|> |
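A minimal reproduction of the fit step outside the class, on synthetic data; statsmodels is imported here as sm rather than through the module's api alias, and the summary is only printed instead of being written to the CSV path hard-coded in the record.

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(3)
X = pd.DataFrame(rng.normal(size=(80, 2)), columns=["air_temp", "radiation"])
X = sm.add_constant(X)                 # intercept term, as the class adds via tools.add_constant
y = pd.Series(rng.normal(size=80), name="wt_residual")

results = sm.OLS(y, X).fit()           # ordinary least squares: y ~ x + intercept
print(results.summary())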
eadde10281031d482273f257c56db1b9bacb3b461bf9a6358de5ed3e74cf139a | def xgboost_crossval(max_depth, gamma, n_estimators, min_child_weight, scale_pos_weight, reg_alpha, reg_lambda, max_delta_step, subsample, colsample_bytree, learning_rate):
'Wrapper of XGBoost cross validation.'
return xgboost_cv(n_estimators=int(n_estimators), max_depth=int(max_depth), gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=int(max_delta_step), subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, data=data, targets=targets, n_jobs=n_jobs) | Wrapper of XGBoost cross validation. | src/models.py | xgboost_crossval | MoritzFeigl/Learning-from-mistakes | 0 | python | def xgboost_crossval(max_depth, gamma, n_estimators, min_child_weight, scale_pos_weight, reg_alpha, reg_lambda, max_delta_step, subsample, colsample_bytree, learning_rate):
return xgboost_cv(n_estimators=int(n_estimators), max_depth=int(max_depth), gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=int(max_delta_step), subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, data=data, targets=targets, n_jobs=n_jobs) | def xgboost_crossval(max_depth, gamma, n_estimators, min_child_weight, scale_pos_weight, reg_alpha, reg_lambda, max_delta_step, subsample, colsample_bytree, learning_rate):
return xgboost_cv(n_estimators=int(n_estimators), max_depth=int(max_depth), gamma=gamma, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight, reg_alpha=reg_alpha, reg_lambda=reg_lambda, max_delta_step=int(max_delta_step), subsample=subsample, colsample_bytree=colsample_bytree, learning_rate=learning_rate, data=data, targets=targets, n_jobs=n_jobs)<|docstring|>Wrapper of XGBoost cross validation.<|endoftext|> |