body_hash (string, 64 chars) | body (string, 23 to 109k chars) | docstring (string, 1 to 57k chars) | path (string, 4 to 198 chars) | name (string, 1 to 115 chars) | repository_name (string, 7 to 111 chars) | repository_stars (float64, 0 to 191k) | lang (string, 1 class) | body_without_docstring (string, 14 to 108k chars) | unified (string, 45 to 133k chars) |
---|---|---|---|---|---|---|---|---|---|
1102641b323d87404d4f1cd6b36517af7ec6ccf63aa5b58eaee86abd60fa0f31 | @pytest.mark.serial
def test_delete_zone_twice(shared_zone_test_context):
'\n Test deleting a zone with deleted status returns 404\n '
client = shared_zone_test_context.ok_vinyldns_client
result_zone = None
try:
zone_name = f'one-time{shared_zone_test_context.partition_id}'
zone = {'name': zone_name, 'email': '[email protected]', 'adminGroupId': shared_zone_test_context.ok_group['id'], 'connection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}, 'transferConnection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}}
result = client.create_zone(zone, status=202)
result_zone = result['zone']
client.wait_until_zone_active(result_zone['id'])
client.delete_zone(result_zone['id'], status=202)
client.wait_until_zone_deleted(result_zone['id'])
client.delete_zone(result_zone['id'], status=404)
result_zone = None
finally:
if result_zone:
client.abandon_zones([result_zone['id']], status=202) | Test deleting a zone with deleted status returns 404 | modules/api/src/test/functional/tests/zones/delete_zone_test.py | test_delete_zone_twice | Jay07GIT/vinyldns | 0 | python | @pytest.mark.serial
def test_delete_zone_twice(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
result_zone = None
try:
zone_name = f'one-time{shared_zone_test_context.partition_id}'
zone = {'name': zone_name, 'email': '[email protected]', 'adminGroupId': shared_zone_test_context.ok_group['id'], 'connection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}, 'transferConnection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}}
result = client.create_zone(zone, status=202)
result_zone = result['zone']
client.wait_until_zone_active(result_zone['id'])
client.delete_zone(result_zone['id'], status=202)
client.wait_until_zone_deleted(result_zone['id'])
client.delete_zone(result_zone['id'], status=404)
result_zone = None
finally:
if result_zone:
client.abandon_zones([result_zone['id']], status=202) | @pytest.mark.serial
def test_delete_zone_twice(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
result_zone = None
try:
zone_name = f'one-time{shared_zone_test_context.partition_id}'
zone = {'name': zone_name, 'email': '[email protected]', 'adminGroupId': shared_zone_test_context.ok_group['id'], 'connection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}, 'transferConnection': {'name': 'vinyldns.', 'keyName': VinylDNSTestContext.dns_key_name, 'key': VinylDNSTestContext.dns_key, 'primaryServer': VinylDNSTestContext.name_server_ip}}
result = client.create_zone(zone, status=202)
result_zone = result['zone']
client.wait_until_zone_active(result_zone['id'])
client.delete_zone(result_zone['id'], status=202)
client.wait_until_zone_deleted(result_zone['id'])
client.delete_zone(result_zone['id'], status=404)
result_zone = None
finally:
if result_zone:
client.abandon_zones([result_zone['id']], status=202)<|docstring|>Test deleting a zone with deleted status returns 404<|endoftext|> |
089a1ad491a8f31f81ced3d08a8ab90b2696d5952fe3f411f0d36dd6268bd2b5 | def test_delete_zone_returns_404_if_zone_not_found(shared_zone_test_context):
'\n Test deleting a zone returns a 404 if the zone was not found\n '
client = shared_zone_test_context.ok_vinyldns_client
client.delete_zone('nothere', status=404) | Test deleting a zone returns a 404 if the zone was not found | modules/api/src/test/functional/tests/zones/delete_zone_test.py | test_delete_zone_returns_404_if_zone_not_found | Jay07GIT/vinyldns | 0 | python | def test_delete_zone_returns_404_if_zone_not_found(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
client.delete_zone('nothere', status=404) | def test_delete_zone_returns_404_if_zone_not_found(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
client.delete_zone('nothere', status=404)<|docstring|>Test deleting a zone returns a 404 if the zone was not found<|endoftext|> |
7d0125b2273da0ae00a5f869cf10ca40ebb75d675e3dfd671e1e85e0fbf1b43f | def test_delete_zone_no_authorization(shared_zone_test_context):
'\n Test deleting a zone without authorization\n '
client = shared_zone_test_context.ok_vinyldns_client
client.delete_zone('1234', sign_request=False, status=401) | Test deleting a zone without authorization | modules/api/src/test/functional/tests/zones/delete_zone_test.py | test_delete_zone_no_authorization | Jay07GIT/vinyldns | 0 | python | def test_delete_zone_no_authorization(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
client.delete_zone('1234', sign_request=False, status=401) | def test_delete_zone_no_authorization(shared_zone_test_context):
'\n \n '
client = shared_zone_test_context.ok_vinyldns_client
client.delete_zone('1234', sign_request=False, status=401)<|docstring|>Test deleting a zone without authorization<|endoftext|> |
8d20b1e7de817628462edf5e4e96faa3deb6919c0c6666e3c03b86ff9ac41b3b | def __init__(self, service: str, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
error = f'There was an issue with the {service} API.'
if reason:
error += f' Reason: {reason}.'
super().__init__(error)
self.error = error | Parameters
----------
reason: str
The reason for the Error. Defaults to None. | hypixelio/exceptions/exceptions.py | __init__ | GrandMoff100/HypixelIO | 0 | python | def __init__(self, service: str, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
error = f'There was an issue with the {service} API.'
if reason:
error += f' Reason: {reason}.'
super().__init__(error)
self.error = error | def __init__(self, service: str, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
error = f'There was an issue with the {service} API.'
if reason:
error += f' Reason: {reason}.'
super().__init__(error)
self.error = error<|docstring|>Parameters
----------
reason: str
The reason for the Error. Defaults to None.<|endoftext|> |
de0b05f8dc6485475519625c985d346de577cf09002a3f2073c722685c1b229e | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Hypixel', reason) | Parameters
----------
reason: str
The reason for the Error. Defaults to None. | hypixelio/exceptions/exceptions.py | __init__ | GrandMoff100/HypixelIO | 0 | python | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Hypixel', reason) | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Hypixel', reason)<|docstring|>Parameters
----------
reason: str
The reason for the Error. Defaults to None.<|endoftext|> |
a06c35f2692d590c4480d0a0968ea087d685c6f4d2ced9d514b1f67c02ffe7c9 | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Crafatar', reason) | Parameters
----------
reason: str
The reason for the Error. Defaults to None. | hypixelio/exceptions/exceptions.py | __init__ | GrandMoff100/HypixelIO | 0 | python | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Crafatar', reason) | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Crafatar', reason)<|docstring|>Parameters
----------
reason: str
The reason for the Error. Defaults to None.<|endoftext|> |
71973f2fc340d7befa86b536e5469e41195808d87f0394f768651a173ee528ca | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Mojang', reason) | Parameters
----------
reason: str
The reason for the Error. Defaults to None. | hypixelio/exceptions/exceptions.py | __init__ | GrandMoff100/HypixelIO | 0 | python | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Mojang', reason) | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
super().__init__('Mojang', reason)<|docstring|>Parameters
----------
reason: str
The reason for the Error. Defaults to None.<|endoftext|> |
1500a69248777ef192cd88a6d173f2377b9c73936e839881949a923d70895f59 | def __init__(self, retry_after: datetime) -> None:
'\n Parameters\n ----------\n retry_after: datetime\n The time when the API will be available again for fetching.\n '
error = f"The rate-limit for the Hypixel API was hit. Try again after{retry_after.strftime('%Y-%m-%d %H:%M:%S')}."
super().__init__(error)
self.error = error | Parameters
----------
retry_after: datetime
The time when the API will be available again for fetching. | hypixelio/exceptions/exceptions.py | __init__ | GrandMoff100/HypixelIO | 0 | python | def __init__(self, retry_after: datetime) -> None:
'\n Parameters\n ----------\n retry_after: datetime\n The time when the API will be available again for fetching.\n '
error = f"The rate-limit for the Hypixel API was hit. Try again after{retry_after.strftime('%Y-%m-%d %H:%M:%S')}."
super().__init__(error)
self.error = error | def __init__(self, retry_after: datetime) -> None:
'\n Parameters\n ----------\n retry_after: datetime\n The time when the API will be available again for fetching.\n '
error = f"The rate-limit for the Hypixel API was hit. Try again after{retry_after.strftime('%Y-%m-%d %H:%M:%S')}."
super().__init__(error)
self.error = error<|docstring|>Parameters
----------
retry_after: datetime
The time when the API will be available again for fetching.<|endoftext|> |
f4e2115b021b832445cd1e6d96542669ebca6fcee175453598925b01ecefb37f | def __init__(self, reason: t.Optional[str]=None, user: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the error.\n user: t.Optional[str]\n The user not found when searched for.\n '
error = 'Player not found.'
if reason:
error += f' {reason}.'
super().__init__(error)
self.error = error
self.user = user | Parameters
----------
reason: str
The reason for the error.
user: t.Optional[str]
The user not found when searched for. | hypixelio/exceptions/exceptions.py | __init__ | GrandMoff100/HypixelIO | 0 | python | def __init__(self, reason: t.Optional[str]=None, user: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the error.\n user: t.Optional[str]\n The user not found when searched for.\n '
error = 'Player not found.'
if reason:
error += f' {reason}.'
super().__init__(error)
self.error = error
self.user = user | def __init__(self, reason: t.Optional[str]=None, user: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the error.\n user: t.Optional[str]\n The user not found when searched for.\n '
error = 'Player not found.'
if reason:
error += f' {reason}.'
super().__init__(error)
self.error = error
self.user = user<|docstring|>Parameters
----------
reason: str
The reason for the error.
user: t.Optional[str]
The user not found when searched for.<|endoftext|> |
3964cea8338e8d90048f8bd7df8bea356b0380ba529696ea5a0085eefcbcb5b0 | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
error = 'Guild not found.'
if reason:
error += f' {reason}.'
super().__init__(error)
self.error = error | Parameters
----------
reason: str
The reason for the Error. Defaults to None. | hypixelio/exceptions/exceptions.py | __init__ | GrandMoff100/HypixelIO | 0 | python | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
error = 'Guild not found.'
if reason:
error += f' {reason}.'
super().__init__(error)
self.error = error | def __init__(self, reason: t.Optional[str]=None) -> None:
'\n Parameters\n ----------\n reason: str\n The reason for the Error. Defaults to None.\n '
error = 'Guild not found.'
if reason:
error += f' {reason}.'
super().__init__(error)
self.error = error<|docstring|>Parameters
----------
reason: str
The reason for the Error. Defaults to None.<|endoftext|> |
d06d16036854a3bda9d4b38a0162a04fc77cea7b559c5d1b92f33716d0448c51 | def __init__(self):
'\n Init method.\n '
super().__init__() | Init method. | HybridNet/submodel.py | __init__ | GANWANSHUI/HybridNet | 0 | python | def __init__(self):
'\n \n '
super().__init__() | def __init__(self):
'\n \n '
super().__init__()<|docstring|>Init method.<|endoftext|> |
63e2621e539c4f7081a967d70552be2c044ba1d6cc91d5e0d4084da8018a7a46 | def forward(self, input):
'\n Forward pass of the function.\n '
return (input * torch.tanh(F.softplus(input))) | Forward pass of the function. | HybridNet/submodel.py | forward | GANWANSHUI/HybridNet | 0 | python | def forward(self, input):
'\n \n '
return (input * torch.tanh(F.softplus(input))) | def forward(self, input):
'\n \n '
return (input * torch.tanh(F.softplus(input)))<|docstring|>Forward pass of the function.<|endoftext|> |
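The two records above (the `__init__` and `forward` methods from HybridNet/submodel.py) define the Mish-style activation `x * tanh(softplus(x))`. A minimal self-contained sketch of how those methods combine into a usable module; the class name `Mish` is an assumption, since the dump only shows the method bodies:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Mish(nn.Module):
    """Activation assembled from the __init__/forward records above (name assumed)."""

    def __init__(self):
        # Init method.
        super().__init__()

    def forward(self, input):
        # Forward pass: x * tanh(softplus(x)).
        return input * torch.tanh(F.softplus(input))

x = torch.randn(4)
print(Mish()(x))  # element-wise activated tensor, same shape as x
```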
74bea9d4c0fe8616e0d76b9f54858f5f802b489856383f056798ad6af7d18d35 | def largest_indices(ary, n):
'Returns the n largest indices from a numpy array.'
flat = ary.flatten()
indices = np.argpartition(flat, (- n))[(- n):]
indices = indices[np.argsort((- flat[indices]))]
return np.unravel_index(indices, ary.shape) | Returns the n largest indices from a numpy array. | VPR_Techniques/CoHOG_Python/CoHOG.py | largest_indices | oravus/VPR-Bench | 30 | python | def largest_indices(ary, n):
flat = ary.flatten()
indices = np.argpartition(flat, (- n))[(- n):]
indices = indices[np.argsort((- flat[indices]))]
return np.unravel_index(indices, ary.shape) | def largest_indices(ary, n):
flat = ary.flatten()
indices = np.argpartition(flat, (- n))[(- n):]
indices = indices[np.argsort((- flat[indices]))]
return np.unravel_index(indices, ary.shape)<|docstring|>Returns the n largest indices from a numpy array.<|endoftext|> |
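A quick usage check of `largest_indices` from the record above, assuming the function is in scope; the sample array is illustrative only:

```python
import numpy as np

ary = np.array([[3, 9, 1],
                [7, 2, 8]])
# Indices of the 3 largest values, ordered from largest to smallest (9, 8, 7).
rows, cols = largest_indices(ary, 3)
print(rows, cols)       # [0 1 1] [1 2 0]
print(ary[rows, cols])  # [9 8 7]
```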
353c9ee2915c34cea6a446c28f8c9be428980e7f46b7a4aba61f227862c09af3 | def compute_map_features(ref_map):
'INPUT: reference list of images to be matched.'
'OUTPUT: Feature descriptors of all reference images to be matched.'
ref_desc = []
for ref in range(len(ref_map)):
img_1 = cv2.cvtColor(ref_map[ref], cv2.COLOR_BGR2GRAY)
if (img_1 is not None):
img_1 = cv2.resize(img_1, (magic_height, magic_width))
(height, width, angle_unit) = initialize(img_1, cell_size, bin_size)
vector_1 = extract()
vector_1 = np.asfortranarray(vector_1.transpose(), dtype=np.float32)
ref_desc.append(vector_1)
print('Reference images descriptors computed!')
return ref_desc | INPUT: reference list of images to be matched. | VPR_Techniques/CoHOG_Python/CoHOG.py | compute_map_features | oravus/VPR-Bench | 30 | python | def compute_map_features(ref_map):
'OUTPUT: Feature descriptors of all reference images to be matched.'
ref_desc = []
for ref in range(len(ref_map)):
img_1 = cv2.cvtColor(ref_map[ref], cv2.COLOR_BGR2GRAY)
if (img_1 is not None):
img_1 = cv2.resize(img_1, (magic_height, magic_width))
(height, width, angle_unit) = initialize(img_1, cell_size, bin_size)
vector_1 = extract()
vector_1 = np.asfortranarray(vector_1.transpose(), dtype=np.float32)
ref_desc.append(vector_1)
print('Reference images descriptors computed!')
return ref_desc | def compute_map_features(ref_map):
'OUTPUT: Feature descriptors of all reference images to be matched.'
ref_desc = []
for ref in range(len(ref_map)):
img_1 = cv2.cvtColor(ref_map[ref], cv2.COLOR_BGR2GRAY)
if (img_1 is not None):
img_1 = cv2.resize(img_1, (magic_height, magic_width))
(height, width, angle_unit) = initialize(img_1, cell_size, bin_size)
vector_1 = extract()
vector_1 = np.asfortranarray(vector_1.transpose(), dtype=np.float32)
ref_desc.append(vector_1)
print('Reference images descriptors computed!')
return ref_desc<|docstring|>INPUT: reference list of images to be matched.<|endoftext|> |
b63145126a9bf35e065bafd1c05fe6192c24a4cc308ae943958963a28472af4e | def perform_VPR(query_info, ref_map_features):
"INPUT: Query desc and reference list of images' features to be matched."
'OUTPUT: Matching Score and Best Matched Image.'
vector_2 = query_info[0]
regional_goodness = query_info[1]
confusion_vector = np.zeros(len(ref_map_features), dtype=np.float32)
ref_desc = ref_map_features
for ref in range(len(ref_map_features)):
score = conv_match_dotproduct(vector_2.astype('float64'), ref_desc[ref].astype('float64'), regional_goodness, total_no_of_regions)
confusion_vector[ref] = score
return (np.amax(confusion_vector), np.argmax(confusion_vector), confusion_vector) | INPUT: Query desc and reference list of images' features to be matched. | VPR_Techniques/CoHOG_Python/CoHOG.py | perform_VPR | oravus/VPR-Bench | 30 | python | def perform_VPR(query_info, ref_map_features):
'OUTPUT: Matching Score and Best Matched Image.'
vector_2 = query_info[0]
regional_goodness = query_info[1]
confusion_vector = np.zeros(len(ref_map_features), dtype=np.float32)
ref_desc = ref_map_features
for ref in range(len(ref_map_features)):
score = conv_match_dotproduct(vector_2.astype('float64'), ref_desc[ref].astype('float64'), regional_goodness, total_no_of_regions)
confusion_vector[ref] = score
return (np.amax(confusion_vector), np.argmax(confusion_vector), confusion_vector) | def perform_VPR(query_info, ref_map_features):
'OUTPUT: Matching Score and Best Matched Image.'
vector_2 = query_info[0]
regional_goodness = query_info[1]
confusion_vector = np.zeros(len(ref_map_features), dtype=np.float32)
ref_desc = ref_map_features
for ref in range(len(ref_map_features)):
score = conv_match_dotproduct(vector_2.astype('float64'), ref_desc[ref].astype('float64'), regional_goodness, total_no_of_regions)
confusion_vector[ref] = score
return (np.amax(confusion_vector), np.argmax(confusion_vector), confusion_vector)<|docstring|>INPUT: Query desc and reference list of images' features to be matched.<|endoftext|> |
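The `perform_VPR` record above depends on module-level globals (`conv_match_dotproduct`, `total_no_of_regions`) that are not part of this dump. A standalone sketch of the same score-and-argmax matching pattern, with a plain normalized dot product standing in for `conv_match_dotproduct` (an assumption, not the actual CoHOG scoring function):

```python
import numpy as np

def match_query_to_refs(query_desc, ref_descs):
    # Score the query descriptor against every reference descriptor and
    # return (best_score, best_index, all_scores), mirroring perform_VPR.
    scores = np.zeros(len(ref_descs), dtype=np.float32)
    for i, ref in enumerate(ref_descs):
        num = float(np.dot(query_desc.ravel(), ref.ravel()))
        den = (np.linalg.norm(query_desc) * np.linalg.norm(ref)) + 1e-12
        scores[i] = num / den  # stand-in for conv_match_dotproduct
    return scores.max(), int(scores.argmax()), scores

rng = np.random.default_rng(0)
refs = [rng.random(128).astype(np.float32) for _ in range(5)]
query = refs[2] + 0.05 * rng.random(128).astype(np.float32)
print(match_query_to_refs(query, refs))  # best index should be 2
```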
20a622012c7dcbf107ce2bf93bc7c4815fa31b92af499ae8fd7dc1b2a3b14043 | def check_inputs(self):
' Ensure all layers exist\n '
check_vals = {}
good = True
for lyr in [self.pop_layer, self.urban_layer, self.urban_hd_layer]:
check_vals[lyr] = os.path.exists(lyr)
if (not check_vals[lyr]):
good = False
self.check_vals = check_vals
return good | Ensure all layers exist | src/urban_helper.py | check_inputs | eigotateishi/GOST_Urban | 3 | python | def check_inputs(self):
' \n '
check_vals = {}
good = True
for lyr in [self.pop_layer, self.urban_layer, self.urban_hd_layer]:
check_vals[lyr] = os.path.exists(lyr)
if (not check_vals[lyr]):
good = False
self.check_vals = check_vals
return good | def check_inputs(self):
' \n '
check_vals = {}
good = True
for lyr in [self.pop_layer, self.urban_layer, self.urban_hd_layer]:
check_vals[lyr] = os.path.exists(lyr)
if (not check_vals[lyr]):
good = False
self.check_vals = check_vals
return good<|docstring|>Ensure all layers exist<|endoftext|> |
2fcf653d9492809400f651227f0d857163e020be3f1f0d94cba2e650129039f9 | def __init__(self, iso3, output_folder, country_bounds, pop_files, final_folder='', ghspop_suffix=''):
" Create object for managing input data for summarizing urban extents\n \n INPUT\n :param: iso3 - string describing iso3 code\n :param: output_folder - string path to folder to hold results\n :param: country_bounds - geopandas dataframe of admin0 boundary\n \n \n NAMING CONVENTION\n To save this renaming step on my side, which can also induce mistakes, would be possible for you Ben to rename the files in your code directly? This would be also helpful for all other countries we have to do, and for the 1km*1km rasters.\n My conventions are pretty simple. All rasters starts with the three lettres of the country and then _ as you do, and then 3 lettres for the variable, and possibly two figures for the year. So for instance for Tanzania, this is:\n tza_ele tza_slo tza_wat for elevation, slope and water\n tza_gpo tza_gbu for GHS population and built-up\n tza_upo15 and tza_upo18 for WorldPop population unconstrained \n tza_cpo15 and tza_cpo18 for WorldPop population constrained.\n Then for 1km*1km raster, names are the same except that the three lettres of the country's name are followed by 1k, ie tza1k_slo, tza1k_ele and so on.\n "
self.iso3 = iso3
self.out_folder = output_folder
self.suffix = ghspop_suffix
if (final_folder == ''):
self.final_folder = os.path.join(self.out_folder, 'FINAL_STANDARD')
else:
self.final_folder = os.path.join(self.out_folder, final_folder)
if (not os.path.exists(self.out_folder)):
os.makedirs(self.out_folder)
if (not os.path.exists(self.final_folder)):
os.makedirs(self.final_folder)
self.dem_file = os.path.join(output_folder, ('%s_ele.tif' % iso3.lower()))
self.slope_file = os.path.join(output_folder, ('%s_slo.tif' % iso3.lower()))
self.lc_file = os.path.join(output_folder, ('%s_lc.tif' % iso3.lower()))
self.lc_file_h20 = os.path.join(output_folder, ('%s_wat_lc.tif' % iso3.lower()))
self.ghsl_h20 = os.path.join(output_folder, ('%s_wat.tif' % iso3.lower()))
self.ghspop_file = os.path.join(output_folder, ('%s_gpo.tif' % iso3.lower()))
self.ghspop1k_file = os.path.join(output_folder, ('%s1k_gpo.tif' % iso3.lower()))
self.ghsbuilt_file = os.path.join(output_folder, ('%s_gbu.tif' % iso3.lower()))
self.ghssmod_file = os.path.join(output_folder, ('%s_gsmod.tif' % iso3.lower()))
self.admin_file = os.path.join(output_folder, ('%s_adm.tif' % iso3.lower()))
self.admin_shp = os.path.join(self.final_folder, ('%s_adm.shp' % iso3.lower()))
self.pop_files = []
for fileDef in pop_files:
out_pop_file = os.path.join(output_folder, fileDef[1])
self.pop_files.append(out_pop_file)
if (not os.path.exists(out_pop_file)):
shutil.copy(fileDef[0], out_pop_file)
if (ghspop_suffix == '1k'):
self.pop_files.append(self.ghspop1k_file)
else:
self.pop_files.append(self.ghspop_file)
shutil.copy(self.ghspop_file, os.path.join(self.final_folder, os.path.basename(self.ghspop_file)))
self.inD = country_bounds
if (not os.path.exists(self.admin_shp)):
self.inD.to_file(self.admin_shp) | Create object for managing input data for summarizing urban extents
INPUT
:param: iso3 - string describing iso3 code
:param: output_folder - string path to folder to hold results
:param: country_bounds - geopandas dataframe of admin0 boundary
NAMING CONVENTION
To save this renaming step on my side, which can also induce mistakes, would be possible for you Ben to rename the files in your code directly? This would be also helpful for all other countries we have to do, and for the 1km*1km rasters.
My conventions are pretty simple. All rasters starts with the three lettres of the country and then _ as you do, and then 3 lettres for the variable, and possibly two figures for the year. So for instance for Tanzania, this is:
tza_ele tza_slo tza_wat for elevation, slope and water
tza_gpo tza_gbu for GHS population and built-up
tza_upo15 and tza_upo18 for WorldPop population unconstrained
tza_cpo15 and tza_cpo18 for WorldPop population constrained.
Then for 1km*1km raster, names are the same except that the three lettres of the country's name are followed by 1k, ie tza1k_slo, tza1k_ele and so on. | src/urban_helper.py | __init__ | eigotateishi/GOST_Urban | 3 | python | def __init__(self, iso3, output_folder, country_bounds, pop_files, final_folder=, ghspop_suffix=):
" Create object for managing input data for summarizing urban extents\n \n INPUT\n :param: iso3 - string describing iso3 code\n :param: output_folder - string path to folder to hold results\n :param: country_bounds - geopandas dataframe of admin0 boundary\n \n \n NAMING CONVENTION\n To save this renaming step on my side, which can also induce mistakes, would be possible for you Ben to rename the files in your code directly? This would be also helpful for all other countries we have to do, and for the 1km*1km rasters.\n My conventions are pretty simple. All rasters starts with the three lettres of the country and then _ as you do, and then 3 lettres for the variable, and possibly two figures for the year. So for instance for Tanzania, this is:\n tza_ele tza_slo tza_wat for elevation, slope and water\n tza_gpo tza_gbu for GHS population and built-up\n tza_upo15 and tza_upo18 for WorldPop population unconstrained \n tza_cpo15 and tza_cpo18 for WorldPop population constrained.\n Then for 1km*1km raster, names are the same except that the three lettres of the country's name are followed by 1k, ie tza1k_slo, tza1k_ele and so on.\n "
self.iso3 = iso3
self.out_folder = output_folder
self.suffix = ghspop_suffix
if (final_folder == ''):
self.final_folder = os.path.join(self.out_folder, 'FINAL_STANDARD')
else:
self.final_folder = os.path.join(self.out_folder, final_folder)
if (not os.path.exists(self.out_folder)):
os.makedirs(self.out_folder)
if (not os.path.exists(self.final_folder)):
os.makedirs(self.final_folder)
self.dem_file = os.path.join(output_folder, ('%s_ele.tif' % iso3.lower()))
self.slope_file = os.path.join(output_folder, ('%s_slo.tif' % iso3.lower()))
self.lc_file = os.path.join(output_folder, ('%s_lc.tif' % iso3.lower()))
self.lc_file_h20 = os.path.join(output_folder, ('%s_wat_lc.tif' % iso3.lower()))
self.ghsl_h20 = os.path.join(output_folder, ('%s_wat.tif' % iso3.lower()))
self.ghspop_file = os.path.join(output_folder, ('%s_gpo.tif' % iso3.lower()))
self.ghspop1k_file = os.path.join(output_folder, ('%s1k_gpo.tif' % iso3.lower()))
self.ghsbuilt_file = os.path.join(output_folder, ('%s_gbu.tif' % iso3.lower()))
self.ghssmod_file = os.path.join(output_folder, ('%s_gsmod.tif' % iso3.lower()))
self.admin_file = os.path.join(output_folder, ('%s_adm.tif' % iso3.lower()))
self.admin_shp = os.path.join(self.final_folder, ('%s_adm.shp' % iso3.lower()))
self.pop_files = []
for fileDef in pop_files:
out_pop_file = os.path.join(output_folder, fileDef[1])
self.pop_files.append(out_pop_file)
if (not os.path.exists(out_pop_file)):
shutil.copy(fileDef[0], out_pop_file)
if (ghspop_suffix == '1k'):
self.pop_files.append(self.ghspop1k_file)
else:
self.pop_files.append(self.ghspop_file)
shutil.copy(self.ghspop_file, os.path.join(self.final_folder, os.path.basename(self.ghspop_file)))
self.inD = country_bounds
if (not os.path.exists(self.admin_shp)):
self.inD.to_file(self.admin_shp) | def __init__(self, iso3, output_folder, country_bounds, pop_files, final_folder='', ghspop_suffix=''):
" Create object for managing input data for summarizing urban extents\n \n INPUT\n :param: iso3 - string describing iso3 code\n :param: output_folder - string path to folder to hold results\n :param: country_bounds - geopandas dataframe of admin0 boundary\n \n \n NAMING CONVENTION\n To save this renaming step on my side, which can also induce mistakes, would be possible for you Ben to rename the files in your code directly? This would be also helpful for all other countries we have to do, and for the 1km*1km rasters.\n My conventions are pretty simple. All rasters starts with the three lettres of the country and then _ as you do, and then 3 lettres for the variable, and possibly two figures for the year. So for instance for Tanzania, this is:\n tza_ele tza_slo tza_wat for elevation, slope and water\n tza_gpo tza_gbu for GHS population and built-up\n tza_upo15 and tza_upo18 for WorldPop population unconstrained \n tza_cpo15 and tza_cpo18 for WorldPop population constrained.\n Then for 1km*1km raster, names are the same except that the three lettres of the country's name are followed by 1k, ie tza1k_slo, tza1k_ele and so on.\n "
self.iso3 = iso3
self.out_folder = output_folder
self.suffix = ghspop_suffix
if (final_folder == ''):
self.final_folder = os.path.join(self.out_folder, 'FINAL_STANDARD')
else:
self.final_folder = os.path.join(self.out_folder, final_folder)
if (not os.path.exists(self.out_folder)):
os.makedirs(self.out_folder)
if (not os.path.exists(self.final_folder)):
os.makedirs(self.final_folder)
self.dem_file = os.path.join(output_folder, ('%s_ele.tif' % iso3.lower()))
self.slope_file = os.path.join(output_folder, ('%s_slo.tif' % iso3.lower()))
self.lc_file = os.path.join(output_folder, ('%s_lc.tif' % iso3.lower()))
self.lc_file_h20 = os.path.join(output_folder, ('%s_wat_lc.tif' % iso3.lower()))
self.ghsl_h20 = os.path.join(output_folder, ('%s_wat.tif' % iso3.lower()))
self.ghspop_file = os.path.join(output_folder, ('%s_gpo.tif' % iso3.lower()))
self.ghspop1k_file = os.path.join(output_folder, ('%s1k_gpo.tif' % iso3.lower()))
self.ghsbuilt_file = os.path.join(output_folder, ('%s_gbu.tif' % iso3.lower()))
self.ghssmod_file = os.path.join(output_folder, ('%s_gsmod.tif' % iso3.lower()))
self.admin_file = os.path.join(output_folder, ('%s_adm.tif' % iso3.lower()))
self.admin_shp = os.path.join(self.final_folder, ('%s_adm.shp' % iso3.lower()))
self.pop_files = []
for fileDef in pop_files:
out_pop_file = os.path.join(output_folder, fileDef[1])
self.pop_files.append(out_pop_file)
if (not os.path.exists(out_pop_file)):
shutil.copy(fileDef[0], out_pop_file)
if (ghspop_suffix == '1k'):
self.pop_files.append(self.ghspop1k_file)
else:
self.pop_files.append(self.ghspop_file)
shutil.copy(self.ghspop_file, os.path.join(self.final_folder, os.path.basename(self.ghspop_file)))
self.inD = country_bounds
if (not os.path.exists(self.admin_shp)):
self.inD.to_file(self.admin_shp)<|docstring|>Create object for managing input data for summarizing urban extents
INPUT
:param: iso3 - string describing iso3 code
:param: output_folder - string path to folder to hold results
:param: country_bounds - geopandas dataframe of admin0 boundary
NAMING CONVENTION
To save this renaming step on my side, which can also induce mistakes, would be possible for you Ben to rename the files in your code directly? This would be also helpful for all other countries we have to do, and for the 1km*1km rasters.
My conventions are pretty simple. All rasters starts with the three lettres of the country and then _ as you do, and then 3 lettres for the variable, and possibly two figures for the year. So for instance for Tanzania, this is:
tza_ele tza_slo tza_wat for elevation, slope and water
tza_gpo tza_gbu for GHS population and built-up
tza_upo15 and tza_upo18 for WorldPop population unconstrained
tza_cpo15 and tza_cpo18 for WorldPop population constrained.
Then for 1km*1km raster, names are the same except that the three lettres of the country's name are followed by 1k, ie tza1k_slo, tza1k_ele and so on.<|endoftext|> |
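The `__init__` record above wires country-specific layer paths from the naming convention described in its docstring (three-letter ISO code, a short variable code, and a `1k` marker for 1 km rasters). A tiny illustration of that convention, using Tanzania as in the docstring; the helper `layer_path` is my own illustration, not part of the repository:

```python
import os

def layer_path(output_folder, iso3, var, one_km=False):
    # e.g. tza_gpo.tif, tza1k_gpo.tif, tza_slo.tif ... per the docstring convention.
    prefix = iso3.lower() + ("1k" if one_km else "")
    return os.path.join(output_folder, f"{prefix}_{var}.tif")

print(layer_path("/tmp/out", "TZA", "gpo"))        # /tmp/out/tza_gpo.tif
print(layer_path("/tmp/out", "TZA", "gpo", True))  # /tmp/out/tza1k_gpo.tif
```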
80e1ed231e44a6a5902ab848f128a57b70289c4c28b0a4cf7caec27d2fede328 | def process_dem(self, global_dem=''):
' Download DEM from AWS, calculate slope\n '
if ((not os.path.exists(self.dem_file)) and (global_dem == '')):
tPrint('Downloading DEM')
elevation.clip(bounds=self.inD.total_bounds, max_download_tiles=90000, output=self.dem_file, product='SRTM3')
if ((not os.path.exists(self.dem_file)) and (not (global_dem == ''))):
tPrint('Downloading DEM')
rMisc.clipRaster(rasterio.open(global_dem), self.inD, self.dem_file)
if ((not os.path.exists(self.slope_file)) and os.path.exists(self.dem_file)):
tPrint('Calculating slope')
in_dem = rasterio.open(self.dem_file)
in_dem_data = in_dem.read()
beau = richdem.rdarray(in_dem_data[(0, :, :)], no_data=in_dem.meta['nodata'])
slope = richdem.TerrainAttribute(beau, attrib='slope_riserun')
meta = in_dem.meta.copy()
meta.update(dtype=slope.dtype)
with rasterio.open(self.slope_file, 'w', **meta) as outR:
outR.write_band(1, slope) | Download DEM from AWS, calculate slope | src/urban_helper.py | process_dem | eigotateishi/GOST_Urban | 3 | python | def process_dem(self, global_dem=):
' \n '
if ((not os.path.exists(self.dem_file)) and (global_dem == '')):
tPrint('Downloading DEM')
elevation.clip(bounds=self.inD.total_bounds, max_download_tiles=90000, output=self.dem_file, product='SRTM3')
if ((not os.path.exists(self.dem_file)) and (not (global_dem == ''))):
tPrint('Downloading DEM')
rMisc.clipRaster(rasterio.open(global_dem), self.inD, self.dem_file)
if ((not os.path.exists(self.slope_file)) and os.path.exists(self.dem_file)):
tPrint('Calculating slope')
in_dem = rasterio.open(self.dem_file)
in_dem_data = in_dem.read()
beau = richdem.rdarray(in_dem_data[(0, :, :)], no_data=in_dem.meta['nodata'])
slope = richdem.TerrainAttribute(beau, attrib='slope_riserun')
meta = in_dem.meta.copy()
meta.update(dtype=slope.dtype)
with rasterio.open(self.slope_file, 'w', **meta) as outR:
outR.write_band(1, slope) | def process_dem(self, global_dem=''):
' \n '
if ((not os.path.exists(self.dem_file)) and (global_dem == '')):
tPrint('Downloading DEM')
elevation.clip(bounds=self.inD.total_bounds, max_download_tiles=90000, output=self.dem_file, product='SRTM3')
if ((not os.path.exists(self.dem_file)) and (not (global_dem == ''))):
tPrint('Downloading DEM')
rMisc.clipRaster(rasterio.open(global_dem), self.inD, self.dem_file)
if ((not os.path.exists(self.slope_file)) and os.path.exists(self.dem_file)):
tPrint('Calculating slope')
in_dem = rasterio.open(self.dem_file)
in_dem_data = in_dem.read()
beau = richdem.rdarray(in_dem_data[(0, :, :)], no_data=in_dem.meta['nodata'])
slope = richdem.TerrainAttribute(beau, attrib='slope_riserun')
meta = in_dem.meta.copy()
meta.update(dtype=slope.dtype)
with rasterio.open(self.slope_file, 'w', **meta) as outR:
outR.write_band(1, slope)<|docstring|>Download DEM from AWS, calculate slope<|endoftext|> |
fbfd796611a2954963f8905eb906f2b7a2e911b2dfbbcc163bea4aaf596c7550 | def extract_layers(self, global_landcover, global_ghspop, global_ghspop1k, global_ghbuilt, global_ghsl, global_smod):
' extract global layers for current country\n '
if (not os.path.exists(self.lc_file_h20)):
tPrint('Extracting water')
if (not os.path.exists(self.lc_file)):
rMisc.clipRaster(rasterio.open(global_landcover), self.inD, self.lc_file)
in_lc = rasterio.open(self.lc_file)
inL = in_lc.read()
lcmeta = in_lc.meta.copy()
tempL = (inL == 210).astype(lcmeta['dtype'])
lcmeta.update(nodata=255)
with rasterio.open(self.lc_file_h20, 'w', **lcmeta) as out:
out.write(tempL)
os.remove(self.lc_file)
if (not os.path.exists(self.ghsl_h20)):
tPrint('Extracting water from GHSL')
inR = rasterio.open(global_ghsl)
ul = inR.index(*self.inD.total_bounds[0:2])
lr = inR.index(*self.inD.total_bounds[2:4])
window = ((float(lr[0]), float((ul[0] + 1))), (float(ul[1]), float((lr[1] + 1))))
data = inR.read(1, window=window, masked=False)
data = (data == 1)
b = self.inD.total_bounds
new_transform = rasterio.transform.from_bounds(b[0], b[1], b[2], b[3], data.shape[1], data.shape[0])
meta = inR.meta.copy()
meta.update(driver='GTiff', width=data.shape[1], height=data.shape[0], transform=new_transform)
data = data.astype(meta['dtype'])
with rasterio.open(self.ghsl_h20, 'w', **meta) as outR:
outR.write_band(1, data)
'\n bounds = box(*self.inD.total_bounds)\n if inG.crs != self.inD.crs:\n destCRS = pyproj.Proj(inG.crs)\n fromCRS = pyproj.Proj(self.inD.crs)\n projector = partial(pyproj.transform, fromCRS, destCRS)\n bounds = transform(projector, bounds)\n def getFeatures(gdf):\n #Function to parse features from GeoDataFrame in such a manner that rasterio wants them\n return [json.loads(gdf.to_json())[\'features\'][0][\'geometry\']]\n tD = gpd.GeoDataFrame([[1]], geometry=[bounds])\n coords = getFeatures(tD)\n out_img, out_transform = mask(inG, shapes=coords, crop=True)\n out_meta = inG.meta.copy()\n out_meta.update({"driver": "GTiff",\n "height": out_img.shape[1],\n "width": out_img.shape[2],\n "transform": out_transform})\n water_data = (out_img == 1).astype(out_meta[\'dtype\'])\n with rasterio.open(self.ghsl_h20, \'w\', **out_meta) as outR:\n outR.write(water_data)\n '
if (not os.path.exists(self.ghspop_file)):
tPrint('Extracting GHS-POP')
rMisc.clipRaster(rasterio.open(global_ghspop), self.inD, self.ghspop_file)
if (not os.path.exists(self.ghspop1k_file)):
tPrint('Extracting GHS-POP')
rMisc.clipRaster(rasterio.open(global_ghspop1k), self.inD, self.ghspop1k_file)
if (not os.path.exists(self.ghsbuilt_file)):
tPrint('Clipping GHS-Built')
rMisc.clipRaster(rasterio.open(global_ghbuilt), self.inD, self.ghsbuilt_file)
if (not os.path.exists(self.ghssmod_file)):
tPrint('Clipping GHS-SMOD')
rMisc.clipRaster(rasterio.open(global_smod), self.inD, self.ghssmod_file)
if (not os.path.exists(self.admin_file)):
tPrint('Rasterizing admin boundaries')
xx = rasterio.open(self.ghspop_file)
res = xx.meta['transform'][0]
tempD = self.inD.to_crs(xx.crs)
shapes = ((row['geometry'], 1) for (idx, row) in tempD.iterrows())
burned = features.rasterize(shapes=shapes, out_shape=xx.shape, fill=0, transform=xx.meta['transform'], dtype='int16')
meta = xx.meta.copy()
meta.update(dtype=burned.dtype)
with rasterio.open(self.admin_file, 'w', **meta) as outR:
outR.write_band(1, burned) | extract global layers for current country | src/urban_helper.py | extract_layers | eigotateishi/GOST_Urban | 3 | python | def extract_layers(self, global_landcover, global_ghspop, global_ghspop1k, global_ghbuilt, global_ghsl, global_smod):
' \n '
if (not os.path.exists(self.lc_file_h20)):
tPrint('Extracting water')
if (not os.path.exists(self.lc_file)):
rMisc.clipRaster(rasterio.open(global_landcover), self.inD, self.lc_file)
in_lc = rasterio.open(self.lc_file)
inL = in_lc.read()
lcmeta = in_lc.meta.copy()
tempL = (inL == 210).astype(lcmeta['dtype'])
lcmeta.update(nodata=255)
with rasterio.open(self.lc_file_h20, 'w', **lcmeta) as out:
out.write(tempL)
os.remove(self.lc_file)
if (not os.path.exists(self.ghsl_h20)):
tPrint('Extracting water from GHSL')
inR = rasterio.open(global_ghsl)
ul = inR.index(*self.inD.total_bounds[0:2])
lr = inR.index(*self.inD.total_bounds[2:4])
window = ((float(lr[0]), float((ul[0] + 1))), (float(ul[1]), float((lr[1] + 1))))
data = inR.read(1, window=window, masked=False)
data = (data == 1)
b = self.inD.total_bounds
new_transform = rasterio.transform.from_bounds(b[0], b[1], b[2], b[3], data.shape[1], data.shape[0])
meta = inR.meta.copy()
meta.update(driver='GTiff', width=data.shape[1], height=data.shape[0], transform=new_transform)
data = data.astype(meta['dtype'])
with rasterio.open(self.ghsl_h20, 'w', **meta) as outR:
outR.write_band(1, data)
'\n bounds = box(*self.inD.total_bounds)\n if inG.crs != self.inD.crs:\n destCRS = pyproj.Proj(inG.crs)\n fromCRS = pyproj.Proj(self.inD.crs)\n projector = partial(pyproj.transform, fromCRS, destCRS)\n bounds = transform(projector, bounds)\n def getFeatures(gdf):\n #Function to parse features from GeoDataFrame in such a manner that rasterio wants them\n return [json.loads(gdf.to_json())[\'features\'][0][\'geometry\']]\n tD = gpd.GeoDataFrame([[1]], geometry=[bounds])\n coords = getFeatures(tD)\n out_img, out_transform = mask(inG, shapes=coords, crop=True)\n out_meta = inG.meta.copy()\n out_meta.update({"driver": "GTiff",\n "height": out_img.shape[1],\n "width": out_img.shape[2],\n "transform": out_transform})\n water_data = (out_img == 1).astype(out_meta[\'dtype\'])\n with rasterio.open(self.ghsl_h20, \'w\', **out_meta) as outR:\n outR.write(water_data)\n '
if (not os.path.exists(self.ghspop_file)):
tPrint('Extracting GHS-POP')
rMisc.clipRaster(rasterio.open(global_ghspop), self.inD, self.ghspop_file)
if (not os.path.exists(self.ghspop1k_file)):
tPrint('Extracting GHS-POP')
rMisc.clipRaster(rasterio.open(global_ghspop1k), self.inD, self.ghspop1k_file)
if (not os.path.exists(self.ghsbuilt_file)):
tPrint('Clipping GHS-Built')
rMisc.clipRaster(rasterio.open(global_ghbuilt), self.inD, self.ghsbuilt_file)
if (not os.path.exists(self.ghssmod_file)):
tPrint('Clipping GHS-SMOD')
rMisc.clipRaster(rasterio.open(global_smod), self.inD, self.ghssmod_file)
if (not os.path.exists(self.admin_file)):
tPrint('Rasterizing admin boundaries')
xx = rasterio.open(self.ghspop_file)
res = xx.meta['transform'][0]
tempD = self.inD.to_crs(xx.crs)
shapes = ((row['geometry'], 1) for (idx, row) in tempD.iterrows())
burned = features.rasterize(shapes=shapes, out_shape=xx.shape, fill=0, transform=xx.meta['transform'], dtype='int16')
meta = xx.meta.copy()
meta.update(dtype=burned.dtype)
with rasterio.open(self.admin_file, 'w', **meta) as outR:
outR.write_band(1, burned) | def extract_layers(self, global_landcover, global_ghspop, global_ghspop1k, global_ghbuilt, global_ghsl, global_smod):
' \n '
if (not os.path.exists(self.lc_file_h20)):
tPrint('Extracting water')
if (not os.path.exists(self.lc_file)):
rMisc.clipRaster(rasterio.open(global_landcover), self.inD, self.lc_file)
in_lc = rasterio.open(self.lc_file)
inL = in_lc.read()
lcmeta = in_lc.meta.copy()
tempL = (inL == 210).astype(lcmeta['dtype'])
lcmeta.update(nodata=255)
with rasterio.open(self.lc_file_h20, 'w', **lcmeta) as out:
out.write(tempL)
os.remove(self.lc_file)
if (not os.path.exists(self.ghsl_h20)):
tPrint('Extracting water from GHSL')
inR = rasterio.open(global_ghsl)
ul = inR.index(*self.inD.total_bounds[0:2])
lr = inR.index(*self.inD.total_bounds[2:4])
window = ((float(lr[0]), float((ul[0] + 1))), (float(ul[1]), float((lr[1] + 1))))
data = inR.read(1, window=window, masked=False)
data = (data == 1)
b = self.inD.total_bounds
new_transform = rasterio.transform.from_bounds(b[0], b[1], b[2], b[3], data.shape[1], data.shape[0])
meta = inR.meta.copy()
meta.update(driver='GTiff', width=data.shape[1], height=data.shape[0], transform=new_transform)
data = data.astype(meta['dtype'])
with rasterio.open(self.ghsl_h20, 'w', **meta) as outR:
outR.write_band(1, data)
'\n bounds = box(*self.inD.total_bounds)\n if inG.crs != self.inD.crs:\n destCRS = pyproj.Proj(inG.crs)\n fromCRS = pyproj.Proj(self.inD.crs)\n projector = partial(pyproj.transform, fromCRS, destCRS)\n bounds = transform(projector, bounds)\n def getFeatures(gdf):\n #Function to parse features from GeoDataFrame in such a manner that rasterio wants them\n return [json.loads(gdf.to_json())[\'features\'][0][\'geometry\']]\n tD = gpd.GeoDataFrame([[1]], geometry=[bounds])\n coords = getFeatures(tD)\n out_img, out_transform = mask(inG, shapes=coords, crop=True)\n out_meta = inG.meta.copy()\n out_meta.update({"driver": "GTiff",\n "height": out_img.shape[1],\n "width": out_img.shape[2],\n "transform": out_transform})\n water_data = (out_img == 1).astype(out_meta[\'dtype\'])\n with rasterio.open(self.ghsl_h20, \'w\', **out_meta) as outR:\n outR.write(water_data)\n '
if (not os.path.exists(self.ghspop_file)):
tPrint('Extracting GHS-POP')
rMisc.clipRaster(rasterio.open(global_ghspop), self.inD, self.ghspop_file)
if (not os.path.exists(self.ghspop1k_file)):
tPrint('Extracting GHS-POP')
rMisc.clipRaster(rasterio.open(global_ghspop1k), self.inD, self.ghspop1k_file)
if (not os.path.exists(self.ghsbuilt_file)):
tPrint('Clipping GHS-Built')
rMisc.clipRaster(rasterio.open(global_ghbuilt), self.inD, self.ghsbuilt_file)
if (not os.path.exists(self.ghssmod_file)):
tPrint('Clipping GHS-SMOD')
rMisc.clipRaster(rasterio.open(global_smod), self.inD, self.ghssmod_file)
if (not os.path.exists(self.admin_file)):
tPrint('Rasterizing admin boundaries')
xx = rasterio.open(self.ghspop_file)
res = xx.meta['transform'][0]
tempD = self.inD.to_crs(xx.crs)
shapes = ((row['geometry'], 1) for (idx, row) in tempD.iterrows())
burned = features.rasterize(shapes=shapes, out_shape=xx.shape, fill=0, transform=xx.meta['transform'], dtype='int16')
meta = xx.meta.copy()
meta.update(dtype=burned.dtype)
with rasterio.open(self.admin_file, 'w', **meta) as outR:
outR.write_band(1, burned)<|docstring|>extract global layers for current country<|endoftext|> |
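The tail of `extract_layers` above burns the admin polygons into a raster aligned with the GHS-POP grid. A stripped-down sketch of that `rasterio.features.rasterize` pattern, using a synthetic polygon and transform instead of the project files (both are assumptions here):

```python
from rasterio import features
from rasterio.transform import from_origin
from shapely.geometry import box

# A 100x100 grid with 0.01-degree cells starting at (30.0 E, 0.0 N),
# and one (geometry, burn value) pair to rasterize.
transform = from_origin(30.0, 0.0, 0.01, 0.01)
shapes = [(box(30.2, -0.8, 30.6, -0.4), 1)]

burned = features.rasterize(shapes=shapes, out_shape=(100, 100),
                            fill=0, transform=transform, dtype='int16')
print(burned.sum())  # count of cells covered by the polygon (about 1600)
```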
84518453bc30544fbdf99365594a82e8ec4f77567789f65f74713a74475c6bda | def calculate_urban(self, urb_val=300, hd_urb_val=1500):
' Calculate urban and HD urban extents from population files\n '
ghs_R = rasterio.open(self.ghspop_file)
for p_file in self.pop_files:
final_pop = os.path.join(self.final_folder, os.path.basename(p_file).replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix))))
if ('1k1k' in final_pop):
final_pop = final_pop.replace('1k1k', '1k')
final_urban = final_pop.replace('.tif', '_urban.tif')
final_urban_hd = final_pop.replace('.tif', '_urban_hd.tif')
urbanR = urban.urbanGriddedPop(final_pop)
in_raster = rasterio.open(final_pop)
total_ratio = ((in_raster.res[0] * in_raster.res[1]) / 1000000)
print(final_pop)
if (not os.path.exists(final_urban)):
urban_shp = urbanR.calculateUrban(densVal=(urb_val * total_ratio), totalPopThresh=5000, raster=final_urban)
if (not os.path.exists(final_urban_hd)):
cluster_shp = urbanR.calculateUrban(densVal=(hd_urb_val * total_ratio), totalPopThresh=50000, raster=final_urban_hd, smooth=True, queen=True) | Calculate urban and HD urban extents from population files | src/urban_helper.py | calculate_urban | eigotateishi/GOST_Urban | 3 | python | def calculate_urban(self, urb_val=300, hd_urb_val=1500):
' \n '
ghs_R = rasterio.open(self.ghspop_file)
for p_file in self.pop_files:
final_pop = os.path.join(self.final_folder, os.path.basename(p_file).replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix))))
if ('1k1k' in final_pop):
final_pop = final_pop.replace('1k1k', '1k')
final_urban = final_pop.replace('.tif', '_urban.tif')
final_urban_hd = final_pop.replace('.tif', '_urban_hd.tif')
urbanR = urban.urbanGriddedPop(final_pop)
in_raster = rasterio.open(final_pop)
total_ratio = ((in_raster.res[0] * in_raster.res[1]) / 1000000)
print(final_pop)
if (not os.path.exists(final_urban)):
urban_shp = urbanR.calculateUrban(densVal=(urb_val * total_ratio), totalPopThresh=5000, raster=final_urban)
if (not os.path.exists(final_urban_hd)):
cluster_shp = urbanR.calculateUrban(densVal=(hd_urb_val * total_ratio), totalPopThresh=50000, raster=final_urban_hd, smooth=True, queen=True) | def calculate_urban(self, urb_val=300, hd_urb_val=1500):
' \n '
ghs_R = rasterio.open(self.ghspop_file)
for p_file in self.pop_files:
final_pop = os.path.join(self.final_folder, os.path.basename(p_file).replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix))))
if ('1k1k' in final_pop):
final_pop = final_pop.replace('1k1k', '1k')
final_urban = final_pop.replace('.tif', '_urban.tif')
final_urban_hd = final_pop.replace('.tif', '_urban_hd.tif')
urbanR = urban.urbanGriddedPop(final_pop)
in_raster = rasterio.open(final_pop)
total_ratio = ((in_raster.res[0] * in_raster.res[1]) / 1000000)
print(final_pop)
if (not os.path.exists(final_urban)):
urban_shp = urbanR.calculateUrban(densVal=(urb_val * total_ratio), totalPopThresh=5000, raster=final_urban)
if (not os.path.exists(final_urban_hd)):
cluster_shp = urbanR.calculateUrban(densVal=(hd_urb_val * total_ratio), totalPopThresh=50000, raster=final_urban_hd, smooth=True, queen=True)<|docstring|>Calculate urban and HD urban extents from population files<|endoftext|> |
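In `calculate_urban` above, the thresholds `urb_val` (300) and `hd_urb_val` (1500) are persons per km2 and are rescaled to persons per cell via `total_ratio = (xres * yres) / 1e6`, assuming the raster CRS is in meters. A small worked check of that conversion (cell sizes here are illustrative):

```python
def per_cell_threshold(density_per_km2, xres_m, yres_m):
    # Convert a persons-per-km2 density threshold to persons per raster cell.
    total_ratio = (xres_m * yres_m) / 1_000_000
    return density_per_km2 * total_ratio

print(per_cell_threshold(300, 250, 250))     # 18.75 persons per 250 m cell
print(per_cell_threshold(1500, 1000, 1000))  # 1500.0 persons per 1 km cell
```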
e3b607cea159e8e452880231858dbda32e26642fa3980ad170ea78e4324f6251 | def pop_zonal_admin(self, admin_layer):
' calculate urban and rural \n \n :param: - admin_layer\n '
for p_file in self.pop_files:
pop_file = os.path.join(self.final_folder, os.path.basename(p_file).replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix))))
if ('1k1k' in pop_file):
pop_file = pop_file.replace('1k1k', '1k')
yy = summarize_population(pop_file, admin_layer)
if yy.check_inputs():
res = yy.calculate_zonal(out_name='')
try:
final = final.join(res)
except:
final = res
else:
print(('Error summarizing population for %s' % pop_file))
admin_layer = admin_layer.reset_index()
final = final.filter(regex='_SUM')
final = final.join(admin_layer)
final = final.drop(['geometry'], axis=1)
return final | calculate urban and rural
:param: - admin_layer | src/urban_helper.py | pop_zonal_admin | eigotateishi/GOST_Urban | 3 | python | def pop_zonal_admin(self, admin_layer):
' calculate urban and rural \n \n :param: - admin_layer\n '
for p_file in self.pop_files:
pop_file = os.path.join(self.final_folder, os.path.basename(p_file).replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix))))
if ('1k1k' in pop_file):
pop_file = pop_file.replace('1k1k', '1k')
yy = summarize_population(pop_file, admin_layer)
if yy.check_inputs():
res = yy.calculate_zonal(out_name='')
try:
final = final.join(res)
except:
final = res
else:
print(('Error summarizing population for %s' % pop_file))
admin_layer = admin_layer.reset_index()
final = final.filter(regex='_SUM')
final = final.join(admin_layer)
final = final.drop(['geometry'], axis=1)
return final | def pop_zonal_admin(self, admin_layer):
' calculate urban and rural \n \n :param: - admin_layer\n '
for p_file in self.pop_files:
pop_file = os.path.join(self.final_folder, os.path.basename(p_file).replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix))))
if ('1k1k' in pop_file):
pop_file = pop_file.replace('1k1k', '1k')
yy = summarize_population(pop_file, admin_layer)
if yy.check_inputs():
res = yy.calculate_zonal(out_name='')
try:
final = final.join(res)
except:
final = res
else:
print(('Error summarizing population for %s' % pop_file))
admin_layer = admin_layer.reset_index()
final = final.filter(regex='_SUM')
final = final.join(admin_layer)
final = final.drop(['geometry'], axis=1)
return final<|docstring|>calculate urban and rural
:param: - admin_layer<|endoftext|> |
2cebd5bcea87b5dcbc55b8ebdcb355030d915f0f8ab5fdd3477cdccdacfedec7 | def compare_pop_rasters(self, verbose=True):
' read in and summarize population rasters \n '
all_res = []
for pFile in self.pop_files:
inR = rasterio.open(pFile)
inD = inR.read()
inD = inD[(inD > 0)]
all_res.append([os.path.basename(pFile), inD.sum()])
if verbose:
print(f'{os.path.basename(pFile)}: {inD.sum()}')
return all_res | read in and summarize population rasters | src/urban_helper.py | compare_pop_rasters | eigotateishi/GOST_Urban | 3 | python | def compare_pop_rasters(self, verbose=True):
' \n '
all_res = []
for pFile in self.pop_files:
inR = rasterio.open(pFile)
inD = inR.read()
inD = inD[(inD > 0)]
all_res.append([os.path.basename(pFile), inD.sum()])
if verbose:
print(f'{os.path.basename(pFile)}: {inD.sum()}')
return all_res | def compare_pop_rasters(self, verbose=True):
' \n '
all_res = []
for pFile in self.pop_files:
inR = rasterio.open(pFile)
inD = inR.read()
inD = inD[(inD > 0)]
all_res.append([os.path.basename(pFile), inD.sum()])
if verbose:
print(f'{os.path.basename(pFile)}: {inD.sum()}')
return all_res<|docstring|>read in and summarize population rasters<|endoftext|> |
14b396ad7cceb3faaf6e2b94ae11d3d83a5ec5ecd8343dbf51585c82345fcfb0 | def evaluateOutput(self, admin_stats, commune_stats):
'\n Check the outputs to determine if processing worked correctly\n \n 1. compare population totals between raw, 250m and 1km data\n 2. Calculate urbanization rate\n 3. Water mask\n a. calculate overlap between water classes\n b. calculate overlap between water and population\n c. calculate overlap between water and urban\n \n https://ghsl.jrc.ec.europa.eu/documents/cfs01/V3/CFS_Ghana.pdf\n '
stats_file = os.path.join(self.out_folder, ('DATA_EVALUATION_%s_%s.txt' % (self.iso3, self.suffix)))
with open(stats_file, 'w') as out_stats:
pop_comparison = self.compare_pop_rasters(verbose=False)
out_stats.write('***** Evaluate Total Population *****\n')
for x in pop_comparison:
out_stats.write(f'''{x[0]}: {x[1]}
''')
pop_file_defs = []
for pop_file in self.pop_files:
name = 'GHS'
if ('upo' in pop_file):
name = ('WP_U_%s' % pop_file[(- 6):(- 4)])
if ('cpo' in pop_file):
name = ('WP_C_%s' % pop_file[(- 6):(- 4)])
pop_file_base = os.path.basename(pop_file)
if (self.suffix == '1k'):
pop_file_base = pop_file_base.replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix)))
if ('1k1k' in pop_file_base):
pop_file_base = pop_file_base.replace('1k1k', '1k')
out_pop_file = os.path.join(self.final_folder, pop_file_base)
urban_pop_file = out_pop_file.replace('.tif', '_urban.tif')
hd_pop_file = out_pop_file.replace('.tif', '_urban_hd.tif')
pop_file_defs.append([out_pop_file, urban_pop_file, hd_pop_file, name])
out_stats.write('***** Evaluate Urbanization *****\n')
for fileDef in pop_file_defs:
pFile = fileDef[0]
urb_file = fileDef[1]
hd_file = fileDef[2]
name = fileDef[3]
try:
inPop = rasterio.open(pFile).read()
inPop = (inPop * (inPop > 0))
inUrb = rasterio.open(urb_file).read()
inHd = rasterio.open(hd_file).read()
tPop = inPop.sum()
urbPop = (inPop * inUrb).sum()
hdPop = (inPop * inHd).sum()
out_stats.write(f'''{name}: TotalPop: {tPop.round(0)}, UrbanPop: {urbPop.round(0)}, HD Pop: {hdPop.round(0)}
''')
out_stats.write(f'''{name}: {((urbPop / tPop) * 100).round(2)}% Urban; {((hdPop / tPop) * 100).round(2)}% HD Urban
''')
except:
print(f'Error processing {name}')
print(fileDef)
out_stats.write('***** Evaluate SMOD ******\n')
smod_vals = [10, 11, 12, 13, 21, 22, 23, 30]
inSMOD = rasterio.open(os.path.join(self.final_folder, os.path.basename(self.ghssmod_file).replace(('%s' % self.iso3.lower()), ('%s%s' % (self.iso3.lower(), self.suffix)))))
smod = inSMOD.read()
for pFile in self.pop_files:
if ('gpo' in pFile):
inPop = rasterio.open(pFile)
pop = inPop.read()
pop[(pop < 0)] = 0
total_pop = pop.sum()
total_per = 0
for val in smod_vals:
cur_smod = (smod == val).astype(int)
cur_pop = (pop * cur_smod)
total_curpop = cur_pop.sum()
perUrban = ((total_curpop.sum() / total_pop) * 100)
if (val > 20):
total_per = (total_per + perUrban)
out_stats.write(f'''{val}: {perUrban}
''')
out_stats.write(f'''Total Urban: {total_per}
''')
'3. Water mask \n '
out_stats.write('***** Evaluate Water Intersection *****\n')
water_ghsl = os.path.join(self.final_folder, ('%s%s_wat.tif' % (self.iso3.lower(), self.suffix)))
water_lc = os.path.join(self.final_folder, ('%s%s_wat_lc.tif' % (self.iso3.lower(), self.suffix)))
inWG = rasterio.open(water_ghsl)
wgData = inWG.read()
wgData[(wgData == inWG.meta['nodata'])] = 0
inWLC = rasterio.open(water_lc)
wlcData = inWLC.read()
wlcData[(wlcData == inWLC.meta['nodata'])] = 0
combo = (wgData + wlcData)
out_stats.write(f'''WATER: GHSL count: {wgData.sum()}; LC count: {wlcData.sum()}; overlap: {(combo == 2).sum()}
''')
out_stats.write('***** Evaluate Water Population Overlap *****\n')
for fileDef in pop_file_defs:
pop_file = fileDef[0]
urb_file = fileDef[1]
hd_file = fileDef[2]
name = fileDef[3]
cur_pop = rasterio.open(pop_file)
curP = cur_pop.read()
curP[(curP == cur_pop.meta['nodata'])] = 0
urb = rasterio.open(urb_file).read()
hd = rasterio.open(hd_file).read()
out_stats.write(f'''WATER {name} Population: TotalPop: {curP.sum().round()}, WaterPop GHSL: {(curP * wgData).sum().round()}, WaterPop LC: {(curP * wlcData).sum().round()}
''')
out_stats.write(f'''WATER {name} Urban Cells: TotalUrban Cells: {urb.sum().round()}, WaterUrban GHSL: {(urb * wgData).sum()}, WaterUrb LC: {(urb * wlcData).sum()}
''')
out_stats.write(f'''WATER {name} HD Cells: TotalPop: {hd.sum().round()}, WaterHD GHSL: {(hd * wgData).sum()}, WaterHD LC: {(hd * wlcData).sum()}
''')
for sFile in [admin_stats, commune_stats]:
if os.path.exists(sFile):
tPrint(sFile)
file_name = os.path.basename(sFile)
inD = pd.read_csv(sFile, index_col=0)
out_stats.write(f'''***** Summarizing {file_name}
''')
bad_cols = ['index', 'OBJECTID', 'WB_ADM1_CO', 'WB_ADM0_CO', 'WB_ADM2_CO', 'Shape_Leng', 'Shape_Area']
for col in inD.columns:
if (not (col in bad_cols)):
curD = inD[col]
try:
curD_sum = curD.loc[(curD > 0)].sum()
out_stats.write(f'''{col}: {round(curD_sum)}
''')
except:
pass | Check the outputs to determine if processing worked correctly
1. compare population totals between raw, 250m and 1km data
2. Calculate urbanization rate
3. Water mask
a. calculate overlap between water classes
b. calculate overlap between water and population
c. calculate overlap between water and urban
https://ghsl.jrc.ec.europa.eu/documents/cfs01/V3/CFS_Ghana.pdf | src/urban_helper.py | evaluateOutput | eigotateishi/GOST_Urban | 3 | python | def evaluateOutput(self, admin_stats, commune_stats):
'\n Check the outputs to determine if processing worked correctly\n \n 1. compare population totals between raw, 250m and 1km data\n 2. Calculate urbanization rate\n 3. Water mask\n a. calculate overlap between water classes\n b. calculate overlap between water and population\n c. calculate overlap between water and urban\n \n https://ghsl.jrc.ec.europa.eu/documents/cfs01/V3/CFS_Ghana.pdf\n '
stats_file = os.path.join(self.out_folder, ('DATA_EVALUATION_%s_%s.txt' % (self.iso3, self.suffix)))
with open(stats_file, 'w') as out_stats:
pop_comparison = self.compare_pop_rasters(verbose=False)
out_stats.write('***** Evaluate Total Population *****\n')
for x in pop_comparison:
out_stats.write(f'{x[0]}: {x[1]}\n')
pop_file_defs = []
for pop_file in self.pop_files:
name = 'GHS'
if ('upo' in pop_file):
name = ('WP_U_%s' % pop_file[(- 6):(- 4)])
if ('cpo' in pop_file):
name = ('WP_C_%s' % pop_file[(- 6):(- 4)])
pop_file_base = os.path.basename(pop_file)
if (self.suffix == '1k'):
pop_file_base = pop_file_base.replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix)))
if ('1k1k' in pop_file_base):
pop_file_base = pop_file_base.replace('1k1k', '1k')
out_pop_file = os.path.join(self.final_folder, pop_file_base)
urban_pop_file = out_pop_file.replace('.tif', '_urban.tif')
hd_pop_file = out_pop_file.replace('.tif', '_urban_hd.tif')
pop_file_defs.append([out_pop_file, urban_pop_file, hd_pop_file, name])
out_stats.write('***** Evaluate Urbanization *****\n')
for fileDef in pop_file_defs:
pFile = fileDef[0]
urb_file = fileDef[1]
hd_file = fileDef[2]
name = fileDef[3]
try:
inPop = rasterio.open(pFile).read()
inPop = (inPop * (inPop > 0))
inUrb = rasterio.open(urb_file).read()
inHd = rasterio.open(hd_file).read()
tPop = inPop.sum()
urbPop = (inPop * inUrb).sum()
hdPop = (inPop * inHd).sum()
out_stats.write(f'{name}: TotalPop: {tPop.round(0)}, UrbanPop: {urbPop.round(0)}, HD Pop: {hdPop.round(0)}\n')
out_stats.write(f'{name}: {((urbPop / tPop) * 100).round(2)}% Urban; {((hdPop / tPop) * 100).round(2)}% HD Urban\n')
except:
print(f'Error processing {name}')
print(fileDef)
out_stats.write('***** Evaluate SMOD ******\n')
smod_vals = [10, 11, 12, 13, 21, 22, 23, 30]
inSMOD = rasterio.open(os.path.join(self.final_folder, os.path.basename(self.ghssmod_file).replace(('%s' % self.iso3.lower()), ('%s%s' % (self.iso3.lower(), self.suffix)))))
smod = inSMOD.read()
for pFile in self.pop_files:
if ('gpo' in pFile):
inPop = rasterio.open(pFile)
pop = inPop.read()
pop[(pop < 0)] = 0
total_pop = pop.sum()
total_per = 0
for val in smod_vals:
cur_smod = (smod == val).astype(int)
cur_pop = (pop * cur_smod)
total_curpop = cur_pop.sum()
perUrban = ((total_curpop.sum() / total_pop) * 100)
if (val > 20):
total_per = (total_per + perUrban)
out_stats.write(f'{val}: {perUrban}\n')
out_stats.write(f'Total Urban: {total_per}\n')
# 3. Water mask
out_stats.write('***** Evaluate Water Intersection *****\n')
water_ghsl = os.path.join(self.final_folder, ('%s%s_wat.tif' % (self.iso3.lower(), self.suffix)))
water_lc = os.path.join(self.final_folder, ('%s%s_wat_lc.tif' % (self.iso3.lower(), self.suffix)))
inWG = rasterio.open(water_ghsl)
wgData = inWG.read()
wgData[(wgData == inWG.meta['nodata'])] = 0
inWLC = rasterio.open(water_lc)
wlcData = inWLC.read()
wlcData[(wlcData == inWLC.meta['nodata'])] = 0
combo = (wgData + wlcData)
out_stats.write(f'WATER: GHSL count: {wgData.sum()}; LC count: {wlcData.sum()}; overlap: {(combo == 2).sum()}\n')
out_stats.write('***** Evaluate Water Population Overlap *****\n')
for fileDef in pop_file_defs:
pop_file = fileDef[0]
urb_file = fileDef[1]
hd_file = fileDef[2]
name = fileDef[3]
cur_pop = rasterio.open(pop_file)
curP = cur_pop.read()
curP[(curP == cur_pop.meta['nodata'])] = 0
urb = rasterio.open(urb_file).read()
hd = rasterio.open(hd_file).read()
out_stats.write(f'WATER {name} Population: TotalPop: {curP.sum().round()}, WaterPop GHSL: {(curP * wgData).sum().round()}, WaterPop LC: {(curP * wlcData).sum().round()}\n')
out_stats.write(f'WATER {name} Urban Cells: TotalUrban Cells: {urb.sum().round()}, WaterUrban GHSL: {(urb * wgData).sum()}, WaterUrb LC: {(urb * wlcData).sum()}\n')
out_stats.write(f'WATER {name} HD Cells: TotalPop: {hd.sum().round()}, WaterHD GHSL: {(hd * wgData).sum()}, WaterHD LC: {(hd * wlcData).sum()}\n')
for sFile in [admin_stats, commune_stats]:
if os.path.exists(sFile):
tPrint(sFile)
file_name = os.path.basename(sFile)
inD = pd.read_csv(sFile, index_col=0)
out_stats.write(f'***** Summarizing {file_name}\n')
bad_cols = ['index', 'OBJECTID', 'WB_ADM1_CO', 'WB_ADM0_CO', 'WB_ADM2_CO', 'Shape_Leng', 'Shape_Area']
for col in inD.columns:
if (not (col in bad_cols)):
curD = inD[col]
try:
curD_sum = curD.loc[(curD > 0)].sum()
out_stats.write(f'{col}: {round(curD_sum)}\n')
except:
pass | def evaluateOutput(self, admin_stats, commune_stats):
'\n Check the outputs to determine if processing worked correctly\n \n 1. compare population totals between raw, 250m and 1km data\n 2. Calculate urbanization rate\n 3. Water mask\n a. calculate overlap between water classes\n b. calculate overlap between water and population\n c. calculate overlap between water and urban\n \n https://ghsl.jrc.ec.europa.eu/documents/cfs01/V3/CFS_Ghana.pdf\n '
stats_file = os.path.join(self.out_folder, ('DATA_EVALUATION_%s_%s.txt' % (self.iso3, self.suffix)))
with open(stats_file, 'w') as out_stats:
pop_comparison = self.compare_pop_rasters(verbose=False)
out_stats.write('***** Evaluate Total Population *****\n')
for x in pop_comparison:
out_stats.write(f'{x[0]}: {x[1]}\n')
pop_file_defs = []
for pop_file in self.pop_files:
name = 'GHS'
if ('upo' in pop_file):
name = ('WP_U_%s' % pop_file[(- 6):(- 4)])
if ('cpo' in pop_file):
name = ('WP_C_%s' % pop_file[(- 6):(- 4)])
pop_file_base = os.path.basename(pop_file)
if (self.suffix == '1k'):
pop_file_base = pop_file_base.replace(self.iso3.lower(), ('%s%s' % (self.iso3.lower(), self.suffix)))
if ('1k1k' in pop_file_base):
pop_file_base = pop_file_base.replace('1k1k', '1k')
out_pop_file = os.path.join(self.final_folder, pop_file_base)
urban_pop_file = out_pop_file.replace('.tif', '_urban.tif')
hd_pop_file = out_pop_file.replace('.tif', '_urban_hd.tif')
pop_file_defs.append([out_pop_file, urban_pop_file, hd_pop_file, name])
out_stats.write('***** Evaluate Urbanization *****\n')
for fileDef in pop_file_defs:
pFile = fileDef[0]
urb_file = fileDef[1]
hd_file = fileDef[2]
name = fileDef[3]
try:
inPop = rasterio.open(pFile).read()
inPop = (inPop * (inPop > 0))
inUrb = rasterio.open(urb_file).read()
inHd = rasterio.open(hd_file).read()
tPop = inPop.sum()
urbPop = (inPop * inUrb).sum()
hdPop = (inPop * inHd).sum()
out_stats.write(f'{name}: TotalPop: {tPop.round(0)}, UrbanPop: {urbPop.round(0)}, HD Pop: {hdPop.round(0)}\n')
out_stats.write(f'{name}: {((urbPop / tPop) * 100).round(2)}% Urban; {((hdPop / tPop) * 100).round(2)}% HD Urban\n')
except:
print(f'Error processing {name}')
print(fileDef)
out_stats.write('***** Evaluate SMOD ******\n')
smod_vals = [10, 11, 12, 13, 21, 22, 23, 30]
inSMOD = rasterio.open(os.path.join(self.final_folder, os.path.basename(self.ghssmod_file).replace(('%s' % self.iso3.lower()), ('%s%s' % (self.iso3.lower(), self.suffix)))))
smod = inSMOD.read()
for pFile in self.pop_files:
if ('gpo' in pFile):
inPop = rasterio.open(pFile)
pop = inPop.read()
pop[(pop < 0)] = 0
total_pop = pop.sum()
total_per = 0
for val in smod_vals:
cur_smod = (smod == val).astype(int)
cur_pop = (pop * cur_smod)
total_curpop = cur_pop.sum()
perUrban = ((total_curpop.sum() / total_pop) * 100)
if (val > 20):
total_per = (total_per + perUrban)
out_stats.write(f'{val}: {perUrban}\n')
out_stats.write(f'Total Urban: {total_per}\n')
# 3. Water mask
out_stats.write('***** Evaluate Water Intersection *****\n')
water_ghsl = os.path.join(self.final_folder, ('%s%s_wat.tif' % (self.iso3.lower(), self.suffix)))
water_lc = os.path.join(self.final_folder, ('%s%s_wat_lc.tif' % (self.iso3.lower(), self.suffix)))
inWG = rasterio.open(water_ghsl)
wgData = inWG.read()
wgData[(wgData == inWG.meta['nodata'])] = 0
inWLC = rasterio.open(water_lc)
wlcData = inWLC.read()
wlcData[(wlcData == inWLC.meta['nodata'])] = 0
combo = (wgData + wlcData)
out_stats.write(f'WATER: GHSL count: {wgData.sum()}; LC count: {wlcData.sum()}; overlap: {(combo == 2).sum()}\n')
out_stats.write('***** Evaluate Water Population Overlap *****\n')
for fileDef in pop_file_defs:
pop_file = fileDef[0]
urb_file = fileDef[1]
hd_file = fileDef[2]
name = fileDef[3]
cur_pop = rasterio.open(pop_file)
curP = cur_pop.read()
curP[(curP == cur_pop.meta['nodata'])] = 0
urb = rasterio.open(urb_file).read()
hd = rasterio.open(hd_file).read()
out_stats.write(f'WATER {name} Population: TotalPop: {curP.sum().round()}, WaterPop GHSL: {(curP * wgData).sum().round()}, WaterPop LC: {(curP * wlcData).sum().round()}\n')
out_stats.write(f'WATER {name} Urban Cells: TotalUrban Cells: {urb.sum().round()}, WaterUrban GHSL: {(urb * wgData).sum()}, WaterUrb LC: {(urb * wlcData).sum()}\n')
out_stats.write(f'WATER {name} HD Cells: TotalPop: {hd.sum().round()}, WaterHD GHSL: {(hd * wgData).sum()}, WaterHD LC: {(hd * wlcData).sum()}\n')
for sFile in [admin_stats, commune_stats]:
if os.path.exists(sFile):
tPrint(sFile)
file_name = os.path.basename(sFile)
inD = pd.read_csv(sFile, index_col=0)
out_stats.write(f'***** Summarizing {file_name}\n')
bad_cols = ['index', 'OBJECTID', 'WB_ADM1_CO', 'WB_ADM0_CO', 'WB_ADM2_CO', 'Shape_Leng', 'Shape_Area']
for col in inD.columns:
if (not (col in bad_cols)):
curD = inD[col]
try:
curD_sum = curD.loc[(curD > 0)].sum()
out_stats.write(f'{col}: {round(curD_sum)}\n')
except:
pass<|docstring|>Check the outputs to determine if processing worked correctly
1. compare population totals between raw, 250m and 1km data
2. Calculate urbanization rate
3. Water mask
a. calculate overlap between water classes
b. calculate overlap between water and population
c. calculate overlap between water and urban
https://ghsl.jrc.ec.europa.eu/documents/cfs01/V3/CFS_Ghana.pdf<|endoftext|> |
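A minimal sketch of the urbanization-rate check that the docstring above describes, assuming only rasterio; the file names are illustrative placeholders, not outputs of this record:

import rasterio

# hypothetical population raster and a matching 0/1 urban-extent raster
pop = rasterio.open('gha_gpo.tif').read()
urb = rasterio.open('gha_gpo_urban.tif').read()
pop[pop < 0] = 0                       # drop negative / nodata cells, as evaluateOutput does
total_pop = pop.sum()
urban_pop = (pop * urb).sum()          # population falling inside urban cells
print(f'{100 * urban_pop / total_pop:.2f}% urban')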
f2b40f7bd699b11f60ae22238cb41552ca452ef706528ba10d448bce6939114a | def get_flags(config):
'Get glob flags.'
flags = ((((glob.GLOBSTAR | glob.DOTGLOB) | glob.NEGATE) | glob.SPLIT) | glob.NEGATEALL)
if config.get('brace_expansion', False):
flags |= glob.BRACE
if config.get('extended_glob', False):
flags |= glob.EXTGLOB
if config.get('minus_negate', True):
flags |= glob.MINUSNEGATE
if config.get('case_insensitive', False):
flags |= glob.IGNORECASE
return flags | Get glob flags. | label_bot/wildcard_labels.py | get_flags | gir-bot/do-not-merge | 13 | python | def get_flags(config):
flags = ((((glob.GLOBSTAR | glob.DOTGLOB) | glob.NEGATE) | glob.SPLIT) | glob.NEGATEALL)
if config.get('brace_expansion', False):
flags |= glob.BRACE
if config.get('extended_glob', False):
flags |= glob.EXTGLOB
if config.get('minus_negate', True):
flags |= glob.MINUSNEGATE
if config.get('case_insensitive', False):
flags |= glob.IGNORECASE
return flags | def get_flags(config):
flags = ((((glob.GLOBSTAR | glob.DOTGLOB) | glob.NEGATE) | glob.SPLIT) | glob.NEGATEALL)
if config.get('brace_expansion', False):
flags |= glob.BRACE
if config.get('extended_glob', False):
flags |= glob.EXTGLOB
if config.get('minus_negate', True):
flags |= glob.MINUSNEGATE
if config.get('case_insensitive', False):
flags |= glob.IGNORECASE
return flags<|docstring|>Get glob flags.<|endoftext|> |
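A small usage sketch for get_flags, assuming it is imported from the module above together with wcmatch's glob (the source of the flag constants); the config values are invented for illustration:

from wcmatch import glob

config = {'brace_expansion': True, 'extended_glob': True}
flags = get_flags(config)  # GLOBSTAR|DOTGLOB|NEGATE|SPLIT|NEGATEALL plus BRACE, EXTGLOB, MINUSNEGATE
glob.globmatch('docs/guide/index.md', '**/*.{md,rst}', flags=flags)  # True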
be79995a0266ec0ae5e6e58551a4d51051cdc05c9b7240e2f9fabc2f81bee3d5 | def get_labels(rules, files, flags):
'Sync labels.'
add_labels = {}
for file in files:
for label in rules:
try:
names = label['labels']
lows = [n.lower() for n in names]
except Exception:
traceback.print_exc(file=sys.stdout)
continue
match = False
for pattern in label['patterns']:
try:
match = glob.globmatch(file, pattern, flags=flags)
except Exception:
traceback.print_exc(file=sys.stdout)
match = False
if match:
break
if match:
for (index, low) in enumerate(lows):
if (low not in add_labels):
add_labels[low] = names[index]
remove_labels = {}
for label in rules:
try:
names = label['labels']
lows = [n.lower() for n in names]
except Exception:
traceback.print_exc(file=sys.stdout)
continue
for (index, low) in enumerate(lows):
if ((low not in add_labels) and (low not in remove_labels)):
remove_labels[low] = names[index]
return (add_labels, remove_labels) | Sync labels. | label_bot/wildcard_labels.py | get_labels | gir-bot/do-not-merge | 13 | python | def get_labels(rules, files, flags):
add_labels = {}
for file in files:
for label in rules:
try:
names = label['labels']
lows = [n.lower() for n in names]
except Exception:
traceback.print_exc(file=sys.stdout)
continue
match = False
for pattern in label['patterns']:
try:
match = glob.globmatch(file, pattern, flags=flags)
except Exception:
traceback.print_exc(file=sys.stdout)
match = False
if match:
break
if match:
for (index, low) in enumerate(lows):
if (low not in add_labels):
add_labels[low] = names[index]
remove_labels = {}
for label in rules:
try:
names = label['labels']
lows = [n.lower() for n in names]
except Exception:
traceback.print_exc(file=sys.stdout)
continue
for (index, low) in enumerate(lows):
if ((low not in add_labels) and (low not in remove_labels)):
remove_labels[low] = names[index]
return (add_labels, remove_labels) | def get_labels(rules, files, flags):
add_labels = {}
for file in files:
for label in rules:
try:
names = label['labels']
lows = [n.lower() for n in names]
except Exception:
traceback.print_exc(file=sys.stdout)
continue
match = False
for pattern in label['patterns']:
try:
match = glob.globmatch(file, pattern, flags=flags)
except Exception:
traceback.print_exc(file=sys.stdout)
match = False
if match:
break
if match:
for (index, low) in enumerate(lows):
if (low not in add_labels):
add_labels[low] = names[index]
remove_labels = {}
for label in rules:
try:
names = label['labels']
lows = [n.lower() for n in names]
except Exception:
traceback.print_exc(file=sys.stdout)
continue
for (index, low) in enumerate(lows):
if ((low not in add_labels) and (low not in remove_labels)):
remove_labels[low] = names[index]
return (add_labels, remove_labels)<|docstring|>Sync labels.<|endoftext|> |
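A sketch of the rule structure get_labels expects, inferred from the body above; the label names, patterns, and file list are invented for illustration:

rules = [
    {'labels': ['Docs'], 'patterns': ['docs/**|*.md']},          # '|' separates patterns (SPLIT flag)
    {'labels': ['CI'], 'patterns': ['.github/workflows/**']},
]
files = ['docs/index.md', 'src/app.py']
add, remove = get_labels(rules, files, get_flags({}))
# add    -> {'docs': 'Docs'}  (keys lower-cased, original casing kept as values)
# remove -> {'ci': 'CI'}      (labels whose rules matched no changed file)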
7bc990d0bd7388d71b55399c76b75df02d552dad52b3678fc073ec7dc94c8377 | async def wildcard_labels(event, gh, config):
'Label issues by files that have changed.'
rules = config.get('rules', [])
if rules:
flags = get_flags(config)
files = (await get_changed_files(event, gh))
(add, remove) = get_labels(rules, files, flags)
(await update_issue_labels(event, gh, add, remove)) | Label issues by files that have changed. | label_bot/wildcard_labels.py | wildcard_labels | gir-bot/do-not-merge | 13 | python | async def wildcard_labels(event, gh, config):
rules = config.get('rules', [])
if rules:
flags = get_flags(config)
files = (await get_changed_files(event, gh))
(add, remove) = get_labels(rules, files, flags)
(await update_issue_labels(event, gh, add, remove)) | async def wildcard_labels(event, gh, config):
rules = config.get('rules', [])
if rules:
flags = get_flags(config)
files = (await get_changed_files(event, gh))
(add, remove) = get_labels(rules, files, flags)
(await update_issue_labels(event, gh, add, remove))<|docstring|>Label issues by files that have changed.<|endoftext|> |
9919b2f0d3e0020bcc9aecbdbff3b394665146a3dd2cc7bc134bf2bef15b7703 | async def get_changed_files(event, gh):
'Get changed files.'
files = []
compare = (await gh.getitem(event.compare_url, {'base': event.base, 'head': event.head}))
for file in compare['files']:
files.append(file['filename'])
return files | Get changed files. | label_bot/wildcard_labels.py | get_changed_files | gir-bot/do-not-merge | 13 | python | async def get_changed_files(event, gh):
files = []
compare = (await gh.getitem(event.compare_url, {'base': event.base, 'head': event.head}))
for file in compare['files']:
files.append(file['filename'])
return files | async def get_changed_files(event, gh):
files = []
compare = (await gh.getitem(event.compare_url, {'base': event.base, 'head': event.head}))
for file in compare['files']:
files.append(file['filename'])
return files<|docstring|>Get changed files.<|endoftext|> |
9554eea4f809a94a4d38f24b7f9f99a0f6cdb7d61887a8545101aed563c23bbc | async def update_issue_labels(event, gh, add_labels, remove_labels):
'Update issue labels.'
remove = []
async for name in event.get_issue_labels(gh):
low = name.lower()
if (low not in remove_labels):
if (low in add_labels):
del add_labels[low]
else:
remove.append(name)
add = [label for label in add_labels.values()]
print('WILDCARD: Removing: ', str(remove))
print('WILDCARD: Adding: ', str(add))
(await event.add_issue_labels(gh, add))
(await event.remove_issue_labels(gh, remove)) | Update issue labels. | label_bot/wildcard_labels.py | update_issue_labels | gir-bot/do-not-merge | 13 | python | async def update_issue_labels(event, gh, add_labels, remove_labels):
remove = []
async for name in event.get_issue_labels(gh):
low = name.lower()
if (low not in remove_labels):
if (low in add_labels):
del add_labels[low]
else:
remove.append(name)
add = [label for label in add_labels.values()]
print('WILDCARD: Removing: ', str(remove))
print('WILDCARD: Adding: ', str(add))
(await event.add_issue_labels(gh, add))
(await event.remove_issue_labels(gh, remove)) | async def update_issue_labels(event, gh, add_labels, remove_labels):
remove = []
async for name in event.get_issue_labels(gh):
low = name.lower()
if (low not in remove_labels):
if (low in add_labels):
del add_labels[low]
else:
remove.append(name)
add = [label for label in add_labels.values()]
print('WILDCARD: Removing: ', str(remove))
print('WILDCARD: Adding: ', str(add))
(await event.add_issue_labels(gh, add))
(await event.remove_issue_labels(gh, remove))<|docstring|>Update issue labels.<|endoftext|> |
9690522cf42c196c5d18d3f0c335f7c09622036bdae4a4cf4008b2c0b298dc55 | async def pending(event, gh):
'Set task to pending.'
(await event.set_status(gh, util.EVT_PENDING, 'labels/auto-labels', 'Pending')) | Set task to pending. | label_bot/wildcard_labels.py | pending | gir-bot/do-not-merge | 13 | python | async def pending(event, gh):
(await event.set_status(gh, util.EVT_PENDING, 'labels/auto-labels', 'Pending')) | async def pending(event, gh):
(await event.set_status(gh, util.EVT_PENDING, 'labels/auto-labels', 'Pending'))<|docstring|>Set task to pending.<|endoftext|> |
3e8d34060129a96b8dd7d9fae5edbb95cf60f5f33c6d322a10ae7b5267877d28 | async def run(event, gh, config, **kwargs):
'Run task.'
print(f'WILDCARD: {event.full_name}')
try:
if config.get('error', ''):
raise Exception(config['error'])
(await wildcard_labels(event, gh, config))
success = True
except Exception:
traceback.print_exc(file=sys.stdout)
success = False
(await event.set_status(gh, (util.EVT_SUCCESS if success else util.EVT_FAILURE), 'labels/auto-labels', ('Task completed' if success else 'Failed to complete task'))) | Run task. | label_bot/wildcard_labels.py | run | gir-bot/do-not-merge | 13 | python | async def run(event, gh, config, **kwargs):
print(f'WILDCARD: {event.full_name}')
try:
if config.get('error', ''):
raise Exception(config['error'])
(await wildcard_labels(event, gh, config))
success = True
except Exception:
traceback.print_exc(file=sys.stdout)
success = False
(await event.set_status(gh, (util.EVT_SUCCESS if success else util.EVT_FAILURE), 'labels/auto-labels', ('Task completed' if success else 'Failed to complete task'))) | async def run(event, gh, config, **kwargs):
print(f'WILDCARD: {event.full_name}')
try:
if config.get('error', ''):
raise Exception(config['error'])
(await wildcard_labels(event, gh, config))
success = True
except Exception:
traceback.print_exc(file=sys.stdout)
success = False
(await event.set_status(gh, (util.EVT_SUCCESS if success else util.EVT_FAILURE), 'labels/auto-labels', ('Task completed' if success else 'Failed to complete task')))<|docstring|>Run task.<|endoftext|> |
8e354be01e6d7893ae015d4bc8d24163768aad78ce8d7a7144e52f54bd63297c | def _update_type_str(self):
'\n Get type string for Label\n\n Returns\n -------\n : ~str\n\n '
self.type_str = repr(type(self.value)) | Get type string for Label
Returns
-------
: ~str | tardis/plasma/properties/base.py | _update_type_str | wkerzendorf/tardis | 1 | python | def _update_type_str(self):
'\n Get type string for Label\n\n Returns\n -------\n : ~str\n\n '
self.type_str = repr(type(self.value)) | def _update_type_str(self):
'\n Get type string for Label\n\n Returns\n -------\n : ~str\n\n '
self.type_str = repr(type(self.value))<|docstring|>Get type string for Label
Returns
-------
: ~str<|endoftext|> |
62ac963262b237195c7af04717d7e618089b8bb8b4e7c4dcc1a7a2808679a894 | def _update_inputs(self):
'\n This function uses the CPython API to read the variable names from the\n `calculate`-function and makes the plasma routines easily programmable.\n '
calculate_call_signature = self.calculate.func_code.co_varnames[:self.calculate.func_code.co_argcount]
self.inputs = [item for item in calculate_call_signature if (item != 'self')] | This function uses the CPython API to read the variable names from the
`calculate`-function and makes the plasma routines easily programmable. | tardis/plasma/properties/base.py | _update_inputs | wkerzendorf/tardis | 1 | python | def _update_inputs(self):
'\n This function uses the CPython API to read the variable names from the\n `calculate`-function and makes the plasma routines easily programmable.\n '
calculate_call_signature = self.calculate.func_code.co_varnames[:self.calculate.func_code.co_argcount]
self.inputs = [item for item in calculate_call_signature if (item != 'self')] | def _update_inputs(self):
'\n This function uses the CPython API to read the variable names from the\n `calculate`-function and makes the plasma routines easily programmable.\n '
calculate_call_signature = self.calculate.func_code.co_varnames[:self.calculate.func_code.co_argcount]
self.inputs = [item for item in calculate_call_signature if (item != 'self')]<|docstring|>This function uses the CPython API to read the variable names from the
`calculate`-function and makes the plasma routines easily programmable.<|endoftext|> |
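A short illustration of the code-object introspection the docstring refers to; on Python 2 the attribute is func_code (as above), on Python 3 it is __code__:

def calculate(self, t_rad, w):
    return t_rad * w

code = calculate.__code__                         # calculate.func_code on Python 2
argnames = code.co_varnames[:code.co_argcount]    # ('self', 't_rad', 'w')
inputs = [a for a in argnames if a != 'self']     # ['t_rad', 'w']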
9e4c2182878ab61f07ab1e748cb80ae336a9bfd0155799475fbc6abbdb81a710 | def update(self):
'\n Updates the processing Plasma by calling the `calculate`-method with\n the required inputs\n\n :return:\n '
if (len(self.outputs) == 1):
setattr(self, self.outputs[0], self.calculate(*self._get_input_values()))
else:
new_values = self.calculate(*self._get_input_values())
for (i, output) in enumerate(self.outputs):
setattr(self, output, new_values[i]) | Updates the processing Plasma by calling the `calculate`-method with
the required inputs
:return: | tardis/plasma/properties/base.py | update | wkerzendorf/tardis | 1 | python | def update(self):
'\n Updates the processing Plasma by calling the `calculate`-method with\n the required inputs\n\n :return:\n '
if (len(self.outputs) == 1):
setattr(self, self.outputs[0], self.calculate(*self._get_input_values()))
else:
new_values = self.calculate(*self._get_input_values())
for (i, output) in enumerate(self.outputs):
setattr(self, output, new_values[i]) | def update(self):
'\n Updates the processing Plasma by calling the `calculate`-method with\n the required inputs\n\n :return:\n '
if (len(self.outputs) == 1):
setattr(self, self.outputs[0], self.calculate(*self._get_input_values()))
else:
new_values = self.calculate(*self._get_input_values())
for (i, output) in enumerate(self.outputs):
setattr(self, output, new_values[i])<|docstring|>Updates the processing Plasma by calling the `calculate`-method with
the required inputs
:return:<|endoftext|> |
2c8ed47490a3d5b4b274e6e4979988229f4d28307a77ad9991c07c17d560e602 | @abc.abstractmethod
def _create_commit_lock(self, cursor):
'\n Create the global lock held during commit.\n\n (MySQL and PostgreSQL do this differently.)\n '
raise NotImplementedError() | Create the global lock held during commit.
(MySQL and PostgreSQL do this differently.) | relstorage/adapters/schema.py | _create_commit_lock | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_commit_lock(self, cursor):
'\n Create the global lock held during commit.\n\n (MySQL and PostgreSQL do this differently.)\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_commit_lock(self, cursor):
'\n Create the global lock held during commit.\n\n (MySQL and PostgreSQL do this differently.)\n '
raise NotImplementedError()<|docstring|>Create the global lock held during commit.
(MySQL and PostgreSQL do this differently.)<|endoftext|> |
a9bf50ac586fc2c2b43ab479a07ecb996d274452c883b85fd2e31f8ccb184ff5 | @abc.abstractmethod
def _create_pack_lock(self, cursor):
'\n Create the global lock held during pack.\n\n (MySQL and PostgreSQL do this differently.)\n '
raise NotImplementedError() | Create the global lock held during pack.
(MySQL and PostgreSQL do this differently.) | relstorage/adapters/schema.py | _create_pack_lock | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_pack_lock(self, cursor):
'\n Create the global lock held during pack.\n\n (MySQL and PostgreSQL do this differently.)\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_pack_lock(self, cursor):
'\n Create the global lock held during pack.\n\n (MySQL and PostgreSQL do this differently.)\n '
raise NotImplementedError()<|docstring|>Create the global lock held during pack.
(MySQL and PostgreSQL do this differently.)<|endoftext|> |
e377771d9e0453d22cc6f86578bd860cc8ada129dbd6b477cf82c754fb373af5 | @abc.abstractmethod
def _create_transaction(self, cursor):
'\n The transaction table lists all the transactions in the database.\n\n This table is only used for history-preserving databases.\n '
raise NotImplementedError() | The transaction table lists all the transactions in the database.
This table is only used for history-preserving databases. | relstorage/adapters/schema.py | _create_transaction | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_transaction(self, cursor):
'\n The transaction table lists all the transactions in the database.\n\n This table is only used for history-preserving databases.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_transaction(self, cursor):
'\n The transaction table lists all the transactions in the database.\n\n This table is only used for history-preserving databases.\n '
raise NotImplementedError()<|docstring|>The transaction table lists all the transactions in the database.
This table is only used for history-preserving databases.<|endoftext|> |
bb137e976f508c47d4a55e90e205b1caa8bb1c8f49cfe69a66a77cf04175d249 | @abc.abstractmethod
def _create_new_oid(self, cursor):
'\n Create the incrementing sequence for new OIDs.\n\n This should be the same for history free and preserving\n schemas.\n '
raise NotImplementedError() | Create the incrementing sequence for new OIDs.
This should be the same for history free and preserving
schemas. | relstorage/adapters/schema.py | _create_new_oid | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_new_oid(self, cursor):
'\n Create the incrementing sequence for new OIDs.\n\n This should be the same for history free and preserving\n schemas.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_new_oid(self, cursor):
'\n Create the incrementing sequence for new OIDs.\n\n This should be the same for history free and preserving\n schemas.\n '
raise NotImplementedError()<|docstring|>Create the incrementing sequence for new OIDs.
This should be the same for history free and preserving
schemas.<|endoftext|> |
a78e9a8c5e27adc575179209f07f0311901e4d4d26ed83bd3464fd776b221194 | @abc.abstractmethod
def _create_object_state(self, cursor):
'\n Create the table holding all object states for all transactions.\n\n If the schema is history-free, only store the current state.\n History-preserving schemas may have a NULL `object_state` to represent\n uncreation.\n '
raise NotImplementedError() | Create the table holding all object states for all transactions.
If the schema is history-free, only store the current state.
History-preserving schemas may have a NULL `object_state` to represent
uncreation. | relstorage/adapters/schema.py | _create_object_state | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_object_state(self, cursor):
'\n Create the table holding all object states for all transactions.\n\n If the schema is history-free, only store the current state.\n History-preserving schemas may have a NULL `object_state` to represent\n uncreation.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_object_state(self, cursor):
'\n Create the table holding all object states for all transactions.\n\n If the schema is history-free, only store the current state.\n History-preserving schemas may have a NULL `object_state` to represent\n uncreation.\n '
raise NotImplementedError()<|docstring|>Create the table holding all object states for all transactions.
If the schema is history-free, only store the current state.
History-preserving schemas may have a NULL `object_state` to represent
uncreation.<|endoftext|> |
5423d5ed658d7a0b48280eb77ee3d0c6e47f67e361b46ba8e4ed95df55dbfb1d | @abc.abstractmethod
def _create_blob_chunk(self, cursor):
'\n Create the table holding all blob states for all transactions.\n\n If the schema is history-free, only store the current state.\n '
raise NotImplementedError() | Create the table holding all blob states for all transactions.
If the schema is history-free, only store the current state. | relstorage/adapters/schema.py | _create_blob_chunk | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_blob_chunk(self, cursor):
'\n Create the table holding all blob states for all transactions.\n\n If the schema is history-free, only store the current state.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_blob_chunk(self, cursor):
'\n Create the table holding all blob states for all transactions.\n\n If the schema is history-free, only store the current state.\n '
raise NotImplementedError()<|docstring|>Create the table holding all blob states for all transactions.
If the schema is history-free, only store the current state.<|endoftext|> |
1dc04931f9e855d2622f4035336a4ed9bf440467f5efccfbe9dcbf10a20037a2 | @abc.abstractmethod
def _create_current_object(self, cursor):
'\n Table that stores pointers to the current object state.\n\n This table is only used for history-preserving databases.\n '
raise NotImplementedError() | Table that stores pointers to the current object state.
This table is only used for history-preserving databases. | relstorage/adapters/schema.py | _create_current_object | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_current_object(self, cursor):
'\n Table that stores pointers to the current object state.\n\n This table is only used for history-preserving databases.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_current_object(self, cursor):
'\n Table that stores pointers to the current object state.\n\n This table is only used for history-preserving databases.\n '
raise NotImplementedError()<|docstring|>Table that stores pointers to the current object state.
This table is only used for history-preserving databases.<|endoftext|> |
079602306d4fcc43c76f397d97eba29e8a7593981876b48ef18cda0c2afaf618 | @abc.abstractmethod
def _create_object_ref(self, cursor):
'\n A list of referenced OIDs from each object_state. This\n table is populated as needed during packing. To prevent unnecessary\n table locking, it does not use foreign keys, which is safe because\n rows in object_state are never modified once committed, and rows are\n removed from object_state only by packing.\n '
raise NotImplementedError() | A list of referenced OIDs from each object_state. This
table is populated as needed during packing. To prevent unnecessary
table locking, it does not use foreign keys, which is safe because
rows in object_state are never modified once committed, and rows are
removed from object_state only by packing. | relstorage/adapters/schema.py | _create_object_ref | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_object_ref(self, cursor):
'\n A list of referenced OIDs from each object_state. This\n table is populated as needed during packing. To prevent unnecessary\n table locking, it does not use foreign keys, which is safe because\n rows in object_state are never modified once committed, and rows are\n removed from object_state only by packing.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_object_ref(self, cursor):
'\n A list of referenced OIDs from each object_state. This\n table is populated as needed during packing. To prevent unnecessary\n table locking, it does not use foreign keys, which is safe because\n rows in object_state are never modified once committed, and rows are\n removed from object_state only by packing.\n '
raise NotImplementedError()<|docstring|>A list of referenced OIDs from each object_state. This
table is populated as needed during packing. To prevent unnecessary
table locking, it does not use foreign keys, which is safe because
rows in object_state are never modified once committed, and rows are
removed from object_state only by packing.<|endoftext|> |
c51cddf16d75cac37e8801dbc1e25537dd288a1a2fce7b758c4f5a0752be0774 | @abc.abstractmethod
def _create_object_refs_added(self, cursor):
'\n The object_refs_added table tracks whether object_refs has been\n populated for all states in a given transaction. An entry is added\n only when the work is finished. To prevent unnecessary table locking,\n it does not use foreign keys, which is safe because object states are\n never added to a transaction once committed, and rows are removed\n from the transaction table only by packing.\n '
raise NotImplementedError() | The object_refs_added table tracks whether object_refs has been
populated for all states in a given transaction. An entry is added
only when the work is finished. To prevent unnecessary table locking,
it does not use foreign keys, which is safe because object states are
never added to a transaction once committed, and rows are removed
from the transaction table only by packing. | relstorage/adapters/schema.py | _create_object_refs_added | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_object_refs_added(self, cursor):
'\n The object_refs_added table tracks whether object_refs has been\n populated for all states in a given transaction. An entry is added\n only when the work is finished. To prevent unnecessary table locking,\n it does not use foreign keys, which is safe because object states are\n never added to a transaction once committed, and rows are removed\n from the transaction table only by packing.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_object_refs_added(self, cursor):
'\n The object_refs_added table tracks whether object_refs has been\n populated for all states in a given transaction. An entry is added\n only when the work is finished. To prevent unnecessary table locking,\n it does not use foreign keys, which is safe because object states are\n never added to a transaction once committed, and rows are removed\n from the transaction table only by packing.\n '
raise NotImplementedError()<|docstring|>The object_refs_added table tracks whether object_refs has been
populated for all states in a given transaction. An entry is added
only when the work is finished. To prevent unnecessary table locking,
it does not use foreign keys, which is safe because object states are
never added to a transaction once committed, and rows are removed
from the transaction table only by packing.<|endoftext|> |
b124021d88da845db82b64db756394d22896619312834ff68b1a967f4b573268 | @abc.abstractmethod
def _create_pack_object(self, cursor):
"\n pack_object contains temporary state during garbage collection: The\n list of all objects, a flag signifying whether the object should be\n kept, and a flag signifying whether the object's references have been\n visited. The keep_tid field specifies the current revision of the\n object.\n "
raise NotImplementedError() | pack_object contains temporary state during garbage collection: The
list of all objects, a flag signifying whether the object should be
kept, and a flag signifying whether the object's references have been
visited. The keep_tid field specifies the current revision of the
object. | relstorage/adapters/schema.py | _create_pack_object | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_pack_object(self, cursor):
"\n pack_object contains temporary state during garbage collection: The\n list of all objects, a flag signifying whether the object should be\n kept, and a flag signifying whether the object's references have been\n visited. The keep_tid field specifies the current revision of the\n object.\n "
raise NotImplementedError() | @abc.abstractmethod
def _create_pack_object(self, cursor):
"\n pack_object contains temporary state during garbage collection: The\n list of all objects, a flag signifying whether the object should be\n kept, and a flag signifying whether the object's references have been\n visited. The keep_tid field specifies the current revision of the\n object.\n "
raise NotImplementedError()<|docstring|>pack_object contains temporary state during garbage collection: The
list of all objects, a flag signifying whether the object should be
kept, and a flag signifying whether the object's references have been
visited. The keep_tid field specifies the current revision of the
object.<|endoftext|> |
194e4cba2d7e36656bdd334187c79697a928e5ca14da3412797c7054954cf449 | @abc.abstractmethod
def _create_pack_state(self, cursor):
'\n Temporary state during packing: the list of object states\n # to pack.\n\n This is only used in history-preserving databases.\n '
raise NotImplementedError() | Temporary state during packing: the list of object states
# to pack.
This is only used in history-preserving databases. | relstorage/adapters/schema.py | _create_pack_state | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_pack_state(self, cursor):
'\n Temporary state during packing: the list of object states\n # to pack.\n\n This is only used in history-preserving databases.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_pack_state(self, cursor):
'\n Temporary state during packing: the list of object states\n # to pack.\n\n This is only used in history-preserving databases.\n '
raise NotImplementedError()<|docstring|>Temporary state during packing: the list of object states
# to pack.
This is only used in history-preserving databases.<|endoftext|> |
fa837b778215ec99e26474774547d3f5da876d50940e29ddf9dc9fc60588a4ba | @abc.abstractmethod
def _create_pack_state_tid(self, cursor):
'\n Temporary state during packing: the list of\n transactions that have at least one object state to pack.\n\n This is only used in history-preserving databases.\n '
raise NotImplementedError() | Temporary state during packing: the list of
transactions that have at least one object state to pack.
This is only used in history-preserving databases. | relstorage/adapters/schema.py | _create_pack_state_tid | dpedu/relstorage | 1 | python | @abc.abstractmethod
def _create_pack_state_tid(self, cursor):
'\n Temporary state during packing: the list of\n transactions that have at least one object state to pack.\n\n This is only used in history-preserving databases.\n '
raise NotImplementedError() | @abc.abstractmethod
def _create_pack_state_tid(self, cursor):
'\n Temporary state during packing: the list of\n transactions that have at least one object state to pack.\n\n This is only used in history-preserving databases.\n '
raise NotImplementedError()<|docstring|>Temporary state during packing: the list of
transactions that have at least one object state to pack.
This is only used in history-preserving databases.<|endoftext|> |
d01d75f1bc51ccf507a6c77e5512157ac3e1a516eafdf02883b3745488a5d999 | def _create_temp_store(self, _cursor):
'States that will soon be stored.'
return | States that will soon be stored. | relstorage/adapters/schema.py | _create_temp_store | dpedu/relstorage | 1 | python | def _create_temp_store(self, _cursor):
return | def _create_temp_store(self, _cursor):
return<|docstring|>States that will soon be stored.<|endoftext|> |
ef5d23457d4202501efa8205bf3855d203c397093e8735dfa0c2fe6ed369aef1 | def _create_temp_blob_chunk(self, _cursor):
'\n Temporary state during packing: a list of objects\n whose references need to be examined.\n '
return | Temporary state during packing: a list of objects
whose references need to be examined. | relstorage/adapters/schema.py | _create_temp_blob_chunk | dpedu/relstorage | 1 | python | def _create_temp_blob_chunk(self, _cursor):
'\n Temporary state during packing: a list of objects\n whose references need to be examined.\n '
return | def _create_temp_blob_chunk(self, _cursor):
'\n Temporary state during packing: a list of objects\n whose references need to be examined.\n '
return<|docstring|>Temporary state during packing: a list of objects
whose references need to be examined.<|endoftext|> |
f2ade3e860c51478e3fa2ae169a48fb1b0e72f44538a9b79182bcf45f98e52f1 | def _create_temp_undo(self, _cursor):
'\n Temporary state during undo: a list of objects\n to be undone and the tid of the undone state.\n '
return | Temporary state during undo: a list of objects
to be undone and the tid of the undone state. | relstorage/adapters/schema.py | _create_temp_undo | dpedu/relstorage | 1 | python | def _create_temp_undo(self, _cursor):
'\n Temporary state during undo: a list of objects\n to be undone and the tid of the undone state.\n '
return | def _create_temp_undo(self, _cursor):
'\n Temporary state during undo: a list of objects\n to be undone and the tid of the undone state.\n '
return<|docstring|>Temporary state during undo: a list of objects
to be undone and the tid of the undone state.<|endoftext|> |
2d0eeeb2c880ce3de1abb16f1ec4af6d54fd4028acf1ab3ec1b3121eb465871b | def _init_after_create(self, cursor):
"\n Create a special '0' transaction to represent object creation. The\n '0' transaction is often referenced by object_state.prev_tid, but\n never by object_state.tid.\n\n Only in history-preserving databases.\n "
if self.keep_history:
stmt = "\n INSERT INTO transaction (tid, username, description)\n VALUES (0, 'system', 'special transaction for object creation');\n "
self.runner.run_script(cursor, stmt) | Create a special '0' transaction to represent object creation. The
'0' transaction is often referenced by object_state.prev_tid, but
never by object_state.tid.
Only in history-preserving databases. | relstorage/adapters/schema.py | _init_after_create | dpedu/relstorage | 1 | python | def _init_after_create(self, cursor):
"\n Create a special '0' transaction to represent object creation. The\n '0' transaction is often referenced by object_state.prev_tid, but\n never by object_state.tid.\n\n Only in history-preserving databases.\n "
if self.keep_history:
stmt = "\n INSERT INTO transaction (tid, username, description)\n VALUES (0, 'system', 'special transaction for object creation');\n "
self.runner.run_script(cursor, stmt) | def _init_after_create(self, cursor):
"\n Create a special '0' transaction to represent object creation. The\n '0' transaction is often referenced by object_state.prev_tid, but\n never by object_state.tid.\n\n Only in history-preserving databases.\n "
if self.keep_history:
stmt = "\n INSERT INTO transaction (tid, username, description)\n VALUES (0, 'system', 'special transaction for object creation');\n "
self.runner.run_script(cursor, stmt)<|docstring|>Create a special '0' transaction to represent object creation. The
'0' transaction is often referenced by object_state.prev_tid, but
never by object_state.tid.
Only in history-preserving databases.<|endoftext|> |
7daac01eda963767e5556d3f2ca98e1467dc8db533c5ef31784a2ebb0838c6b5 | def create(self, cursor):
'Create the database tables.'
for table in self.all_tables:
meth = getattr(self, ('_create_' + table))
meth(cursor)
self._init_after_create(cursor)
tables = self.list_tables(cursor)
self.check_compatibility(cursor, tables) | Create the database tables. | relstorage/adapters/schema.py | create | dpedu/relstorage | 1 | python | def create(self, cursor):
for table in self.all_tables:
meth = getattr(self, ('_create_' + table))
meth(cursor)
self._init_after_create(cursor)
tables = self.list_tables(cursor)
self.check_compatibility(cursor, tables) | def create(self, cursor):
for table in self.all_tables:
meth = getattr(self, ('_create_' + table))
meth(cursor)
self._init_after_create(cursor)
tables = self.list_tables(cursor)
self.check_compatibility(cursor, tables)<|docstring|>Create the database tables.<|endoftext|> |
765cb8fd1d4b22d7a28644f0b679d1f42a9d9deb67919610ae8f70a5db0c592a | def prepare(self):
'Create the database schema if it does not already exist.'
def callback(_conn, cursor):
tables = self.list_tables(cursor)
if ('object_state' not in tables):
self.create(cursor)
else:
self.check_compatibility(cursor, tables)
self.update_schema(cursor, tables)
self.connmanager.open_and_call(callback) | Create the database schema if it does not already exist. | relstorage/adapters/schema.py | prepare | dpedu/relstorage | 1 | python | def prepare(self):
def callback(_conn, cursor):
tables = self.list_tables(cursor)
if ('object_state' not in tables):
self.create(cursor)
else:
self.check_compatibility(cursor, tables)
self.update_schema(cursor, tables)
self.connmanager.open_and_call(callback) | def prepare(self):
def callback(_conn, cursor):
tables = self.list_tables(cursor)
if ('object_state' not in tables):
self.create(cursor)
else:
self.check_compatibility(cursor, tables)
self.update_schema(cursor, tables)
self.connmanager.open_and_call(callback)<|docstring|>Create the database schema if it does not already exist.<|endoftext|> |
bb6fb995e7f00533bb00625bb9d57d98d7c9f4d68ff8585a8bc1b94ab2890bdb | def zap_all(self, reset_oid=True, slow=False):
'\n Clear all data out of the database.\n\n :keyword bool slow: If True (*not* the default) then database\n specific optimizations will be skipped and rows will simply be\n DELETEd. This is helpful when other connections might be open and\n holding some kind of locks.\n '
stmt = (self._zap_all_tbl_stmt if (not slow) else AbstractSchemaInstaller._zap_all_tbl_stmt)
def callback(_conn, cursor):
existent = set(self.list_tables(cursor))
todo = reversed(self.all_tables)
log.debug('Checking tables: %r', todo)
for table in todo:
log.debug('Considering table %s', table)
if table.startswith('temp_'):
continue
if (table in existent):
log.debug('Deleting from table %s...', table)
cursor.execute((stmt % table))
log.debug('Done deleting from tables.')
log.debug('Running init script.')
self._init_after_create(cursor)
log.debug('Done running init script.')
if reset_oid:
log.debug('Running OID reset script.')
self._reset_oid(cursor)
log.debug('Done running OID reset script.')
self.connmanager.open_and_call(callback) | Clear all data out of the database.
:keyword bool slow: If True (*not* the default) then database
specific optimizations will be skipped and rows will simply be
DELETEd. This is helpful when other connections might be open and
holding some kind of locks. | relstorage/adapters/schema.py | zap_all | dpedu/relstorage | 1 | python | def zap_all(self, reset_oid=True, slow=False):
'\n Clear all data out of the database.\n\n :keyword bool slow: If True (*not* the default) then database\n specific optimizations will be skipped and rows will simply be\n DELETEd. This is helpful when other connections might be open and\n holding some kind of locks.\n '
stmt = (self._zap_all_tbl_stmt if (not slow) else AbstractSchemaInstaller._zap_all_tbl_stmt)
def callback(_conn, cursor):
existent = set(self.list_tables(cursor))
todo = reversed(self.all_tables)
log.debug('Checking tables: %r', todo)
for table in todo:
log.debug('Considering table %s', table)
if table.startswith('temp_'):
continue
if (table in existent):
log.debug('Deleting from table %s...', table)
cursor.execute((stmt % table))
log.debug('Done deleting from tables.')
log.debug('Running init script.')
self._init_after_create(cursor)
log.debug('Done running init script.')
if reset_oid:
log.debug('Running OID reset script.')
self._reset_oid(cursor)
log.debug('Done running OID reset script.')
self.connmanager.open_and_call(callback) | def zap_all(self, reset_oid=True, slow=False):
'\n Clear all data out of the database.\n\n :keyword bool slow: If True (*not* the default) then database\n specific optimizations will be skipped and rows will simply be\n DELETEd. This is helpful when other connections might be open and\n holding some kind of locks.\n '
stmt = (self._zap_all_tbl_stmt if (not slow) else AbstractSchemaInstaller._zap_all_tbl_stmt)
def callback(_conn, cursor):
existent = set(self.list_tables(cursor))
todo = reversed(self.all_tables)
log.debug('Checking tables: %r', todo)
for table in todo:
log.debug('Considering table %s', table)
if table.startswith('temp_'):
continue
if (table in existent):
log.debug('Deleting from table %s...', table)
cursor.execute((stmt % table))
log.debug('Done deleting from tables.')
log.debug('Running init script.')
self._init_after_create(cursor)
log.debug('Done running init script.')
if reset_oid:
log.debug('Running OID reset script.')
self._reset_oid(cursor)
log.debug('Done running OID reset script.')
self.connmanager.open_and_call(callback)<|docstring|>Clear all data out of the database.
:keyword bool slow: If True (*not* the default) then database
specific optimizations will be skipped and rows will simply be
DELETEd. This is helpful when other connections might be open and
holding some kind of locks.<|endoftext|> |
4d4b9675605f51823727566f5514818ea63163dbd9ec716f3bc8c301841957a2 | def drop_all(self):
'Drop all tables and sequences.'
def callback(_conn, cursor):
existent = set(self.list_tables(cursor))
todo = list(self.all_tables)
todo.reverse()
for table in todo:
if (table in existent):
cursor.execute(('DROP TABLE %s' % table))
for sequence in self.list_sequences(cursor):
cursor.execute(('DROP SEQUENCE %s' % sequence))
self.connmanager.open_and_call(callback) | Drop all tables and sequences. | relstorage/adapters/schema.py | drop_all | dpedu/relstorage | 1 | python | def drop_all(self):
def callback(_conn, cursor):
existent = set(self.list_tables(cursor))
todo = list(self.all_tables)
todo.reverse()
for table in todo:
if (table in existent):
cursor.execute(('DROP TABLE %s' % table))
for sequence in self.list_sequences(cursor):
cursor.execute(('DROP SEQUENCE %s' % sequence))
self.connmanager.open_and_call(callback) | def drop_all(self):
def callback(_conn, cursor):
existent = set(self.list_tables(cursor))
todo = list(self.all_tables)
todo.reverse()
for table in todo:
if (table in existent):
cursor.execute(('DROP TABLE %s' % table))
for sequence in self.list_sequences(cursor):
cursor.execute(('DROP SEQUENCE %s' % sequence))
self.connmanager.open_and_call(callback)<|docstring|>Drop all tables and sequences.<|endoftext|> |
2c0c7dc0ca55c5e023b558fc4c702385e149bab4474379c9e19a57ad7047d04f | def buildProtocol(self, addr):
'\n Creates an instance of the ChatClient protocol. Invoked when\n the user attempts to connect to the server.\n\n Returns:\n An instance of the chat client protocol.\n '
return self.protocol(self.username, self.delegate) | Creates an instance of the ChatClient protocol. Invoked when
the user attempts to connect to the server.
Returns:
An instance of the chat client protocol. | client/core/client/chatclientfactory.py | buildProtocol | pkrll/ChatMaster3000 | 0 | python | def buildProtocol(self, addr):
'\n Creates an instance of the ChatClient protocol. Invoked when\n the user attempts to connect to the server.\n\n Returns:\n An instance of the chat client protocol.\n '
return self.protocol(self.username, self.delegate) | def buildProtocol(self, addr):
'\n Creates an instance of the ChatClient protocol. Invoked when\n the user attempts to connect to the server.\n\n Returns:\n An instance of the chat client protocol.\n '
return self.protocol(self.username, self.delegate)<|docstring|>Creates an instance of the ChatClient protocol. Invoked when
the user attempts to connect to the server.
Returns:
An instance of the chat client protocol.<|endoftext|> |
92b08e464ac529e71cb09f95f0d2659964fe47294e583ff65743897b18276a87 | def clientConnectionFailed(self, connector, reason):
'\n Called when the connection has failed.\n '
self.delegate.didFailConnection(reason.getErrorMessage()) | Called when the connection has failed. | client/core/client/chatclientfactory.py | clientConnectionFailed | pkrll/ChatMaster3000 | 0 | python | def clientConnectionFailed(self, connector, reason):
'\n \n '
self.delegate.didFailConnection(reason.getErrorMessage()) | def clientConnectionFailed(self, connector, reason):
'\n \n '
self.delegate.didFailConnection(reason.getErrorMessage())<|docstring|>Called when the connection has failed.<|endoftext|> |
6448fbee5f2ecea1c9d5ddf4ff242f50214ba566852458d35553d21de6102f18 | def add_sdd_mosek(task, A, b, Ac=None):
' \n add a variable C and constraints\n A*x - Ac vec(C) = b\n C SDD\n which with an appropriate choice of Ac makes A*x - b >= 0\n\n if Ac is not given it is set to identity\n '
if (Ac is None):
n_Ac = A.shape[0]
Ac = sp.coo_matrix((np.ones(n_Ac), (range(n_Ac), range(n_Ac))), (n_Ac, n_Ac))
if (len(b) != A.shape[0]):
raise Exception('invalid size of A')
if (len(b) != Ac.shape[0]):
raise Exception('invalid size of Ac')
veclen = Ac.shape[1]
C_size = veclen_to_matsize(veclen)
numcon = task.getnumcon()
numvar = task.getnumvar()
numvar_new = ((C_size * (C_size - 1)) // 2)
numcon_new = A.shape[0]
D_row_idx = []
D_col_idx = []
D_vals = []
for row in range(veclen):
(i, j) = k_to_ij(row, veclen)
sdd_idx = sdd_index(i, j, C_size)
D_row_idx += ([row] * len(sdd_idx))
D_col_idx += [((3 * k) + l) for (k, l) in sdd_idx]
D_vals += [(2.0 if (l == 0) else 1.0) for (k, l) in sdd_idx]
D = sp.coo_matrix((D_vals, (D_row_idx, D_col_idx)), (veclen, (3 * numvar_new)))
task.appendvars((3 * numvar_new))
task.putvarboundslice(numvar, (numvar + (3 * numvar_new)), (([mosek.boundkey.fr] * 3) * numvar_new), (([0.0] * 3) * numvar_new), (([0.0] * 3) * numvar_new))
task.appendcons(numcon_new)
new_A = sp.bmat([[A, spzeros(numcon_new, (numvar - A.shape[1])), (- Ac.dot(D))]]).tocsr()
task.putarowslice(numcon, (numcon + numcon_new), new_A.indptr[:(- 1)], new_A.indptr[1:], new_A.indices, new_A.data)
task.putconboundslice(numcon, (numcon + numcon_new), ([mosek.boundkey.fx] * numcon_new), b, b)
task.appendconesseq(([mosek.conetype.rquad] * numvar_new), ([0.0] * numvar_new), ([3] * numvar_new), numvar) | add a variable C and constraints
A*x - Ac vec(C) = b
C SDD
which with an appropriate choice of Ac makes A*x - b >= 0
if Ac is not given it is set to identity | posipoly/ppp.py | add_sdd_mosek | pettni/posipoly | 1 | python | def add_sdd_mosek(task, A, b, Ac=None):
' \n add a variable C and constraints\n A*x - Ac vec(C) = b\n C SDD\n which with an appropriate choice of Ac makes A*x - b >= 0\n\n if Ac is not given it is set to identity\n '
if (Ac is None):
n_Ac = A.shape[0]
Ac = sp.coo_matrix((np.ones(n_Ac), (range(n_Ac), range(n_Ac))), (n_Ac, n_Ac))
if (len(b) != A.shape[0]):
raise Exception('invalid size of A')
if (len(b) != Ac.shape[0]):
raise Exception('invalid size of Ac')
veclen = Ac.shape[1]
C_size = veclen_to_matsize(veclen)
numcon = task.getnumcon()
numvar = task.getnumvar()
numvar_new = ((C_size * (C_size - 1)) // 2)
numcon_new = A.shape[0]
D_row_idx = []
D_col_idx = []
D_vals = []
for row in range(veclen):
(i, j) = k_to_ij(row, veclen)
sdd_idx = sdd_index(i, j, C_size)
D_row_idx += ([row] * len(sdd_idx))
D_col_idx += [((3 * k) + l) for (k, l) in sdd_idx]
D_vals += [(2.0 if (l == 0) else 1.0) for (k, l) in sdd_idx]
D = sp.coo_matrix((D_vals, (D_row_idx, D_col_idx)), (veclen, (3 * numvar_new)))
task.appendvars((3 * numvar_new))
task.putvarboundslice(numvar, (numvar + (3 * numvar_new)), (([mosek.boundkey.fr] * 3) * numvar_new), (([0.0] * 3) * numvar_new), (([0.0] * 3) * numvar_new))
task.appendcons(numcon_new)
new_A = sp.bmat([[A, spzeros(numcon_new, (numvar - A.shape[1])), (- Ac.dot(D))]]).tocsr()
task.putarowslice(numcon, (numcon + numcon_new), new_A.indptr[:(- 1)], new_A.indptr[1:], new_A.indices, new_A.data)
task.putconboundslice(numcon, (numcon + numcon_new), ([mosek.boundkey.fx] * numcon_new), b, b)
task.appendconesseq(([mosek.conetype.rquad] * numvar_new), ([0.0] * numvar_new), ([3] * numvar_new), numvar) | def add_sdd_mosek(task, A, b, Ac=None):
' \n add a variable C and constraints\n A*x - Ac vec(C) = b\n C SDD\n which with an appropriate choice of Ac makes A*x - b >= 0\n\n if Ac is not given it is set to identity\n '
if (Ac is None):
n_Ac = A.shape[0]
Ac = sp.coo_matrix((np.ones(n_Ac), (range(n_Ac), range(n_Ac))), (n_Ac, n_Ac))
if (len(b) != A.shape[0]):
raise Exception('invalid size of A')
if (len(b) != Ac.shape[0]):
raise Exception('invalid size of Ac')
veclen = Ac.shape[1]
C_size = veclen_to_matsize(veclen)
numcon = task.getnumcon()
numvar = task.getnumvar()
numvar_new = ((C_size * (C_size - 1)) // 2)
numcon_new = A.shape[0]
D_row_idx = []
D_col_idx = []
D_vals = []
for row in range(veclen):
(i, j) = k_to_ij(row, veclen)
sdd_idx = sdd_index(i, j, C_size)
D_row_idx += ([row] * len(sdd_idx))
D_col_idx += [((3 * k) + l) for (k, l) in sdd_idx]
D_vals += [(2.0 if (l == 0) else 1.0) for (k, l) in sdd_idx]
D = sp.coo_matrix((D_vals, (D_row_idx, D_col_idx)), (veclen, (3 * numvar_new)))
task.appendvars((3 * numvar_new))
task.putvarboundslice(numvar, (numvar + (3 * numvar_new)), (([mosek.boundkey.fr] * 3) * numvar_new), (([0.0] * 3) * numvar_new), (([0.0] * 3) * numvar_new))
task.appendcons(numcon_new)
new_A = sp.bmat([[A, spzeros(numcon_new, (numvar - A.shape[1])), (- Ac.dot(D))]]).tocsr()
task.putarowslice(numcon, (numcon + numcon_new), new_A.indptr[:(- 1)], new_A.indptr[1:], new_A.indices, new_A.data)
task.putconboundslice(numcon, (numcon + numcon_new), ([mosek.boundkey.fx] * numcon_new), b, b)
task.appendconesseq(([mosek.conetype.rquad] * numvar_new), ([0.0] * numvar_new), ([3] * numvar_new), numvar)<|docstring|>add a variable C and constraints
A*x - Ac vec(C) = b
C SDD
which with an appropriate choice of Ac makes A*x - b >= 0
if Ac is not given it is set to identity<|endoftext|> |
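Illustrative aside (editor's addition, not from the posipoly source): the SDD formulation above works because a 2x2 symmetric block [[a, c], [c, b]] is PSD exactly when a >= 0, b >= 0 and a*b >= c**2, which is what lets each block be modelled in Mosek's rotated quadratic cone (2*x1*x2 >= x3**2); the 2.0 weights on diagonal entries in D appear to absorb that factor of two. A minimal numpy check of the 2x2 equivalence, assuming nothing beyond numpy:
import numpy as np
rng = np.random.default_rng(0)
agree = 0
for _ in range(10000):
    a, b, c = rng.normal(size=3)
    M = np.array([[a, c], [c, b]])
    psd_by_eig = np.linalg.eigvalsh(M).min() >= 0.0          # eigenvalue test
    psd_by_ineq = (a >= 0.0) and (b >= 0.0) and (a * b >= c * c)  # inequality test
    agree += int(psd_by_eig == psd_by_ineq)
print(agree, 'of 10000 random 2x2 matrices classified identically')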
cb2fddde8cf7bbb6fd3c7f25edca2d37378e932e6d2309325180cafbd651197e | def add_psd_mosek(task, A, b, Ac=None):
' \n add a variable C and constraints\n A*x - Ac*vec(C) = b\n C PSD\n which with an appropriate choice of Ac makes A*x - b >= 0\n\n if Ac is not given it is set to identity\n '
if (Ac is None):
n_Ac = A.shape[0]
Ac = sp.coo_matrix((np.ones(n_Ac), (range(n_Ac), range(n_Ac))), (n_Ac, n_Ac))
if ((len(b) != A.shape[0]) or (task.getnumvar() != A.shape[1])):
raise Exception('invalid size of A')
if (len(b) != Ac.shape[0]):
raise Exception('invalid size of Ac')
veclen = Ac.shape[1]
C_size = veclen_to_matsize(veclen)
numbarvar = task.getnumbarvar()
numcon = task.getnumcon()
task.appendbarvars([C_size])
numcon_new = len(b)
task.appendcons(numcon_new)
Ac_csr = Ac.tocsr()
for it in range(numcon_new):
it_row = Ac_csr.getrow(it)
if (it_row.nnz > 0):
(i_list, j_list) = zip(*[k_to_ij(k, veclen) for k in it_row.indices])
val = [(it_row.data[l] if (i_list[l] == j_list[l]) else (it_row.data[l] / 2)) for l in range(len(i_list))]
mat_it = task.appendsparsesymmat(C_size, j_list, i_list, (- np.array(val)))
task.putbaraij((numcon + it), numbarvar, [mat_it], [1.0])
A_csr = A.tocsr()
task.putarowslice(numcon, (numcon + numcon_new), A_csr.indptr[:(- 1)], A_csr.indptr[1:], A_csr.indices, A_csr.data)
task.putconboundslice(numcon, (numcon + numcon_new), ([mosek.boundkey.fx] * numcon_new), b, b) | add a variable C and constraints
A*x - Ac*vec(C) = b
C PSD
which with an appropriate choice of Ac makes A*x - b >= 0
if Ac is not given it is set to identity | posipoly/ppp.py | add_psd_mosek | pettni/posipoly | 1 | python | def add_psd_mosek(task, A, b, Ac=None):
' \n add a variable C and constraints\n A*x - Ac*vec(C) = b\n C PSD\n which with an appropriate choice of Ac makes A*x - b >= 0\n\n if Ac is not given it is set to identity\n '
if (Ac is None):
n_Ac = A.shape[0]
Ac = sp.coo_matrix((np.ones(n_Ac), (range(n_Ac), range(n_Ac))), (n_Ac, n_Ac))
if ((len(b) != A.shape[0]) or (task.getnumvar() != A.shape[1])):
raise Exception('invalid size of A')
if (len(b) != Ac.shape[0]):
raise Exception('invalid size of Ac')
veclen = Ac.shape[1]
C_size = veclen_to_matsize(veclen)
numbarvar = task.getnumbarvar()
numcon = task.getnumcon()
task.appendbarvars([C_size])
numcon_new = len(b)
task.appendcons(numcon_new)
Ac_csr = Ac.tocsr()
for it in range(numcon_new):
it_row = Ac_csr.getrow(it)
if (it_row.nnz > 0):
(i_list, j_list) = zip(*[k_to_ij(k, veclen) for k in it_row.indices])
val = [(it_row.data[l] if (i_list[l] == j_list[l]) else (it_row.data[l] / 2)) for l in range(len(i_list))]
mat_it = task.appendsparsesymmat(C_size, j_list, i_list, (- np.array(val)))
task.putbaraij((numcon + it), numbarvar, [mat_it], [1.0])
A_csr = A.tocsr()
task.putarowslice(numcon, (numcon + numcon_new), A_csr.indptr[:(- 1)], A_csr.indptr[1:], A_csr.indices, A_csr.data)
task.putconboundslice(numcon, (numcon + numcon_new), ([mosek.boundkey.fx] * numcon_new), b, b) | def add_psd_mosek(task, A, b, Ac=None):
' \n add a variable C and constraints\n A*x - Ac*vec(C) = b\n C PSD\n which with an appropriate choice of Ac makes A*x - b >= 0\n\n if Ac is not given it is set to identity\n '
if (Ac is None):
n_Ac = A.shape[0]
Ac = sp.coo_matrix((np.ones(n_Ac), (range(n_Ac), range(n_Ac))), (n_Ac, n_Ac))
if ((len(b) != A.shape[0]) or (task.getnumvar() != A.shape[1])):
raise Exception('invalid size of A')
if (len(b) != Ac.shape[0]):
raise Exception('invalid size of Ac')
veclen = Ac.shape[1]
C_size = veclen_to_matsize(veclen)
numbarvar = task.getnumbarvar()
numcon = task.getnumcon()
task.appendbarvars([C_size])
numcon_new = len(b)
task.appendcons(numcon_new)
Ac_csr = Ac.tocsr()
for it in range(numcon_new):
it_row = Ac_csr.getrow(it)
if (it_row.nnz > 0):
(i_list, j_list) = zip(*[k_to_ij(k, veclen) for k in it_row.indices])
val = [(it_row.data[l] if (i_list[l] == j_list[l]) else (it_row.data[l] / 2)) for l in range(len(i_list))]
mat_it = task.appendsparsesymmat(C_size, j_list, i_list, (- np.array(val)))
task.putbaraij((numcon + it), numbarvar, [mat_it], [1.0])
A_csr = A.tocsr()
task.putarowslice(numcon, (numcon + numcon_new), A_csr.indptr[:(- 1)], A_csr.indptr[1:], A_csr.indices, A_csr.data)
task.putconboundslice(numcon, (numcon + numcon_new), ([mosek.boundkey.fx] * numcon_new), b, b)<|docstring|>add a variable C and constraints
A*x - Ac*vec(C) = b
C PSD
which with an appropriate choice of Ac makes A*x - b >= 0
if Ac is not given it is set to identity<|endoftext|> |
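Sketch of the half-vectorization both routines rely on (editor's illustration; posipoly's own index order comes from k_to_ij / ij_to_k, which are not shown in this excerpt, so numpy's triangular indexing is used here purely to show the round trip between a symmetric matrix C and its length m*(m+1)/2 vector vec(C)):
import numpy as np
m = 3
C = np.array([[4.0, 1.0, 0.5],
              [1.0, 3.0, 0.2],
              [0.5, 0.2, 2.0]])
rows, cols = np.tril_indices(m)   # one possible ordering; the package's may differ
vec_C = C[rows, cols]             # 6 entries for a 3x3 symmetric matrix
C_back = np.zeros((m, m))
C_back[rows, cols] = vec_C
C_back[cols, rows] = vec_C        # restore symmetry
print(np.allclose(C, C_back))     # True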
0fc85e4a1731655fc8ba93bd9f0a20aaa75dc2ba63e6ecf793789a868db6ffd2 | def is_dd(A):
" Returns 'True' if A is dd (diagonally dominant), 'False' otherwise "
epsilon = 1e-10
A_arr = np.array(A)
if (A_arr.shape[0] != A_arr.shape[1]):
return False
n = A_arr.shape[0]
for i in range(n):
if (not ((A_arr[(i, i)] + epsilon) >= sum(np.abs(A_arr[(i, [j for j in range(n) if (i != j)])])))):
return False
return True | Returns 'True' if A is dd (diagonally dominant), 'False' otherwise | posipoly/ppp.py | is_dd | pettni/posipoly | 1 | python | def is_dd(A):
" "
epsilon = 1e-10
A_arr = np.array(A)
if (A_arr.shape[0] != A_arr.shape[1]):
return False
n = A_arr.shape[0]
for i in range(n):
if (not ((A_arr[(i, i)] + epsilon) >= sum(np.abs(A_arr[(i, [j for j in range(n) if (i != j)])])))):
return False
return True | def is_dd(A):
" "
epsilon = 1e-10
A_arr = np.array(A)
if (A_arr.shape[0] != A_arr.shape[1]):
return False
n = A_arr.shape[0]
for i in range(n):
if (not ((A_arr[(i, i)] + epsilon) >= sum(np.abs(A_arr[(i, [j for j in range(n) if (i != j)])])))):
return False
return True<|docstring|>Returns 'True' if A is dd (diagonally dominant), 'False' otherwise<|endoftext|> |
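A vectorized variant of the check above (editor's sketch, not part of posipoly; the tolerance mirrors the epsilon used in is_dd):
import numpy as np
def is_dd_vectorized(A, eps=1e-10):
    # Each diagonal entry must dominate the sum of absolute off-diagonal
    # entries in its row, up to a small tolerance.
    A = np.asarray(A, dtype=float)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        return False
    off_diag = np.abs(A).sum(axis=1) - np.abs(np.diag(A))
    return bool(np.all(np.diag(A) + eps >= off_diag))
print(is_dd_vectorized([[2.0, 1.0], [1.0, 3.0]]))   # True
print(is_dd_vectorized([[1.0, 2.0], [2.0, 1.0]]))   # False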
a45dde8a30079ec4b46c9a921a36b68070c2ad6a47b8ab07162e15ccc98044ba | def sdd_index(i, j, n):
" An n x n sdd matrix A can be written as A = sum Mij.\n Given Mij's stored as a (n-1)*n/2 x 3 matrix, where each row represents a 2x2 symmetric matrix, return\n the indices i_s, j_s such that A_ij = sum_s Mij(i_s, j_s) "
num_vars = int(((n * (n - 1)) / 2))
if (i == j):
return [[ij_to_k(min(i, l), (max(i, l) - 1), num_vars), (0 if (i < l) else 1)] for l in range(n) if (l != i)]
else:
return [[ij_to_k(min(i, j), (max(i, j) - 1), num_vars), 2]] | An n x n sdd matrix A can be written as A = sum Mij.
Given Mij's stored as a (n-1)*n/2 x 3 matrix, where each row represents a 2x2 symmetric matrix, return
the indices i_s, j_s such that A_ij = sum_s Mij(i_s, j_s) | posipoly/ppp.py | sdd_index | pettni/posipoly | 1 | python | def sdd_index(i, j, n):
" An n x n sdd matrix A can be written as A = sum Mij.\n Given Mij's stored as a (n-1)*n/2 x 3 matrix, where each row represents a 2x2 symmetric matrix, return\n the indices i_s, j_s such that A_ij = sum_s Mij(i_s, j_s) "
num_vars = int(((n * (n - 1)) / 2))
if (i == j):
return [[ij_to_k(min(i, l), (max(i, l) - 1), num_vars), (0 if (i < l) else 1)] for l in range(n) if (l != i)]
else:
return [[ij_to_k(min(i, j), (max(i, j) - 1), num_vars), 2]] | def sdd_index(i, j, n):
" An n x n sdd matrix A can be written as A = sum Mij.\n Given Mij's stored as a (n-1)*n/2 x 3 matrix, where each row represents a 2x2 symmetric matrix, return\n the indices i_s, j_s such that A_ij = sum_s Mij(i_s, j_s) "
num_vars = int(((n * (n - 1)) / 2))
if (i == j):
return [[ij_to_k(min(i, l), (max(i, l) - 1), num_vars), (0 if (i < l) else 1)] for l in range(n) if (l != i)]
else:
return [[ij_to_k(min(i, j), (max(i, j) - 1), num_vars), 2]]<|docstring|>An n x n sdd matrix A can be written as A = sum Mij.
Given Mij's stored as a (n-1)*n/2 x 3 matrix, where each row represents a 2x2 symmetric matrix, return
the indices i_s, j_s such that A_ij = sum_s Mij(i_s, j_s)<|endoftext|> |
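Editor's illustration of the decomposition that sdd_index indexes into (the 2x2 blocks below are arbitrary, and the real bookkeeping of ij_to_k is not reproduced): the standard SDD characterization writes the matrix as a sum of PSD matrices supported on 2x2 principal submatrices; here only the easy direction is verified, namely that such a sum is PSD.
import numpy as np
n = 3
A = np.zeros((n, n))
# one 2x2 PSD block [[a, c], [c, b]] per index pair i < j (a, b >= 0 and a*b >= c**2)
blocks = {(0, 1): (2.0, 1.0, 1.0),
          (0, 2): (1.0, 4.0, -1.5),
          (1, 2): (3.0, 1.0, 1.0)}
for (i, j), (a, b, c) in blocks.items():
    A[i, i] += a
    A[j, j] += b
    A[i, j] += c
    A[j, i] += c
print(np.linalg.eigvalsh(A))   # all eigenvalues nonnegative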
cbebc5bd59a8aa40ff4d25bfcdabf36e9ae33676510a9e83b81244682f5d4f98 | @property
def varnames(self):
'get names of all variables'
return list(self.varinfo.keys()) | get names of all variables | posipoly/ppp.py | varnames | pettni/posipoly | 1 | python | @property
def varnames(self):
return list(self.varinfo.keys()) | @property
def varnames(self):
return list(self.varinfo.keys())<|docstring|>get names of all variables<|endoftext|> |
d3d81656bf42a8b395ade257869501be65a0dd128b9aa374c9b01596a95c17d1 | @property
def varsizes(self):
'get coefficient sizes of all polynomial variables'
return [self.varsize(name) for name in self.varinfo.keys()] | get coefficient sizes of all polynomial variables | posipoly/ppp.py | varsizes | pettni/posipoly | 1 | python | @property
def varsizes(self):
return [self.varsize(name) for name in self.varinfo.keys()] | @property
def varsizes(self):
return [self.varsize(name) for name in self.varinfo.keys()]<|docstring|>get coefficient sizes of all polynomial variables<|endoftext|> |
fa3d4081682b6eaf1b4b028a63ff5349e74d3e90547589d8b5e91c86e9bc981f | @property
def numvar(self):
'total number of coefficient variables'
return sum(self.varsizes) | total number of coefficient variables | posipoly/ppp.py | numvar | pettni/posipoly | 1 | python | @property
def numvar(self):
return sum(self.varsizes) | @property
def numvar(self):
return sum(self.varsizes)<|docstring|>total number of coefficient variables<|endoftext|> |
0ad6cfb6ea33e9bc0dcf050022669720a424abb268d609b832e96f7220d44ea2 | @property
def numcon(self):
'total number of constraints'
return (self.Aeq.shape[0] + self.Aiq.shape[0]) | total number of constraints | posipoly/ppp.py | numcon | pettni/posipoly | 1 | python | @property
def numcon(self):
return (self.Aeq.shape[0] + self.Aiq.shape[0]) | @property
def numcon(self):
return (self.Aeq.shape[0] + self.Aiq.shape[0])<|docstring|>total number of constraints<|endoftext|> |
7371b2fbdc61e4f61a898b46a8a49fcec86f33aacb43e4005bedc64f6b9d20b2 | def varsize(self, varname):
'get size of variable "varname"'
if (varname not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(varname))
n = self.varinfo[varname][0]
d = self.varinfo[varname][1]
if (self.varinfo[varname][2] == 'pp'):
num_mon = count_monomials_leq(n, int(ceil((float(d) / 2))))
return ((num_mon * (num_mon + 1)) // 2)
if (self.varinfo[varname][2] == 'coef'):
return count_monomials_leq(n, d) | get size of variable "varname" | posipoly/ppp.py | varsize | pettni/posipoly | 1 | python | def varsize(self, varname):
if (varname not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(varname))
n = self.varinfo[varname][0]
d = self.varinfo[varname][1]
if (self.varinfo[varname][2] == 'pp'):
num_mon = count_monomials_leq(n, int(ceil((float(d) / 2))))
return ((num_mon * (num_mon + 1)) // 2)
if (self.varinfo[varname][2] == 'coef'):
return count_monomials_leq(n, d) | def varsize(self, varname):
if (varname not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(varname))
n = self.varinfo[varname][0]
d = self.varinfo[varname][1]
if (self.varinfo[varname][2] == 'pp'):
num_mon = count_monomials_leq(n, int(ceil((float(d) / 2))))
return ((num_mon * (num_mon + 1)) // 2)
if (self.varinfo[varname][2] == 'coef'):
return count_monomials_leq(n, d)<|docstring|>get size of variable "varname"<|endoftext|> |
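Editor's sketch of the size bookkeeping, assuming count_monomials_leq(n, d) is the standard count of monomials of total degree at most d in n variables, i.e. binomial(n + d, d) (that helper is not shown in this excerpt):
from math import ceil, comb
def gram_varsize(n, d):
    # 'pp' variables store the lower triangle of a Gram matrix built on the
    # monomial basis of degree <= ceil(d / 2)
    num_mon = comb(n + ceil(d / 2), ceil(d / 2))
    return num_mon * (num_mon + 1) // 2
def coef_varsize(n, d):
    # 'coef' variables store one coefficient per monomial of degree <= d
    return comb(n + d, d)
print(coef_varsize(2, 2))   # 6 monomials: 1, x, y, x^2, x*y, y^2
print(gram_varsize(2, 2))   # 3x3 Gram matrix -> 6 lower-triangular entries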
28f9a7fdd3be61cc7e83a49acfe6d35e500257d72ee81628d439c8c23a3d2fb1 | def varpos(self, varname):
'return starting position of variable'
if (varname not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(varname))
ret = 0
for name in self.varnames:
if (name == varname):
break
ret += self.varsize(name)
return ret | return starting position of variable | posipoly/ppp.py | varpos | pettni/posipoly | 1 | python | def varpos(self, varname):
if (varname not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(varname))
ret = 0
for name in self.varnames:
if (name == varname):
break
ret += self.varsize(name)
return ret | def varpos(self, varname):
if (varname not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(varname))
ret = 0
for name in self.varnames:
if (name == varname):
break
ret += self.varsize(name)
return ret<|docstring|>return starting position of variable<|endoftext|> |
91280ab9726b13ad5d00ee5794bb3a156f86b9fa3cdb32766bddc08d50ad6d7b | def add_constraint(self, Aop_dict, b, tp):
" \n add a constraint to problem of form\n T1 var1 + T2 var2 <= b if tp=='iq' (coefficient-wise inequality)\n T1 var1 + T2 var2 = b if tp=='eq' (coefficient-wise equality)\n T1 var1 + T2 var2 - b pp if tp=='pp' (positive polynomial)\n\n Parameters\n ----------\n Aop_dict : dict\n dict with values PTrans. \n Variables that are not present as keys are assumed \n to have the zero operator.\n b : Polynomial\n Right-Hand side of constraint\n tp : {'eq', 'iq', 'pp'}\n Type of constraint ('eq'uality, 'in'equality, or 'p'ositive 'p'olynomial).\n \n Example\n ----------\n >>> prob = PPP({'x': (2, 2, 'gram'), 'y': (2, 3, 'gram')})\n >>> T = PTrans.eye(2,2)\n >>> b = Polynomial({(1,2): 1}) # x * y**2\n >>> prob.add_constraint({'x': T}, b, 'eq')\n "
if (tp not in ['eq', 'iq', 'pp']):
raise Exception('tp must be "eq" or "iq" or "pp"')
for name in Aop_dict.keys():
if (name not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(name))
if (not all(((Aop_dict[name].n0 == self.varinfo[name][0]) for name in Aop_dict.keys()))):
raise Exception('PTrans initial dimensions do not agree')
if (not all(((Aop_dict[name].d0 >= self.varinfo[name][1]) for name in Aop_dict.keys()))):
raise Exception('PTrans initial degrees too low')
n1_list = [Aop_dict[name].n1 for name in Aop_dict.keys()]
d1_list = [Aop_dict[name].d1 for name in Aop_dict.keys()]
if ((max(n1_list) != min(n1_list)) or (max(d1_list) != min(d1_list))):
raise Exception('final degrees and dimensions must match')
if ((n1_list[0] != b.n) or (d1_list[0] < b.d)):
raise Exception('must have b.n = Aop.n1 and b.d <= Aop.d1 for all Aop')
if ((tp == 'iq') and (b.d > 0)):
print('Warning: adding coefficient-wise inequality constraint. make sure this is what you want')
n = n1_list[0]
d = d1_list[0]
self.constraints.append((Aop_dict, b, n, d, tp)) | add a constraint to problem of form
T1 var1 + T2 var2 <= b if tp=='iq' (coefficient-wise inequality)
T1 var1 + T2 var2 = b if tp=='eq' (coefficient-wise equality)
T1 var1 + T2 var2 - b pp if tp=='pp' (positive polynomial)
Parameters
----------
Aop_dict : dict
dict with values PTrans.
Variables that are not present as keys are assumed
to have the zero operator.
b : Polynomial
Right-Hand side of constraint
tp : {'eq', 'iq', 'pp'}
Type of constraint ('eq'uality, 'in'equality, or 'p'ositive 'p'olynomial).
Example
----------
>>> prob = PPP({'x': (2, 2, 'gram'), 'y': (2, 3, 'gram')})
>>> T = PTrans.eye(2,2)
>>> b = Polynomial({(1,2): 1}) # x * y**2
>>> prob.add_constraint({'x': T}, b, 'eq') | posipoly/ppp.py | add_constraint | pettni/posipoly | 1 | python | def add_constraint(self, Aop_dict, b, tp):
" \n add a constraint to problem of form\n T1 var1 + T2 var2 <= b if tp=='iq' (coefficient-wise inequality)\n T1 var1 + T2 var2 = b if tp=='eq' (coefficient-wise equality)\n T1 var1 + T2 var2 - b pp if tp=='pp' (positive polynomial)\n\n Parameters\n ----------\n Aop_dict : dict\n dict with values PTrans. \n Variables that are not present as keys are assumed \n to have the zero operator.\n b : Polynomial\n Right-Hand side of constraint\n tp : {'eq', 'iq', 'pp'}\n Type of constraint ('eq'uality, 'in'equality, or 'p'ositive 'p'olynomial).\n \n Example\n ----------\n >>> prob = PPP({'x': (2, 2, 'gram'), 'y': (2, 3, 'gram')})\n >>> T = PTrans.eye(2,2)\n >>> b = Polynomial({(1,2): 1}) # x * y**2\n >>> prob.add_constraint({'x': T}, b, 'eq')\n "
if (tp not in ['eq', 'iq', 'pp']):
raise Exception('tp must be "eq" or "iq" or "pp"')
for name in Aop_dict.keys():
if (name not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(name))
if (not all(((Aop_dict[name].n0 == self.varinfo[name][0]) for name in Aop_dict.keys()))):
raise Exception('PTrans initial dimensions do not agree')
if (not all(((Aop_dict[name].d0 >= self.varinfo[name][1]) for name in Aop_dict.keys()))):
raise Exception('PTrans initial degrees too low')
n1_list = [Aop_dict[name].n1 for name in Aop_dict.keys()]
d1_list = [Aop_dict[name].d1 for name in Aop_dict.keys()]
if ((max(n1_list) != min(n1_list)) or (max(d1_list) != min(d1_list))):
raise Exception('final degrees and dimensions must match')
if ((n1_list[0] != b.n) or (d1_list[0] < b.d)):
raise Exception('must have b.n = Aop.n1 and b.d <= Aop.d1 for all Aop')
if ((tp == 'iq') and (b.d > 0)):
print('Warning: adding coefficient-wise inequality constraint. make sure this is what you want')
n = n1_list[0]
d = d1_list[0]
self.constraints.append((Aop_dict, b, n, d, tp)) | def add_constraint(self, Aop_dict, b, tp):
" \n add a constraint to problem of form\n T1 var1 + T2 var2 <= b if tp=='iq' (coefficient-wise inequality)\n T1 var1 + T2 var2 = b if tp=='eq' (coefficient-wise equality)\n T1 var1 + T2 var2 - b pp if tp=='pp' (positive polynomial)\n\n Parameters\n ----------\n Aop_dict : dict\n dict with values PTrans. \n Variables that are not present as keys are assumed \n to have the zero operator.\n b : Polynomial\n Right-Hand side of constraint\n tp : {'eq', 'iq', 'pp'}\n Type of constraint ('eq'uality, 'in'equality, or 'p'ositive 'p'olynomial).\n \n Example\n ----------\n >>> prob = PPP({'x': (2, 2, 'gram'), 'y': (2, 3, 'gram')})\n >>> T = PTrans.eye(2,2)\n >>> b = Polynomial({(1,2): 1}) # x * y**2\n >>> prob.add_constraint({'x': T}, b, 'eq')\n "
if (tp not in ['eq', 'iq', 'pp']):
raise Exception('tp must be "eq" or "iq" or "pp"')
for name in Aop_dict.keys():
if (name not in self.varinfo.keys()):
raise Exception('unknown variable {}'.format(name))
if (not all(((Aop_dict[name].n0 == self.varinfo[name][0]) for name in Aop_dict.keys()))):
raise Exception('PTrans initial dimensions do not agree')
if (not all(((Aop_dict[name].d0 >= self.varinfo[name][1]) for name in Aop_dict.keys()))):
raise Exception('PTrans initial degrees too low')
n1_list = [Aop_dict[name].n1 for name in Aop_dict.keys()]
d1_list = [Aop_dict[name].d1 for name in Aop_dict.keys()]
if ((max(n1_list) != min(n1_list)) or (max(d1_list) != min(d1_list))):
raise Exception('final degrees and dimensions must match')
if ((n1_list[0] != b.n) or (d1_list[0] < b.d)):
raise Exception('must have b.n = Aop.n1 and b.d <= Aop.d1 for all Aop')
if ((tp == 'iq') and (b.d > 0)):
print('Warning: adding coefficient-wise inequality constraint. make sure this is what you want')
n = n1_list[0]
d = d1_list[0]
self.constraints.append((Aop_dict, b, n, d, tp))<|docstring|>add a constraint to problem of form
T1 var1 + T2 var2 <= b if tp=='iq' (coefficient-wise inequality)
T1 var1 + T2 var2 = b if tp=='eq' (coefficient-wise equality)
T1 var1 + T2 var2 - b pp if tp=='pp' (positive polynomial)
Parameters
----------
Aop_dict : dict
dict with values PTrans.
Variables that are not present as keys are assumed
to have the zero operator.
b : Polynomial
Right-Hand side of constraint
tp : {'eq', 'iq', 'pp'}
Type of constraint ('eq'uality, 'in'equality, or 'p'ositive 'p'olynomial).
Example
----------
>>> prob = PPP({'x': (2, 2, 'gram'), 'y': (2, 3, 'gram')})
>>> T = PTrans.eye(2,2)
>>> b = Polynomial({(1,2): 1}) # x * y**2
>>> prob.add_constraint({'x': T}, b, 'eq')<|endoftext|> |
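Editorial aside: the docstring example above builds PPP with variable type 'gram', while varsize() in this class branches on 'pp' and 'coef'; 'gram' may be a legacy label, so treat the example's type strings with care. For the coefficient-wise reading of an 'eq'/'iq' constraint, the stand-alone numpy sketch below (all matrices invented for illustration) shows what T1 var1 + T2 var2 = b means on stacked coefficient vectors:
import numpy as np
T1 = np.array([[1.0, 0.0, 0.0],
               [0.0, 2.0, 0.0],
               [0.0, 0.0, 1.0]])   # hypothetical operator on var1's 3 coefficients
T2 = np.eye(3)                     # hypothetical operator on var2's 3 coefficients
b = np.array([1.0, 0.0, 2.0])
c1 = np.array([1.0, 0.0, 1.0])     # candidate coefficients for var1
c2 = np.array([0.0, 0.0, 1.0])     # candidate coefficients for var2
residual = T1 @ c1 + T2 @ c2 - b
print(residual)                    # 'eq' requires all zeros; 'iq' requires residual <= 0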
dd05a93e733982e027343b154e2c8c98e98c0a398e1541b5ab824f2bd4fef1dc | def set_objective(self, c_dict):
" \n add objective to problem \n\n Parameters\n ----------\n c_dict : dict\n keys: varnames\n values: PTrans with final degree 0 (scalar) or array_like\n\n Example\n ----------\n >>> T = PTrans.integrate(n,d,dims,boxes) # results in scalar\n >>> prob.add_constraint({'a': [0,1 ], 'b': T} )\n "
if (not (type(c_dict) is dict)):
raise Exception('c_dict must be dict')
def pick_vec(varname):
if (varname not in c_dict.keys()):
return np.zeros(self.varsize(varname))
if (type(c_dict[varname]) is PTrans):
if (not (c_dict[varname].d1 == 0)):
raise Exception('cost for {} not scalar'.format(varname))
if (self.varinfo[varname][2] == 'pp'):
return c_dict[varname].Acg().todense().getA1()
else:
return c_dict[varname].Acc().todense().getA1()
else:
if (len(c_dict[varname]) != self.varsize(varname)):
raise Exception('cost for {} is wrong size'.format(varname))
return c_dict[varname]
self.c = np.hstack([pick_vec(varname) for varname in self.varnames])
if (not (len(self.c) == self.numvar)):
raise Exception('wrong size of objective') | add objective to problem
Parameters
----------
c_dict : dict
keys: varnames
values: PTrans with final degree 0 (scalar) or array_like
Example
----------
>>> T = PTrans.integrate(n,d,dims,boxes) # results in scalar
>>> prob.set_objective({'a': [0, 1], 'b': T}) | posipoly/ppp.py | set_objective | pettni/posipoly | 1 | python | def set_objective(self, c_dict):
" \n add objective to problem \n\n Parameters\n ----------\n c_dict : dict\n keys: varnames\n values: PTrans with final degree 0 (scalar) or array_like\n\n Example\n ----------\n >>> T = PTrans.integrate(n,d,dims,boxes) # results in scalar\n >>> prob.add_constraint({'a': [0,1 ], 'b': T} )\n "
if (not (type(c_dict) is dict)):
raise Exception('c_dict must be dict')
def pick_vec(varname):
if (varname not in c_dict.keys()):
return np.zeros(self.varsize(varname))
if (type(c_dict[varname]) is PTrans):
if (not (c_dict[varname].d1 == 0)):
raise Exception('cost for {} not scalar'.format(varname))
if (self.varinfo[varname][2] == 'pp'):
return c_dict[varname].Acg().todense().getA1()
else:
return c_dict[varname].Acc().todense().getA1()
else:
if (len(c_dict[varname]) != self.varsize(varname)):
raise Exception('cost for {} is wrong size'.format(varname))
return c_dict[varname]
self.c = np.hstack([pick_vec(varname) for varname in self.varnames])
if (not (len(self.c) == self.numvar)):
raise Exception('wrong size of objective') | def set_objective(self, c_dict):
" \n add objective to problem \n\n Parameters\n ----------\n c_dict : dict\n keys: varnames\n values: PTrans with final degree 0 (scalar) or array_like\n\n Example\n ----------\n >>> T = PTrans.integrate(n,d,dims,boxes) # results in scalar\n >>> prob.add_constraint({'a': [0,1 ], 'b': T} )\n "
if (not (type(c_dict) is dict)):
raise Exception('c_dict must be dict')
def pick_vec(varname):
if (varname not in c_dict.keys()):
return np.zeros(self.varsize(varname))
if (type(c_dict[varname]) is PTrans):
if (not (c_dict[varname].d1 == 0)):
raise Exception('cost for {} not scalar'.format(varname))
if (self.varinfo[varname][2] == 'pp'):
return c_dict[varname].Acg().todense().getA1()
else:
return c_dict[varname].Acc().todense().getA1()
else:
if (len(c_dict[varname]) != self.varsize(varname)):
raise Exception('cost for {} is wrong size'.format(varname))
return c_dict[varname]
self.c = np.hstack([pick_vec(varname) for varname in self.varnames])
if (not (len(self.c) == self.numvar)):
raise Exception('wrong size of objective')<|docstring|>add objective to problem
Parameters
----------
c_dict : dict
keys: varnames
values: PTrans with final degree 0 (scalar) or array_like
Example
----------
>>> T = PTrans.integrate(n,d,dims,boxes) # results in scalar
>>> prob.set_objective({'a': [0, 1], 'b': T})<|endoftext|>
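Editor's sketch of the cost-assembly pattern in set_objective (the variable names and sizes below are hypothetical): variables missing from c_dict contribute a zero block, and the blocks are concatenated in the fixed variable order.
import numpy as np
var_sizes = {'V': 6, 'sigma': 6, 'rho': 3}            # hypothetical layout
c_dict = {'rho': np.array([1.0, 0.0, 0.0])}           # only 'rho' carries a cost
c = np.hstack([np.asarray(c_dict[name], dtype=float) if name in c_dict
               else np.zeros(size)
               for name, size in var_sizes.items()])
print(c.shape)   # (15,), one entry per stacked coefficient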
349f0a4a43468da8b5f8438e4338984cf42b1afdb075de9ca7f4a2e1402fe801 | def get_bmat(self, Aop_dict, numcon):
' \n concatenate matrices in dict in variable order, fill in with zero matrix\n for those variables that do not appear\n '
def pick_mat(varname):
if (varname not in Aop_dict.keys()):
return spzeros(numcon, self.varsize(varname))
if (self.varinfo[varname][2] == 'pp'):
return Aop_dict[varname].Acg()
if (self.varinfo[varname][2] == 'coef'):
return Aop_dict[varname].Acc()
return sp.bmat([[pick_mat(vn) for vn in self.varnames]]) | concatenate matrices in dict in variable order, fill in with zero matrix
for those variables that do not appear | posipoly/ppp.py | get_bmat | pettni/posipoly | 1 | python | def get_bmat(self, Aop_dict, numcon):
' \n concatenate matrices in dict in variable order, fill in with zero matrix\n for those variables that do not appear\n '
def pick_mat(varname):
if (varname not in Aop_dict.keys()):
return spzeros(numcon, self.varsize(varname))
if (self.varinfo[varname][2] == 'pp'):
return Aop_dict[varname].Acg()
if (self.varinfo[varname][2] == 'coef'):
return Aop_dict[varname].Acc()
return sp.bmat([[pick_mat(vn) for vn in self.varnames]]) | def get_bmat(self, Aop_dict, numcon):
' \n concatenate matrices in dict in variable order, fill in with zero matrix\n for those variables that do not appear\n '
def pick_mat(varname):
if (varname not in Aop_dict.keys()):
return spzeros(numcon, self.varsize(varname))
if (self.varinfo[varname][2] == 'pp'):
return Aop_dict[varname].Acg()
if (self.varinfo[varname][2] == 'coef'):
return Aop_dict[varname].Acc()
return sp.bmat([[pick_mat(vn) for vn in self.varnames]])<|docstring|>concatenate matrices in dict in variable order, fill in with zero matrix
for those variables that do not appear<|endoftext|> |
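Editor's sketch of the block assembly performed by get_bmat (shapes are hypothetical): a variable that does not appear in the constraint gets an explicit all-zero block so the concatenated row of blocks lines up with the full stacked variable vector.
import scipy.sparse as sp
numcon = 4
sizes = {'V': 6, 'rho': 3, 'sigma': 6}
blocks = {'V': sp.random(numcon, 6, density=0.3, format='coo'),
          'rho': None,                                   # unused variable -> zero block
          'sigma': sp.random(numcon, 6, density=0.3, format='coo')}
row = sp.bmat([[blocks[name] if blocks[name] is not None
                else sp.coo_matrix((numcon, sizes[name]))
                for name in sizes]])
print(row.shape)   # (4, 15)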
9eb282b70fc131004fc47fee317236387c277e46bcbf9068b816f600c0a9fc8c | def solve(self, pp_cone):
" \n solve Positive Polynomial Program \n\n Parameters\n ----------\n pp_cone : {'psd', 'sdd'}\n cone for positivity constraints\n \n Returns\n ----------\n sol : solution vector\n sta : status\n "
Aeq = sp.coo_matrix((0, self.numvar))
Aiq = sp.coo_matrix((0, self.numvar))
beq = np.zeros(0)
biq = np.zeros(0)
for (Aop_dict, b, n, d, tp) in self.constraints:
Amat = self.get_bmat(Aop_dict, count_monomials_leq(n, d))
if (tp == 'eq'):
Aeq = sp.bmat([[Aeq], [Amat]])
beq = np.hstack([beq, b.mon_coefs(d)])
if (tp == 'iq'):
Aiq = sp.bmat([[Aiq], [Amat]])
biq = np.hstack([biq, b.mon_coefs(d)])
numcon_iq = Aiq.shape[0]
numcon_eq = Aeq.shape[0]
env = mosek.Env()
task = env.Task(0, 0)
task.appendvars(self.numvar)
task.putvarboundslice(0, self.numvar, ([mosek.boundkey.fr] * self.numvar), ([0.0] * self.numvar), ([0.0] * self.numvar))
task.putcslice(0, self.numvar, self.c)
task.putobjsense(mosek.objsense.minimize)
A = sp.bmat([[Aeq], [Aiq]])
task.appendcons((numcon_eq + numcon_iq))
task.putaijlist(A.row, A.col, A.data)
task.putconboundslice(0, (numcon_eq + numcon_iq), (([mosek.boundkey.fx] * numcon_eq) + ([mosek.boundkey.up] * numcon_iq)), (list(beq) + ([0.0] * numcon_iq)), (list(beq) + list(biq)))
for varname in self.varnames:
if (self.varinfo[varname][2] == 'pp'):
Asp = speye(self.varsize(varname), self.varpos(varname), self.numvar)
if (pp_cone == 'psd'):
add_psd_mosek(task, Asp, np.zeros(self.varsize(varname)))
if (pp_cone == 'sdd'):
add_sdd_mosek(task, Asp, np.zeros(self.varsize(varname)))
for (Aop_dict, b_pol, n, d, tp) in self.constraints:
if (tp == 'pp'):
if (pp_cone == 'psd'):
add_psd_mosek(task, self.get_bmat(Aop_dict, count_monomials_leq(n, d)), b_pol.mon_coefs(d), PTrans.eye(n, d).Acg())
if (pp_cone == 'sdd'):
add_sdd_mosek(task, self.get_bmat(Aop_dict, count_monomials_leq(n, d)), b_pol.mon_coefs(d), PTrans.eye(n, d).Acg())
print('optimizing...')
t_start = time.perf_counter()
task.optimize()
print('solved in {:.2f}s'.format((time.perf_counter() - t_start)))
solsta = task.getsolsta(mosek.soltype.itr)
print(solsta)
if (solsta == mosek.solsta.optimal):
sol = ([0.0] * self.numvar)
task.getxxslice(mosek.soltype.itr, 0, self.numvar, sol)
self.sol = sol
self.check_sol(Aiq, biq, Aeq, beq)
return (sol, solsta)
else:
return (None, solsta) | solve Positive Polynomial Program
Parameters
----------
pp_cone : {'psd', 'sdd'}
cone for positivity constraints
Returns
----------
sol : solution vector
sta : status | posipoly/ppp.py | solve | pettni/posipoly | 1 | python | def solve(self, pp_cone):
" \n solve Positive Polynomial Program \n\n Parameters\n ----------\n pp_cone : {'psd', 'sdd'}\n cone for positivity constraints\n \n Returns\n ----------\n sol : solution vector\n sta : status\n "
Aeq = sp.coo_matrix((0, self.numvar))
Aiq = sp.coo_matrix((0, self.numvar))
beq = np.zeros(0)
biq = np.zeros(0)
for (Aop_dict, b, n, d, tp) in self.constraints:
Amat = self.get_bmat(Aop_dict, count_monomials_leq(n, d))
if (tp == 'eq'):
Aeq = sp.bmat([[Aeq], [Amat]])
beq = np.hstack([beq, b.mon_coefs(d)])
if (tp == 'iq'):
Aiq = sp.bmat([[Aiq], [Amat]])
biq = np.hstack([biq, b.mon_coefs(d)])
numcon_iq = Aiq.shape[0]
numcon_eq = Aeq.shape[0]
env = mosek.Env()
task = env.Task(0, 0)
task.appendvars(self.numvar)
task.putvarboundslice(0, self.numvar, ([mosek.boundkey.fr] * self.numvar), ([0.0] * self.numvar), ([0.0] * self.numvar))
task.putcslice(0, self.numvar, self.c)
task.putobjsense(mosek.objsense.minimize)
A = sp.bmat([[Aeq], [Aiq]])
task.appendcons((numcon_eq + numcon_iq))
task.putaijlist(A.row, A.col, A.data)
task.putconboundslice(0, (numcon_eq + numcon_iq), (([mosek.boundkey.fx] * numcon_eq) + ([mosek.boundkey.up] * numcon_iq)), (list(beq) + ([0.0] * numcon_iq)), (list(beq) + list(biq)))
for varname in self.varnames:
if (self.varinfo[varname][2] == 'pp'):
Asp = speye(self.varsize(varname), self.varpos(varname), self.numvar)
if (pp_cone == 'psd'):
add_psd_mosek(task, Asp, np.zeros(self.varsize(varname)))
if (pp_cone == 'sdd'):
add_sdd_mosek(task, Asp, np.zeros(self.varsize(varname)))
for (Aop_dict, b_pol, n, d, tp) in self.constraints:
if (tp == 'pp'):
if (pp_cone == 'psd'):
add_psd_mosek(task, self.get_bmat(Aop_dict, count_monomials_leq(n, d)), b_pol.mon_coefs(d), PTrans.eye(n, d).Acg())
if (pp_cone == 'sdd'):
add_sdd_mosek(task, self.get_bmat(Aop_dict, count_monomials_leq(n, d)), b_pol.mon_coefs(d), PTrans.eye(n, d).Acg())
print('optimizing...')
t_start = time.perf_counter()
task.optimize()
print('solved in {:.2f}s'.format((time.perf_counter() - t_start)))
solsta = task.getsolsta(mosek.soltype.itr)
print(solsta)
if (solsta == mosek.solsta.optimal):
sol = ([0.0] * self.numvar)
task.getxxslice(mosek.soltype.itr, 0, self.numvar, sol)
self.sol = sol
self.check_sol(Aiq, biq, Aeq, beq)
return (sol, solsta)
else:
return (None, solsta) | def solve(self, pp_cone):
" \n solve Positive Polynomial Program \n\n Parameters\n ----------\n pp_cone : {'psd', 'sdd'}\n cone for positivity constraints\n \n Returns\n ----------\n sol : solution vector\n sta : status\n "
Aeq = sp.coo_matrix((0, self.numvar))
Aiq = sp.coo_matrix((0, self.numvar))
beq = np.zeros(0)
biq = np.zeros(0)
for (Aop_dict, b, n, d, tp) in self.constraints:
Amat = self.get_bmat(Aop_dict, count_monomials_leq(n, d))
if (tp == 'eq'):
Aeq = sp.bmat([[Aeq], [Amat]])
beq = np.hstack([beq, b.mon_coefs(d)])
if (tp == 'iq'):
Aiq = sp.bmat([[Aiq], [Amat]])
biq = np.hstack([biq, b.mon_coefs(d)])
numcon_iq = Aiq.shape[0]
numcon_eq = Aeq.shape[0]
env = mosek.Env()
task = env.Task(0, 0)
task.appendvars(self.numvar)
task.putvarboundslice(0, self.numvar, ([mosek.boundkey.fr] * self.numvar), ([0.0] * self.numvar), ([0.0] * self.numvar))
task.putcslice(0, self.numvar, self.c)
task.putobjsense(mosek.objsense.minimize)
A = sp.bmat([[Aeq], [Aiq]])
task.appendcons((numcon_eq + numcon_iq))
task.putaijlist(A.row, A.col, A.data)
task.putconboundslice(0, (numcon_eq + numcon_iq), (([mosek.boundkey.fx] * numcon_eq) + ([mosek.boundkey.up] * numcon_iq)), (list(beq) + ([0.0] * numcon_iq)), (list(beq) + list(biq)))
for varname in self.varnames:
if (self.varinfo[varname][2] == 'pp'):
Asp = speye(self.varsize(varname), self.varpos(varname), self.numvar)
if (pp_cone == 'psd'):
add_psd_mosek(task, Asp, np.zeros(self.varsize(varname)))
if (pp_cone == 'sdd'):
add_sdd_mosek(task, Asp, np.zeros(self.varsize(varname)))
for (Aop_dict, b_pol, n, d, tp) in self.constraints:
if (tp == 'pp'):
if (pp_cone == 'psd'):
add_psd_mosek(task, self.get_bmat(Aop_dict, count_monomials_leq(n, d)), b_pol.mon_coefs(d), PTrans.eye(n, d).Acg())
if (pp_cone == 'sdd'):
add_sdd_mosek(task, self.get_bmat(Aop_dict, count_monomials_leq(n, d)), b_pol.mon_coefs(d), PTrans.eye(n, d).Acg())
print('optimizing...')
t_start = time.perf_counter()
task.optimize()
print('solved in {:.2f}s'.format((time.perf_counter() - t_start)))
solsta = task.getsolsta(mosek.soltype.itr)
print(solsta)
if (solsta == mosek.solsta.optimal):
sol = ([0.0] * self.numvar)
task.getxxslice(mosek.soltype.itr, 0, self.numvar, sol)
self.sol = sol
self.check_sol(Aiq, biq, Aeq, beq)
return (sol, solsta)
else:
return (None, solsta)<|docstring|>solve Positive Polynomial Program
Parameters
----------
pp_cone : {'psd', 'sdd'}
cone for positivity constraints
Returns
----------
sol : solution vector
sta : status<|endoftext|> |
59fb6f05480c1d0f45f00fc4332e938cf316992e9094173b4806b15a1fc6cc7f | def check_sol(self, Aiq, biq, Aeq, beq, tol=1e-06):
'\n check solution against tolerance to see if constraints are met,\n prints warnings messages if violations above tol are found\n REMARK: currently does not check manually added pp constraints\n '
if ((Aiq.shape[0] > 0) and (min((biq - Aiq.dot(self.sol))) < (- tol))):
print('warning, iq constraint violated by {:f}'.format(abs(min((biq - Aiq.dot(self.sol))))))
if ((Aeq.shape[0] > 0) and (max(np.abs((Aeq.dot(self.sol) - beq))) > tol)):
print('warning, eq constraint violated by {:f}'.format(max(np.abs((Aeq.dot(self.sol) - beq)))))
for varname in self.varnames:
if (self.varinfo[varname][2] == 'pp'):
a = self.varpos(varname)
b = (a + self.varsize(varname))
mat = vec_to_mat(self.sol[a:b])
(v, _) = np.linalg.eig(mat)
if (min(v) < (- tol)):
print('warning, pp constraint violated by {:f}'.format(abs(min(v)))) | check solution against tolerance to see if constraints are met,
prints warning messages if violations above tol are found
REMARK: currently does not check manually added pp constraints | posipoly/ppp.py | check_sol | pettni/posipoly | 1 | python | def check_sol(self, Aiq, biq, Aeq, beq, tol=1e-06):
'\n check solution against tolerance to see if constraints are met,\n prints warnings messages if violations above tol are found\n REMARK: currently does not check manually added pp constraints\n '
if ((Aiq.shape[0] > 0) and (min((biq - Aiq.dot(self.sol))) < (- tol))):
print('warning, iq constraint violated by {:f}'.format(abs(min((biq - Aiq.dot(self.sol))))))
if ((Aeq.shape[0] > 0) and (max(np.abs((Aeq.dot(self.sol) - beq))) > tol)):
print('warning, eq constraint violated by {:f}'.format(max(np.abs((Aeq.dot(self.sol) - beq)))))
for varname in self.varnames:
if (self.varinfo[varname][2] == 'pp'):
a = self.varpos(varname)
b = (a + self.varsize(varname))
mat = vec_to_mat(self.sol[a:b])
(v, _) = np.linalg.eig(mat)
if (min(v) < (- tol)):
print('warning, pp constraint violated by {:f}'.format(abs(min(v)))) | def check_sol(self, Aiq, biq, Aeq, beq, tol=1e-06):
'\n check solution against tolerance to see if constraints are met,\n prints warnings messages if violations above tol are found\n REMARK: currently does not check manually added pp constraints\n '
if ((Aiq.shape[0] > 0) and (min((biq - Aiq.dot(self.sol))) < (- tol))):
print('warning, iq constraint violated by {:f}'.format(abs(min((biq - Aiq.dot(self.sol))))))
if ((Aeq.shape[0] > 0) and (max(np.abs((Aeq.dot(self.sol) - beq))) > tol)):
print('warning, eq constraint violated by {:f}'.format(max(np.abs((Aeq.dot(self.sol) - beq)))))
for varname in self.varnames:
if (self.varinfo[varname][2] == 'pp'):
a = self.varpos(varname)
b = (a + self.varsize(varname))
mat = vec_to_mat(self.sol[a:b])
(v, _) = np.linalg.eig(mat)
if (min(v) < (- tol)):
print('warning, pp constraint violated by {:f}'.format(abs(min(v))))<|docstring|>check solution against tolerance to see if constraints are met,
prints warning messages if violations above tol are found
REMARK: currently does not check manually added pp constraints<|endoftext|> |
b8652e3ba4b9c994a2fd7b8adaf81925a7f2f769341c4bb8fc9bfa4cd7d27c9a | def get_poly(self, varname):
'return a Polynomial object from solution'
if (self.sol is None):
raise Exception('no solution stored')
if (varname not in self.varnames):
raise Exception('unknown variable {}'.format(varname))
a = self.varpos(varname)
b = (a + self.varsize(varname))
n = self.varinfo[varname][0]
d = self.varinfo[varname][1]
if (self.varinfo[varname][2] == 'coef'):
mon_coefs = self.sol[a:b]
else:
mon_coefs = PTrans.eye(n, d).Acg().dot(self.sol[a:b])
return Polynomial.from_mon_coefs(n, mon_coefs) | return a Polynomial object from solution | posipoly/ppp.py | get_poly | pettni/posipoly | 1 | python | def get_poly(self, varname):
if (self.sol is None):
raise Exception('no solution stored')
if (varname not in self.varnames):
raise Exception('unknown variable {}'.format(varname))
a = self.varpos(varname)
b = (a + self.varsize(varname))
n = self.varinfo[varname][0]
d = self.varinfo[varname][1]
if (self.varinfo[varname][2] == 'coef'):
mon_coefs = self.sol[a:b]
else:
mon_coefs = PTrans.eye(n, d).Acg().dot(self.sol[a:b])
return Polynomial.from_mon_coefs(n, mon_coefs) | def get_poly(self, varname):
if (self.sol is None):
raise Exception('no solution stored')
if (varname not in self.varnames):
raise Exception('unknown variable {}'.format(varname))
a = self.varpos(varname)
b = (a + self.varsize(varname))
n = self.varinfo[varname][0]
d = self.varinfo[varname][1]
if (self.varinfo[varname][2] == 'coef'):
mon_coefs = self.sol[a:b]
else:
mon_coefs = PTrans.eye(n, d).Acg().dot(self.sol[a:b])
return Polynomial.from_mon_coefs(n, mon_coefs)<|docstring|>return a Polynomial object from solution<|endoftext|> |
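Worked example of what the gram-to-coefficient conversion in get_poly amounts to (editor's addition; PTrans.eye(n, d).Acg() itself is not reproduced, the n = 1, d = 2 case is done by hand): with monomial basis z(x) = [1, x], a symmetric Gram matrix Q represents the polynomial z(x)^T Q z(x) = Q00 + 2*Q01*x + Q11*x^2.
import numpy as np
Q = np.array([[1.0, 0.5],
              [0.5, 2.0]])                              # hypothetical Gram matrix
coeffs = np.array([Q[0, 0], 2.0 * Q[0, 1], Q[1, 1]])    # coefficients of 1, x, x^2
x = 0.7
via_gram = np.array([1.0, x]) @ Q @ np.array([1.0, x])
via_coeffs = coeffs @ np.array([1.0, x, x ** 2])
print(np.isclose(via_gram, via_coeffs))                 # True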
4b2282af96d846a00d89dff2fffc08526d85b854196e7f7465e55677cc8c936b | def get_versions():
'\n    Return basic information on our software installation, and our installed versions of packages.\n    '
d = {'host': get_system_info(), 'packages': get_package_info(required_packages), 'optional': get_optional_info()}
return d | Return basic information on our software installation, and our installed versions of packages. | src/hangar/diagnostics/ecosystem.py | get_versions | dxenonb/hangar-py | 0 | python | def get_versions():
'\n \n '
d = {'host': get_system_info(), 'packages': get_package_info(required_packages), 'optional': get_optional_info()}
return d | def get_versions():
'\n \n '
d = {'host': get_system_info(), 'packages': get_package_info(required_packages), 'optional': get_optional_info()}
return d<|docstring|>Return basic information on our software installation, and our installed versions of packages.<|endoftext|>
040590345ef1eeb3664414c07d9754da9574998419c9021d74f27302915ad4b0 | def get_package_info(pkgs):
' get package versions for the passed required & optional packages '
pversions = []
for (modname, ver_f) in pkgs:
try:
mod = importlib.import_module(modname)
ver = ver_f(mod)
pversions.append((modname, ver))
except Exception:
pversions.append((modname, None))
return pversions | get package versions for the passed required & optional packages | src/hangar/diagnostics/ecosystem.py | get_package_info | dxenonb/hangar-py | 0 | python | def get_package_info(pkgs):
' '
pversions = []
for (modname, ver_f) in pkgs:
try:
mod = importlib.import_module(modname)
ver = ver_f(mod)
pversions.append((modname, ver))
except Exception:
pversions.append((modname, None))
return pversions | def get_package_info(pkgs):
' '
pversions = []
for (modname, ver_f) in pkgs:
try:
mod = importlib.import_module(modname)
ver = ver_f(mod)
pversions.append((modname, ver))
except Exception:
pversions.append((modname, None))
return pversions<|docstring|>get package versions for the passed required & optional packages<|endoftext|> |
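A quick usage sketch for get_package_info as defined in the record above (editor's addition; numpy exposes __version__, and the second entry is deliberately bogus to show the None fallback):
pkgs = [('numpy', lambda mod: mod.__version__),
        ('definitely_not_installed', lambda mod: mod.__version__)]
print(get_package_info(pkgs))   # e.g. [('numpy', '1.26.4'), ('definitely_not_installed', None)]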
3cfd1ce127c5aa45beb28a8527f8ca5285e695d74b08ea3bf78d5efeb316589d | def process_data(params):
'Provides end-to-end signal processing, including reading, detrending,\n tapering, filtering, instrument response removal, and saving.\n\n Parameters\n ----------\n params : SignalProcessing object\n Object containing signal processing parameters passed.\n\n Returns\n -------\n int\n Number of files saved to disk.\n '
st = processing.pipeline(params)
if (st == 0):
return 0
if (params.mode == 'preprocess'):
if params.verbose:
print('Trimming.')
st.trim(starttime=obspy.core.UTCDateTime(params.start), endtime=obspy.core.UTCDateTime(params.stop))
count = 0
for tr in st:
path = f'{params.writepath}/MSEED/{tr.stats.network}/{tr.stats.station}'
if (not os.path.exists(path)):
os.makedirs(path)
fname = f'{tr.stats.network}.{tr.stats.station}.{tr.stats.channel}.{params.start.year}.{params.start.dayofyear:03d}.mseed'
tr.data = tr.data.astype('float32')
tr.write(f'{path}/{fname}', format='MSEED', encoding=4)
count += 1
return count
else:
count = 0
if params.verbose:
print('Running detector.')
path = params.writepath
if (not os.path.exists(path)):
os.makedirs(path)
for tr in st:
fs = tr.stats.sampling_rate
catalogue = pd.DataFrame(columns=['network', 'station', 'channel', 'dt_on', 'dt_off', 'dt_peak', 'peak', 'unit', 'fs', 'delta', 'npts', 'STA', 'LTA', 'on', 'off'])
if (not os.path.exists(f'{path}/catalogue.csv')):
catalogue.to_csv(f'{path}/catalogue.csv', mode='a', index=False)
secs = np.arange(0, (tr.stats.npts * tr.stats.delta), tr.stats.delta)
time = (params.start_processing + pd.to_timedelta(secs, unit='sec'))
if params.verbose:
print('Calculating CFT.')
if (params.detector == 'classic'):
cft = trigger.classic_sta_lta(tr.data, int((fs * params.STA)), int((fs * params.LTA)))
elif (params.detector == 'recursive'):
cft = trigger.recursive_sta_lta(tr, int((fs * params.STA)), int((fs * params.LTA)))
elif (params.detector == 'z'):
cft = trigger.z_detect(tr.data, int((fs * 3)))
if params.verbose:
print('Locating triggers.')
on_off = trigger.trigger_onset(cft, params.on, params.off)
if isinstance(on_off, list):
del catalogue
continue
on_off = on_off[((time[on_off[:, 0]] >= params.start) & (time[on_off[:, 0]] < params.stop)), :]
nrows = on_off.shape[0]
catalogue['network'] = [tr.stats.network for i in range(nrows)]
catalogue['station'] = [tr.stats.station for i in range(nrows)]
catalogue['channel'] = [tr.stats.channel for i in range(nrows)]
catalogue['dt_on'] = time[on_off[:, 0]]
catalogue['dt_off'] = time[on_off[:, 1]]
i_max = [(on_off[(i, 0)] + np.argmax(abs(tr.data[on_off[(i, 0)]:on_off[(i, 1)]]))) for i in range(on_off.shape[0])]
catalogue['dt_peak'] = time[i_max]
catalogue['peak'] = tr.data[i_max]
catalogue['unit'] = [params.output for i in range(nrows)]
catalogue['fs'] = [fs for i in range(nrows)]
catalogue['delta'] = [tr.stats.delta for i in range(nrows)]
catalogue['npts'] = [tr.stats.npts for i in range(nrows)]
catalogue['STA'] = [params.STA for i in range(nrows)]
catalogue['LTA'] = [params.LTA for i in range(nrows)]
catalogue['on'] = [params.on for i in range(nrows)]
catalogue['off'] = [params.off for i in range(nrows)]
catalogue.to_csv(f'{path}/catalogue.csv', mode='a', index=False, header=False)
if params.verbose:
print('Catalogue built.')
del catalogue
count += on_off.shape[0]
return count | Provides end-to-end signal processing, including reading, detrending,
tapering, filtering, instrument response removal, and saving.
Parameters
----------
params : SignalProcessing object
Object containing signal processing parameters passed.
Returns
-------
int
Number of files saved to disk. | RISProcess/workflows.py | process_data | firstkingofrome/RISProcess | 0 | python | def process_data(params):
'Provides end-to-end signal processing, including reading, detrending,\n tapering, filtering, instrument response removal, and saving.\n\n Parameters\n ----------\n params : SignalProcessing object\n Object containing signal processing parameters passed.\n\n Returns\n -------\n int\n Number of files saved to disk.\n '
st = processing.pipeline(params)
if (st == 0):
return 0
if (params.mode == 'preprocess'):
if params.verbose:
print('Trimming.')
st.trim(starttime=obspy.core.UTCDateTime(params.start), endtime=obspy.core.UTCDateTime(params.stop))
count = 0
for tr in st:
path = f'{params.writepath}/MSEED/{tr.stats.network}/{tr.stats.station}'
if (not os.path.exists(path)):
os.makedirs(path)
fname = f'{tr.stats.network}.{tr.stats.station}.{tr.stats.channel}.{params.start.year}.{params.start.dayofyear:03d}.mseed'
tr.data = tr.data.astype('float32')
tr.write(f'{path}/{fname}', format='MSEED', encoding=4)
count += 1
return count
else:
count = 0
if params.verbose:
print('Running detector.')
path = params.writepath
if (not os.path.exists(path)):
os.makedirs(path)
for tr in st:
fs = tr.stats.sampling_rate
catalogue = pd.DataFrame(columns=['network', 'station', 'channel', 'dt_on', 'dt_off', 'dt_peak', 'peak', 'unit', 'fs', 'delta', 'npts', 'STA', 'LTA', 'on', 'off'])
if (not os.path.exists(f'{path}/catalogue.csv')):
catalogue.to_csv(f'{path}/catalogue.csv', mode='a', index=False)
secs = np.arange(0, (tr.stats.npts * tr.stats.delta), tr.stats.delta)
time = (params.start_processing + pd.to_timedelta(secs, unit='sec'))
if params.verbose:
print('Calculating CFT.')
if (params.detector == 'classic'):
cft = trigger.classic_sta_lta(tr.data, int((fs * params.STA)), int((fs * params.LTA)))
elif (params.detector == 'recursive'):
cft = trigger.recursive_sta_lta(tr, int((fs * params.STA)), int((fs * params.LTA)))
elif (params.detector == 'z'):
cft = trigger.z_detect(tr.data, int((fs * 3)))
if params.verbose:
print('Locating triggers.')
on_off = trigger.trigger_onset(cft, params.on, params.off)
if isinstance(on_off, list):
del catalogue
continue
on_off = on_off[((time[on_off[:, 0]] >= params.start) & (time[on_off[:, 0]] < params.stop)), :]
nrows = on_off.shape[0]
catalogue['network'] = [tr.stats.network for i in range(nrows)]
catalogue['station'] = [tr.stats.station for i in range(nrows)]
catalogue['channel'] = [tr.stats.channel for i in range(nrows)]
catalogue['dt_on'] = time[on_off[:, 0]]
catalogue['dt_off'] = time[on_off[:, 1]]
i_max = [(on_off[(i, 0)] + np.argmax(abs(tr.data[on_off[(i, 0)]:on_off[(i, 1)]]))) for i in range(on_off.shape[0])]
catalogue['dt_peak'] = time[i_max]
catalogue['peak'] = tr.data[i_max]
catalogue['unit'] = [params.output for i in range(nrows)]
catalogue['fs'] = [fs for i in range(nrows)]
catalogue['delta'] = [tr.stats.delta for i in range(nrows)]
catalogue['npts'] = [tr.stats.npts for i in range(nrows)]
catalogue['STA'] = [params.STA for i in range(nrows)]
catalogue['LTA'] = [params.LTA for i in range(nrows)]
catalogue['on'] = [params.on for i in range(nrows)]
catalogue['off'] = [params.off for i in range(nrows)]
catalogue.to_csv(f'{path}/catalogue.csv', mode='a', index=False, header=False)
if params.verbose:
print('Catalogue built.')
del catalogue
count += on_off.shape[0]
return count | def process_data(params):
'Provides end-to-end signal processing, including reading, detrending,\n tapering, filtering, instrument response removal, and saving.\n\n Parameters\n ----------\n params : SignalProcessing object\n Object containing signal processing parameters passed.\n\n Returns\n -------\n int\n Number of files saved to disk.\n '
st = processing.pipeline(params)
if (st == 0):
return 0
if (params.mode == 'preprocess'):
if params.verbose:
print('Trimming.')
st.trim(starttime=obspy.core.UTCDateTime(params.start), endtime=obspy.core.UTCDateTime(params.stop))
count = 0
for tr in st:
path = f'{params.writepath}/MSEED/{tr.stats.network}/{tr.stats.station}'
if (not os.path.exists(path)):
os.makedirs(path)
fname = f'{tr.stats.network}.{tr.stats.station}.{tr.stats.channel}.{params.start.year}.{params.start.dayofyear:03d}.mseed'
tr.data = tr.data.astype('float32')
tr.write(f'{path}/{fname}', format='MSEED', encoding=4)
count += 1
return count
else:
count = 0
if params.verbose:
print('Running detector.')
path = params.writepath
if (not os.path.exists(path)):
os.makedirs(path)
for tr in st:
fs = tr.stats.sampling_rate
catalogue = pd.DataFrame(columns=['network', 'station', 'channel', 'dt_on', 'dt_off', 'dt_peak', 'peak', 'unit', 'fs', 'delta', 'npts', 'STA', 'LTA', 'on', 'off'])
if (not os.path.exists(f'{path}/catalogue.csv')):
catalogue.to_csv(f'{path}/catalogue.csv', mode='a', index=False)
secs = np.arange(0, (tr.stats.npts * tr.stats.delta), tr.stats.delta)
time = (params.start_processing + pd.to_timedelta(secs, unit='sec'))
if params.verbose:
print('Calculating CFT.')
if (params.detector == 'classic'):
cft = trigger.classic_sta_lta(tr.data, int((fs * params.STA)), int((fs * params.LTA)))
elif (params.detector == 'recursive'):
cft = trigger.recursive_sta_lta(tr, int((fs * params.STA)), int((fs * params.LTA)))
elif (params.detector == 'z'):
cft = trigger.z_detect(tr.data, int((fs * 3)))
if params.verbose:
print('Locating triggers.')
on_off = trigger.trigger_onset(cft, params.on, params.off)
if isinstance(on_off, list):
del catalogue
continue
on_off = on_off[((time[on_off[:, 0]] >= params.start) & (time[on_off[:, 0]] < params.stop)), :]
nrows = on_off.shape[0]
catalogue['network'] = [tr.stats.network for i in range(nrows)]
catalogue['station'] = [tr.stats.station for i in range(nrows)]
catalogue['channel'] = [tr.stats.channel for i in range(nrows)]
catalogue['dt_on'] = time[on_off[:, 0]]
catalogue['dt_off'] = time[on_off[:, 1]]
i_max = [(on_off[(i, 0)] + np.argmax(abs(tr.data[on_off[(i, 0)]:on_off[(i, 1)]]))) for i in range(on_off.shape[0])]
catalogue['dt_peak'] = time[i_max]
catalogue['peak'] = tr.data[i_max]
catalogue['unit'] = [params.output for i in range(nrows)]
catalogue['fs'] = [fs for i in range(nrows)]
catalogue['delta'] = [tr.stats.delta for i in range(nrows)]
catalogue['npts'] = [tr.stats.npts for i in range(nrows)]
catalogue['STA'] = [params.STA for i in range(nrows)]
catalogue['LTA'] = [params.LTA for i in range(nrows)]
catalogue['on'] = [params.on for i in range(nrows)]
catalogue['off'] = [params.off for i in range(nrows)]
catalogue.to_csv(f'{path}/catalogue.csv', mode='a', index=False, header=False)
if params.verbose:
print('Catalogue built.')
del catalogue
count += on_off.shape[0]
return count<|docstring|>Provides end-to-end signal processing, including reading, detrending,
tapering, filtering, instrument response removal, and saving.
Parameters
----------
params : SignalProcessing object
Object containing signal processing parameters passed.
Returns
-------
int
Number of files saved to disk.<|endoftext|> |
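Editor's sketch of the detection core on synthetic data (requires obspy and numpy; the window lengths and thresholds are arbitrary here, not the values RISProcess uses):
import numpy as np
from obspy.signal.trigger import classic_sta_lta, trigger_onset
fs = 100.0                                    # hypothetical sampling rate in Hz
data = 0.1 * np.random.randn(60 * int(fs))    # one minute of noise
data[3000:3200] += 5.0 * np.hanning(200)      # inject a transient "event"
cft = classic_sta_lta(data, int(1 * fs), int(10 * fs))   # 1 s STA, 10 s LTA
for on, off in trigger_onset(cft, 4.0, 1.0):             # on/off thresholds
    print(f'trigger from sample {on} to sample {off}')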
69a6cdb15124c9b081cfedda0bdab643ab5731261bf55b381ba31370f1ea79e8 | def to_json_serializable(data):
'Make an object containing numpy arrays/scalars JSON serializable.'
if (data is None):
return None
if isinstance(data, dict):
return {key: to_json_serializable(value) for (key, value) in data.items()}
elif (isinstance(data, list) or isinstance(data, tuple)):
return [to_json_serializable(v) for v in data]
elif isinstance(data, np.ndarray):
return data.tolist()
elif np.isscalar(data):
return data
else:
raise ValueError('Unsupported JSON type `{}` (key `{}`)'.format(type(data), data)) | Make an object containing numpy arrays/scalars JSON serializable. | shepherd/runner/json_runner.py | to_json_serializable | iterait/shepherd | 5 | python | def to_json_serializable(data):
if (data is None):
return None
if isinstance(data, dict):
return {key: to_json_serializable(value) for (key, value) in data.items()}
elif (isinstance(data, list) or isinstance(data, tuple)):
return [to_json_serializable(v) for v in data]
elif isinstance(data, np.ndarray):
return data.tolist()
elif np.isscalar(data):
return data
else:
raise ValueError('Unsupported JSON type `{}` (key `{}`)'.format(type(data), data)) | def to_json_serializable(data):
if (data is None):
return None
if isinstance(data, dict):
return {key: to_json_serializable(value) for (key, value) in data.items()}
elif (isinstance(data, list) or isinstance(data, tuple)):
return [to_json_serializable(v) for v in data]
elif isinstance(data, np.ndarray):
return data.tolist()
elif np.isscalar(data):
return data
else:
raise ValueError('Unsupported JSON type `{}` (key `{}`)'.format(type(data), data))<|docstring|>Make an object containing numpy arrays/scalars JSON serializable.<|endoftext|> |
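A short usage sketch for the helper above (editor's addition; the payload shape is invented, and to_json_serializable from the record is assumed to be in scope):
import json
import numpy as np
batch = {'classes': np.array([2, 0, 1]),
         'probabilities': [np.float64(0.9), np.float64(0.7)],
         'meta': {'threshold': 0.5, 'note': None}}
print(json.dumps(to_json_serializable(batch), indent=2))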
9da664b5c774a97a988de72029ace0500f6ea8fc1ad2fbcb06d5d477d90b37f8 | def run(model: el.AbstractModel, dataset: el.AbstractDataset, stream_name: str, payload: Any) -> el.Batch:
'\n Get the specified data stream from the given dataset, apply the given model on its batches and return the results.\n\n The components have to be **emloop** compatible with:\n - dataset having method named ``[stream_name]_stream`` taking the payload and returning the stream\n - (optional) dataset having method named ``postprocess_batch`` taking both the input and output batches and\n returning the post-processed batch\n\n :param model: emloop model to be run\n :param dataset: emloop dataset to get the stream from\n :param stream_name: stream name\n :param payload: payload passed to the method creating the stream\n :return: result batch (if the stream produces multiple batches its the concatenation of all the results)\n '
result = defaultdict(list)
for input_batch in getattr(dataset, (stream_name + '_stream'))(payload):
logging.info('Another batch (%s)', list(input_batch.keys()))
output_batch = model.run(input_batch, train=False, stream=None)
if hasattr(dataset, 'postprocess_batch'):
logging.info('\tPostprocessing')
result_batch = dataset.postprocess_batch(input_batch=input_batch, output_batch=output_batch)
logging.info('\tdone')
else:
logging.info('Skipping postprocessing')
result_batch = output_batch
for (source, value) in result_batch.items():
result[source] += list(value)
return result | Get the specified data stream from the given dataset, apply the given model on its batches and return the results.
The components have to be **emloop** compatible with:
- dataset having method named ``[stream_name]_stream`` taking the payload and returning the stream
- (optional) dataset having method named ``postprocess_batch`` taking both the input and output batches and
returning the post-processed batch
:param model: emloop model to be run
:param dataset: emloop dataset to get the stream from
:param stream_name: stream name
:param payload: payload passed to the method creating the stream
:return: result batch (if the stream produces multiple batches, it is the concatenation of all the results) | shepherd/runner/json_runner.py | run | iterait/shepherd | 5 | python | def run(model: el.AbstractModel, dataset: el.AbstractDataset, stream_name: str, payload: Any) -> el.Batch:
'\n Get the specified data stream from the given dataset, apply the given model on its batches and return the results.\n\n The components have to be **emloop** compatible with:\n - dataset having method named ``[stream_name]_stream`` taking the payload and returning the stream\n - (optional) dataset having method named ``postprocess_batch`` taking both the input and output batches and\n returning the post-processed batch\n\n :param model: emloop model to be run\n :param dataset: emloop dataset to get the stream from\n :param stream_name: stream name\n :param payload: payload passed to the method creating the stream\n :return: result batch (if the stream produces multiple batches its the concatenation of all the results)\n '
result = defaultdict(list)
for input_batch in getattr(dataset, (stream_name + '_stream'))(payload):
logging.info('Another batch (%s)', list(input_batch.keys()))
output_batch = model.run(input_batch, train=False, stream=None)
if hasattr(dataset, 'postprocess_batch'):
logging.info('\tPostprocessing')
result_batch = dataset.postprocess_batch(input_batch=input_batch, output_batch=output_batch)
logging.info('\tdone')
else:
logging.info('Skipping postprocessing')
result_batch = output_batch
for (source, value) in result_batch.items():
result[source] += list(value)
return result | def run(model: el.AbstractModel, dataset: el.AbstractDataset, stream_name: str, payload: Any) -> el.Batch:
'\n Get the specified data stream from the given dataset, apply the given model on its batches and return the results.\n\n The components have to be **emloop** compatible with:\n - dataset having method named ``[stream_name]_stream`` taking the payload and returning the stream\n - (optional) dataset having method named ``postprocess_batch`` taking both the input and output batches and\n returning the post-processed batch\n\n :param model: emloop model to be run\n :param dataset: emloop dataset to get the stream from\n :param stream_name: stream name\n :param payload: payload passed to the method creating the stream\n :return: result batch (if the stream produces multiple batches its the concatenation of all the results)\n '
result = defaultdict(list)
for input_batch in getattr(dataset, (stream_name + '_stream'))(payload):
logging.info('Another batch (%s)', list(input_batch.keys()))
output_batch = model.run(input_batch, train=False, stream=None)
if hasattr(dataset, 'postprocess_batch'):
logging.info('\tPostprocessing')
result_batch = dataset.postprocess_batch(input_batch=input_batch, output_batch=output_batch)
logging.info('\tdone')
else:
logging.info('Skipping postprocessing')
result_batch = output_batch
for (source, value) in result_batch.items():
result[source] += list(value)
return result<|docstring|>Get the specified data stream from the given dataset, apply the given model on its batches and return the results.
The components have to be **emloop** compatible with:
- dataset having method named ``[stream_name]_stream`` taking the payload and returning the stream
- (optional) dataset having method named ``postprocess_batch`` taking both the input and output batches and
returning the post-processed batch
:param model: emloop model to be run
:param dataset: emloop dataset to get the stream from
:param stream_name: stream name
:param payload: payload passed to the method creating the stream
:return: result batch (if the stream produces multiple batches its the concatenation of all the results)<|endoftext|> |
97feba130cace2c037110d86d2acf5ea25eec9429baed5cdd72fc23232626ec6 | def _process_job(self, input_path: str, output_path: str) -> None:
'\n Process a JSON job\n - load ``input_path``/``input``\n - create dataset stream with the loaded JSON\n - run the model\n - save the output to ``output_path``/``output``\n\n :param input_path: input data directory\n :param output_path: output data directory\n '
self._load_dataset()
self._load_model()
payload = json.load(open(path.join(input_path, DEFAULT_PAYLOAD_FILE)))
result = run(self._model, self._dataset, self._stream_name, payload)
result_json = to_json_serializable(result)
json.dump(result_json, open(path.join(output_path, DEFAULT_OUTPUT_FILE), 'w')) | Process a JSON job
- load ``input_path``/``input``
- create dataset stream with the loaded JSON
- run the model
- save the output to ``output_path``/``output``
:param input_path: input data directory
:param output_path: output data directory | shepherd/runner/json_runner.py | _process_job | iterait/shepherd | 5 | python | def _process_job(self, input_path: str, output_path: str) -> None:
'\n Process a JSON job\n - load ``input_path``/``input``\n - create dataset stream with the loaded JSON\n - run the model\n - save the output to ``output_path``/``output``\n\n :param input_path: input data directory\n :param output_path: output data directory\n '
self._load_dataset()
self._load_model()
payload = json.load(open(path.join(input_path, DEFAULT_PAYLOAD_FILE)))
result = run(self._model, self._dataset, self._stream_name, payload)
result_json = to_json_serializable(result)
json.dump(result_json, open(path.join(output_path, DEFAULT_OUTPUT_FILE), 'w')) | def _process_job(self, input_path: str, output_path: str) -> None:
'\n Process a JSON job\n - load ``input_path``/``input``\n - create dataset stream with the loaded JSON\n - run the model\n - save the output to ``output_path``/``output``\n\n :param input_path: input data directory\n :param output_path: output data directory\n '
self._load_dataset()
self._load_model()
payload = json.load(open(path.join(input_path, DEFAULT_PAYLOAD_FILE)))
result = run(self._model, self._dataset, self._stream_name, payload)
result_json = to_json_serializable(result)
json.dump(result_json, open(path.join(output_path, DEFAULT_OUTPUT_FILE), 'w'))<|docstring|>Process a JSON job
- load ``input_path``/``input``
- create dataset stream with the loaded JSON
- run the model
- save the output to ``output_path``/``output``
:param input_path: input data directory
:param output_path: output data directory<|endoftext|> |
21df66eb71da9371128ed988b1ca7b2c76ff611bb653315912d53b7bccd0e74c | def repackage_hidden(h):
'Wraps hidden states in new Tensors, to detach them from their history.'
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple((repackage_hidden(v) for v in h)) | Wraps hidden states in new Tensors, to detach them from their history. | telemanom/modeling.py | repackage_hidden | sedgewickmm18/telemanom | 0 | python | def repackage_hidden(h):
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple((repackage_hidden(v) for v in h)) | def repackage_hidden(h):
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple((repackage_hidden(v) for v in h))<|docstring|>Wraps hidden states in new Tensors, to detach them from their history.<|endoftext|> |
edf8e2edc8deb9823ac22596955909ccf8bf58ea4ed730a8cdfc59879b1768c0 | def __init__(self, config, run_id, channel, Path=None, Train=True):
'\n Loads/trains RNN and predicts future telemetry values for a channel.\n\n Args:\n config (obj): Config object containing parameters for processing\n and model training\n run_id (str): Datetime referencing set of predictions in use\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n\n Attributes:\n config (obj): see Args\n chan_id (str): channel id\n run_id (str): see Args\n y_hat (arr): predicted channel values\n model (obj): trained RNN model for predicting channel values\n '
self.name = 'Model'
self.config = config
self.chan_id = channel.id
self.run_id = run_id
self.y_hat = np.array([])
self.model = None
self.onnx_session = None
self.history = None
if (Path is None):
Path = ''
if (not Train):
self.new_model((None, channel.X_train.shape[2]))
elif (not self.config.train):
try:
self.load(Path)
except FileNotFoundError:
path = os.path.join(Path, 'data', self.config.use_id, 'models', (self.chan_id + '.h5'))
logger.warning("Training new model, couldn't find existing model at {}".format(path))
self.train_new(channel)
self.save(Path)
else:
self.train_new(channel)
self.save(Path) | Loads/trains RNN and predicts future telemetry values for a channel.
Args:
config (obj): Config object containing parameters for processing
and model training
run_id (str): Datetime referencing set of predictions in use
channel (obj): Channel class object containing train/test data
for X,y for a single channel
Attributes:
config (obj): see Args
chan_id (str): channel id
run_id (str): see Args
y_hat (arr): predicted channel values
model (obj): trained RNN model for predicting channel values | telemanom/modeling.py | __init__ | sedgewickmm18/telemanom | 0 | python | def __init__(self, config, run_id, channel, Path=None, Train=True):
'\n Loads/trains RNN and predicts future telemetry values for a channel.\n\n Args:\n config (obj): Config object containing parameters for processing\n and model training\n run_id (str): Datetime referencing set of predictions in use\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n\n Attributes:\n config (obj): see Args\n chan_id (str): channel id\n run_id (str): see Args\n y_hat (arr): predicted channel values\n model (obj): trained RNN model for predicting channel values\n '
self.name = 'Model'
self.config = config
self.chan_id = channel.id
self.run_id = run_id
self.y_hat = np.array([])
self.model = None
self.onnx_session = None
self.history = None
if (Path is None):
Path =
if (not Train):
self.new_model((None, channel.X_train.shape[2]))
elif (not self.config.train):
try:
self.load(Path)
except FileNotFoundError:
path = os.path.join(Path, 'data', self.config.use_id, 'models', (self.chan_id + '.h5'))
logger.warning("Training new model, couldn't find existing model at {}".format(path))
self.train_new(channel)
self.save(Path)
else:
self.train_new(channel)
self.save(Path) | def __init__(self, config, run_id, channel, Path=None, Train=True):
'\n Loads/trains RNN and predicts future telemetry values for a channel.\n\n Args:\n config (obj): Config object containing parameters for processing\n and model training\n run_id (str): Datetime referencing set of predictions in use\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n\n Attributes:\n config (obj): see Args\n chan_id (str): channel id\n run_id (str): see Args\n y_hat (arr): predicted channel values\n model (obj): trained RNN model for predicting channel values\n '
self.name = 'Model'
self.config = config
self.chan_id = channel.id
self.run_id = run_id
self.y_hat = np.array([])
self.model = None
self.onnx_session = None
self.history = None
if (Path is None):
Path =
if (not Train):
self.new_model((None, channel.X_train.shape[2]))
elif (not self.config.train):
try:
self.load(Path)
except FileNotFoundError:
path = os.path.join(Path, 'data', self.config.use_id, 'models', (self.chan_id + '.h5'))
logger.warning("Training new model, couldn't find existing model at {}".format(path))
self.train_new(channel)
self.save(Path)
else:
self.train_new(channel)
self.save(Path)<|docstring|>Loads/trains RNN and predicts future telemetry values for a channel.
Args:
config (obj): Config object containing parameters for processing
and model training
run_id (str): Datetime referencing set of predictions in use
channel (obj): Channel class object containing train/test data
for X,y for a single channel
Attributes:
config (obj): see Args
chan_id (str): channel id
run_id (str): see Args
y_hat (arr): predicted channel values
model (obj): trained RNN model for predicting channel values<|endoftext|> |
26cdb7311e4d4801f26d1723f7ba41003755d5a7275e15b4a1fc5b0e89fafb01 | def load(self, Path=None):
'\n Load model for channel.\n '
logger.info('Loading pre-trained model')
self.model = self.model.load_state_dict(torch.load(os.path.join(Path, self.config.use_id, 'models', (self.chan_id + '.h5')))) | Load model for channel. | telemanom/modeling.py | load | sedgewickmm18/telemanom | 0 | python | def load(self, Path=None):
'\n \n '
logger.info('Loading pre-trained model')
self.model = self.model.load_state_dict(torch.load(os.path.join(Path, self.config.use_id, 'models', (self.chan_id + '.h5')))) | def load(self, Path=None):
'\n \n '
logger.info('Loading pre-trained model')
self.model = self.model.load_state_dict(torch.load(os.path.join(Path, self.config.use_id, 'models', (self.chan_id + '.h5'))))<|docstring|>Load model for channel.<|endoftext|> |
6c29c56cbfaf032788d2669d2d673244d45ed1fa1820f68402aa2c0217d4379c | def load_onnx(self, Path=None):
'\n Load ONNX model\n '
import onnxruntime
if (Path is None):
Path = ''
self.onnx_session = onnxruntime.InferenceSession(Path) | Load ONNX model | telemanom/modeling.py | load_onnx | sedgewickmm18/telemanom | 0 | python | def load_onnx(self, Path=None):
'\n \n '
import onnxruntime
if (Path is None):
Path =
self.onnx_session = onnxruntime.InferenceSession(Path) | def load_onnx(self, Path=None):
'\n \n '
import onnxruntime
if (Path is None):
Path =
self.onnx_session = onnxruntime.InferenceSession(Path)<|docstring|>Load ONNX model<|endoftext|> |
30c01d4bba4bf344474db1b16951bae8d6aba2c4756c36cb062d8791a5ea5af8 | def new_model(self, Input_shape):
'\n Train LSTM model according to specifications in config.yaml.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n '
if (self.model is not None):
return
self.model = LSTM_2L(n_features=Input_shape[1], hidden_dims=self.config.layers, seq_length=self.config.l_s, batch_size=self.config.lstm_batch_size, n_predictions=self.config.n_predictions, dropout=self.config.dropout)
print('input shape: ', Input_shape)
return | Train LSTM model according to specifications in config.yaml.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel | telemanom/modeling.py | new_model | sedgewickmm18/telemanom | 0 | python | def new_model(self, Input_shape):
'\n Train LSTM model according to specifications in config.yaml.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n '
if (self.model is not None):
return
self.model = LSTM_2L(n_features=Input_shape[1], hidden_dims=self.config.layers, seq_length=self.config.l_s, batch_size=self.config.lstm_batch_size, n_predictions=self.config.n_predictions, dropout=self.config.dropout)
print('input shape: ', Input_shape)
return | def new_model(self, Input_shape):
'\n Train LSTM model according to specifications in config.yaml.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n '
if (self.model is not None):
return
self.model = LSTM_2L(n_features=Input_shape[1], hidden_dims=self.config.layers, seq_length=self.config.l_s, batch_size=self.config.lstm_batch_size, n_predictions=self.config.n_predictions, dropout=self.config.dropout)
print('input shape: ', Input_shape)
return<|docstring|>Train LSTM model according to specifications in config.yaml.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel<|endoftext|> |
4e40e3ebf66f8e2c067f7dfd59e6a6b4b604cefccc892843559688e812b134ec | def train_new(self, channel):
'\n Train LSTM model according to specifications in config.yaml.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n '
self.new_model((None, channel.X_train.shape[2]))
self.model.apply(initialize_weights)
training_losses = []
validation_losses = []
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(self.model.parameters())
train_hist = np.zeros(self.config.epochs)
(X_train, X_validation, y_train, y_validation) = train_test_split(channel.X_train, channel.y_train, train_size=0.8)
print('Shapes: ', channel.X_train.shape, channel.y_train.shape)
print('Training shapes: ', X_train.shape, y_train.shape)
print('Validation shapes: ', X_validation.shape, y_validation.shape)
train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train))
validation_dataset = TensorDataset(torch.Tensor(X_validation), torch.Tensor(y_validation))
train_loader = DataLoader(dataset=train_dataset, batch_size=self.config.lstm_batch_size, drop_last=True, shuffle=True)
val_loader = DataLoader(dataset=validation_dataset, batch_size=self.config.lstm_batch_size, drop_last=True, shuffle=True)
self.model.train()
print('Beginning model training...')
hidden = self.model.init_hidden_state()
for t in range(self.config.epochs):
train_losses_batch = []
print('Epoch ', t)
i = 0
for (X_batch_train, y_batch_train) in train_loader:
print('Batch ', i)
i += 1
hidden = repackage_hidden(hidden)
(y_hat_train, hidden) = self.model(X_batch_train, hidden=hidden)
loss = loss_function(y_hat_train.float(), y_batch_train)
train_loss_batch = loss.item()
loss.backward()
optimizer.step()
optimizer.zero_grad()
train_losses_batch.append(train_loss_batch)
training_loss = np.mean(train_losses_batch)
print('After batch ', (i - 1), training_loss)
training_losses.append(training_loss)
with torch.no_grad():
val_losses_batch = []
for (X_val_batch, y_val_batch) in val_loader:
self.model.eval()
(y_hat_val, _) = self.model(X_val_batch, hidden=hidden)
val_loss_batch = loss_function(y_hat_val.float(), y_val_batch).item()
val_losses_batch.append(val_loss_batch)
validation_loss = np.mean(val_losses_batch)
validation_losses.append(validation_loss)
print(f'[{(t + 1)}] Training loss: {training_loss} Validation loss: {validation_loss} ')
if ((training_loss < 0.02) and (validation_loss < 0.02)):
break
print('Training complete...')
return self.model.eval() | Train LSTM model according to specifications in config.yaml.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel | telemanom/modeling.py | train_new | sedgewickmm18/telemanom | 0 | python | def train_new(self, channel):
'\n Train LSTM model according to specifications in config.yaml.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n '
self.new_model((None, channel.X_train.shape[2]))
self.model.apply(initialize_weights)
training_losses = []
validation_losses = []
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(self.model.parameters())
train_hist = np.zeros(self.config.epochs)
(X_train, X_validation, y_train, y_validation) = train_test_split(channel.X_train, channel.y_train, train_size=0.8)
print('Shapes: ', channel.X_train.shape, channel.y_train.shape)
print('Training shapes: ', X_train.shape, y_train.shape)
print('Validation shapes: ', X_validation.shape, y_validation.shape)
train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train))
validation_dataset = TensorDataset(torch.Tensor(X_validation), torch.Tensor(y_validation))
train_loader = DataLoader(dataset=train_dataset, batch_size=self.config.lstm_batch_size, drop_last=True, shuffle=True)
val_loader = DataLoader(dataset=validation_dataset, batch_size=self.config.lstm_batch_size, drop_last=True, shuffle=True)
self.model.train()
print('Beginning model training...')
hidden = self.model.init_hidden_state()
for t in range(self.config.epochs):
train_losses_batch = []
print('Epoch ', t)
i = 0
for (X_batch_train, y_batch_train) in train_loader:
print('Batch ', i)
i += 1
hidden = repackage_hidden(hidden)
(y_hat_train, hidden) = self.model(X_batch_train, hidden=hidden)
loss = loss_function(y_hat_train.float(), y_batch_train)
train_loss_batch = loss.item()
loss.backward()
optimizer.step()
optimizer.zero_grad()
train_losses_batch.append(train_loss_batch)
training_loss = np.mean(train_losses_batch)
print('After batch ', (i - 1), training_loss)
training_losses.append(training_loss)
with torch.no_grad():
val_losses_batch = []
for (X_val_batch, y_val_batch) in val_loader:
self.model.eval()
(y_hat_val, _) = self.model(X_val_batch, hidden=hidden)
val_loss_batch = loss_function(y_hat_val.float(), y_val_batch).item()
val_losses_batch.append(val_loss_batch)
validation_loss = np.mean(val_losses_batch)
validation_losses.append(validation_loss)
print(f'[{(t + 1)}] Training loss: {training_loss} Validation loss: {validation_loss} ')
if ((training_loss < 0.02) and (validation_loss < 0.02)):
break
print('Training complete...')
return self.model.eval() | def train_new(self, channel):
'\n Train LSTM model according to specifications in config.yaml.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n '
self.new_model((None, channel.X_train.shape[2]))
self.model.apply(initialize_weights)
training_losses = []
validation_losses = []
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(self.model.parameters())
train_hist = np.zeros(self.config.epochs)
(X_train, X_validation, y_train, y_validation) = train_test_split(channel.X_train, channel.y_train, train_size=0.8)
print('Shapes: ', channel.X_train.shape, channel.y_train.shape)
print('Training shapes: ', X_train.shape, y_train.shape)
print('Validation shapes: ', X_validation.shape, y_validation.shape)
train_dataset = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train))
validation_dataset = TensorDataset(torch.Tensor(X_validation), torch.Tensor(y_validation))
train_loader = DataLoader(dataset=train_dataset, batch_size=self.config.lstm_batch_size, drop_last=True, shuffle=True)
val_loader = DataLoader(dataset=validation_dataset, batch_size=self.config.lstm_batch_size, drop_last=True, shuffle=True)
self.model.train()
print('Beginning model training...')
hidden = self.model.init_hidden_state()
for t in range(self.config.epochs):
train_losses_batch = []
print('Epoch ', t)
i = 0
for (X_batch_train, y_batch_train) in train_loader:
print('Batch ', i)
i += 1
hidden = repackage_hidden(hidden)
(y_hat_train, hidden) = self.model(X_batch_train, hidden=hidden)
loss = loss_function(y_hat_train.float(), y_batch_train)
train_loss_batch = loss.item()
loss.backward()
optimizer.step()
optimizer.zero_grad()
train_losses_batch.append(train_loss_batch)
training_loss = np.mean(train_losses_batch)
print('After batch ', (i - 1), training_loss)
training_losses.append(training_loss)
with torch.no_grad():
val_losses_batch = []
for (X_val_batch, y_val_batch) in val_loader:
self.model.eval()
(y_hat_val, _) = self.model(X_val_batch, hidden=hidden)
val_loss_batch = loss_function(y_hat_val.float(), y_val_batch).item()
val_losses_batch.append(val_loss_batch)
validation_loss = np.mean(val_losses_batch)
validation_losses.append(validation_loss)
print(f'[{(t + 1)}] Training loss: {training_loss} Validation loss: {validation_loss} ')
if ((training_loss < 0.02) and (validation_loss < 0.02)):
break
print('Training complete...')
return self.model.eval()<|docstring|>Train LSTM model according to specifications in config.yaml.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel<|endoftext|> |
552b35655e82d7d5d1187c18176e2d0702f68fe718a68de0158ce383d71ac559 | def export(self, Path=None):
'\n Export trained model as ONNX\n '
self.model.eval()
if (Path is None):
Path = ''
torch_in = None
torch_out = None
with torch.no_grad():
torch_in = torch.randn(self.model.batch_size, self.model.hidden_dim, self.model.n_features, requires_grad=True)
(torch_out, _) = self.model(torch_in)
torch.onnx.export(self.model, torch_in, Path)
"\n onnx_model = onnx.load(Path)\n\n logger.debug('Checked model: ', str(onnx_model.graph.input[0]))\n\n onnx.checker.check_model(onnx_model)\n " | Export trained model as ONNX | telemanom/modeling.py | export | sedgewickmm18/telemanom | 0 | python | def export(self, Path=None):
'\n \n '
self.model.eval()
if (Path is None):
Path =
torch_in = None
torch_out = None
with torch.no_grad():
torch_in = torch.randn(self.model.batch_size, self.model.hidden_dim, self.model.n_features, requires_grad=True)
(torch_out, _) = self.model(torch_in)
torch.onnx.export(self.model, torch_in, Path)
"\n onnx_model = onnx.load(Path)\n\n logger.debug('Checked model: ', str(onnx_model.graph.input[0]))\n\n onnx.checker.check_model(onnx_model)\n " | def export(self, Path=None):
'\n \n '
self.model.eval()
if (Path is None):
Path =
torch_in = None
torch_out = None
with torch.no_grad():
torch_in = torch.randn(self.model.batch_size, self.model.hidden_dim, self.model.n_features, requires_grad=True)
(torch_out, _) = self.model(torch_in)
torch.onnx.export(self.model, torch_in, Path)
"\n onnx_model = onnx.load(Path)\n\n logger.debug('Checked model: ', str(onnx_model.graph.input[0]))\n\n onnx.checker.check_model(onnx_model)\n "<|docstring|>Export trained model as ONNX<|endoftext|> |
3fb612763cce0ec7b064fafc75f540b5f03ad837e9f46f5e745fe9fac442b5f9 | def save(self, Path=None):
'\n Save trained model.\n '
self.model.eval()
if (Path is None):
Path = ''
torch.save(self.model.state_dict(), os.path.join(Path, 'data', self.run_id, 'models', '{}.torch'.format(self.chan_id))) | Save trained model. | telemanom/modeling.py | save | sedgewickmm18/telemanom | 0 | python | def save(self, Path=None):
'\n \n '
self.model.eval()
if (Path is None):
Path =
torch.save(self.model.state_dict(), os.path.join(Path, 'data', self.run_id, 'models', '{}.torch'.format(self.chan_id))) | def save(self, Path=None):
'\n \n '
self.model.eval()
if (Path is None):
Path =
torch.save(self.model.state_dict(), os.path.join(Path, 'data', self.run_id, 'models', '{}.torch'.format(self.chan_id)))<|docstring|>Save trained model.<|endoftext|> |
c66a575629427c42929dbda0c506e51bb404adcda066eaec85fbf78586ac0e93 | def aggregate_predictions(self, y_hat_batch, method='first'):
'\n Aggregates predictions for each timestep. When predicting n steps\n ahead where n > 1, will end up with multiple predictions for a\n timestep.\n\n Args:\n y_hat_batch (arr): predictions shape (<batch length>, <n_preds)\n method (string): indicates how to aggregate for a timestep - "first"\n or "mean"\n '
agg_y_hat_batch = np.array([])
for t in range(len(y_hat_batch)):
start_idx = (t - self.config.n_predictions)
start_idx = (start_idx if (start_idx >= 0) else 0)
y_hat_t = np.flipud(y_hat_batch[start_idx:(t + 1)]).diagonal()
if (method == 'first'):
agg_y_hat_batch = np.append(agg_y_hat_batch, [y_hat_t[0]])
elif (method == 'mean'):
agg_y_hat_batch = np.append(agg_y_hat_batch, np.mean(y_hat_t))
agg_y_hat_batch = agg_y_hat_batch.reshape(len(agg_y_hat_batch), 1)
self.y_hat = np.append(self.y_hat, agg_y_hat_batch) | Aggregates predictions for each timestep. When predicting n steps
ahead where n > 1, will end up with multiple predictions for a
timestep.
Args:
y_hat_batch (arr): predictions shape (<batch length>, <n_preds)
method (string): indicates how to aggregate for a timestep - "first"
or "mean" | telemanom/modeling.py | aggregate_predictions | sedgewickmm18/telemanom | 0 | python | def aggregate_predictions(self, y_hat_batch, method='first'):
'\n Aggregates predictions for each timestep. When predicting n steps\n ahead where n > 1, will end up with multiple predictions for a\n timestep.\n\n Args:\n y_hat_batch (arr): predictions shape (<batch length>, <n_preds)\n method (string): indicates how to aggregate for a timestep - "first"\n or "mean"\n '
agg_y_hat_batch = np.array([])
for t in range(len(y_hat_batch)):
start_idx = (t - self.config.n_predictions)
start_idx = (start_idx if (start_idx >= 0) else 0)
y_hat_t = np.flipud(y_hat_batch[start_idx:(t + 1)]).diagonal()
if (method == 'first'):
agg_y_hat_batch = np.append(agg_y_hat_batch, [y_hat_t[0]])
elif (method == 'mean'):
agg_y_hat_batch = np.append(agg_y_hat_batch, np.mean(y_hat_t))
agg_y_hat_batch = agg_y_hat_batch.reshape(len(agg_y_hat_batch), 1)
self.y_hat = np.append(self.y_hat, agg_y_hat_batch) | def aggregate_predictions(self, y_hat_batch, method='first'):
'\n Aggregates predictions for each timestep. When predicting n steps\n ahead where n > 1, will end up with multiple predictions for a\n timestep.\n\n Args:\n y_hat_batch (arr): predictions shape (<batch length>, <n_preds)\n method (string): indicates how to aggregate for a timestep - "first"\n or "mean"\n '
agg_y_hat_batch = np.array([])
for t in range(len(y_hat_batch)):
start_idx = (t - self.config.n_predictions)
start_idx = (start_idx if (start_idx >= 0) else 0)
y_hat_t = np.flipud(y_hat_batch[start_idx:(t + 1)]).diagonal()
if (method == 'first'):
agg_y_hat_batch = np.append(agg_y_hat_batch, [y_hat_t[0]])
elif (method == 'mean'):
agg_y_hat_batch = np.append(agg_y_hat_batch, np.mean(y_hat_t))
agg_y_hat_batch = agg_y_hat_batch.reshape(len(agg_y_hat_batch), 1)
self.y_hat = np.append(self.y_hat, agg_y_hat_batch)<|docstring|>Aggregates predictions for each timestep. When predicting n steps
ahead where n > 1, will end up with multiple predictions for a
timestep.
Args:
y_hat_batch (arr): predictions shape (<batch length>, <n_preds)
method (string): indicates how to aggregate for a timestep - "first"
or "mean"<|endoftext|> |
d61e5d8d5e8039f70753999b6c3d4db5739979370f2b4abdec72ca5708db6080 | def batch_predict(self, channel, Train=False, Path=None):
'\n Used trained LSTM model to predict test data arriving in batches.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n\n Returns:\n channel (obj): Channel class object with y_hat values as attribute\n '
if Train:
num_batches = int(((channel.y_train.shape[0] - self.config.l_s) / self.config.batch_size))
else:
num_batches = int(((channel.y_test.shape[0] - self.config.l_s) / self.config.batch_size))
logger.debug('predict: num_batches ', num_batches)
if (num_batches < 0):
raise ValueError('l_s ({}) too large for stream length {}.'.format(self.config.l_s, channel.y_test.shape[0]))
for i in range(0, (num_batches + 1)):
prior_idx = (i * self.config.batch_size)
idx = ((i + 1) * self.config.batch_size)
if ((i + 1) == (num_batches + 1)):
if Train:
idx = channel.y_test.shape[0]
else:
idx = channel.y_train.shape[0]
if Train:
X_train_batch = channel.X_train[prior_idx:idx]
if (self.onnx_session is not None):
ort_outs = self.onnx_session.run(None, X_train_batch)
y_hat_batch = ort_outs[0]
else:
(y_hat_batch, _) = self.model(X_train_batch)
else:
X_test_batch = channel.X_test[prior_idx:idx]
if (self.onnx_session is not None):
ort_inputs = {self.onnx_session.get_inputs()[0].name: X_test_batch.astype(np.float32)}
ort_outs = self.onnx_session.run(None, ort_inputs)
y_hat_batch = ort_outs[0]
else:
(y_hat_batch, _) = self.model(X_test_batch)
logger.debug('predict: batch ', i, ' - ', y_hat_batch.shape)
self.aggregate_predictions(y_hat_batch)
self.y_hat = np.reshape(self.y_hat, (self.y_hat.size,))
if Train:
channel.y_train_hat = self.y_hat
elif self.config.FFT:
logger.info('FFT modelling')
channel.y_hat = sp.fft.irfft(self.y_hat)
else:
channel.y_hat = self.y_hat
if (Path is None):
Path = ''
np.save(os.path.join(Path, 'data', self.run_id, 'y_hat', '{}.npy'.format(self.chan_id)), self.y_hat)
return channel | Used trained LSTM model to predict test data arriving in batches.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
Returns:
channel (obj): Channel class object with y_hat values as attribute | telemanom/modeling.py | batch_predict | sedgewickmm18/telemanom | 0 | python | def batch_predict(self, channel, Train=False, Path=None):
'\n Used trained LSTM model to predict test data arriving in batches.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n\n Returns:\n channel (obj): Channel class object with y_hat values as attribute\n '
if Train:
num_batches = int(((channel.y_train.shape[0] - self.config.l_s) / self.config.batch_size))
else:
num_batches = int(((channel.y_test.shape[0] - self.config.l_s) / self.config.batch_size))
logger.debug('predict: num_batches ', num_batches)
if (num_batches < 0):
raise ValueError('l_s ({}) too large for stream length {}.'.format(self.config.l_s, channel.y_test.shape[0]))
for i in range(0, (num_batches + 1)):
prior_idx = (i * self.config.batch_size)
idx = ((i + 1) * self.config.batch_size)
if ((i + 1) == (num_batches + 1)):
if Train:
idx = channel.y_test.shape[0]
else:
idx = channel.y_train.shape[0]
if Train:
X_train_batch = channel.X_train[prior_idx:idx]
if (self.onnx_session is not None):
ort_outs = self.onnx_session.run(None, X_train_batch)
y_hat_batch = ort_outs[0]
else:
(y_hat_batch, _) = self.model(X_train_batch)
else:
X_test_batch = channel.X_test[prior_idx:idx]
if (self.onnx_session is not None):
ort_inputs = {self.onnx_session.get_inputs()[0].name: X_test_batch.astype(np.float32)}
ort_outs = self.onnx_session.run(None, ort_inputs)
y_hat_batch = ort_outs[0]
else:
(y_hat_batch, _) = self.model(X_test_batch)
logger.debug('predict: batch ', i, ' - ', y_hat_batch.shape)
self.aggregate_predictions(y_hat_batch)
self.y_hat = np.reshape(self.y_hat, (self.y_hat.size,))
if Train:
channel.y_train_hat = self.y_hat
elif self.config.FFT:
logger.info('FFT modelling')
channel.y_hat = sp.fft.irfft(self.y_hat)
else:
channel.y_hat = self.y_hat
if (Path is None):
Path =
np.save(os.path.join(Path, 'data', self.run_id, 'y_hat', '{}.npy'.format(self.chan_id)), self.y_hat)
return channel | def batch_predict(self, channel, Train=False, Path=None):
'\n Used trained LSTM model to predict test data arriving in batches.\n\n Args:\n channel (obj): Channel class object containing train/test data\n for X,y for a single channel\n\n Returns:\n channel (obj): Channel class object with y_hat values as attribute\n '
if Train:
num_batches = int(((channel.y_train.shape[0] - self.config.l_s) / self.config.batch_size))
else:
num_batches = int(((channel.y_test.shape[0] - self.config.l_s) / self.config.batch_size))
logger.debug('predict: num_batches ', num_batches)
if (num_batches < 0):
raise ValueError('l_s ({}) too large for stream length {}.'.format(self.config.l_s, channel.y_test.shape[0]))
for i in range(0, (num_batches + 1)):
prior_idx = (i * self.config.batch_size)
idx = ((i + 1) * self.config.batch_size)
if ((i + 1) == (num_batches + 1)):
if Train:
idx = channel.y_test.shape[0]
else:
idx = channel.y_train.shape[0]
if Train:
X_train_batch = channel.X_train[prior_idx:idx]
if (self.onnx_session is not None):
ort_outs = self.onnx_session.run(None, X_train_batch)
y_hat_batch = ort_outs[0]
else:
(y_hat_batch, _) = self.model(X_train_batch)
else:
X_test_batch = channel.X_test[prior_idx:idx]
if (self.onnx_session is not None):
ort_inputs = {self.onnx_session.get_inputs()[0].name: X_test_batch.astype(np.float32)}
ort_outs = self.onnx_session.run(None, ort_inputs)
y_hat_batch = ort_outs[0]
else:
(y_hat_batch, _) = self.model(X_test_batch)
logger.debug('predict: batch ', i, ' - ', y_hat_batch.shape)
self.aggregate_predictions(y_hat_batch)
self.y_hat = np.reshape(self.y_hat, (self.y_hat.size,))
if Train:
channel.y_train_hat = self.y_hat
elif self.config.FFT:
logger.info('FFT modelling')
channel.y_hat = sp.fft.irfft(self.y_hat)
else:
channel.y_hat = self.y_hat
if (Path is None):
Path =
np.save(os.path.join(Path, 'data', self.run_id, 'y_hat', '{}.npy'.format(self.chan_id)), self.y_hat)
return channel<|docstring|>Used trained LSTM model to predict test data arriving in batches.
Args:
channel (obj): Channel class object containing train/test data
for X,y for a single channel
Returns:
channel (obj): Channel class object with y_hat values as attribute<|endoftext|> |
74d41f21b8ef8ddc31d7054478e6c5571b6c1c7f023d5740a83b3fb932ad8046 | def __init__(__self__, *, encryption_algorithm: str, value: str, encryption_cert_thumbprint: Optional[str]=None):
'\n Represent the secrets intended for encryption with asymmetric key pair.\n :param str encryption_algorithm: The algorithm used to encrypt "Value".\n :param str value: The value of the secret.\n :param str encryption_cert_thumbprint: Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null.\n '
pulumi.set(__self__, 'encryption_algorithm', encryption_algorithm)
pulumi.set(__self__, 'value', value)
if (encryption_cert_thumbprint is not None):
pulumi.set(__self__, 'encryption_cert_thumbprint', encryption_cert_thumbprint) | Represent the secrets intended for encryption with asymmetric key pair.
:param str encryption_algorithm: The algorithm used to encrypt "Value".
:param str value: The value of the secret.
:param str encryption_cert_thumbprint: Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null. | sdk/python/pulumi_azure_native/storsimple/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, encryption_algorithm: str, value: str, encryption_cert_thumbprint: Optional[str]=None):
'\n Represent the secrets intended for encryption with asymmetric key pair.\n :param str encryption_algorithm: The algorithm used to encrypt "Value".\n :param str value: The value of the secret.\n :param str encryption_cert_thumbprint: Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null.\n '
pulumi.set(__self__, 'encryption_algorithm', encryption_algorithm)
pulumi.set(__self__, 'value', value)
if (encryption_cert_thumbprint is not None):
pulumi.set(__self__, 'encryption_cert_thumbprint', encryption_cert_thumbprint) | def __init__(__self__, *, encryption_algorithm: str, value: str, encryption_cert_thumbprint: Optional[str]=None):
'\n Represent the secrets intended for encryption with asymmetric key pair.\n :param str encryption_algorithm: The algorithm used to encrypt "Value".\n :param str value: The value of the secret.\n :param str encryption_cert_thumbprint: Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null.\n '
pulumi.set(__self__, 'encryption_algorithm', encryption_algorithm)
pulumi.set(__self__, 'value', value)
if (encryption_cert_thumbprint is not None):
pulumi.set(__self__, 'encryption_cert_thumbprint', encryption_cert_thumbprint)<|docstring|>Represent the secrets intended for encryption with asymmetric key pair.
:param str encryption_algorithm: The algorithm used to encrypt "Value".
:param str value: The value of the secret.
:param str encryption_cert_thumbprint: Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null.<|endoftext|> |
33ef9ec755dda7c2696c55275316fd8b716cd45e27b8b88a10a77e4a73b2d7a6 | @property
@pulumi.getter(name='encryptionAlgorithm')
def encryption_algorithm(self) -> str:
'\n The algorithm used to encrypt "Value".\n '
return pulumi.get(self, 'encryption_algorithm') | The algorithm used to encrypt "Value". | sdk/python/pulumi_azure_native/storsimple/outputs.py | encryption_algorithm | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='encryptionAlgorithm')
def encryption_algorithm(self) -> str:
'\n \n '
return pulumi.get(self, 'encryption_algorithm') | @property
@pulumi.getter(name='encryptionAlgorithm')
def encryption_algorithm(self) -> str:
'\n \n '
return pulumi.get(self, 'encryption_algorithm')<|docstring|>The algorithm used to encrypt "Value".<|endoftext|> |
a72b97818c5757b45e23443833e92535957f24e9405b4d1edad11cd16dede7a0 | @property
@pulumi.getter
def value(self) -> str:
'\n The value of the secret.\n '
return pulumi.get(self, 'value') | The value of the secret. | sdk/python/pulumi_azure_native/storsimple/outputs.py | value | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def value(self) -> str:
'\n \n '
return pulumi.get(self, 'value') | @property
@pulumi.getter
def value(self) -> str:
'\n \n '
return pulumi.get(self, 'value')<|docstring|>The value of the secret.<|endoftext|> |
5c563ba8474071a16f58934a68e897716d81b54e2dfe42b0a6ddf23ecc369258 | @property
@pulumi.getter(name='encryptionCertThumbprint')
def encryption_cert_thumbprint(self) -> Optional[str]:
'\n Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null.\n '
return pulumi.get(self, 'encryption_cert_thumbprint') | Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null. | sdk/python/pulumi_azure_native/storsimple/outputs.py | encryption_cert_thumbprint | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='encryptionCertThumbprint')
def encryption_cert_thumbprint(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'encryption_cert_thumbprint') | @property
@pulumi.getter(name='encryptionCertThumbprint')
def encryption_cert_thumbprint(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'encryption_cert_thumbprint')<|docstring|>Thumbprint certificate that was used to encrypt "Value". If the value in unencrypted, it will be null.<|endoftext|> |
a41e87ace6e7900f90a28d6418c3d92eac2bf949788db8802dafe629cc18d615 | def __init__(__self__, *, days: Sequence[str], rate_in_mbps: int, start: 'outputs.TimeResponse', stop: 'outputs.TimeResponse'):
"\n The schedule for bandwidth setting.\n :param Sequence[str] days: The days of the week when this schedule is applicable.\n :param int rate_in_mbps: The rate in Mbps.\n :param 'TimeResponseArgs' start: The start time of the schedule.\n :param 'TimeResponseArgs' stop: The stop time of the schedule.\n "
pulumi.set(__self__, 'days', days)
pulumi.set(__self__, 'rate_in_mbps', rate_in_mbps)
pulumi.set(__self__, 'start', start)
pulumi.set(__self__, 'stop', stop) | The schedule for bandwidth setting.
:param Sequence[str] days: The days of the week when this schedule is applicable.
:param int rate_in_mbps: The rate in Mbps.
:param 'TimeResponseArgs' start: The start time of the schedule.
:param 'TimeResponseArgs' stop: The stop time of the schedule. | sdk/python/pulumi_azure_native/storsimple/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, days: Sequence[str], rate_in_mbps: int, start: 'outputs.TimeResponse', stop: 'outputs.TimeResponse'):
"\n The schedule for bandwidth setting.\n :param Sequence[str] days: The days of the week when this schedule is applicable.\n :param int rate_in_mbps: The rate in Mbps.\n :param 'TimeResponseArgs' start: The start time of the schedule.\n :param 'TimeResponseArgs' stop: The stop time of the schedule.\n "
pulumi.set(__self__, 'days', days)
pulumi.set(__self__, 'rate_in_mbps', rate_in_mbps)
pulumi.set(__self__, 'start', start)
pulumi.set(__self__, 'stop', stop) | def __init__(__self__, *, days: Sequence[str], rate_in_mbps: int, start: 'outputs.TimeResponse', stop: 'outputs.TimeResponse'):
"\n The schedule for bandwidth setting.\n :param Sequence[str] days: The days of the week when this schedule is applicable.\n :param int rate_in_mbps: The rate in Mbps.\n :param 'TimeResponseArgs' start: The start time of the schedule.\n :param 'TimeResponseArgs' stop: The stop time of the schedule.\n "
pulumi.set(__self__, 'days', days)
pulumi.set(__self__, 'rate_in_mbps', rate_in_mbps)
pulumi.set(__self__, 'start', start)
pulumi.set(__self__, 'stop', stop)<|docstring|>The schedule for bandwidth setting.
:param Sequence[str] days: The days of the week when this schedule is applicable.
:param int rate_in_mbps: The rate in Mbps.
:param 'TimeResponseArgs' start: The start time of the schedule.
:param 'TimeResponseArgs' stop: The stop time of the schedule.<|endoftext|> |
c2418695034b5aa45c313188df5302ee0baa29e5ade01e84100eaba31c4fd89c | @property
@pulumi.getter
def days(self) -> Sequence[str]:
'\n The days of the week when this schedule is applicable.\n '
return pulumi.get(self, 'days') | The days of the week when this schedule is applicable. | sdk/python/pulumi_azure_native/storsimple/outputs.py | days | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def days(self) -> Sequence[str]:
'\n \n '
return pulumi.get(self, 'days') | @property
@pulumi.getter
def days(self) -> Sequence[str]:
'\n \n '
return pulumi.get(self, 'days')<|docstring|>The days of the week when this schedule is applicable.<|endoftext|> |
76f52346508bf7dceed7d4aea14ba5ab4c354270e8d3605a065e08a3de17ff70 | @property
@pulumi.getter(name='rateInMbps')
def rate_in_mbps(self) -> int:
'\n The rate in Mbps.\n '
return pulumi.get(self, 'rate_in_mbps') | The rate in Mbps. | sdk/python/pulumi_azure_native/storsimple/outputs.py | rate_in_mbps | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='rateInMbps')
def rate_in_mbps(self) -> int:
'\n \n '
return pulumi.get(self, 'rate_in_mbps') | @property
@pulumi.getter(name='rateInMbps')
def rate_in_mbps(self) -> int:
'\n \n '
return pulumi.get(self, 'rate_in_mbps')<|docstring|>The rate in Mbps.<|endoftext|> |
e93b02335adb81368019c807a19a5a53f2a31e5c34edbff1216e0916a3087962 | @property
@pulumi.getter
def start(self) -> 'outputs.TimeResponse':
'\n The start time of the schedule.\n '
return pulumi.get(self, 'start') | The start time of the schedule. | sdk/python/pulumi_azure_native/storsimple/outputs.py | start | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def start(self) -> 'outputs.TimeResponse':
'\n \n '
return pulumi.get(self, 'start') | @property
@pulumi.getter
def start(self) -> 'outputs.TimeResponse':
'\n \n '
return pulumi.get(self, 'start')<|docstring|>The start time of the schedule.<|endoftext|> |
210083d0ee5e8470dd23e897b01c185460dab15042d0617072eeaae85bf3dca6 | @property
@pulumi.getter
def stop(self) -> 'outputs.TimeResponse':
'\n The stop time of the schedule.\n '
return pulumi.get(self, 'stop') | The stop time of the schedule. | sdk/python/pulumi_azure_native/storsimple/outputs.py | stop | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def stop(self) -> 'outputs.TimeResponse':
'\n \n '
return pulumi.get(self, 'stop') | @property
@pulumi.getter
def stop(self) -> 'outputs.TimeResponse':
'\n \n '
return pulumi.get(self, 'stop')<|docstring|>The stop time of the schedule.<|endoftext|> |
fca50fda1a74c790d7b302867e99845f94cfec640a797167476dc0e79f7bc59e | def __init__(__self__, *, error_message: Optional[str]=None, is_eligible_for_failover: Optional[bool]=None):
'\n The eligibility result of failover set, for failover.\n :param str error_message: The error message, if the failover set is not eligible for failover.\n :param bool is_eligible_for_failover: Represents if this failover set is eligible for failover or not.\n '
if (error_message is not None):
pulumi.set(__self__, 'error_message', error_message)
if (is_eligible_for_failover is not None):
pulumi.set(__self__, 'is_eligible_for_failover', is_eligible_for_failover) | The eligibility result of failover set, for failover.
:param str error_message: The error message, if the failover set is not eligible for failover.
:param bool is_eligible_for_failover: Represents if this failover set is eligible for failover or not. | sdk/python/pulumi_azure_native/storsimple/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, error_message: Optional[str]=None, is_eligible_for_failover: Optional[bool]=None):
'\n The eligibility result of failover set, for failover.\n :param str error_message: The error message, if the failover set is not eligible for failover.\n :param bool is_eligible_for_failover: Represents if this failover set is eligible for failover or not.\n '
if (error_message is not None):
pulumi.set(__self__, 'error_message', error_message)
if (is_eligible_for_failover is not None):
pulumi.set(__self__, 'is_eligible_for_failover', is_eligible_for_failover) | def __init__(__self__, *, error_message: Optional[str]=None, is_eligible_for_failover: Optional[bool]=None):
'\n The eligibility result of failover set, for failover.\n :param str error_message: The error message, if the failover set is not eligible for failover.\n :param bool is_eligible_for_failover: Represents if this failover set is eligible for failover or not.\n '
if (error_message is not None):
pulumi.set(__self__, 'error_message', error_message)
if (is_eligible_for_failover is not None):
pulumi.set(__self__, 'is_eligible_for_failover', is_eligible_for_failover)<|docstring|>The eligibility result of failover set, for failover.
:param str error_message: The error message, if the failover set is not eligible for failover.
:param bool is_eligible_for_failover: Represents if this failover set is eligible for failover or not.<|endoftext|> |
b914ce06e2ac41c5b4ac85bb59b78173884fcb6fceb6fdfe680d3ce8d3d7df3e | @property
@pulumi.getter(name='errorMessage')
def error_message(self) -> Optional[str]:
'\n The error message, if the failover set is not eligible for failover.\n '
return pulumi.get(self, 'error_message') | The error message, if the failover set is not eligible for failover. | sdk/python/pulumi_azure_native/storsimple/outputs.py | error_message | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='errorMessage')
def error_message(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'error_message') | @property
@pulumi.getter(name='errorMessage')
def error_message(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'error_message')<|docstring|>The error message, if the failover set is not eligible for failover.<|endoftext|> |
09457cc4d41e8950d2e6b2ecf27e19b2fe1bbfde091ea2f76d4fb565338c53ac | @property
@pulumi.getter(name='isEligibleForFailover')
def is_eligible_for_failover(self) -> Optional[bool]:
'\n Represents if this failover set is eligible for failover or not.\n '
return pulumi.get(self, 'is_eligible_for_failover') | Represents if this failover set is eligible for failover or not. | sdk/python/pulumi_azure_native/storsimple/outputs.py | is_eligible_for_failover | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='isEligibleForFailover')
def is_eligible_for_failover(self) -> Optional[bool]:
'\n \n '
return pulumi.get(self, 'is_eligible_for_failover') | @property
@pulumi.getter(name='isEligibleForFailover')
def is_eligible_for_failover(self) -> Optional[bool]:
'\n \n '
return pulumi.get(self, 'is_eligible_for_failover')<|docstring|>Represents if this failover set is eligible for failover or not.<|endoftext|> |
964a76c98cf3ea728f0a341c0fdfc781cd8842f2f245f4e6d264d2ee8fc2f4b6 | def __init__(__self__, *, eligibility_result: Optional['outputs.FailoverSetEligibilityResultResponseResult']=None, volume_containers: Optional[Sequence['outputs.VolumeContainerFailoverMetadataResponseResult']]=None):
"\n The failover set on a device.\n :param 'FailoverSetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the failover set, for failover.\n :param Sequence['VolumeContainerFailoverMetadataResponseArgs'] volume_containers: The list of meta data of volume containers, which are part of the failover set.\n "
if (eligibility_result is not None):
pulumi.set(__self__, 'eligibility_result', eligibility_result)
if (volume_containers is not None):
pulumi.set(__self__, 'volume_containers', volume_containers) | The failover set on a device.
:param 'FailoverSetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the failover set, for failover.
:param Sequence['VolumeContainerFailoverMetadataResponseArgs'] volume_containers: The list of meta data of volume containers, which are part of the failover set. | sdk/python/pulumi_azure_native/storsimple/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, eligibility_result: Optional['outputs.FailoverSetEligibilityResultResponseResult']=None, volume_containers: Optional[Sequence['outputs.VolumeContainerFailoverMetadataResponseResult']]=None):
"\n The failover set on a device.\n :param 'FailoverSetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the failover set, for failover.\n :param Sequence['VolumeContainerFailoverMetadataResponseArgs'] volume_containers: The list of meta data of volume containers, which are part of the failover set.\n "
if (eligibility_result is not None):
pulumi.set(__self__, 'eligibility_result', eligibility_result)
if (volume_containers is not None):
pulumi.set(__self__, 'volume_containers', volume_containers) | def __init__(__self__, *, eligibility_result: Optional['outputs.FailoverSetEligibilityResultResponseResult']=None, volume_containers: Optional[Sequence['outputs.VolumeContainerFailoverMetadataResponseResult']]=None):
"\n The failover set on a device.\n :param 'FailoverSetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the failover set, for failover.\n :param Sequence['VolumeContainerFailoverMetadataResponseArgs'] volume_containers: The list of meta data of volume containers, which are part of the failover set.\n "
if (eligibility_result is not None):
pulumi.set(__self__, 'eligibility_result', eligibility_result)
if (volume_containers is not None):
pulumi.set(__self__, 'volume_containers', volume_containers)<|docstring|>The failover set on a device.
:param 'FailoverSetEligibilityResultResponseArgs' eligibility_result: The eligibility result of the failover set, for failover.
:param Sequence['VolumeContainerFailoverMetadataResponseArgs'] volume_containers: The list of meta data of volume containers, which are part of the failover set.<|endoftext|> |