filename | text
---|---
the-stack_0_5713 | #!/usr/bin/env python
"""
requests_cache
~~~~~~~~~~~~~~
Transparent cache for ``requests`` library with persistence and async support
Just write::
import requests_cache
requests_cache.install_cache()
And requests to resources will be cached for faster repeated access::
import requests
for i in range(10):
r = requests.get('http://httpbin.org/delay/5')
        # will take approximately 5 seconds instead of 50
:copyright: (c) 2012 by Roman Haritonov.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext'
__version__ = '0.5.2'
from .core import (
CachedSession,
clear,
disabled,
enabled,
get_cache,
install_cache,
remove_expired_responses,
uninstall_cache,
)
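
# Editor's note (not part of the original file): as an alternative to patching
# ``requests`` globally with ``install_cache()``, a ``CachedSession`` can be
# used directly. The argument names below follow the documented 0.5.x API and
# are shown only as an illustration::
#
#     from requests_cache import CachedSession
#     session = CachedSession('demo_cache', backend='sqlite', expire_after=180)
#     session.get('http://httpbin.org/delay/5')  # cached after the first call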
|
the-stack_0_5714 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide base classes that define what is an adversarial for object detection models."""
import math
from .base import Criterion
import numpy as np
class TargetClassMiss(Criterion):
""" Defines adversarials as images for which the target class is not
in the detection result.
"""
def __init__(self, target_class):
super(TargetClassMiss, self).__init__()
self._target_class = target_class
def target_class(self):
"""Return target class."""
return self._target_class
    def name(self):
        """Return criterion name."""
return 'TargetClassMiss'
def is_adversarial(self, predictions, annotation):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return True
return self._target_class not in predictions['classes']
class RegionalTargetClassMiss(Criterion):
"""Defines adversarials as images for which the target class in target region is not
in the detection result.
"""
def __init__(self, target_class, target_region):
super(RegionalTargetClassMiss, self).__init__()
self._target_class = target_class
        self._target_region = np.array(target_region).astype(int)
def target_class(self):
"""Return target class."""
return self._target_class
def target_region(self):
"""Return target region."""
        return self._target_region
    def name(self):
        """Return criterion name."""
return 'RegionalTargetClassMiss'
def is_adversarial(self, predictions, annotation):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return True
bbox_list = predictions['boxes']
class_list = predictions['classes']
for bbox_pred, cls_pred in zip(bbox_list, class_list):
            iou = self._get_IoU(bbox_pred, self._target_region)
if iou > 0 and cls_pred == self._target_class:
return False
return True
@staticmethod
def _get_IoU(bbox1, bbox2):
bi = [max(bbox1[0], bbox2[0]), max(bbox1[1], bbox2[1]),
min(bbox1[2], bbox2[2]), min(bbox1[3], bbox2[3])]
        iw = bi[2] - bi[0] + 1
        ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bbox1[2] - bbox1[0] + 1) * (bbox1[3] - bbox1[1] + 1) + \
(bbox2[2] - bbox2[0] + 1) * \
(bbox2[3] - bbox2[1] + 1) - iw * ih
ov = iw * ih / ua
return ov
else:
return 0.0
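
    # Worked example (editor's note): with the inclusive "+ 1" pixel convention
    # above, boxes [0, 0, 10, 10] and [5, 0, 15, 10] intersect in a 6 x 11
    # region, so IoU = 66 / (121 + 121 - 66) = 0.375; any IoU > 0 combined with
    # the matching class makes the image count as non-adversarial here.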
class TargetClassMissGoogle(Criterion):
"""Defines adversarials as images for which the target class is not
in the Google object detection result.
"""
def __init__(self, target_class):
super(TargetClassMissGoogle, self).__init__()
self._target_class = target_class
def target_class(self):
"""Return target class."""
return self._target_class
    def name(self):
        """Return criterion name."""
return '{}-{}'.format(
self.__class__.__name__, self.target_class())
def is_adversarial(self, predictions):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return True
assert isinstance(predictions, list), 'Predictions should be list.'
for pred in predictions:
if pred['name'].lower() == self._target_class.lower():
return False
return True
class WeightedAP(Criterion):
    """Defines adversarials as images whose weighted AP distance score is
    larger than a given threshold.
"""
_defaults = {
"alpha": 0.001,
"lambda_tp_area": 0,
"lambda_tp_dis": 0,
"lambda_tp_cs": 0,
"lambda_tp_cls": 1,
"lambda_fp_area": 0.1,
"lambda_fp_cs": 0,
'lambda_fn_area': 0.1,
'lambda_fn_cs': 0,
'a_set': [1, 1, 1, 0.1],
'MINOVERLAP': 0.5,
}
@classmethod
def get_defaults(cls, n):
"""Return default value of n.
Parameters
----------
n : str
            Key of the default dictionary.
"""
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, Height, Width, distance_th, print_f=False):
self.__dict__.update(self._defaults) # set up default values
self.Height = float(Height)
self.Width = float(Width)
self.th_is_adv = distance_th
self.print_f = print_f
self.a_tp = self.a_set[0]
self.a_fp = self.a_set[1]
self.a_fn = self.a_set[2]
self.a_er = self.a_set[3]
    def name(self):
        """Return criterion name."""
return 'WeightedAP'
def is_adversarial(self, predictions, annotation):
"""Decides if predictions for an image are adversarial."""
if predictions is None:
return None
return self.distance_score(annotation, predictions) > self.th_is_adv
def _get_bb_area(self, bbox):
return (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
def _get_IoU(self, obj_1, obj_2):
bb = obj_1["bbox"]
bbgt = obj_2["bbox"]
bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]),
min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + \
(bbgt[2] - bbgt[0] + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
return ov
else:
return 0.0
def _find_by_idx(self, idx, source_dic_list):
for temp_obj in source_dic_list:
if temp_obj['index'] == idx:
return temp_obj
return {}
def _get_largest_bb_area(self, obj_list):
temp_max = 1
for temp_obj in obj_list:
bb = temp_obj['bbox']
bb_area = self._get_bb_area(bb)
if bb_area > temp_max:
temp_max = bb_area
return temp_max
def _get_total_bb_area(self, obj_list):
total_area = 1
for temp_obj in obj_list:
bb = temp_obj['bbox']
bb_area = self._get_bb_area(bb)
total_area += bb_area
return total_area
def _get_largest_bb_edge(self, obj_list):
temp_max = -1
for temp_obj in obj_list:
bb = temp_obj['bbox']
if abs(bb[2] - bb[0]) > temp_max:
temp_max = abs(bb[2] - bb[0])
if abs(bb[3] - bb[1]) > temp_max:
temp_max = abs(bb[3] - bb[1])
return temp_max
def _sort_by_conf(self, ori_list, source_dic_list):
tup_list = []
if len(ori_list) <= 1:
return ori_list
for temp in ori_list:
temp_obj = self._find_by_idx(temp, source_dic_list)
if not temp_obj:
raise ValueError('object cannot be found by index.')
tup_list.append((temp_obj['index'], temp_obj['confident_score']))
tup_list.sort(key=lambda tup: tup[1])
return [x[0] for x in tup_list]
def _sort_match_dic(self, ori_index_dic, source_dic_list):
sorted_dic = {}
for temp_key in ori_index_dic.keys():
temp_list = ori_index_dic[temp_key]
if len(temp_list) <= 1:
sorted_dic[temp_key] = temp_list
else:
sorted_dic[temp_key] = self._sort_by_conf(
temp_list, source_dic_list)
return sorted_dic
def _get_fn_list(self, tp_match_dic, source_list):
dst_list = []
for temp_source in source_list:
flag_found = False
for temp_idx_pair in tp_match_dic.keys():
if (temp_source['index'] in tp_match_dic[temp_idx_pair]):
flag_found = True
if not flag_found:
dst_list.append(temp_source)
return dst_list
def _get_bb_distance(self, bb1, bb2):
c1 = [0.5 * (bb1[2] + bb1[0]), 0.5 * (bb1[3] + bb1[1])]
c2 = [0.5 * (bb2[2] + bb2[0]), 0.5 * (bb2[3] + bb2[1])]
return math.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2)
def distance_score(self, gt_dic, pd_dic):
"""Compute metric distance between given two detection results.
Parameters
----------
gt_dic : dict
The ground truth annotation which contains: scores, boxes and classes.
pd_dic : dict
The target output form detector which contains: scores, boxes and classes.
"""
gt_list = self._dic2list(gt_dic)
pd_list = self._dic2list(pd_dic)
return self._compute_score(gt_list, pd_list)
def _dic2list(self, dic):
res_list = []
for idx, key in enumerate(dic.keys()):
if idx == 0:
for sub_idx in range(len(dic[key])):
res_list.append({'index': sub_idx})
if key == 'scores':
temp_name = 'confident_score'
elif key == 'boxes':
temp_name = 'bbox'
elif key == 'classes':
temp_name = 'class_name'
else:
raise ValueError('Invalid key.')
for sub_idx, temp_obj in enumerate(dic[key]):
                if temp_name == 'bbox':
temp_obj = [
temp_obj[1],
temp_obj[0],
temp_obj[3],
temp_obj[2]]
res_list[sub_idx][temp_name] = temp_obj
return res_list
def _compute_score(self, gt_obj_list, pd_obj_list):
        '''
        Notes
        -----
        Compute the metric distance score between two object detection results.

        Input:
            pd_obj_list: object list of the prediction
            gt_obj_list: object list of the ground truth
            obj = {
                'class_name': 'car',
                'bbox': [634, 663, 787, 913],  # [left, up, right, down]
                'confident_score': 0.9918241,
                'index': 0
            }
        '''
tp_match_dic = {} # {pd_idx : [gt_idx1, gt_idx2...]}
for pd_obj in pd_obj_list:
tp_match_dic[pd_obj['index']] = []
for gt_obj in gt_obj_list:
IoU = self._get_IoU(pd_obj, gt_obj)
# and gt_obj['class_name'] == pd_obj['class_name']:
if IoU >= self.MINOVERLAP:
tp_match_dic[pd_obj['index']].append(gt_obj['index'])
tp_match_dic = self._sort_match_dic(tp_match_dic, gt_obj_list)
tp_pair = []
fp_pd = []
for temp_idx in tp_match_dic.keys():
if not tp_match_dic[temp_idx]:
fp_pd.append(self._find_by_idx(temp_idx, pd_obj_list))
else:
tp_pair.append(
(self._find_by_idx(
temp_idx, pd_obj_list), self._find_by_idx(
tp_match_dic[temp_idx][0], gt_obj_list)))
fn_gt = self._get_fn_list(tp_match_dic, gt_obj_list)
self.largest_area_gt = self._get_largest_bb_area(gt_obj_list)
self.largest_edge_gt = self._get_largest_bb_edge(gt_obj_list)
self.total_area_gt = self._get_total_bb_area(gt_obj_list)
self.total_area_pd = self._get_total_bb_area(pd_obj_list)
cum_tp_penal = 0.0
for temp_tp_pair in tp_pair:
results = self._tp_panelize(temp_tp_pair)
distance = results['distance']
area_dif = results['area_dif']
cs_dif = results['cs_dif']
class_dif = results['class_dif']
temp_tp_penal = self.lambda_tp_dis * distance + self.lambda_tp_area * area_dif \
+ self.lambda_tp_cs * cs_dif + self.lambda_tp_cls * class_dif
cum_tp_penal += temp_tp_penal
if self.print_f:
print('cum tp: ', cum_tp_penal)
if len(tp_pair) > 1:
cum_tp_penal /= len(tp_pair)
cum_fp_penal = 0.0
for temp_fp_pd in fp_pd:
area, cs = self._fp_fn_panelize(temp_fp_pd)
drop_func_out = self._factor_func(
self.total_area_pd / (self.Height * self.Width))
temp_fp_panel = self.lambda_fp_area * drop_func_out * \
area / self.total_area_pd + self.lambda_fp_cs * cs
cum_fp_penal += temp_fp_panel
if self.print_f:
print('cum fp: ', cum_fp_penal)
if len(fp_pd) > 1:
cum_fp_penal /= len(fp_pd)
cum_fn_penal = 0.0
for temp_fn_gt in fn_gt:
area, cs = self._fp_fn_panelize(temp_fn_gt)
drop_func_out = self._factor_func(
self.total_area_gt / (self.Height * self.Width))
temp_fn_panel = self.lambda_fn_area * drop_func_out * \
area / self.total_area_gt + self.lambda_fn_cs * cs
cum_fn_penal += temp_fn_panel
if self.print_f:
print('cum fn: ', cum_fn_penal)
if len(fn_gt) > 1:
cum_fn_penal /= len(fn_gt)
if (len(tp_pair) + len(fp_pd) + len(fn_gt)) == 0:
err_panel = 0
else:
err_panel = float((len(fp_pd) + len(fn_gt))) / \
(len(tp_pair) + len(fp_pd) + len(fn_gt))
if self.print_f:
print('tp: ', len(tp_pair), ' cum_tp_penal: ', cum_tp_penal)
print('fp: ', len(fp_pd), ' cum_fp_penal: ', cum_fp_penal)
print('fn: ', len(fn_gt), ' cum_fn_penal: ', cum_fn_penal)
print(
'total num: ',
len(tp_pair) +
len(fp_pd) +
len(fn_gt),
' err_panel: ',
err_panel)
score_final = (self.a_tp * cum_tp_penal + self.a_fp * cum_fp_penal + self.a_fn
* cum_fn_penal + self.a_er * err_panel) \
/ (self.a_tp + self.a_fp + self.a_fn + self.a_er)
return score_final
def _factor_func(self, x):
x = float(x)
if x != 0:
return x / (x + self.alpha)
return x
def _tp_panelize(self, obj_pair):
bb0 = obj_pair[0]['bbox']
bb1 = obj_pair[1]['bbox']
distance = self._get_bb_distance(bb0, bb1)
area0 = self._get_bb_area(bb0)
area1 = self._get_bb_area(bb1)
area_dif = abs(area0 - area1)
cs_dif = abs(
float(
obj_pair[0]['confident_score']) -
float(
obj_pair[1]['confident_score']))
class_dif = 0
if obj_pair[0]['class_name'] != obj_pair[1]['class_name']:
class_dif = 1
return {'distance': distance, 'area_dif': area_dif, 'cs_dif': cs_dif, 'class_dif': class_dif}
def _fp_fn_panelize(self, obj):
bb = obj['bbox']
area = self._get_bb_area(bb)
cs = float(obj['confident_score'])
return area, cs
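
# Editor's usage sketch (not part of the original module). Because of the
# relative import at the top it should be run with ``python -m`` inside its
# package rather than as a plain script; the prediction-dict layout
# ({'classes': [...], 'boxes': [...]}) follows is_adversarial() above.
if __name__ == '__main__':
    miss = TargetClassMiss(target_class=3)
    print(miss.is_adversarial({'classes': [1, 2], 'boxes': []}, None))   # True: class 3 absent
    print(miss.is_adversarial({'classes': [1, 3], 'boxes': []}, None))   # False: class 3 detected
    regional = RegionalTargetClassMiss(3, [0, 0, 10, 10])
    print(regional.is_adversarial({'classes': [3], 'boxes': [[5, 0, 15, 10]]}, None))  # False: IoU > 0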
|
the-stack_0_5717 | class Complex:
def __init__(self, realpart, imagpart):
self.r = realpart
self.i = imagpart
def add(self, addend):
return Complex(self.r + addend.r, self.i + addend.i)
def subtract(self, subtrahend):
return Complex(self.r - subtrahend.r, self.i - subtrahend.i)
def multiply(self, multiplicand):
return Complex((self.r * multiplicand.r) - (self.i * multiplicand.i),
(self.r * multiplicand.i) + (self.i * multiplicand.r))
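
    # Complex division (editor's note): (a + bi) / (c + di)
    #     = ((a*c + b*d) + (b*c - a*d)i) / (c**2 + d**2)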
def divide(self, divisor):
        denominator = divisor.r ** 2 + divisor.i ** 2
        return Complex((self.r * divisor.r + self.i * divisor.i) / denominator,
                       (self.i * divisor.r - self.r * divisor.i) / denominator)
a = Complex(3.0, -4.5)
b = Complex(4.0, -9)
x = a.add(b)
y = a.subtract(b)
z = a.multiply(b)
w = a.divide(b)
print('sum:', x.r, '+', str(x.i) + 'i',
      '\ndifference:', y.r, '+', str(y.i) + 'i',
      '\nproduct:', z.r, '+', str(z.i) + 'i',
      '\nquotient:', w.r, '+', str(w.i) + 'i')
|
the-stack_0_5721 | __author__ = 'jlegind'
from urllib import parse, request
import requests
import json
import collections.abc
import csv
class SearchAPI(object):
def __init__(self, url, read_path, write_path, suffix='', separator='\t'):
"""
:param url: JSON api url
:param read_path: File that contains the search params
:param write_path: Output file
:param suffix: If the url has a suffix like /verbatim after the params this can be tagged on
"""
self.wp = write_path
self.file = open(read_path, mode='r', encoding='utf-8-sig')
self.write_file = open(write_path, mode='w', encoding='utf-8')
self.url = url
self.suffix = suffix
self.appended = ''
self.separator = separator
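
    # Editor's note on take_parameters() below (names illustrative): with a
    # tab-separated input file whose first column holds a search value,
    # ``take_parameters('usageKey', 'scientificName', name=0)`` builds URLs of
    # the form ``<base url>name=<quoted value>&`` and writes the requested JSON
    # fields plus the search value to the output file.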
def take_parameters(self, *args, **kwargs):
"""
:param args: The JSON values you want returned
:param kwargs: The API search term[key] in the API call,
and position[value] in the read_file (tab separated columns)
"""
line = self.file.readline()
while line:
new_url = self.url
to_paging_params = []
split_line = line.split(self.separator)
if kwargs:
print(kwargs)
for k, v in kwargs.items():
print('this is k in kwargs:', k)
kw = split_line[kwargs[k]].strip()
print('value ? ', v)
new_url += k+'='+parse.quote_plus(kw)+'&'
to_paging_params.append(kw)
else:
vl = split_line[0].strip()
new_url += vl
print(vl+" prrr---")
self.appended = vl
to_paging_params.append(vl)
self.pagination(new_url.strip('&')+self.suffix, to_paging_params, args)
line = self.file.readline()
def searching_gbif_api(self, url):
'''
Just get the GBIF api search result
'''
rson = requests.get(url)
rson = rson.json()
return rson
def filter_api_response(self, response, fields):
'''
response = json response from api
fields = A list of fields to parse for
'''
resp_dict = dict.fromkeys(fields)
for j in fields:
resp_dict[j] = response[j]
return resp_dict
def make_search_name(self, positions):
'''
assumes multiple columns and composes a name from these in order of positions
param: positions = a LIST of column positions in a csv/text file
'''
line = self.file.readline()
while line:
rowlist = line.split(self.separator)
# res = [name for name in rowlist]
name = [rowlist[e] for e in positions]
stripped_name = [j.rstrip() for j in name]
stripped_name = ' '.join(stripped_name)
print('stripped name: ', stripped_name)
line = self.file.readline()
search_url = self.url+stripped_name
yield search_url
def pagination(self, url, terms, keys, offset=None, appended=''):
"""
:param url: Takes the url with the search term and value added
:param terms: A list of search values
:param keys: A list of JSON keys that you want the value for
:param offset: Used to increment paging
"""
#print(url)
        if not terms or offset is None:
print('Absolute no_param')
new_url = url
else:
new_url = url+'&offset='+str(offset)+'&limit=100'
print(new_url)
try:
response = request.urlopen(new_url)
r = response.read()
decoded_json = json.loads(r.decode('utf-8'))
print('debug1')
end_of_records = None
try:
results = decoded_json['results']
end_of_records = decoded_json['endOfRecords']
except KeyError:
print('keyError !!!!!!!')
results = decoded_json
#print(results)
if end_of_records is False:
print('False')
for j in results:
self.parse_json(j, keys, terms)
offset += 100
self.pagination(url, terms, keys, offset=offset)
else:
print('debug2')
try:
for j in results:
#print('debug3')
self.parse_json(j, keys, terms)
except:
#print('8888debuggg')
self.parse_json(results, keys, terms)
except Exception as err:
print(err, 'err')
print(type(err))
#Below is NOT TESTED outside a plain non-JSON result (like the count API call)
self.write_output(decoded_json)
def write_output(self, input_to_file):
#print('debug5')
        if isinstance(input_to_file, collections.abc.Iterable):
output = '\t'.join(str(e) for e in input_to_file)
else:
output = input_to_file
#output string is created from the input_to_file list. Integers are cast to str
#print(output)
self.write_file.write(str(output)+'\t'+self.appended+'\n')
def parse_json(self, json_element, keys, terms):
list_output = []
#print('debug4')
for k in keys:
try:
list_output.append(json_element[k])
except KeyError:
#print('keyerror---')
list_output.append('NULL')
#print('debug3', terms)
[list_output.append(i) for i in terms]
#print('debug4', list_output)
self.write_output(list_output)
def main():
#my_api = SearchAPI('http://api.gbif.org/v1/species/', 'G:/GIASIP/export/nubkeys.txt', 'G:/GIASIP/export/GISDnubtaxonomy_test.txt')
"""my_api.take_parameters("key", "nubKey", "taxonID", "kingdom", "phylum", "order", "family", "genus",
"species", "kingdomKey", "phylumKey", "classKey", "orderKey", "familyKey",
"genusKey", "speciesKey", "datasetKey", "parentKey", "parent", "acceptedKey",
"accepted", "scientificName", "canonicalName", "authorship", "nameType",
"rank", "origin", "taxonomicStatus", "nomenclaturalStatus", "accordingTo",
"numDescendants", "synonym", "class", "publishedIn", "references",
no_param=0)
"""
# my_api = SearchAPI('http://api.gbif.org/v1/species/', 'G:/GIASIP/export/GISDnubtaxonomy_unique.txt', 'G:/GIASIP/export/no_param_test.txt', suffix="/distributions?")
# my_api.take_parameters("locationId", "locality", "country", "status", "establishmentMeans", "sourceTaxonKey")
# my_api = SearchAPI('http://api.gbif.org/v1/species/', 'C:/Users/jlegind/Dropbox/GIASIP/taxon_keys.txt', 'C:/Users/jlegind/Dropbox/GIASIP/export/GISDvernacularnames2.txt', suffix="/vernacularNames?")
# my_api.take_parameters("vernacularName", "language", "sourceTaxonKey", "preferred")
#
# my_api = SearchAPI('http://api.gbif.org/v1/species/match?kingdom=Animalia&', 'G:/Custom exports/Imanol/names.txt', 'G:/Custom exports/Imanol/interpreted_names.txt', separator=';')
# my_api.take_parameters("usageKey",
# "scientificName", "rank",
# name=0)
my_api = SearchAPI('http://api.gbif.org/v1/species/match?kingdom=Animalia&name=', 'H:/into_api/atomized_fish_list.txt', 'H:/output_api/interpreted_names_fish.txt')
# #separator = tab
# my_api.take_parameters("usageKey",
# "scientificName", "kingdom", "phylum", "class", "order", "family", "genus", "rank", "status", "confidence",
# genus=0, name=1)
res = my_api.make_search_name([0,1,2])
with open('H:/output_api/interpreted_names_fish.txt', 'w+', newline='') as wfile:
field_list = ["usageKey", "acceptedUsageKey", "scientificName", "kingdom", "phylum", "class", "order", "family", "genus", "rank", "status", "confidence"]
writer = csv.DictWriter(wfile, fieldnames=field_list, delimiter='\t')
writer.writeheader()
for j in res:
print('name url == ', j)
try:
reply = my_api.searching_gbif_api(j)
res = my_api.filter_api_response(reply, field_list)
print('return dict === ', res)
writer.writerow(res)
except:
print('ERROR')
#
# my_api.pagination(j, ["usageKey",
# "scientificName", "kingdom", "phylum", "class", "order", "family", "genus", "rank", "status", "confidence"], )
# name_list.append(j)
# my_api = SearchAPI('http://api.gbif.org/v1/dataset/', 'G:/Custom exports/dataset_list.csv', 'G:/Custom exports/lic_datasets.txt')
# my_api.take_parameters("key", "title", "type")
# my_api.take_parameters("key", "title", identifier=0)
# my_api = SearchAPI('http://api.gbif.org/v1/occurrence/count?datasetKey=', 'G:/Deletion/deleted_datasets/datasetkeys.csv', 'G:/Custom exports/del_counts.txt')
# my_api.take_parameters(None)
# UGLY HACK line 49 offset=None , must be 0
if __name__ == '__main__':
main() |
the-stack_0_5722 | from __future__ import unicode_literals
import click
import six
from ..aliases import aliases_database
from .base import cli
@cli.command(name='clean-aliases',
             short_help="Remove aliases mapping to closed or nonexistent "
"activities.")
@click.option('-y', '--yes', 'force_yes', is_flag=True,
help="Don't ask confirmation.")
@click.pass_context
def clean_aliases(ctx, force_yes):
"""
Removes aliases from your config file that point to inactive projects.
"""
inactive_aliases = []
for (alias, mapping) in six.iteritems(aliases_database):
# Ignore local aliases
if mapping.mapping is None:
continue
project = ctx.obj['projects_db'].get(mapping.mapping[0],
mapping.backend)
if (project is None or not project.is_active() or
(mapping.mapping[1] is not None
and project.get_activity(mapping.mapping[1]) is None)):
inactive_aliases.append(((alias, mapping), project))
if not inactive_aliases:
ctx.obj['view'].msg("No inactive aliases found.")
return
if not force_yes:
confirm = ctx.obj['view'].clean_inactive_aliases(inactive_aliases)
if force_yes or confirm:
ctx.obj['settings'].remove_aliases(
[item[0] for item in inactive_aliases]
)
ctx.obj['settings'].write_config()
ctx.obj['view'].msg("%d inactive aliases have been successfully"
" cleaned." % len(inactive_aliases))
|
the-stack_0_5723 | import glob
import os
from typing import List
from torch import sigmoid
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModelWithWandb(Callback):
"""Make WandbLogger watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeToWandbAsArtifact(Callback):
"""Upload all *.py files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str):
self.code_dir = code_dir
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True):
code.add_file(path)
experiment.use_artifact(code)
class UploadCheckpointsToWandbAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
ckpts.add_file(path)
experiment.use_artifact(ckpts)
class LogConfusionMatrixToWandb(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
            # experiment.log({f"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogF1PrecRecHeatmapToWandb(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
            f1 = f1_score(targets, preds, average=None)
            r = recall_score(targets, preds, average=None)
            p = precision_score(targets, preds, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class ImagePredictionLogger(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
            # get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
# preds = torch.argmax(logits, axis=-1)
preds = sigmoid(logits).squeeze()
preds[preds>=0.5]=1
preds[preds<0.5]=0
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
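
# Editor's usage sketch (not part of the original module; the project name is
# illustrative): these callbacks are meant to be passed to a Lightning Trainer
# together with a WandbLogger, e.g.
#
#     logger = WandbLogger(project="my-project")
#     trainer = Trainer(
#         logger=logger,
#         callbacks=[WatchModelWithWandb(), LogConfusionMatrixToWandb()],
#     )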
|
the-stack_0_5724 | # -*- coding: utf-8 -*-
import wtforms
from flask import render_template, request, Markup, abort, flash, redirect, escape, url_for, make_response
from .. import b__ as __
from .form import Form
from .fields import SubmitField
class ConfirmDeleteForm(Form):
"""
Confirm a delete operation
"""
# The labels on these widgets are not used. See delete.html.
delete = SubmitField(__(u"Delete"))
cancel = SubmitField(__(u"Cancel"))
def render_form(form, title, message='', formid='form', submit=__(u"Submit"), cancel_url=None, ajax=False):
multipart = False
for field in form:
if isinstance(field.widget, wtforms.widgets.FileInput):
multipart = True
if form.errors:
code = 200 # 400
else:
code = 200
if request.is_xhr and ajax:
return make_response(render_template('baseframe/ajaxform.html', form=form, title=title,
message=message, formid=formid, submit=submit,
cancel_url=cancel_url, multipart=multipart), code)
else:
return make_response(render_template('baseframe/autoform.html', form=form, title=title,
message=message, formid=formid, submit=submit,
cancel_url=cancel_url, ajax=ajax, multipart=multipart), code)
def render_message(title, message, code=200):
if request.is_xhr:
return make_response(Markup("<p>%s</p>" % escape(message)), code)
else:
return make_response(render_template('baseframe/message.html', title=title, message=message), code)
def render_redirect(url, code=302):
if request.is_xhr:
return make_response(render_template('baseframe/redirect.html', url=url))
else:
return redirect(url, code=code)
def render_delete_sqla(obj, db, title, message, success=u'', next=None, cancel_url=None):
if not obj:
abort(404)
form = ConfirmDeleteForm()
if request.method in ('POST', 'DELETE') and form.validate():
if 'delete' in request.form or request.method == 'DELETE':
db.session.delete(obj)
db.session.commit()
if success:
flash(success, 'success')
return render_redirect(next or url_for('index'), code=303)
else:
return render_redirect(cancel_url or next or url_for('index'), code=303)
return make_response(render_template('baseframe/delete.html', form=form, title=title, message=message))
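
# Editor's usage sketch (not part of the original module; the model and route
# names are illustrative): render_delete_sqla is meant to back a confirm-delete
# view, e.g.
#
#     @app.route('/post/<int:post_id>/delete', methods=['GET', 'POST'])
#     def delete_post(post_id):
#         post = Post.query.get_or_404(post_id)
#         return render_delete_sqla(post, db, title=u"Confirm delete",
#                                   message=u"Delete this post?",
#                                   success=u"Post deleted.",
#                                   next=url_for('index'))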
|
the-stack_0_5725 | import math
import pytest
from click.testing import CliRunner
from r2b2.athena import Athena
from r2b2.cli import cli
from r2b2.contest import Contest
from r2b2.contest import ContestType
from r2b2.minerva import Minerva
from r2b2.tests import util as util
default_contest = util.generate_contest(10000)
def test_simple_athena():
simple_athena = Athena(.1, 2**31 - 1, .1, default_contest)
assert simple_athena.alpha == .1
assert simple_athena.beta == 0.0
assert simple_athena.delta == 2**31 - 1
assert simple_athena.max_fraction_to_draw == .1
assert len(simple_athena.rounds) == 0
assert len(simple_athena.sub_audits['a-b'].min_winner_ballots) == 0
assert simple_athena.get_risk_level() is None
def test_athena_minerva_paper():
contest = Contest(100000, {'A': 75000, 'B': 25000}, 1, ['A'], ContestType.MAJORITY)
athena = Athena(.1, 1, .1, contest)
minerva = Minerva(.1, .1, contest)
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [50])
minerva.compute_min_winner_ballots(minerva.sub_audits['A-B'], [50])
# From Athena paper
assert athena.sub_audits['A-B'].min_winner_ballots == [32]
assert minerva.sub_audits['A-B'].min_winner_ballots == [31]
def test_athena_execute_round():
contest = Contest(100000, {'A': 75000, 'B': 25000}, 1, ['A'], ContestType.MAJORITY)
athena = Athena(.1, 1, .1, contest)
assert not athena.execute_round(50, {'A': 31, 'B': 19})
assert not athena.stopped
assert athena.sample_ballots['A'] == [31]
assert athena.sample_ballots['B'] == [19]
assert not athena.sub_audits['A-B'].stopped
assert athena.rounds == [50]
assert athena.execute_round(100, {'A': 70, 'B': 30})
assert athena.stopped
assert athena.sample_ballots['A'] == [31, 70]
assert athena.sample_ballots['B'] == [19, 30]
assert athena.sub_audits['A-B'].stopped
assert athena.rounds == [50, 100]
assert athena.get_risk_level() < 0.1
def test_interactive_athena():
runner = CliRunner()
user_in = 'athena\n0.1\n0.1\n100000\n2\nA\n75000\nB\n25000\n1\nA\nMAJORITY\ny\n1\ny\nn\n50\n31\n19\nn\nn\n100\n70\n30\n'
result = runner.invoke(cli, 'interactive', input=user_in)
output_file = open('src/r2b2/tests/data/cli_test_expected_out_interactive_athena.txt', 'r')
expected_out = output_file.read()
assert result.output == expected_out
output_file.close()
def test_bulk_athena():
# Same as Minerva (that is, delta = infinity)
# Ballot-by-ballot Minerva should yield identical stopping rules to BRAVO.
contest = Contest(100000, {'A': 60000, 'B': 40000}, 1, ['A'], ContestType.MAJORITY)
athena = Athena(.1, 2**31 - 1, .01, contest)
athena.compute_all_min_winner_ballots(athena.sub_audits['A-B'])
# p0 not hardcoded as .5 for scalability with odd total contest ballots.
p0 = (athena.contest.contest_ballots // 2) / athena.contest.contest_ballots
log_winner_multiplier = math.log(athena.sub_audits['A-B'].sub_contest.winner_prop / p0)
log_loser_multiplier = math.log((1 - athena.sub_audits['A-B'].sub_contest.winner_prop) / p0)
log_rhs = math.log(1 / athena.alpha)
for i in range(len(athena.rounds)):
n = athena.rounds[i]
kmin = athena.sub_audits['A-B'].min_winner_ballots[i]
# Assert this kmin satisfies ratio, but a kmin one less does not.
assert kmin * log_winner_multiplier + (n - kmin) * log_loser_multiplier > log_rhs
assert (kmin - 1) * log_winner_multiplier + (n - kmin + 1) * log_loser_multiplier <= log_rhs
def test_athena_next_sample_size():
    # TODO: Create tests for athena next sample size
simple_athena = Athena(0.1, 1, 0.1, default_contest)
simple_athena.next_sample_size()
pass
def test_exceptions():
contest = Contest(100000, {'A': 60000, 'B': 40000}, 1, ['A'], ContestType.MAJORITY)
with pytest.raises(ValueError):
Athena(.1, 0, .1, contest)
athena = Athena(.1, 1, .1, contest)
with pytest.raises(Exception):
athena.stopping_condition_pairwise('A-B')
athena.rounds.append(10)
with pytest.raises(ValueError):
athena.stopping_condition_pairwise('X')
athena.rounds = []
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [0])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [1, 2])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20, 20])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20, 19])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [10001])
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [20])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [19])
with pytest.raises(ValueError):
athena.compute_min_winner_ballots(athena.sub_audits['A-B'], [10001])
contest2 = Contest(100, {'A': 60, 'B': 30}, 1, ['A'], ContestType.MAJORITY)
athena2 = Athena(0.1, 1, 1.0, contest2)
with pytest.raises(ValueError):
athena2.compute_min_winner_ballots(athena2.sub_audits['A-B'], [91])
athena2.rounds.append(10)
with pytest.raises(Exception):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'])
athena2.rounds = []
with pytest.raises(ValueError):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'], 0)
with pytest.raises(ValueError):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'], 200)
with pytest.raises(ValueError):
athena2.compute_all_min_winner_ballots(athena2.sub_audits['A-B'], 0)
|
the-stack_0_5727 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DQN net"""
import mindspore.nn as nn
import mindspore.ops as ops
class DQN(nn.Cell):
def __init__(self, input_size, hidden_size, output_size):
super(DQN, self).__init__()
self.linear1 = nn.Dense(input_size, hidden_size)
self.linear2 = nn.Dense(hidden_size, output_size)
self.relu = nn.ReLU()
def construct(self, x):
x = self.relu(self.linear1(x))
return self.linear2(x)
class WithLossCell(nn.Cell):
"""
network with loss function
"""
def __init__(self, backbone, loss_fn):
super(WithLossCell, self).__init__(auto_prefix=False)
self._backbone = backbone
self._loss_fn = loss_fn
self.gather = ops.GatherD()
def construct(self, x, act, label):
out = self._backbone(x)
out = self.gather(out, 1, act)
loss = self._loss_fn(out, label)
return loss
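
# Editor's usage sketch (not part of the original file; the dimensions are
# illustrative): WithLossCell wraps the policy network and a loss function,
# gathers the Q-value of the action actually taken, and compares it with the
# TD target, e.g.
#
#     net = DQN(input_size=4, hidden_size=100, output_size=2)
#     loss_net = WithLossCell(net, nn.MSELoss())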
|
the-stack_0_5728 | import os
import sys
import importlib
# Setting the correct config file
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path + "config")
output_FF_layers = [100, 2] #[4000, 1000, 100, 1] #[200, 200, 100, 100, 1]
cur_work_dir = os.getcwd()
d_model = 8
attention_heads = 4
attention_dropout = 0.0
n_layers = 1
add_seq_cnn = True
add_parallel_cnn = False
k_dim = 10
cnn_dropout = 0.2
attention_norm = False
attention_layer_norm = False
n_feature_dim = config.embedding_vec_dim
analysis = None #'deepCrispr'
oversample = False
#(should always be 1)
data_folder = os.path.join(cur_work_dir, 'datas_OTcpf1')
if not os.path.exists(data_folder):
print("Create {0} directory".format(data_folder))
os.mkdir(data_folder)
|
the-stack_0_5733 | import sys
import os.path
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import QThread, pyqtSlot
from mychat_client.client import User
from mychat_client.handlers import GuiReceiver
try:
addr = sys.argv[1]
except IndexError:
addr = 'localhost'
try:
port = int(sys.argv[2])
except IndexError:
port = 7777
except ValueError:
    print('The port must be an integer')
sys.exit(0)
try:
name = sys.argv[3]
print(name)
except IndexError:
login = input('Login: ')
name = login
paths = sys.path
b = ''
for i in paths:
if i.endswith('site-packages'):
b = i
form_path = b + '\mychat_client\sv_main.ui'
app = QtWidgets.QApplication(sys.argv)
window = uic.loadUi(form_path)
client = User(name, addr, port)
client.connect()
listener = GuiReceiver(client.sock, client.request_queue)
@pyqtSlot(str)
def update_chat(data):
try:
msg = data
window.listWidgetMessages.addItem(msg)
except Exception as e:
print(e)
listener.gotData.connect(update_chat)
th = QThread()
listener.moveToThread(th)
th.started.connect(listener.poll)
th.start()
contact_list = client.get_contacts()
def load_contacts(contacts):
window.listWidgetContacts.clear()
for contact in contacts:
window.listWidgetContacts.addItem(contact)
load_contacts(contact_list)
def add_contact():
try:
username = window.textEditUsername.toPlainText()
if username:
client.add_contact(username)
window.listWidgetContacts.addItem(username)
except Exception as e:
print(e)
window.pushButtonAddContact.clicked.connect(add_contact)
def del_contact():
try:
current_item = window.listWidgetContacts.currentItem()
username = current_item.text()
client.del_contact(username)
        current_item = window.listWidgetContacts.takeItem(window.listWidgetContacts.row(current_item))
del current_item
except Exception as e:
print(e)
def send_message():
text = window.textEditMessage.toPlainText()
if text:
selected_index = window.listWidgetContacts.currentIndex()
user_name = selected_index.data()
client.send_message(user_name, text)
msg = '{:>30}: {}'.format(name, text)
window.listWidgetMessages.addItem(msg)
window.textEditMessage.clear()
window.pushButtonDelContact.clicked.connect(del_contact)
window.pushButtonSend.clicked.connect(send_message)
# window.show()
# sys.exit(app.exec_())
def main():
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
the-stack_0_5734 | import torch.nn as nn
import torch.nn.functional as F
from layers import GraphSN
import torch
class GNN(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, n_layers, batchnorm_dim, dropout_1, dropout_2):
super().__init__()
self.dropout = dropout_1
self.convs = nn.ModuleList()
self.convs.append(GraphSN(input_dim, hidden_dim, batchnorm_dim, dropout_2))
for _ in range(n_layers-1):
self.convs.append(GraphSN(hidden_dim, hidden_dim, batchnorm_dim, dropout_2))
# In order to perform graph classification, each hidden state
# [batch x nodes x hidden_dim] is concatenated, resulting in
# [batch x nodes x input_dim+hidden_dim*(n_layers)], then aggregated
# along nodes dimension, without keeping that dimension:
# [batch x input_dim+hidden_dim*(n_layers)].
#self.out_proj = nn.Linear(input_dim+hidden_dim*(n_layers), output_dim)
self.out_proj = nn.Linear((input_dim+hidden_dim*(n_layers)), output_dim)
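
    # Editor's usage sketch (not part of the original file; hyper-parameters
    # are illustrative): the model consumes dense per-graph tensors, e.g.
    #
    #     model = GNN(input_dim=7, hidden_dim=64, output_dim=2, n_layers=3,
    #                 batchnorm_dim=32, dropout_1=0.5, dropout_2=0.5)
    #     logits = model((X, A))  # X: [batch, nodes, input_dim], A: [batch, nodes, nodes]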
def forward(self, data):
X, A = data[:2]
hidden_states = [X]
for layer in self.convs:
X = F.dropout(layer(A, X), self.dropout)
hidden_states.append(X)
X = torch.cat(hidden_states, dim=2).sum(dim=1)
X = self.out_proj(X)
return X |
the-stack_0_5737 | import configparser
import logging
import os
import warnings
_logger = logging.getLogger(__name__)
FILENAME = "jolly_brancher.ini"
# CONFIG VARS
KEYS_AND_PROMPTS = [
["auth_email", "your login email for Atlassian"],
["base_url", "the base URL for Atlassian (e.g., https://cirrusv2x.atlassian.net)"],
[
"token",
"your Atlassian API token which can be generated here (https://id.atlassian.com/manage-profile/security/api-tokens)",
],
]
CONFIG_DIR = os.path.expanduser("~/.config")
CONFIG_FILENAME = os.path.join(CONFIG_DIR, FILENAME)
JIRA_SECTION_NAME = "jira"
GIT_SECTION_NAME = "git"
def config_setup():
config = configparser.ConfigParser()
if not os.path.exists(CONFIG_DIR):
os.mkdir(CONFIG_DIR)
if os.path.exists(CONFIG_FILENAME):
config.read(CONFIG_FILENAME)
for key, input_prompt in KEYS_AND_PROMPTS:
if (
key not in config[JIRA_SECTION_NAME]
or config[JIRA_SECTION_NAME][key] == ""
): # check all entries are present and populated
config[JIRA_SECTION_NAME][key] = input(f"Please enter {input_prompt}: ")
else:
warnings.warn(f"~/.config/{FILENAME} does not exist. Creating the file now...")
config[JIRA_SECTION_NAME] = {
key: input(f"Please enter {input_prompt}: ")
for key, input_prompt in KEYS_AND_PROMPTS
} # ask for input and set all entries
with open(CONFIG_FILENAME, "w") as configfile:
config.write(configfile)
def fetch_config():
config_setup()
config = configparser.ConfigParser()
config.read(CONFIG_FILENAME)
default_config = config[JIRA_SECTION_NAME]
git_config = config[GIT_SECTION_NAME]
DEFAULT_BRANCH_FORMAT = "{issue_type}/{ticket}-{summary}"
return (
git_config["repo_root"],
default_config["token"],
default_config["base_url"],
default_config["auth_email"],
default_config.get("branch_format", DEFAULT_BRANCH_FORMAT),
git_config["pat"],
git_config["forge_root"],
)
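
# Editor's note: given the keys read above, ~/.config/jolly_brancher.ini is
# expected to look roughly like this (values are illustrative):
#
#     [jira]
#     auth_email = user@example.com
#     base_url = https://example.atlassian.net
#     token = <atlassian api token>
#     branch_format = {issue_type}/{ticket}-{summary}
#
#     [git]
#     repo_root = ~/repos
#     pat = <forge personal access token>
#     forge_root = https://github.com/example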
|
the-stack_0_5739 | if __name__ == '__main__':
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
plt.style.use('ja')
data_dir = '../IceCubeData/'
mp = 1.0
nu_mass = 0.15
filename = 'mp' + str(mp) + 'mnu' + str(nu_mass) + '.csv'
data = pd.read_csv(data_dir + 'complex/' + filename, index_col=0)
print('--- Opened ' + data_dir + 'complex/' + filename + ' ---')
gmin_arr1 = data['gmin_arr']
mn_arr1 = data['mn_arr']
mp = 0.5
nu_mass = 0.1
filename = 'mp' + str(mp) + 'mnu' + str(nu_mass) + '.csv'
data = pd.read_csv(data_dir + 'complex/' + filename, index_col=0)
print('--- Opened ' + data_dir + 'complex/' + filename + ' ---')
gmin_arr2 = data['gmin_arr']
mn_arr2 = data['mn_arr']
mp = 0.5
nu_mass = 0.05
filename = 'mp' + str(mp) + 'mnu' + str(nu_mass) + '.csv'
data = pd.read_csv(data_dir + 'complex/' + filename, index_col=0)
print('--- Opened ' + data_dir + 'complex/' + filename + ' ---')
gmin_arr3 = data['gmin_arr']
mn_arr3 = data['mn_arr']
mn_max = 12.0 # MeV
g_min = -5.0
g_max = -1.0
axis_min = -4.0
axis_max = -1.0
plt.figure()
plt.rcParams.update({'font.size': 22})
plt.plot([mp, mp], [np.power(10.0, -4), np.power(10.0, -2)], c='k', linewidth=1.0)
plt.plot([mp, 10.0], [np.power(10.0, -2), np.power(10.0, -2)], c='k', linewidth=1.0)
plt.plot([10.0, 10.0], [np.power(10.0, -2), np.power(10.0, -4)], c='k', linewidth=1.0)
plt.semilogy(mn_arr1, gmin_arr1, linewidth=1.0, linestyle='-', marker='', markersize=0.0, markerfacecolor='r', alpha = 1.0, markeredgewidth=0.0)
upper_limit1 = np.empty(len(gmin_arr1))
upper_limit1.fill(np.power(10.0, g_max))
plt.fill_between(mn_arr1, gmin_arr1, upper_limit1, alpha=0.2, edgecolor='r', facecolor='k', linewidth=2.0)
style = dict(size=15, color='r')
plt.text(6.0, np.power(10.0, -2.6), r'$m_\nu = 0.15 \, \mathrm{eV}$', **style)
plt.semilogy(mn_arr2, gmin_arr2, linewidth=1.0, linestyle='-', marker='', markersize=0.0, markerfacecolor='b', alpha = 1.0, markeredgewidth=0.0)
upper_limit2 = np.empty(len(gmin_arr2))
upper_limit2.fill(np.power(10.0, g_max))
plt.fill_between(mn_arr2, gmin_arr2, upper_limit2, alpha=0.2, edgecolor='b', facecolor='k', linewidth=0.0)
style = dict(size=15, color='b')
plt.text(6.0, np.power(10.0, -2.9), r'$m_\nu = 0.10 \, \mathrm{eV}$', **style)
plt.semilogy(mn_arr3, gmin_arr3, linewidth=1.0, linestyle='-', marker='', markersize=0.0, markerfacecolor='g', alpha = 1.0, markeredgewidth=0.0)
upper_limit3 = np.empty(len(gmin_arr3))
upper_limit3.fill(np.power(10.0, g_max))
plt.fill_between(mn_arr3, gmin_arr3, upper_limit3, alpha=0.2, edgecolor='g', facecolor='k', linewidth=0.0)
style = dict(size=15, color='g')
plt.text(6.0, np.power(10.0, -3.2), r'$m_\nu = 0.05 \, \mathrm{eV}$', **style)
plt.fill_between([10.0, mn_max], [np.power(10.0, g_min), np.power(10.0, g_min)], [np.power(10.0, g_max), np.power(10.0, g_max)], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
plt.fill_between([0.0, mp], [np.power(10.0, g_min), np.power(10.0, g_min)], [np.power(10.0, g_max), np.power(10.0, g_max)], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
# plt.fill_betweenx([np.power(10.0, -5), 3*np.power(10.0,-4)], [mp, mp], [10.0, 10.0], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
plt.fill_betweenx([np.power(10.0, -2), np.power(10.0, g_max)], [mp, mp], [10.0, 10.0], alpha=0.1, edgecolor='k', facecolor='k', linewidth=0.0)
style = dict(size=15, color='k')
plt.text(6.0, np.power(10.0, -3.5), r'$K^+$ decay constraint', **style)
axes = plt.axis()
plt.axis([0.0, mn_max, np.power(10.0, axis_min), np.power(10.0, axis_max)])
plt.xlabel(r'$m_N / \mathrm{MeV}$')
plt.ylabel(r'$g_\mu$')
#plt.savefig('/Users/james/allMyStuff/Neutrinos/Constraints/plots/constraints[{},{}].pdf'.format(mp, nu_mass))
plt.savefig('/Users/james/allMyStuff/Neutrinos/Constraints/plots/constraints[mp{}].pdf'.format(mp, nu_mass))
print('--- Saved constraints[mp{}].pdf ---'.format(mp, nu_mass))
|
the-stack_0_5740 | """
Spin up an instance, run a single command, spin it down :-)
Usage:
run.py [options] -- <COMMAND> ...
run.py [options] <COMMAND> ...
Options:
--type TYPE type, eg ng0 for bfboost, or ngd3 for dual Titan X [default: ng0]
--image IMAGE image [default: s1]
"""
from __future__ import print_function
import sys
import yaml
import json
import requests
import time
from docopt import docopt
from util.logtailer import LogTailer
api_url = 'https://api.jarvice.com/jarvice'
args = docopt(__doc__)
instancetype = args['--type']
image = args['--image']
command = args['<COMMAND>']
print('command', command)
with open('nimbix.yaml', 'r') as f:
config = yaml.load(f)
username = config['username']
apikey = config['apikey']
launch_data = {
"machine": {
"nodes": "1",
"type": instancetype
},
"variables": {
"FOO": "BAR"
},
"vault": {
"readonly": False,
"force": False,
"name": "drop.jarvice.com"
},
"user": {
"username": username,
"apikey": apikey
},
"nae": {
"force": False,
"name": image,
# "geometry": "1904x881",
"command": " ".join(command),
"ephemeral": False,
"staging": True,
"interactive": False
}
}
res = requests.post('%s/submit' % api_url, json=launch_data)
assert res.status_code == 200
res = json.loads(res.content.decode('utf-8'))
jobnumber = res['number']
print('jobnumber %s' % jobnumber)
logtailer = LogTailer(username=username, apikey=apikey, jobnumber=jobnumber)
while True:
res = requests.get('%s/status?username=%s&apikey=%s&number=%s' % (api_url, username, apikey, jobnumber))
assert res.status_code == 200
res = json.loads(res.content.decode('utf-8'))
status = res[str(jobnumber)]['job_status']
logtailer.updateFromTail()
if 'COMPLETED' in status:
break
time.sleep(1)
logtailer.updateFromOutput()
res = requests.get('%s/status?username=%s&apikey=%s&number=%s' % (api_url, username, apikey, jobnumber))
assert res.status_code == 200
res = json.loads(res.content.decode('utf-8'))
print('wall time %s' % res[str(jobnumber)]['job_walltime'])
|
the-stack_0_5741 | from __future__ import print_function, division
from sympy.core import S, C
from sympy.core.compatibility import u
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.core.logic import fuzzy_not
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core import Add, Mul
from sympy.core.relational import Eq
from sympy.functions.elementary.trigonometric import atan, atan2
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
    """Returns the real part of an expression. This function performs only
    elementary analysis and so it will fail to properly decompose more
    complicated expressions. If a completely simplified result is needed,
    use Basic.as_real_imag() or perform complex expansion on an instance
    of this function.
>>> from sympy import re, im, I, E
>>> from sympy.abc import x, y
>>> re(2*E)
2*E
>>> re(2*I + 17)
17
>>> re(2*I)
0
>>> re(im(x) + x*I + 2)
2
See Also
========
im
"""
is_real = True
    unbranched = True  # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return arg
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return S.Zero
elif arg.is_Function and arg.func is conjugate:
return re(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
elif not term.has(S.ImaginaryUnit) and term.is_real:
excluded.append(term)
else:
# Try to do some advanced expansion. If
# impossible, don't try to do re(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[0])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) - im(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Returns the real number with a zero complex part.
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return re(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* im(Derivative(self.args[0], x, evaluate=True))
def _eval_rewrite_as_im(self, arg):
return self.args[0] - im(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _sage_(self):
import sage.all as sage
return sage.real_part(self.args[0]._sage_())
class im(Function):
"""
    Returns the imaginary part of an expression. This function performs only
    elementary analysis and so it will fail to properly decompose more
    complicated expressions. If a completely simplified result is needed,
    use Basic.as_real_imag() or perform complex expansion on an instance of
    this function.
Examples
========
>>> from sympy import re, im, E, I
>>> from sympy.abc import x, y
>>> im(2*E)
0
>>> re(2*I + 17)
17
>>> im(x*I)
re(x)
>>> im(re(x) + y)
im(y)
See Also
========
re
"""
is_real = True
    unbranched = True  # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return S.Zero
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return -S.ImaginaryUnit * arg
elif arg.is_Function and arg.func is conjugate:
return -im(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
else:
excluded.append(coeff)
elif term.has(S.ImaginaryUnit) or not term.is_real:
# Try to do some advanced expansion. If
# impossible, don't try to do im(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[1])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) + re(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Return the imaginary part with a zero real part.
Examples
========
>>> from sympy.functions import im
>>> from sympy import I
>>> im(2 + 3*I).as_real_imag()
(3, 0)
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return im(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* re(Derivative(self.args[0], x, evaluate=True))
def _sage_(self):
import sage.all as sage
return sage.imag_part(self.args[0]._sage_())
def _eval_rewrite_as_re(self, arg):
return self.args[0] - re(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
"""
Returns the complex sign of an expression:
    If the expression is real the sign will be:
* 1 if expression is positive
* 0 if expression is equal to zero
* -1 if expression is negative
    If the expression is imaginary the sign will be:
* I if im(expression) is positive
* -I if im(expression) is negative
Otherwise an unevaluated expression will be returned. When evaluated, the
result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.
Examples
========
>>> from sympy.functions import sign
>>> from sympy.core.numbers import I
>>> sign(-1)
-1
>>> sign(0)
0
>>> sign(-3*I)
-I
>>> sign(1 + I)
sign(1 + I)
>>> _.evalf()
0.707106781186548 + 0.707106781186548*I
See Also
========
Abs, conjugate
"""
is_finite = True
is_complex = True
def doit(self):
if self.args[0].is_nonzero:
return self.args[0] / Abs(self.args[0])
return self
@classmethod
def eval(cls, arg):
# handle what we can
if arg.is_Mul:
c, args = arg.as_coeff_mul()
unk = []
s = sign(c)
for a in args:
if a.is_negative:
s = -s
elif a.is_positive:
pass
else:
ai = im(a)
if a.is_imaginary and ai.is_comparable: # i.e. a = I*real
s *= S.ImaginaryUnit
if ai.is_negative:
# can't use sign(ai) here since ai might not be
# a Number
s = -s
else:
unk.append(a)
if c is S.One and len(unk) == len(args):
return None
return s * cls(arg._new_rawargs(*unk))
if arg is S.NaN:
return S.NaN
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_positive:
return S.One
if arg.is_negative:
return S.NegativeOne
if arg.is_Function:
if arg.func is sign:
return arg
if arg.is_imaginary:
if arg.is_Pow and arg.exp is S.Half:
# we catch this because non-trivial sqrt args are not expanded
# e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
return S.ImaginaryUnit
arg2 = -S.ImaginaryUnit * arg
if arg2.is_positive:
return S.ImaginaryUnit
if arg2.is_negative:
return -S.ImaginaryUnit
def _eval_Abs(self):
if self.args[0].is_nonzero:
return S.One
def _eval_conjugate(self):
return sign(conjugate(self.args[0]))
def _eval_derivative(self, x):
if self.args[0].is_real:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(self.args[0])
elif self.args[0].is_imaginary:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(-S.ImaginaryUnit * self.args[0])
def _eval_is_nonnegative(self):
if self.args[0].is_nonnegative:
return True
def _eval_is_nonpositive(self):
if self.args[0].is_nonpositive:
return True
def _eval_is_imaginary(self):
return self.args[0].is_imaginary
def _eval_is_integer(self):
return self.args[0].is_real
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_power(self, other):
if (
self.args[0].is_real and
self.args[0].is_nonzero and
other.is_integer and
other.is_even
):
return S.One
def _sage_(self):
import sage.all as sage
return sage.sgn(self.args[0]._sage_())
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))
def _eval_rewrite_as_Heaviside(self, arg):
if arg.is_real:
return C.Heaviside(arg)*2-1
def _eval_simplify(self, ratio, measure):
return self.func(self.args[0].factor())
class Abs(Function):
"""
Return the absolute value of the argument.
This is an extension of the built-in function abs() to accept symbolic
values. If you pass a SymPy expression to the built-in abs(), it will
pass it automatically to Abs().
Examples
========
>>> from sympy import Abs, Symbol, S
>>> Abs(-1)
1
>>> x = Symbol('x', real=True)
>>> Abs(-x)
Abs(x)
>>> Abs(x**2)
x**2
>>> abs(-x) # The Python built-in
Abs(x)
Note that the Python built-in will return either an Expr or int depending on
the argument::
>>> type(abs(-1))
<... 'int'>
>>> type(abs(S.NegativeOne))
<class 'sympy.core.numbers.One'>
Abs will always return a sympy object.
See Also
========
sign, conjugate
"""
is_real = True
is_negative = False
unbranched = True
def fdiff(self, argindex=1):
"""
Get the first derivative of the argument to Abs().
Examples
========
>>> from sympy.abc import x
>>> from sympy.functions import Abs
>>> Abs(-x).fdiff()
sign(x)
"""
if argindex == 1:
return sign(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy.simplify.simplify import signsimp
if hasattr(arg, '_eval_Abs'):
obj = arg._eval_Abs()
if obj is not None:
return obj
if not isinstance(arg, C.Expr):
raise TypeError("Bad argument type for Abs(): %s" % type(arg))
# handle what we can
arg = signsimp(arg, evaluate=False)
if arg.is_Mul:
known = []
unk = []
for t in arg.args:
tnew = cls(t)
if tnew.func is cls:
unk.append(tnew.args[0])
else:
known.append(tnew)
known = Mul(*known)
unk = cls(Mul(*unk), evaluate=False) if unk else S.One
return known*unk
if arg is S.NaN:
return S.NaN
if arg.is_Pow:
base, exponent = arg.as_base_exp()
if base.is_real:
if exponent.is_integer:
if exponent.is_even:
return arg
if base is S.NegativeOne:
return S.One
if base.func is cls and exponent is S.NegativeOne:
return arg
return Abs(base)**exponent
if base.is_positive == True:
return base**re(exponent)
return (-base)**re(exponent)*C.exp(-S.Pi*im(exponent))
if isinstance(arg, C.exp):
return C.exp(re(arg.args[0]))
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_nonnegative:
return arg
if arg.is_nonpositive:
return -arg
if arg.is_imaginary:
arg2 = -S.ImaginaryUnit * arg
if arg2.is_nonnegative:
return arg2
if arg.is_Add:
if arg.has(S.Infinity, S.NegativeInfinity):
if any(a.is_infinite for a in arg.as_real_imag()):
return S.Infinity
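            # for sums whose terms are each real or purely imaginary, fall back
            # to |z| = sqrt(z*conjugate(z)), which expand_mul can simplify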
if arg.is_real is None and arg.is_imaginary is None:
if all(a.is_real or a.is_imaginary or (S.ImaginaryUnit*a).is_real for a in arg.args):
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
if arg.is_real is False and arg.is_imaginary is False:
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
def _eval_is_integer(self):
if self.args[0].is_real:
return self.args[0].is_integer
def _eval_is_nonzero(self):
return self._args[0].is_nonzero
def _eval_is_positive(self):
return self.is_nonzero
def _eval_is_rational(self):
if self.args[0].is_real:
return self.args[0].is_rational
def _eval_is_even(self):
if self.args[0].is_real:
return self.args[0].is_even
def _eval_is_odd(self):
if self.args[0].is_real:
return self.args[0].is_odd
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _eval_power(self, exponent):
if self.args[0].is_real and exponent.is_integer:
if exponent.is_even:
return self.args[0]**exponent
elif exponent is not S.NegativeOne and exponent.is_Integer:
return self.args[0]**(exponent - 1)*self
return
def _eval_nseries(self, x, n, logx):
direction = self.args[0].leadterm(x)[0]
s = self.args[0]._eval_nseries(x, n=n, logx=logx)
when = Eq(direction, 0)
return Piecewise(
((s.subs(direction, 0)), when),
(sign(direction)*s, True),
)
def _sage_(self):
import sage.all as sage
return sage.abs_symbolic(self.args[0]._sage_())
def _eval_derivative(self, x):
if self.args[0].is_real or self.args[0].is_imaginary:
return Derivative(self.args[0], x, evaluate=True) \
* sign(conjugate(self.args[0]))
return (re(self.args[0]) * Derivative(re(self.args[0]), x,
evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
x, evaluate=True)) / Abs(self.args[0])
def _eval_rewrite_as_Heaviside(self, arg):
# Note this only holds for real arg (since Heaviside is not defined
# for complex arguments).
if arg.is_real:
return arg*(C.Heaviside(arg) - C.Heaviside(-arg))
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((arg, arg >= 0), (-arg, True))
def _eval_rewrite_as_sign(self, arg):
return arg/C.sign(arg)
class arg(Function):
"""Returns the argument (in radians) of a complex number"""
is_real = True
is_finite = True
@classmethod
def eval(cls, arg):
if not arg.is_Atom:
c, arg_ = factor_terms(arg).as_coeff_Mul()
if arg_.is_Mul:
arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
sign(a) for a in arg_.args])
arg_ = sign(c)*arg_
else:
arg_ = arg
x, y = re(arg_), im(arg_)
rv = C.atan2(y, x)
if rv.is_number and not rv.atoms(AppliedUndef):
return rv
if arg_ != arg:
return cls(arg_, evaluate=False)
def _eval_derivative(self, t):
x, y = re(self.args[0]), im(self.args[0])
return (x * Derivative(y, t, evaluate=True) - y *
Derivative(x, t, evaluate=True)) / (x**2 + y**2)
def _eval_rewrite_as_atan2(self, arg):
x, y = re(self.args[0]), im(self.args[0])
return atan2(y, x)
class conjugate(Function):
"""
Changes the sign of the imaginary part of a complex number.
Examples
========
>>> from sympy import conjugate, I
>>> conjugate(1 + I)
1 - I
See Also
========
sign, Abs
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_conjugate()
if obj is not None:
return obj
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
def _eval_adjoint(self):
return transpose(self.args[0])
def _eval_conjugate(self):
return self.args[0]
def _eval_derivative(self, x):
if x.is_real:
return conjugate(Derivative(self.args[0], x, evaluate=True))
elif x.is_imaginary:
return -conjugate(Derivative(self.args[0], x, evaluate=True))
def _eval_transpose(self):
return adjoint(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
class transpose(Function):
"""
Linear map transposition.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_transpose()
if obj is not None:
return obj
def _eval_adjoint(self):
return conjugate(self.args[0])
def _eval_conjugate(self):
return adjoint(self.args[0])
def _eval_transpose(self):
return self.args[0]
class adjoint(Function):
"""
Conjugate transpose or Hermite conjugation.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_adjoint()
if obj is not None:
return obj
obj = arg._eval_transpose()
if obj is not None:
return conjugate(obj)
def _eval_adjoint(self):
return self.args[0]
def _eval_conjugate(self):
return transpose(self.args[0])
def _eval_transpose(self):
return conjugate(self.args[0])
def _latex(self, printer, exp=None, *args):
arg = printer._print(self.args[0])
tex = r'%s^{\dag}' % arg
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, printer._print(exp))
return tex
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
pform = printer._print(self.args[0], *args)
if printer._use_unicode:
pform = pform**prettyForm(u('\N{DAGGER}'))
else:
pform = pform**prettyForm('+')
return pform
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
"""
Lift argument to the Riemann surface of the logarithm, using the
standard branch.
>>> from sympy import Symbol, polar_lift, I
>>> p = Symbol('p', polar=True)
>>> x = Symbol('x')
>>> polar_lift(4)
4*exp_polar(0)
>>> polar_lift(-4)
4*exp_polar(I*pi)
>>> polar_lift(-I)
exp_polar(-I*pi/2)
>>> polar_lift(I + 2)
polar_lift(2 + I)
>>> polar_lift(4*x)
4*polar_lift(x)
>>> polar_lift(4*p)
4*p
See Also
========
sympy.functions.elementary.exponential.exp_polar
periodic_argument
"""
is_polar = True
is_comparable = False # Cannot be evalf'd.
@classmethod
def eval(cls, arg):
from sympy import exp_polar, pi, I, arg as argument
if arg.is_number:
ar = argument(arg)
#if not ar.has(argument) and not ar.has(atan):
if ar in (0, pi/2, -pi/2, pi):
return exp_polar(I*ar)*abs(arg)
if arg.is_Mul:
args = arg.args
else:
args = [arg]
included = []
excluded = []
positive = []
for arg in args:
if arg.is_polar:
included += [arg]
elif arg.is_positive:
positive += [arg]
else:
excluded += [arg]
if len(excluded) < len(args):
if excluded:
return Mul(*(included + positive))*polar_lift(Mul(*excluded))
elif included:
return Mul(*(included + positive))
else:
return Mul(*positive)*exp_polar(0)
def _eval_evalf(self, prec):
""" Careful! any evalf of polar numbers is flaky """
return self.args[0]._eval_evalf(prec)
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
"""
Represent the argument on a quotient of the Riemann surface of the
logarithm. That is, given a period P, always return a value in
(-P/2, P/2], by using exp(P*I) == 1.
>>> from sympy import exp, exp_polar, periodic_argument, unbranched_argument
>>> from sympy import I, pi
>>> unbranched_argument(exp(5*I*pi))
pi
>>> unbranched_argument(exp_polar(5*I*pi))
5*pi
>>> periodic_argument(exp_polar(5*I*pi), 2*pi)
pi
>>> periodic_argument(exp_polar(5*I*pi), 3*pi)
-pi
>>> periodic_argument(exp_polar(5*I*pi), pi)
0
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
principal_branch
"""
@classmethod
def _getunbranched(cls, ar):
from sympy import exp_polar, log, polar_lift
if ar.is_Mul:
args = ar.args
else:
args = [ar]
unbranched = 0
for a in args:
if not a.is_polar:
unbranched += arg(a)
elif a.func is exp_polar:
unbranched += a.exp.as_real_imag()[1]
elif a.is_Pow:
re, im = a.exp.as_real_imag()
unbranched += re*unbranched_argument(
a.base) + im*log(abs(a.base))
elif a.func is polar_lift:
unbranched += arg(a.args[0])
else:
return None
return unbranched
@classmethod
def eval(cls, ar, period):
# Our strategy is to evaluate the argument on the Riemann surface of the
# logarithm, and then reduce.
# NOTE evidently this means it is a rather bad idea to use this with
# period != 2*pi and non-polar numbers.
from sympy import ceiling, oo, atan2, atan, polar_lift, pi, Mul
if not period.is_positive:
return None
if period == oo and isinstance(ar, principal_branch):
return periodic_argument(*ar.args)
if ar.func is polar_lift and period >= 2*pi:
return periodic_argument(ar.args[0], period)
if ar.is_Mul:
newargs = [x for x in ar.args if not x.is_positive]
if len(newargs) != len(ar.args):
return periodic_argument(Mul(*newargs), period)
unbranched = cls._getunbranched(ar)
if unbranched is None:
return None
if unbranched.has(periodic_argument, atan2, arg, atan):
return None
if period == oo:
return unbranched
if period != oo:
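            # subtract the nearest multiple of the period so the result lands
            # in the half-open interval (-period/2, period/2]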
n = ceiling(unbranched/period - S(1)/2)*period
if not n.has(ceiling):
return unbranched - n
def _eval_evalf(self, prec):
from sympy import ceiling, oo
z, period = self.args
if period == oo:
unbranched = periodic_argument._getunbranched(z)
if unbranched is None:
return self
return unbranched._eval_evalf(prec)
ub = periodic_argument(z, oo)._eval_evalf(prec)
return (ub - ceiling(ub/period - S(1)/2)*period)._eval_evalf(prec)
def unbranched_argument(arg):
from sympy import oo
return periodic_argument(arg, oo)
class principal_branch(Function):
"""
Represent a polar number reduced to its principal branch on a quotient
of the Riemann surface of the logarithm.
This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number or infinity, `p`.
The result is "z mod exp_polar(I*p)".
>>> from sympy import exp_polar, principal_branch, oo, I, pi
>>> from sympy.abc import z
>>> principal_branch(z, oo)
z
>>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
3*exp_polar(0)
>>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
3*principal_branch(z, 2*pi)
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
periodic_argument
"""
is_polar = True
is_comparable = False # cannot always be evalf'd
@classmethod
def eval(self, x, period):
from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
if isinstance(x, polar_lift):
return principal_branch(x.args[0], period)
if period == oo:
return x
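        # compare the unbranched argument with its reduction modulo the period;
        # if they differ, x can be rewritten with an explicit exp_polar factor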
ub = periodic_argument(x, oo)
barg = periodic_argument(x, period)
if ub != barg and not ub.has(periodic_argument) \
and not barg.has(periodic_argument):
pl = polar_lift(x)
def mr(expr):
if not isinstance(expr, Symbol):
return polar_lift(expr)
return expr
pl = pl.replace(polar_lift, mr)
if not pl.has(polar_lift):
res = exp_polar(I*(barg - ub))*pl
if not res.is_polar and not res.has(exp_polar):
res *= exp_polar(0)
return res
if not x.free_symbols:
c, m = x, ()
else:
c, m = x.as_coeff_mul(*x.free_symbols)
others = []
for y in m:
if y.is_positive:
c *= y
else:
others += [y]
m = tuple(others)
arg = periodic_argument(c, period)
if arg.has(periodic_argument):
return None
if arg.is_number and (unbranched_argument(c) != arg or
(arg == 0 and m != () and c != 1)):
if arg == 0:
return abs(c)*principal_branch(Mul(*m), period)
return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
and m == ():
return exp_polar(arg*I)*abs(c)
def _eval_evalf(self, prec):
from sympy import exp, pi, I
z, period = self.args
p = periodic_argument(z, period)._eval_evalf(prec)
if abs(p) > pi or p == -pi:
return self # Cannot evalf for this argument.
return (abs(z)*exp(I*p))._eval_evalf(prec)
# /cyclic/
from sympy.core import basic as _
_.abs_ = Abs
del _
|
the-stack_0_5742 | """This module contains nodes for spectral analysis with Timeflux."""
import numpy as np
import pandas as pd
import xarray as xr
from scipy.signal import welch
from scipy.fft import fftfreq, rfftfreq, fft, rfft
from timeflux.core.node import Node
class FFT(Node):
"""Compute the one-dimensional discrete Fourier Transform for each column using the Fast Fourier Tranform algorithm.
Attributes:
i (Port): default input, expects DataFrame.
o (Port): default output, provides DataArray.
Example:
        In this example, we simulate white noise and apply the FFT:
* ``fs`` = `10.0`
* ``nfft`` = `5`
* ``return_onesided`` = `False`
self.i.data::
A B C
2017-12-31 23:59:59.998745401 0.185133 0.541901 0.872946
2018-01-01 00:00:00.104507143 0.732225 0.806561 0.658783
2018-01-01 00:00:00.202319939 0.692277 0.849196 0.249668
2018-01-01 00:00:00.300986584 0.489425 0.221209 0.987668
2018-01-01 00:00:00.396560186 0.944059 0.039427 0.705575
self.o.data::
xarray.DataArray (times: 1, freqs: 5, space: 3)
array([[[ 3.043119+0.j , 2.458294+0.j , 3.47464 +0.j ],
[-0.252884+0.082233j, -0.06265 -1.098709j, 0.29353 +0.478287j],
[-0.805843+0.317437j, 0.188256+0.146341j, 0.151515-0.674376j],
[-0.805843-0.317437j, 0.188256-0.146341j, 0.151515+0.674376j],
[-0.252884-0.082233j, -0.06265 +1.098709j, 0.29353 -0.478287j]]])
Coordinates:
* times (times) datetime64[ns] 2018-01-01T00:00:00.396560186
* freqs (freqs) float64 0.0 2.0 4.0 -4.0 -2.0
* space (space) object 'A' 'B' 'C'
Notes:
This node should be used after a buffer.
References:
* `scipy.fft <https://docs.scipy.org/doc/scipy/reference/fft.html>`_
"""
def __init__(self, fs=1.0, nfft=None, return_onesided=True):
"""
Args:
fs (float): Nominal sampling rate of the input data.
nfft (int|None): Length of the Fourier transform. Default: length of the chunk.
return_onesided (bool): If `True`, return a one-sided spectrum for real data.
If `False` return a two-sided spectrum.
(Note that for complex data, a two-sided spectrum is always returned.)
Default: `True`.
"""
self._fs = fs
self._nfft = nfft
if return_onesided:
self._sides = "onesided"
else:
self._sides = "twosided"
if self._nfft is not None:
self._set_freqs()
def _check_nfft(self):
# Check validity of nfft at first chunk
if self._nfft is None:
self.logger.debug("nfft := length of the chunk ")
self._nfft = self.i.data.shape[0]
self._set_freqs()
elif self._nfft < self.i.data.shape[0]:
raise ValueError("nfft must be greater than or equal to length of chunk.")
else:
self._nfft = int(self._nfft)
def _set_freqs(self):
# Set freqs indexes
if self._sides == "onesided":
self._freqs = rfftfreq(self._nfft, 1 / self._fs)
else:
self._freqs = fftfreq(self._nfft, 1 / self._fs)
def update(self):
# copy the meta
self.o = self.i
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# At this point, we are sure that we have some data to process
self._check_nfft()
self.o.data = self.i.data
if self._sides == "twosided":
func = fft
else:
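            # a one-sided spectrum is only defined for real signals, so keep
            # the real part before applying rfft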
self.o.data = self.o.data.apply(lambda x: x.real)
func = rfft
values = func(self.o.data.values.T, n=self._nfft).T
self.o.data = xr.DataArray(
np.stack([values], 0),
coords=[[self.o.data.index[-1]], self._freqs, self.o.data.columns],
dims=["time", "freq", "space"],
)
class Welch(Node):
"""Estimate power spectral density using Welch’s method.
Attributes:
i (Port): default input, expects DataFrame.
o (Port): default output, provides DataArray with dimensions (time, freq, space).
Example:
        In this example, we simulate data with a noisy sine on three sensors (columns `a`, `b`, `c`):
* ``fs`` = `100.0`
* ``nfft`` = `24`
node.i.data::
\s a b c
1970-01-01 00:00:00.000 -0.233920 -0.343296 0.157988
1970-01-01 00:00:00.010 0.460353 0.777296 0.957201
1970-01-01 00:00:00.020 0.768459 1.234923 1.942190
1970-01-01 00:00:00.030 1.255393 1.782445 2.326175
... ... ... ...
1970-01-01 00:00:01.190 1.185759 2.603828 3.315607
node.o.data::
<xarray.DataArray (time: 1, freq: 13, space: 3)>
array([[[2.823924e-02, 1.087382e-01, 1.153163e-01],
[1.703466e-01, 6.048703e-01, 6.310628e-01],
... ... ...
[9.989429e-04, 8.519226e-04, 7.769918e-04],
[1.239551e-03, 7.412518e-04, 9.863335e-04],
[5.382880e-04, 4.999334e-04, 4.702757e-04]]])
Coordinates:
* time (time) datetime64[ns] 1970-01-01T00:00:01.190000
* freq (freq) float64 0.0 4.167 8.333 12.5 16.67 ... 37.5 41.67 45.83 50.0
* space (space) object 'a' 'b' 'c'
Notes:
This node should be used after a Window with the appropriate length, with regard to the parameters
`noverlap`, `nperseg` and `nfft`.
It should be noted that a pipeline such as {LargeWindow-Welch} is in fact equivalent to a pipeline
        {SmallWindow-FFT-LargeWindow-Average} with SmallWindow's parameters `length` and `step` respectively
equivalent to `nperseg` and `step` and with FFT node with same kwargs.
"""
def __init__(self, rate=None, closed="right", **kwargs):
"""
Args:
            rate (float|None): Nominal sampling rate of the input data. If `None`, the rate will be taken from the input meta.
closed (str): Make the index closed on the `right`, `left` or `center`.
kwargs: Keyword arguments to pass to scipy.signal.welch function.
You can specify: window, nperseg, noverlap, nfft, detrend, return_onesided and scaling.
"""
self._rate = rate
self._closed = closed
self._kwargs = kwargs
self._set_default()
def _set_default(self):
        # We set the default params if they are not specified in kwargs, so that we can check that they are valid with respect to the length and sampling of the input data.
if "nperseg" not in self._kwargs.keys():
self._kwargs["nperseg"] = 256
self.logger.debug("nperseg := 256")
if "nfft" not in self._kwargs.keys():
self._kwargs["nfft"] = self._kwargs["nperseg"]
self.logger.debug(
"nfft := nperseg := {nperseg}".format(nperseg=self._kwargs["nperseg"])
)
if "noverlap" not in self._kwargs.keys():
self._kwargs["noverlap"] = self._kwargs["nperseg"] // 2
self.logger.debug(
"noverlap := nperseg/2 := {noverlap}".format(
noverlap=self._kwargs["noverlap"]
)
)
def _check_nfft(self):
        # Check validity of nfft at first chunk
if not all(
i <= len(self.i.data)
for i in [self._kwargs[k] for k in ["nfft", "nperseg", "noverlap"]]
):
raise ValueError(
"nfft, noverlap and nperseg must be greater than or equal to length of chunk."
)
else:
self._kwargs.update(
{
keyword: int(self._kwargs[keyword])
for keyword in ["nfft", "nperseg", "noverlap"]
}
)
def update(self):
# copy the meta
self.o = self.i
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# Check rate
if self._rate:
rate = self._rate
elif "rate" in self.i.meta:
rate = self.i.meta["rate"]
else:
raise ValueError(
"The rate was neither explicitely defined nor found in the stream meta."
)
# At this point, we are sure that we have some data to process
# apply welch on the data:
self._check_nfft()
f, Pxx = welch(x=self.i.data, fs=rate, **self._kwargs, axis=0)
if self._closed == "left":
time = self.i.data.index[-1]
elif self._closed == "center":
def middle(a):
return int(np.ceil(len(a) / 2)) - 1
time = self.i.data.index[middle(self.i.data)]
else: # right
time = self.i.data.index[-1]
# f is the frequency axis and Pxx the average power of shape (Nfreqs x Nchanels)
# we reshape Pxx to fit the ('time' x 'freq' x 'space') dimensions
self.o.data = xr.DataArray(
np.stack([Pxx], 0),
coords=[[time], f, self.i.data.columns],
dims=["time", "frequency", "space"],
)
class Bands(Node):
"""Averages the XArray values over freq dimension according to the frequencies bands given in arguments.
This node selects a subset of values over the chosen dimensions, averages them along this axis and convert the result into a flat dataframe.
This node will output as many ports bands as given bands, with their respective name as suffix.
Attributes:
        i (Port): default input, expects DataArray with 3 dimensions (time, frequency, space).
o (Port): Default output, provides DataFrame.
o_* (Port): Dynamic outputs, provide DataFrame.
"""
def __init__(self, bands=None, relative=False):
"""
Args:
            bands (dict): Define the bands to extract given their names and ranges.
                An output port will be created with each given name as suffix.
            relative (bool): If True, the power in each band is divided by the
                total power over all frequencies.
"""
bands = bands or {
"delta": [1, 4],
"theta": [4, 8],
"alpha": [8, 12],
"beta": [12, 30],
}
self._relative = relative
self._bands = []
for band_name, band_range in bands.items():
self._bands.append(
dict(
port=getattr(self, "o_" + band_name),
slice=slice(band_range[0], band_range[1]),
meta={"bands": {"range": band_range, "relative": relative}},
)
)
def update(self):
# When we have not received data, there is nothing to do
if not self.i.ready():
return
# At this point, we are sure that we have some data to process
for band in self._bands:
# 1. select the Xarray on freq axis in the range, 2. average along freq axis
band_power = (
self.i.data.loc[{"frequency": band["slice"]}].sum("frequency").values
) # todo: sum
if self._relative:
tot_power = self.i.data.sum("frequency").values
tot_power[tot_power == 0.0] = 1
band_power /= tot_power
band["port"].data = pd.DataFrame(
columns=self.i.data.space.values,
index=self.i.data.time.values,
data=band_power,
)
band["port"].meta = {**(self.i.meta or {}), **band["meta"]}
|
the-stack_0_5744 | # Copyright 2016 Medical Research Council Harwell.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @author James Brown <[email protected]>
import subprocess as sp
import numpy as np
import re
from tempfile import NamedTemporaryFile
minc_dtypes = {'unsigned': {'byte': np.uint8, 'short': np.uint16, 'float': np.float32},
'signed': {'byte': np.int8, 'short': np.int16, 'float': np.float32}}
def minc_to_numpy(minc_file):
info = minc_info(minc_file)
if not info:
return False
mrsg = MincRawSliceGenerator(minc_file, info)
vol = mrsg.volume
return vol
def mincstats_to_numpy(minc_file):
info = minc_info(minc_file)
if not info:
return False
info['dtype'] = 'float' # have to force it, because mincinfo is hopeless
info['np_dtype'] = np.float32
mrsg = MincRawSliceGenerator(minc_file, info)
vol = mrsg.volume
return vol
class SliceGenerator(object):
def __init__(self, recon):
self.recon = recon
self.slice_index = 0
def slices(self):
"""The slices method should yield xy image slices from a memory mapped numpy array."""
raise NotImplementedError("Ths method needs overriding")
def dtype(self):
"""The dtype method should return the datatype of the memory mapped numpy array"""
raise NotImplementedError("Ths method needs overriding")
def shape(self):
"""The shape method should return the shape of the memory mapped numpy array in x, y, z order."""
raise NotImplementedError("Ths method needs overriding")
class MincRawSliceGenerator(SliceGenerator):
"""The MincRawSliceGenerator class extends SliceGenerator, yielding slices from a single MINC (Medical Image NetCDF)
    file, having been dumped to a temporary raw file via mincextract. mincinfo is used to determine the file type/dimensions
"""
def __init__(self, recon, info):
"""The constructor takes a recon path as an argument, and dumps the MINC file to a temporary raw file. The raw file
is then memory mapped using numpy, from which slices are yielded.
:param recon: a path to a MINC file.
"""
super(MincRawSliceGenerator, self).__init__(recon)
self.ext = 'mnc'
tmp_file = NamedTemporaryFile() # TemporaryFile() seems not to work with Python3.4
sp.call(['mincextract', '-{}'.format(info['dtype']),
'-{}'.format(info['sign']), recon], stdout=tmp_file)
self.volume = np.fromfile(tmp_file.name, dtype=info['np_dtype']).reshape(info['shape'])
def slices(self, start=0):
"""Slices are yielded one slice at a time from the memory mapped numpy array
"""
try:
for i in range(self.volume.shape[0]):
yield self.volume[i, :, :]
except Exception:
raise CorruptReconError("Error yielding slices from MINC file")
def dtype(self):
"""Overrides the superclass to return the data type of the MINC file i.e. 8 bit/16 bit.
"""
return self.volume.dtype
def shape(self):
"""Overrides the superclass to return the shape of the MINC file.
"""
return self.volume.shape[::-1]
def minc_info(recon):
try:
info = sp.check_output(['mincinfo', recon], universal_newlines=True)
except OSError as e:
raise OSError("Minc tools not installed\n{}".format(e))
#info = str(info)
info_dict = {}
dims = []
for line in info.splitlines():
if 'image:' in line: # strip non alphanumeric characters
# Get range
min_max = re.findall('\d+ to \d+', line)[0]
info_dict['min'], info_dict['max'] = int(min_max.split()[0]), int(min_max.split()[2])
regex = re.compile('[^a-zA-Z]')
info_dict['sign'] = regex.sub('', line.split()[1])
info_dict['dtype'] = regex.sub('', line.split()[2])
try:
info_dict['np_dtype'] = minc_dtypes[info_dict['sign']][info_dict['dtype']]
except KeyError:
return None
elif 'dimensions' not in line and any(space in line for space in ['xspace', 'yspace', 'zspace']):
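            # the x/y/z space lines carry the dimension length and the step
            # size, from which the voxel size (in microns) is derived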
spacing = line.split()
dims.append(int(spacing[1]))
info_dict['voxel_size'] = float(spacing[2]) * 1000 # in microns
info_dict['shape'] = tuple(dims) # zspace, yspace, xspace
return info_dict
class ReconFormatError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class CorruptReconError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
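# Illustrative usage sketch only (not part of the original module): the path is
# hypothetical and the MINC command-line tools must be installed for this to run.
if __name__ == '__main__':
    example_recon = '/tmp/example_recon.mnc'  # hypothetical file path
    volume = minc_to_numpy(example_recon)
    if volume is False:
        print('mincinfo could not read {}'.format(example_recon))
    else:
        print('Loaded volume with shape {} and dtype {}'.format(volume.shape, volume.dtype))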
|
the-stack_0_5745 | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from bricks.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
_localedir = os.environ.get('bricks'.upper() + '_LOCALEDIR')
_t = gettext.translation('bricks', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('bricks' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='bricks')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='bricks' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='bricks', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
else:
params = self._copy_param(other)
return params
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except TypeError:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale, alias) in six.iteritems(aliases):
if locale in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
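# Illustrative sketch only (not part of the original module): with no translation
# catalogs installed, the lazy Message falls back to its English msgid, so this
# prints "Hello world".
if __name__ == '__main__':
    enable_lazy()
    greeting = _('Hello %(name)s') % {'name': 'world'}
    print(greeting.translate())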
|
the-stack_0_5746 | import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.KratosUnittest as KratosUnittest
from math import sqrt, sin, cos, pi, exp, atan
class TestComputeCenterOfGravity(KratosUnittest.TestCase):
# muting the output
KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)
def _apply_beam_material_properties(self,mp,dim):
#define properties
mp.GetProperties()[0].SetValue(KratosMultiphysics.YOUNG_MODULUS,210e9)
mp.GetProperties()[0].SetValue(KratosMultiphysics.DENSITY,7850)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.CROSS_AREA,0.01)
mp.GetProperties()[0].SetValue(KratosMultiphysics.POISSON_RATIO,0.30)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.TORSIONAL_INERTIA,0.00001)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I22,0.00001)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I33,0.00001)
cl = StructuralMechanicsApplication.LinearElastic3DLaw()
mp.GetProperties()[0].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _apply_shell_material_properties(self,mp):
#define properties
mp.GetProperties()[1].SetValue(KratosMultiphysics.YOUNG_MODULUS,100e3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.POISSON_RATIO,0.3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.THICKNESS,1.0)
mp.GetProperties()[1].SetValue(KratosMultiphysics.DENSITY,1.0)
cl = StructuralMechanicsApplication.LinearElasticPlaneStress2DLaw()
mp.GetProperties()[1].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _apply_orthotropic_shell_material_properties(self,mp):
#define properties
        # we specify only the properties we need (others are Young's modulus etc.)
num_plies = 3
orthotropic_props = KratosMultiphysics.Matrix(num_plies,16)
for row in range(num_plies):
for col in range(16):
orthotropic_props[row,col] = 0.0
# Orthotropic mechanical moduli
orthotropic_props[0,0] = 0.005 # lamina thickness
orthotropic_props[0,2] = 2200 # density
orthotropic_props[1,0] = 0.01 # lamina thickness
orthotropic_props[1,2] = 1475 # density
orthotropic_props[2,0] = 0.015 # lamina thickness
orthotropic_props[2,2] = 520 # density
mp.GetProperties()[1].SetValue(StructuralMechanicsApplication.SHELL_ORTHOTROPIC_LAYERS,orthotropic_props)
cl = StructuralMechanicsApplication.LinearElasticOrthotropic2DLaw()
mp.GetProperties()[1].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _apply_solid_material_properties(self,mp):
#define properties
mp.GetProperties()[1].SetValue(KratosMultiphysics.YOUNG_MODULUS,100e3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.POISSON_RATIO,0.3)
mp.GetProperties()[1].SetValue(KratosMultiphysics.THICKNESS,1.0)
mp.GetProperties()[1].SetValue(KratosMultiphysics.DENSITY,1.0)
cl = StructuralMechanicsApplication.LinearElasticPlaneStrain2DLaw()
mp.GetProperties()[1].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def _create_shell_nodes(self,mp):
mp.CreateNewNode(1, -0.5, - 0.45, 0.1)
mp.CreateNewNode(2, 0.7, -0.5, 0.2)
mp.CreateNewNode(3, 0.55, 0.6, 0.15)
mp.CreateNewNode(4, -0.48, 0.65, 0.0)
mp.CreateNewNode(5, 0.02, -0.01, -0.15)
def _create_shell_elements(self,mp,element_name = "ShellThinElementCorotational3D3N"):
mp.CreateNewElement(element_name, 1, [1,2,5], mp.GetProperties()[1])
mp.CreateNewElement(element_name, 2, [2,3,5], mp.GetProperties()[1])
mp.CreateNewElement(element_name, 3, [3,4,5], mp.GetProperties()[1])
mp.CreateNewElement(element_name, 4, [4,1,5], mp.GetProperties()[1])
def test_nodal_cog(self):
dim = 3
nr_nodes = 4
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_nodal_masses")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
#create nodes
dx = 1.2
for i in range(nr_nodes):
mp.CreateNewNode(i+1,i*dx,0.00,0.00)
#add dofs
#create Element
elem1 = mp.CreateNewElement("NodalConcentratedElement2D1N", 1, [1], mp.GetProperties()[0])
elem2 = mp.CreateNewElement("NodalConcentratedElement2D1N", 2, [2], mp.GetProperties()[0])
elem3 = mp.CreateNewElement("NodalConcentratedElement3D1N", 3, [3], mp.GetProperties()[0])
elem4 = mp.CreateNewElement("NodalConcentratedElement3D1N", 4, [4], mp.GetProperties()[0])
elem1.SetValue(KratosMultiphysics.NODAL_MASS,21.234)
elem2.SetValue(KratosMultiphysics.NODAL_MASS,5.234)
elem3.SetValue(KratosMultiphysics.NODAL_MASS,112.234)
elem4.SetValue(KratosMultiphysics.NODAL_MASS,78.234)
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(2.5688903639, center_of_gravity[0])
self.assertAlmostEqual(0.0, center_of_gravity[1])
self.assertAlmostEqual(0.0, center_of_gravity[2])
def test_beam_cog(self):
dim = 3
nr_nodes = 11
nr_elements = nr_nodes-1
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_beams")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
self._apply_beam_material_properties(mp,dim)
#create nodes
dx = 1.20 / nr_elements
for i in range(nr_nodes):
mp.CreateNewNode(i+1,i*dx,0.00,0.00)
#add dofs
#create Element
for i in range(nr_elements):
elem = mp.CreateNewElement("CrLinearBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0])
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.6, center_of_gravity[0])
self.assertAlmostEqual(0.0, center_of_gravity[1])
self.assertAlmostEqual(0.0, center_of_gravity[2])
def test_shell_cog(self):
dim = 3
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_shells")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
mp.SetBufferSize(2)
self._apply_shell_material_properties(mp)
self._create_shell_nodes(mp)
self._create_shell_elements(mp)
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.0723057, center_of_gravity[0])
self.assertAlmostEqual(0.0517395, center_of_gravity[1])
self.assertAlmostEqual(0.0269436, center_of_gravity[2])
def test_orthotropic_shell_cog(self):
dim = 3
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_orthotropic_shells")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
mp.SetBufferSize(2)
self._apply_orthotropic_shell_material_properties(mp)
self._create_shell_nodes(mp)
self._create_shell_elements(mp)
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.0723057, center_of_gravity[0])
self.assertAlmostEqual(0.0517395, center_of_gravity[1])
self.assertAlmostEqual(0.0269436, center_of_gravity[2])
def test_solid_cog(self):
dim = 2
current_model = KratosMultiphysics.Model()
mp = current_model.CreateModelPart("structural_part_solids")
mp.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = dim
mp.SetBufferSize(2)
self._apply_solid_material_properties(mp)
#create nodes
mp.CreateNewNode(1,0.5,0.5,0.0)
mp.CreateNewNode(2,0.7,0.2,0.0)
mp.CreateNewNode(3,0.9,0.8,0.0)
mp.CreateNewNode(4,0.3,0.7,0.0)
mp.CreateNewNode(5,0.6,0.6,0.0)
#create Element
mp.CreateNewElement("TotalLagrangianElement2D3N", 1, [1,2,5], mp.GetProperties()[1])
mp.CreateNewElement("TotalLagrangianElement2D3N", 2, [2,3,5], mp.GetProperties()[1])
mp.CreateNewElement("TotalLagrangianElement2D3N", 3, [3,4,5], mp.GetProperties()[1])
mp.CreateNewElement("TotalLagrangianElement2D3N", 4, [4,1,5], mp.GetProperties()[1])
cog_process = StructuralMechanicsApplication.ComputeCenterOfGravityProcess(mp)
cog_process.Execute()
center_of_gravity = mp.ProcessInfo[StructuralMechanicsApplication.CENTER_OF_GRAVITY]
self.assertAlmostEqual(0.6416666667, center_of_gravity[0])
self.assertAlmostEqual(0.5729166667, center_of_gravity[1])
self.assertAlmostEqual(0.0, center_of_gravity[2])
if __name__ == '__main__':
KratosUnittest.main()
|
the-stack_0_5747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import itertools
import re
import threading
import uuid
import requests
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, rawResponse
from bson.objectid import ObjectId
from girderformindlogger.constants import AccessType, SortDir, TokenScope, \
DEFINED_INFORMANTS, REPROLIB_CANONICAL, SPECIAL_SUBJECTS, USER_ROLES
from girderformindlogger.api import access
from girderformindlogger.exceptions import AccessException, ValidationException
from girderformindlogger.models.activity import Activity as ActivityModel
from girderformindlogger.models.applet import Applet as AppletModel
from girderformindlogger.models.collection import Collection as CollectionModel
from girderformindlogger.models.folder import Folder as FolderModel
from girderformindlogger.models.group import Group as GroupModel
from girderformindlogger.models.item import Item as ItemModel
from girderformindlogger.models.protocol import Protocol as ProtocolModel
from girderformindlogger.models.roles import getCanonicalUser, getUserCipher
from girderformindlogger.models.user import User as UserModel
from girderformindlogger.utility import config, jsonld_expander
from pyld import jsonld
USER_ROLE_KEYS = USER_ROLES.keys()
class Applet(Resource):
def __init__(self):
super(Applet, self).__init__()
self.resourceName = 'applet'
self._model = AppletModel()
self.route('GET', (':id',), self.getApplet)
self.route('GET', (':id', 'data'), self.getAppletData)
self.route('GET', (':id', 'groups'), self.getAppletGroups)
self.route('POST', (), self.createApplet)
self.route('PUT', (':id', 'informant'), self.updateInformant)
self.route('PUT', (':id', 'assign'), self.assignGroup)
self.route('PUT', (':id', 'constraints'), self.setConstraints)
self.route('PUT', (':id', 'schedule'), self.setSchedule)
self.route('POST', (':id', 'invite'), self.invite)
self.route('GET', (':id', 'roles'), self.getAppletRoles)
self.route('GET', (':id', 'users'), self.getAppletUsers)
self.route('DELETE', (':id',), self.deactivateApplet)
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Get userlist, groups & statuses.')
.modelParam(
'id',
model=FolderModel,
level=AccessType.ADMIN,
destName='applet'
)
)
def getAppletUsers(self, applet):
thisUser=self.getCurrentUser()
if AppletModel().isCoordinator(applet['_id'], thisUser):
return(AppletModel().getAppletUsers(applet, thisUser, force=True))
else:
raise AccessException(
"Only coordinators and managers can see user lists."
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Assign a group to a role in an applet.')
.deprecated()
.responseClass('Folder')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.param(
'group',
'ID of the group to assign.',
required=True,
strip=True
)
.param(
'role',
'Role to invite this user to. One of ' + str(USER_ROLE_KEYS),
default='user',
required=False,
strip=True
)
.jsonParam(
'subject',
'Requires a JSON Object in the form \n```'
'{'
' "groups": {'
' "«relationship»": []'
' },'
' "users": {'
' "«relationship»": []'
' }'
'}'
'``` \n For \'user\' or \'reviewer\' assignments, specify '
'group-level relationships, filling in \'«relationship»\' with a '
            'JSON-ld key semantically defined in your context, and IDs in '
'the value Arrays (either applet-specific or canonical IDs in the '
'case of users; applet-specific IDs will be stored either way).',
paramType='form',
required=False,
requireObject=True
)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the folder or its new parent object.', 403)
)
def assignGroup(self, folder, group, role, subject):
applet = folder
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
thisUser=self.getCurrentUser()
group=GroupModel().load(group, level=AccessType.WRITE, user=thisUser)
return(
AppletModel().setGroupRole(
applet,
group,
role,
currentUser=thisUser,
force=False,
subject=subject
)
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Create an applet.')
.param(
'protocolUrl',
'URL of Activity Set from which to create applet',
required=False
)
.param(
'name',
'Name to give the applet. The Protocol\'s name will be used if '
'this parameter is not provided.',
required=False
)
.param(
'informant',
' '.join([
'Relationship from informant to individual of interest.',
'Currently handled informant relationships are',
str([r for r in DEFINED_INFORMANTS.keys()])
]),
required=False
)
.errorResponse('Write access was denied for this applet.', 403)
)
def createApplet(self, protocolUrl=None, name=None, informant=None):
thisUser = self.getCurrentUser()
thread = threading.Thread(
target=AppletModel().createAppletFromUrl,
kwargs={
'name': name,
'protocolUrl': protocolUrl,
'user': thisUser,
'constraints': {
'informantRelationship': informant
} if informant is not None else None
}
)
thread.start()
return({
"message": "The applet is being created. Please check back in "
"several mintutes to see it. If you have an email "
"address associated with your account, you will receive "
"an email when your applet is ready."
})
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Get all data you are authorized to see for an applet.')
.param(
'id',
'ID of the applet for which to fetch data',
required=True
)
.param(
'format',
'JSON or CSV',
required=False
)
.errorResponse('Write access was denied for this applet.', 403)
)
def getAppletData(self, id, format='json'):
import pandas as pd
from datetime import datetime
from ..rest import setContentDisposition, setRawResponse, setResponseHeader
format = ('json' if format is None else format).lower()
thisUser = self.getCurrentUser()
data = AppletModel().getResponseData(id, thisUser)
setContentDisposition("{}-{}.{}".format(
str(id),
datetime.now().isoformat(),
format
))
if format=='csv':
setRawResponse()
setResponseHeader('Content-Type', 'text/{}'.format(format))
csv = pd.DataFrame(data).to_csv(index=False)
return(csv)
setResponseHeader('Content-Type', 'application/{}'.format(format))
return(data)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('(managers only) Update the informant of an applet.')
.modelParam(
'id',
model=AppletModel,
description='ID of the applet to update',
destName='applet',
force=True,
required=True
)
.param(
'informant',
' '.join([
'Relationship from informant to individual of interest.',
'Currently handled informant relationships are',
str([r for r in DEFINED_INFORMANTS.keys()])
]),
required=True
)
.errorResponse('Write access was denied for this applet.', 403)
)
def updateInformant(self, applet, informant):
user = self.getCurrentUser()
if not AppletModel().isManager(applet['_id'], user):
raise AccessException(
"Only managers can update informant relationship"
)
AppletModel().updateRelationship(applet, informant)
return(
jsonld_expander.formatLdObject(
applet,
'applet',
user,
refreshCache=False
)
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Deactivate an applet by ID.')
.modelParam('id', model=AppletModel, level=AccessType.WRITE)
.errorResponse('Invalid applet ID.')
.errorResponse('Write access was denied for this applet.', 403)
)
def deactivateApplet(self, folder):
applet = folder
        user = self.getCurrentUser()
applet['meta']['applet']['deleted'] = True
applet = AppletModel().setMetadata(applet, applet.get('meta'), user)
if applet.get('meta', {}).get('applet', {}).get('deleted')==True:
message = 'Successfully deactivated applet {} ({}).'.format(
AppletModel().preferredName(applet),
applet.get('_id')
)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, user)
)
thread.start()
else:
message = 'Could not deactivate applet {} ({}).'.format(
AppletModel().preferredName(applet),
applet.get('_id')
)
Description().errorResponse(message, 403)
return(message)
@access.user(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get an applet by ID.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.param(
'refreshCache',
'Reparse JSON-LD',
required=False,
dataType='boolean'
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def getApplet(self, applet, refreshCache=False):
user = self.getCurrentUser()
if refreshCache:
thread = threading.Thread(
target=jsonld_expander.formatLdObject,
args=(applet, 'applet', user),
kwargs={'refreshCache': refreshCache}
)
thread.start()
return({
"message": "The applet is being refreshed. Please check back "
"in several mintutes to see it."
})
return(
jsonld_expander.formatLdObject(
applet,
'applet',
user,
refreshCache=refreshCache
)
)
@access.user(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get associated groups for a given role and applet ID.')
.modelParam('id', 'ID of the Applet.', model=AppletModel, level=AccessType.READ)
.param(
'role',
'One of ' + str(set(USER_ROLE_KEYS)),
default='user',
required=False,
strip=True
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def getAppletGroups(self, folder, role):
applet = folder
user = self.getCurrentUser()
groups = [
group for group in AppletModel(
).getAppletGroups(applet).get(role) if ObjectId(group) in [
*user.get('groups', []),
*user.get('formerGroups', []),
*[invite['groupId'] for invite in [
*user.get('groupInvites', []),
*user.get('declinedInvites', [])
]]
]
]
return(
groups
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Get roles for an applet by ID.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.WRITE,
description='ID of the Applet.'
)
.errorResponse('Invalid applet ID.')
.errorResponse('Write access was denied for this applet.', 403)
.notes('Only users with write access can see roles.')
)
def getAppletRoles(self, folder):
applet = folder
        user = self.getCurrentUser()
return(AppletModel().getFullRolesList(applet))
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Invite a user to a role in an applet.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.param(
'role',
'Role to invite this user to. One of ' + str(set(USER_ROLE_KEYS)),
default='user',
required=False,
strip=True
)
.param(
'idCode',
'ID code for data reporting. One will be generated if none is '
'provided.',
required=False,
strip=True
)
.jsonParam(
'profile',
            'Optional, coordinator-defined user profile information, e.g., '
'`displayName`, `email`',
required=False,
paramType='form'
)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the folder or its new parent object.', 403)
)
def invite(self, applet, role="user", idCode=None, profile=None):
from girderformindlogger.models.invitation import Invitation
from girderformindlogger.models.profile import Profile
user = self.getCurrentUser()
try:
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
invitation = Invitation().createInvitation(
applet=applet,
coordinator=user,
role=role,
profile=profile,
idCode=idCode
)
return(Profile().displayProfileFields(invitation, user))
        except Exception:
            import traceback
            traceback.print_exc()
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Deprecated. Do not use')
.modelParam('id', model=AppletModel, level=AccessType.READ)
.param(
'activity',
            'Deprecated. Do not use.',
required=False
)
.jsonParam(
'schedule',
'Deprecated. Do not use.',
paramType='form',
required=False
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
.deprecated()
)
def setConstraints(self, folder, activity, schedule, **kwargs):
thisUser = self.getCurrentUser()
applet = jsonld_expander.formatLdObject(
_setConstraints(folder, activity, schedule, thisUser),
'applet',
thisUser,
refreshCache=True
)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, thisUser)
)
thread.start()
return(applet)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Set or update schedule information for an applet.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.jsonParam(
'schedule',
'A JSON object containing schedule information for an applet',
paramType='form',
required=False
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def setSchedule(self, applet, schedule, **kwargs):
thisUser = self.getCurrentUser()
if not AppletModel().isCoordinator(applet['_id'], thisUser):
raise AccessException(
"Only coordinators and managers can update applet schedules."
)
appletMeta = applet['meta'] if 'meta' in applet else {'applet': {}}
if 'applet' not in appletMeta:
appletMeta['applet'] = {}
appletMeta['applet']['schedule'] = schedule
AppletModel().setMetadata(applet, appletMeta)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, thisUser)
)
thread.start()
return(appletMeta)
def authorizeReviewer(applet, reviewer, user):
thisUser = Applet().getCurrentUser()
user = UserModel().load(
user,
level=AccessType.NONE,
user=thisUser
)
try:
applet = FolderModel().load(
applet,
level=AccessType.READ,
user=thisUser
)
responsesCollection = FolderModel().createFolder(
parent=user,
name='Responses',
parentType='user',
public=False,
creator=thisUser,
reuseExisting=True
)
thisApplet = list(FolderModel().childFolders(
parent=responsesCollection,
parentType='folder',
user=thisUser,
filters={
'meta.applet.@id': str(applet['_id'])
}
))
thisApplet = thisApplet[0] if len(
thisApplet
) else FolderModel().setMetadata(
FolderModel().createFolder(
parent=responsesCollection,
name=FolderModel().preferredName(applet),
parentType='folder',
public=False,
creator=thisUser,
allowRename=True,
reuseExisting=False
),
{
'applet': {
'@id': str(applet['_id'])
}
}
)
accessList = thisApplet['access']
accessList['users'].append({
"id": reviewer,
"level": AccessType.READ
})
thisApplet = FolderModel().setAccessList(
thisApplet,
accessList,
save=True,
recurse=True,
user=thisUser
)
except:
thisApplet = None
return(thisApplet)
def authorizeReviewers(assignment):
assignment = assignment.get('meta', assignment)
thisUser = Applet().getCurrentUser()
allUsers = []
reviewAll = []
members = assignment.get('members', [])
applet = assignment.get('applet').get('@id')
for member in [member for member in members if 'roles' in member]:
try:
if member['roles']['user']:
allUsers.append(getCanonicalUser(member.get("@id")))
except:
pass
if 'reviewer' in member['roles']:
if "ALL" in member['roles']['reviewer']:
reviewAll.append(getCanonicalUser(member.get("@id")))
for user in [
user for user in member['roles'][
'reviewer'
] if user not in SPECIAL_SUBJECTS
]:
authorizeReviewer(
assignment.get('applet').get('@id'),
getCanonicalUser(member.get('@id')),
getCanonicalUser(user)
)
for reviewer in reviewAll:
[authorizeReviewer(
assignment.get('applet').get('@id'),
reviewer,
user
) for user in allUsers]
return(None)
def _invite(applet, user, role, rsvp, subject):
"""
Helper function to invite a user to an applet.
:param applet: Applet to invite user to
:type applet: AppletModel
:param user: ID (canonical or applet-specific) or email address of user to
invite
:type user: string
:param role: Role to invite user to
:type role: string
:param rsvp: Require user acceptance?
:type rsvp: boolean
:param subject: Subject about 'user' role can inform or about which
'reviewer' role can review
:type subject: string or literal
:returns: New assignment (dictionary)
"""
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
thisUser = Applet().getCurrentUser()
user = user if user else str(thisUser['_id'])
if bool(rsvp):
groupName = {
'title': '{} {}s'.format(
str(applet.get('_id')),
role
)
}
groupName['lower'] = groupName.get('title', '').lower()
group = GroupModel().findOne(query={'lowerName': groupName['lower']})
        if group is None:
group = GroupModel().createGroup(
name=groupName['title'],
creator=thisUser,
public=bool(role in ['manager', 'reviewer'])
)
try:
assignments = CollectionModel().createCollection(
name="Assignments",
public=True,
reuseExisting=True
)
assignmentType = 'collection'
except AccessException:
assignments, assignmentType = selfAssignment()
appletAssignment = list(FolderModel().childFolders(
parent=assignments,
parentType=assignmentType,
user=thisUser,
filters={
'meta.applet.@id': str(applet['_id']) if '_id' in applet else None
}
))
appletAssignment = appletAssignment[0] if len(
appletAssignment
) else FolderModel().setMetadata(
FolderModel().createFolder(
parent=assignments,
name=FolderModel().preferredName(applet),
parentType=assignmentType,
public=False,
creator=thisUser,
allowRename=True,
reuseExisting=False
),
{
'applet': {
'@id': str(applet['_id']) if '_id' in applet else None
}
}
)
meta = appletAssignment.get('meta', {})
members = meta.get('members', []) if meta.get(
'members'
) is not None else []
cUser = getUserCipher(appletAssignment, user)
subject = subject.upper() if subject is not None and subject.upper(
) in SPECIAL_SUBJECTS else getUserCipher(
appletAssignment,
str(thisUser['_id']) if subject is None else subject
)
thisAppletAssignment = {
'@id': str(cUser),
'roles': {
role: True if role not in [
'reviewer',
'user'
] else [
subject
]
}
}
for i, u in enumerate(members):
if '@id' in u and u["@id"]==str(cUser):
thisAppletAssignment = members.pop(i)
if 'roles' not in thisAppletAssignment:
thisAppletAssignment['roles'] = {}
thisAppletAssignment['roles'][
role
] = True if role not in [
'reviewer',
'user'
] else [
subject
] if (
subject in SPECIAL_SUBJECTS
) or (
'reviewer' not in thisAppletAssignment[
'roles'
]
) else list(set(
thisAppletAssignment['roles']['reviewer'] + [subject]
).difference(set(
SPECIAL_SUBJECTS
))) if "ALL" not in thisAppletAssignment['roles'][
'reviewer'
] else ["ALL"]
members.append(thisAppletAssignment)
meta['members'] = members
appletAssignment = FolderModel().setMetadata(appletAssignment, meta)
authorizeReviewers(appletAssignment)
return(appletAssignment)
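# Illustrative call sketch (not part of the original module): it shows how _invite is
# intended to be used. The email address and role value are made-up examples and
# `appletDoc` is assumed to be an already-loaded applet folder document.
#
#   assignment = _invite(appletDoc, user="coordinator@example.com", role="user",
#                        rsvp=True, subject=None)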
def selfAssignment():
thisUser = Applet().getCurrentUser()
assignmentsFolder = FolderModel().createFolder(
parent=thisUser,
parentType='user',
name='Assignments',
creator=thisUser,
public=False,
reuseExisting=True
)
return((
assignmentsFolder,
'folder'
))
def _setConstraints(applet, activity, schedule, user, refreshCache=False):
"""
Helper function for method recursion.
:param applet: applet Object
:type applet: dict
:param activity: Activity ID
:type activity: str, list, or None
:param schedule: schedule data
:type schedule: dict, list, or None
:param user: user making the call
:type user: dict
:returns: updated applet Object
"""
if activity is None:
if schedule is not None:
appletMeta = applet.get('meta', {})
appletMeta['applet']['schedule'] = schedule
applet = AppletModel().setMetadata(applet, appletMeta)
return(applet)
if isinstance(activity, str) and activity.startswith('['):
try:
activity = [
activity_.replace(
"'",
""
).replace(
'"',
''
).strip() for activity_ in activity[1:-1].split(',')
]
except (TypeError, AttributeError) as e:
print(e)
if isinstance(activity, list):
for activity_ in activity:
applet = _setConstraints(
applet,
activity_,
schedule,
user
)
return(applet)
try:
activityLoaded = ActivityModel().getFromUrl(
activity,
'activity',
            user,
refreshCache
)[0]
except:
activityLoaded = ActivityModel().load(
activity,
AccessType.WRITE,
user
)
try:
activityMeta = activityLoaded['meta'].get('activity')
except AttributeError:
raise ValidationException(
'Invalid activity.',
'activity'
)
activityKey = activityMeta.get(
'url',
activityMeta.get(
'@id',
activityLoaded.get(
'_id'
)
)
)
if activityKey is None:
raise ValidationException(
'Invalid activity.',
'activity'
)
else:
activityKey = jsonld_expander.reprolibPrefix(activityKey)
protocolExpanded = jsonld_expander.formatLdObject(
applet,
'applet',
user
).get('applet', {})
protocolOrder = protocolExpanded.get('ui', {}).get('order', [])
framedActivityKeys = [
protocolOrder[i] for i, v in enumerate(
protocolExpanded.get(
"reprolib:terms/order"
)[0].get(
"@list"
)
) if jsonld_expander.reprolibPrefix(v.get("@id"))==activityKey
]
if schedule is not None:
appletMeta = applet.get('meta', {})
scheduleInApplet = appletMeta.get('applet', {}).get('schedule', {})
for k in framedActivityKeys:
scheduleInApplet[k] = schedule
appletMeta['applet']['schedule'] = scheduleInApplet
applet = AppletModel().setMetadata(applet, appletMeta)
return(applet)
|
the-stack_0_5748 | from transformers import AutoTokenizer, AutoModelWithLMHead
import numpy as np
from pathlib import Path
import json
import joblib
def model_fn(model_dir):
tokenizer = AutoTokenizer.from_pretrained("distilgpt2", cache_dir=model_dir)
model = AutoModelWithLMHead.from_pretrained("distilgpt2", cache_dir=model_dir)
model_assets = {
"tokenizer": tokenizer,
"model": model
}
return model_assets
def input_fn(request_body_str, request_content_type):
assert (
request_content_type == "application/json"
), "content_type must be 'application/json'"
request_body = json.loads(request_body_str)
return request_body
def get_parameter(request_body, parameter_name, default):
parameter = default
if 'parameters' in request_body:
if parameter_name in request_body['parameters']:
parameter = request_body['parameters'][parameter_name]
return parameter
def predict_fn(request_body, model_assets):
input_text = request_body["text"]
tokenizer = model_assets['tokenizer']
model = model_assets['model']
input_ids = tokenizer.encode(input_text, return_tensors='pt')
sample_output = model.generate(
input_ids,
do_sample=True,
min_length=get_parameter(request_body, 'min_length', 25),
max_length=get_parameter(request_body, 'max_length', 100),
top_k=0,
temperature=get_parameter(request_body, 'temperature', 100)
)
output_text = tokenizer.decode(sample_output[0], skip_special_tokens=True)
return {"text": output_text}
def output_fn(prediction, response_content_type):
assert (
response_content_type == "application/json"
), "accept must be 'application/json'"
response_body_str = json.dumps(prediction)
return response_body_str
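# Illustrative local test sketch (an assumption for demonstration, not part of the
# SageMaker contract): it shows how the four handler hooks chain together. The model
# directory, prompt text and parameter values below are made up.
#
#   assets = model_fn("/tmp/model")
#   body = json.dumps({"text": "Once upon a time", "parameters": {"max_length": 50}})
#   request = input_fn(body, "application/json")
#   prediction = predict_fn(request, assets)
#   print(output_fn(prediction, "application/json"))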
|
the-stack_0_5751 | #!/usr/bin/env python
"""
/proc/thedir
"""
from slashproc_parser.basic_parser import BasicSPParser
class TheParser(BasicSPParser):
THEDIR = "/proc/thedir"
def __init__(self):
super(TheParser, self).__init__(self)
@staticmethod
def get_groups():
"""
        REMOVE THIS DOCSTRING AND CREATE ONE APPROPRIATE TO THE PARSER
Ensure first group is the parser name and its parent is ['root']
Ensure group labels are unique
if there are multiple then subscript with number etc...
Ensure each group has a parent, and parents is a list
:rtype: dict
"""
groups = {
'theparser': {'label': "Formatted Long Parser Label",
'desc': "Description of the parser",
'parents': ['root']},
'group1': {'label': 'The first group',
'parents': ['theparser']},
'group2': {'label': 'The second group',
'parents': ['group1'],
'desc': "Desc recommended but not necessary"}
}
return groups
@staticmethod
def get_vars():
"""
        REMOVE THIS DOCSTRING AND CREATE ONE APPROPRIATE TO THE PARSER
Ensure var labels are all lower case, contain underscores (not dash)
and the following chars are not permitted "()[]/\ "
Ensure every var has a unit where appropriate
:rtype: dict
"""
vars = {
'var1': {'label': 'The first Variable'},
'var2': {'label': 'The Second Variable',
'unit': 'kB',
'desc': 'Description recommended but not necessary'}
}
return vars
@staticmethod
def get_data():
"""
        REMOVE THIS DOCSTRING AND CREATE ONE APPROPRIATE TO THE PARSER
Ensure first group is the parser name
Ensure return adheres to the groups structure
Ensure all groups are present in the groups dict
Ensure all vars adhere to the var format
Ensure all vars are present in the vars dict
Ensure every value is a string
:rtype: dict
"""
data = {'theparser': {
'group1': {
'group2': {'var1': 'val1',
'var2': 'val2'},
}
}
}
return data
if __name__ == "__main__":
c = TheParser()
c.test_parse()
|
the-stack_0_5753 | # -*- coding: utf-8 -*
#!/usr/bin/python
from dealctrl import *
class deal_7_com(dealctrl):
def __init__(self,con):
dealctrl.__init__(self,con)
def run(self):
userid=int(self.recvdic['userid'])
aid=int(self.recvdic['aid'])
content=self.recvdic['content']
sql=("INSERT INTO `activity_comment` (`aid`,`userid`,`content`,`commenttime`) VALUES (%d,%d,'%s',%d)" % (aid,userid,content,self.now))
self.log.write("sql: %s\n" % sql)
self.db.execute(sql)
cid=self.db.insert_id()
senddic={
'type':'7_com_r',
'reply':1,
'cid':cid
}
self.sendmessage(senddic)
return 1
|
the-stack_0_5754 | from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import scipy
import numbers
import random
from matplotlib import colors
import matplotlib.patches as mpatches
from statsmodels.nonparametric.kde import KDEUnivariate
from PIL import ImageFilter
from kornia import augmentation as augs
from kornia import filters, color
def adjust_learning_rate(epoch, opt, optimizer):
"""Sets the learning rate to the initial LR decayed by 0.2 every steep step"""
steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))
if steps > 0:
new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
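# Illustrative usage sketch (not part of the original file); the tensor shapes are
# assumptions chosen only to demonstrate the accuracy() API:
#
#   logits = torch.randn(8, 10)             # batch of 8 samples, 10 classes
#   labels = torch.randint(0, 10, (8,))     # ground-truth class indices
#   top1, top5 = accuracy(logits, labels, topk=(1, 5))
#   print(top1.item(), top5.item())         # percentages in [0, 100]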
def convert_to_np(tensor):
# convert pytorch tensors to numpy arrays
if not isinstance(tensor, np.ndarray):
tensor = tensor.cpu().numpy()
return tensor
def labels_to_dfc(tensor, no_savanna):
"""
INPUT:
Classes encoded in the training scheme (0-9 if savanna is a valid label
or 0-8 if not). Invalid labels are marked by 255 and will not be changed.
OUTPUT:
Classes encoded in the DFC2020 scheme (1-10, and 255 for invalid).
"""
# transform to numpy array
tensor = convert_to_np(tensor)
# copy the original input
out = np.copy(tensor)
# shift labels if there is no savanna class
if no_savanna:
for i in range(2, 9):
out[tensor == i] = i + 1
else:
pass
# transform from zero-based labels to 1-10
out[tensor != 255] += 1
# make sure the mask is intact and return transformed labels
assert np.all((tensor == 255) == (out == 255))
return out
def display_input_batch(tensor, display_indices=0, brightness_factor=3):
# extract display channels
tensor = tensor[:, display_indices, :, :]
# restore NCHW tensor shape if single channel image
if len(tensor.shape) == 3:
tensor = tensor.unsqueeze(1)
# scale image
tensor = torch.clamp((tensor * brightness_factor), 0, 1)
return tensor
def display_label_batch(tensor, no_savanna=False):
# get predictions if input is one-hot encoded
if len(tensor.shape) == 4:
tensor = tensor.max(1)[1]
# convert train labels to DFC2020 class scheme
tensor = labels_to_dfc(tensor, no_savanna)
# colorize labels
cmap = mycmap()
imgs = []
for s in range(tensor.shape[0]):
im = (tensor[s, :, :] - 1) / 10
im = cmap(im)[:, :, 0:3]
im = np.rollaxis(im, 2, 0)
imgs.append(im)
tensor = np.array(imgs)
return tensor
def classnames():
return ["Forest", "Shrubland", "Savanna", "Grassland", "Wetlands",
"Croplands", "Urban/Built-up", "Snow/Ice", "Barren", "Water"]
def mycmap():
cmap = colors.ListedColormap(['#009900',
'#c6b044',
'#fbff13',
'#b6ff05',
'#27ff87',
'#c24f44',
'#a5a5a5',
'#69fff8',
'#f9ffa4',
'#1c0dff',
'#ffffff'])
return cmap
def mypatches():
patches = []
for counter, name in enumerate(classnames()):
patches.append(mpatches.Patch(color=mycmap().colors[counter],
label=name))
return patches
## tensor operation
def _is_tensor_video_clip(clip):
if not torch.is_tensor(clip):
raise TypeError("clip should be Tesnor. Got %s" % type(clip))
if not clip.ndimension() == 4:
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
return True
def crop(clip, i, j, h, w):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
"""
assert len(clip.size()) == 4, "clip should be a 4D tensor"
return clip[..., i:i + h, j:j + w]
def center_crop(clip, crop_size):
assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
h, w = clip.size(-2), clip.size(-1)
th, tw = crop_size, crop_size
assert h >= th and w >= tw, "height and width must be no smaller than crop_size"
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return crop(clip, i, j, th, tw)
class CenterCropVideo(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, size, size)
"""
return center_crop(clip, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
def ztz(x, y):
"""
Compute the inner product between datapoints from corresponding patches of data
    organized in batches. Since x and y contain data in the range [-1, 1],
    the result is normalized to the range [0, 1] using max_norm.
Input:x - float, array of [batch_size, patch_size, patch_size, num_channels],
Batch of patches from data domain x.
y - float, array of [batch_size, patch_size, patch_size, num_channels],
Batch of patches from data domain y.
Output:
ztz - float, array of [batch_size, patch_size^2, patch_size^2], Inner product
"""
max_norm = x.shape[-1]
flat_shape = [x.shape[0], x.shape[1] ** 2, -1]
x = torch.reshape(x, flat_shape)
y = torch.reshape(y, flat_shape)
#ztz = (tf.keras.backend.batch_dot(y, x, -1) + max_norm) / (2 * max_norm) ??
ztz = (torch.bmm(x, y.permute(0, 2, 1)) + max_norm)/ (2 * max_norm)
return ztz
def affinity(x):
"""
Compute the affinity matrices of the patches of contained in a batch.
It first computes the distances between the datapoints within a patch.
Then it finds the suitable kernel width for each patch.
Finally, applies the RBF.
Input:
x - float, array of [batch_size, patch_size, patch_size, num_channels],
Batch of patches from data domain x.
Output:
A - float, array of [batch_size, patch_size^2, patch_size^2], Affinity matrix
"""
_, h, w, c = x.shape
x_1 = torch.unsqueeze(torch.reshape(x, [-1, h * w, c]), 2)
x_2 = torch.unsqueeze(torch.reshape(x, [-1, h * w, c]), 1)
A = torch.norm(x_1 - x_2, dim=-1)
krnl_width, _ = torch.topk(A, k=A.shape[-1])
krnl_width = torch.mean(krnl_width[:, :, (h * w) // 4], 1)
krnl_width = torch.reshape(krnl_width, (-1, 1, 1))
krnl_width = torch.where(torch.eq(krnl_width, torch.zeros_like(krnl_width)), torch.ones_like(krnl_width), krnl_width)
A = torch.exp(-(torch.div(A, krnl_width) ** 2))
return A
def Degree_matrix(x, y):
"""
Compute the degree matrix starting from corresponding patches of data organized
in batches. It first computes the affinity matrices of the two batches and then
it computes the norm of the difference between the rows of Ax and the rows of Ay.
Then it is normalized.
Input:
x - float, array of [batch_size, patch_size, patch_size, num_channels_x],
Batch of patches from data domain x.
y - float, array of [batch_size, patch_size, patch_size, num_channels_y],
Batch of patches from data domain y.
Output:
D - float, array of [batch_size, patch_size^2, patch_size^2], Degree matrix
"""
ax = affinity(x)
ay = affinity(y)
D = torch.norm(torch.unsqueeze(ax, 1) - torch.unsqueeze(ay, 2), 2, -1)
D = (D - torch.min(D)) / (torch.max(D) - torch.min(D))
return D
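# Illustrative shape check (not part of the original file); the random patch batches
# are assumptions used only to demonstrate the expected tensor shapes:
#
#   x = torch.rand(4, 5, 5, 3)    # [batch, patch_h, patch_w, channels_x]
#   y = torch.rand(4, 5, 5, 8)    # [batch, patch_h, patch_w, channels_y]
#   D = Degree_matrix(x, y)       # -> [4, 25, 25], values scaled to [0, 1]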
#CVA
def cva(X, Y):
diff = X - Y
diff_s = (diff**2).sum(axis=-1)
return torch.sqrt(diff_s)
def SFA(X, Y):
'''
see http://sigma.whu.edu.cn/data/res/files/SFACode.zip
'''
norm_flag = True
m, n = np.shape(X)
meanX = np.mean(X, axis=0)
meanY = np.mean(Y, axis=0)
stdX = np.std(X, axis=0)
stdY = np.std(Y, axis=0)
Xc = (X - meanX) / stdX
Yc = (Y - meanY) / stdY
Xc = Xc.T
Yc = Yc.T
A = np.matmul((Xc-Yc), (Xc-Yc).T)/m
    B = (np.matmul(Xc, Xc.T) + np.matmul(Yc, Yc.T)) / 2 / m
D, V = scipy.linalg.eig(A, B) # V is column wise
D = D.real
#idx = D.argsort()
#D = D[idx]
if norm_flag is True:
aux1 = np.matmul(np.matmul(V.T, B), V)
aux2 = 1/np.sqrt(np.diag(aux1))
V = V * aux2
#V = V[:,0:3]
X_trans = np.matmul(V.T, Xc).T
Y_trans = np.matmul(V.T, Yc).T
return X_trans, Y_trans
# split whole image to patches
def patchize(img: torch.Tensor, patch_size, unfold_stride) -> torch.Tensor:
"""
img.shape
B : batch size
C : channels of image (same to patches.shape[1])
iH : height of image
iW : width of image
pH : height of patch
pW : width of patch
V : values in a patch (pH * pW * C)
"""
B, C, iH, iW = img.shape
pH = patch_size
pW = patch_size
unfold = nn.Unfold(kernel_size=(pH, pW), stride=unfold_stride)
patches = unfold(img) # (B, V, P)
patches = patches.permute(0, 2, 1).contiguous() # (B, P, V)
patches = patches.view(-1, C, pH, pW) # (P, C, pH, pW)
return patches
#thresholding methods
def kde_statsmodels_u(x, x_grid, bandwidth, **kwargs):
kde = KDEUnivariate(x)
kde.fit(bw=bandwidth, **kwargs)
return kde.evaluate(x_grid)
#Rosin
def rosin(heatmap):
heatmap_list = heatmap.flatten().tolist()
f_heatmap = np.array(heatmap_list)
new_data = f_heatmap - np.min(f_heatmap)
print(np.min(new_data))
# declare kernel estimation parameters
bandwidth = 0.06
# estimate kernel
x_grid = np.linspace(0, np.max(new_data), 90) # x-coordinates for data points in the kernel
kernel = kde_statsmodels_u(new_data, x_grid, bandwidth) # get kernel
# get the index of the kernal peak
maxIndex = np.argmax(kernel)
# Assign percent below the max kernel value for the 'zero' peak i.e. a value of 2 = 2% the maximum value
maxPercent = 1
# assign x and y coords for peak-to-base line
x1 = x_grid[maxIndex]
y1 = kernel[maxIndex]
# find all local minima in the kernel
local_mins = np.where(np.r_[True, kernel[1:] < kernel[:-1]] & np.r_[kernel[:-1] < kernel[1:], True])
local_mins = local_mins[0] # un 'tuple' local mins
# filter for points below a certain kernel max
local_mins = local_mins[(np.where(kernel[local_mins] < (y1 / (100 / maxPercent))))]
# get local minima beyond the peak
local_mins = local_mins[(np.where(local_mins > maxIndex))] # get local minima that meet percent max threshold
x2_index = local_mins[0] # find minumum beyond peak of kernel
x2 = x_grid[x2_index] # index to local min beyond kernel peak
y2 = kernel[x2_index]
# calculate line slope and get perpendicular line
slope = (y2 - y1) / (x2 - x1)
# find y_intercept for line
y_int = y1 - (slope * x1)
slopeTan = -1 / slope # perpendicular line slope
# allocate lists for x-y coordinates and distance values
dist = list()
# save x-y coords of intersect points
yii = list()
xii = list()
# iterate and generate perpendicular lines
for i in range(maxIndex + 1, x2_index):
# find intersection point between lines
# determine equation of the perpendicular line based on current bin coordinate
xt1 = x_grid[i]
yt1 = kernel[i]
y_int_tan = yt1 - (slopeTan * xt1)
# calculate intersection point between lines
b1 = y_int
b2 = y_int_tan
m1 = slope
m2 = slopeTan
# y = mx + b
# Set both lines equal to find the intersection point in the x direction, y1=y2, x1=x2
# y1 = m1 * x + b1, y2 = m2 * x + b2
# if y1 == y2...
# m1 * x + b1 = m2 * x + b2
# m1 * x - m2 * x = b2 - b1
# x * (m1 - m2) = b2 - b1
# x = (b2 - b1) / (m1 - m2)
xi = (b2 - b1) / (m1 - m2)
# Now solve for y -- use either line, because they are equal here
# y = mx + b
yi = m1 * xi + b1
# assert that the new line generated is equal or very close to the correct perpendicular value of the max deviation line
assert ((m2 - m2 * .01) < ((yi - y_int_tan) / (xi - 0)) < (
m2 + m2 * .01)) # an error will throw if this statement is false
# save x-y coordinates of the point
yii.append(yi)
xii.append(xi)
# get euclidean distance between kernel coordinate and intersect point
euc = np.sqrt((xi - xt1) ** 2 + (yi - yt1) ** 2)
# store the euclidean distance
dist.append(euc)
# get kernel point with the maximum distance from the Rosin line
    # remember, we started at maxIndex+1, so the index of the optimalPoint in the kernel array will be maxIndex+1
# + the index in the 'dist' array
optimalPoint = np.argmax(dist) + maxIndex + 1
# plot the optimal point over the kernel with Rosin line we plotted before
threshold = x_grid[optimalPoint]
    final_threshold = threshold + np.min(f_heatmap)
    #return heatmap < final_threshold
    return final_threshold
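# Illustrative usage sketch (not part of the original file); the synthetic heatmap is
# an assumption used only to demonstrate the Rosin thresholding call:
#
#   heatmap = np.abs(np.random.randn(64, 64))
#   t = rosin(heatmap)            # unimodal (Rosin) threshold on the flattened values
#   change_mask = heatmap > t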
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
def default(val, def_val):
return def_val if val is None else val
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# default SimCLR augmentation
image_size = 256
DEFAULT_AUG = nn.Sequential(
RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
augs.RandomGrayscale(p=0.2),
augs.RandomHorizontalFlip(),
RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
augs.RandomResizedCrop((image_size, image_size)))
#color.Normalize(mean=torch.tensor([0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225])))
if __name__ == '__main__':
meter = AverageMeter()
|
the-stack_0_5762 | """
Author: Andreas Rössler
"""
import os
import argparse
import torch
import pretrainedmodels
import torch.nn as nn
import torch.nn.functional as F
from xception import xception
import math
import torchvision
def return_pytorch04_xception(init_checkpoint=None):
    # Raises warning "src not broadcastable to dst" but that's fine
model = xception(pretrained=False)
if init_checkpoint is not None:
# Load model in torch 0.4+
model.fc = model.last_linear
del model.last_linear
state_dict = torch.load(
init_checkpoint)
for name, weights in state_dict.items():
if 'pointwise' in name:
state_dict[name] = weights.unsqueeze(-1).unsqueeze(-1)
model.load_state_dict(state_dict)
model.last_linear = model.fc
del model.fc
return model
class TransferModel(nn.Module):
"""
Simple transfer learning model that takes an imagenet pretrained model with
a fc layer as base model and retrains a new fc layer for num_out_classes
"""
def __init__(self, modelchoice, num_out_classes=2, dropout=0.0, init_checkpoint=None):
super(TransferModel, self).__init__()
self.modelchoice = modelchoice
if modelchoice == 'xception':
self.model = return_pytorch04_xception(init_checkpoint)
# Replace fc
num_ftrs = self.model.last_linear.in_features
if not dropout:
self.model.last_linear = nn.Linear(num_ftrs, num_out_classes)
else:
print('Using dropout', dropout)
self.model.last_linear = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(num_ftrs, num_out_classes)
)
elif modelchoice == 'resnet50' or modelchoice == 'resnet18':
if modelchoice == 'resnet50':
self.model = torchvision.models.resnet50(pretrained=True)
if modelchoice == 'resnet18':
self.model = torchvision.models.resnet18(pretrained=True)
# Replace fc
num_ftrs = self.model.fc.in_features
if not dropout:
self.model.fc = nn.Linear(num_ftrs, num_out_classes)
else:
self.model.fc = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(num_ftrs, num_out_classes)
)
else:
raise Exception('Choose valid model, e.g. resnet50')
def set_trainable_up_to(self, boolean, layername="Conv2d_4a_3x3"):
"""
Freezes all layers below a specific layer and sets the following layers
to true if boolean else only the fully connected final layer
:param boolean:
:param layername: depends on network, for inception e.g. Conv2d_4a_3x3
:return:
"""
# Stage-1: freeze all the layers
if layername is None:
for i, param in self.model.named_parameters():
param.requires_grad = True
return
else:
for i, param in self.model.named_parameters():
param.requires_grad = False
if boolean:
# Make all layers following the layername layer trainable
ct = []
found = False
for name, child in self.model.named_children():
if layername in ct:
found = True
for params in child.parameters():
params.requires_grad = True
ct.append(name)
if not found:
                raise Exception('Layer {} not found, cannot finetune!'.format(
                    layername))
else:
if self.modelchoice == 'xception':
# Make fc trainable
for param in self.model.last_linear.parameters():
param.requires_grad = True
else:
# Make fc trainable
for param in self.model.fc.parameters():
param.requires_grad = True
def forward(self, x):
x = self.model(x)
return x
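# Illustrative usage sketch (not part of the original file); the two-class real/fake
# setting and the dropout value are assumptions chosen for demonstration:
#
#   net = TransferModel('resnet18', num_out_classes=2, dropout=0.5)
#   net.set_trainable_up_to(False)           # retrain only the final fc layer
#   out = net(torch.randn(1, 3, 224, 224))   # -> logits of shape [1, 2]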
def model_selection(modelname, num_out_classes,
dropout=None, init_checkpoint=None):
"""
:param modelname:
:return: model, image size, pretraining<yes/no>, input_list
"""
if modelname == 'xception':
return TransferModel(modelchoice='xception',
num_out_classes=num_out_classes, init_checkpoint=init_checkpoint), 299, \
True, ['image'], None
elif modelname == 'resnet18':
return TransferModel(modelchoice='resnet18', dropout=dropout,
num_out_classes=num_out_classes), \
224, True, ['image'], None
else:
raise NotImplementedError(modelname) |
the-stack_0_5764 | from math import ceil
# Input
N, A, B = map(int, input().split())
h = [int(input()) for _ in range(N)]
# Find the answer by bisection (binary search on the answer)
def bis(p, ok, ng):
mid = (ok + ng) // 2
return (
ok if abs(ok - ng) == 1 else
bis(p, mid, ng) if p(mid) else
bis(p, ok, mid)
)
ans = bis(
lambda k: sum(max(0, ceil((x - k * B) / (A - B))) for x in h) <= k,
10**10,
0
)
# Output
print(ans)
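# Illustrative note (an addition for clarity): bis() assumes the predicate p is
# monotone, true at `ok` and false at `ng`, and converges to the extreme value on
# the `ok` side. For example, the smallest k with k*k >= 50:
#
#   bis(lambda k: k * k >= 50, 10**9, 0)   # -> 8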
|
the-stack_0_5767 | import requests as req
import json
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.conf.urls import url
from django.conf import settings
from django.core.mail import send_mail
client_id = settings.FENIX_CLIENT_ID
clientSecret = settings.FENIX_CLIENT_SECRET
redirect_uri = settings.URL_HOST + settings.FENIX_REDIRECT_URL_PATH
# Note, make sure that you exported the URL_HOST variable, otherwise localhost will be the default
print("*SETUP* redirect_url:" + str(redirect_uri)) # debug
fenixLoginpage = settings.FENIX_LOGIN
fenixacesstokenpage = settings.FENIX_URL_TOKEN
RequestPage = fenixLoginpage % (client_id, redirect_uri)
def login_fenix_oauth(request):
    from helios_auth.views import after # if django is configured synchronously, the import must happen inside the function because the apps are not loaded yet at module import time
from helios_auth import url_names
    code = request.GET.get('code') # authorization code used to obtain the access token
payload = {'client_id': client_id, 'client_secret': clientSecret, 'redirect_uri' : redirect_uri, 'code' : code, 'grant_type': 'authorization_code'}
response = req.post(fenixacesstokenpage, params = payload)
if(response.status_code == 200):
r_token = response.json()
params = {'access_token': r_token['access_token']}
#print("login_fenix_0auth() - OUATH PARAMS",params) # debug
request.session['access_token_fenix'] =r_token['access_token'] # save token
request.session['auth_system_name']='fenix'
return HttpResponseRedirect(reverse(url_names.AUTH_AFTER))
else:
print("login_fenix_0auth() - OAUTH FAILED")
def get_auth_url(request, redirect_url = None):
# the app redirects the user to the FENIX login page
return RequestPage
def get_user_info_after_auth(request):
token = request.session['access_token_fenix'] # token saved in the current session
params = {'access_token': token}
resp = req.get("https://fenix.tecnico.ulisboa.pt/api/fenix/v1/person", params = params)
#print("\n\n", "get_user_info_after_auth() - FENIX RESPONSE", resp.json()["username"])
r_info = resp.json() # user data from Fenix
del request.session['access_token_fenix']
obj = {'type': 'fenix', 'user_id' : json.dumps(r_info["username"]),'name':r_info["name"],'info':{'email': r_info["email"]}, 'token': None}
return obj
def send_message(user_id, name, user_info, subject, body):
    # send email to the user; user_id is used as the recipient email address.
send_mail(subject, body, settings.SERVER_EMAIL, ["%s <%s>" % (name, user_id)], fail_silently=False)
#
# Election Creation
#
def can_create_election(user_id, user_info):
return True
FENIX_LOGIN = 'auth@fenix@login'
#^ matches the start of the string. this urlpattern must be include at urls.py
urlpatterns = [
url(r'^fenix/login', login_fenix_oauth, name=FENIX_LOGIN),
]
|
the-stack_0_5769 | # coding: utf-8
"""
Translator Knowledge Beacon Aggregator API
This is the Translator Knowledge Beacon Aggregator web service application programming interface (API) that provides integrated access to a pool of knowledge sources publishing concepts and relations through the Translator Knowledge Beacon API. This API is similar to that of the latter mentioned API with the addition of some extra informative endpoints plus session identifier and beacon indices. These latter identifiers are locally assigned numeric indices provided to track the use of specific registered beacons within the aggregator API itself. # noqa: E501
OpenAPI spec version: 1.1.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ClientStatementsQueryBeaconStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'beacon': 'int',
'count': 'int',
'status': 'int'
}
attribute_map = {
'beacon': 'beacon',
'count': 'count',
'status': 'status'
}
def __init__(self, beacon=None, count=None, status=None): # noqa: E501
"""ClientStatementsQueryBeaconStatus - a model defined in OpenAPI""" # noqa: E501
self._beacon = None
self._count = None
self._status = None
self.discriminator = None
if beacon is not None:
self.beacon = beacon
if count is not None:
self.count = count
if status is not None:
self.status = status
@property
def beacon(self):
"""Gets the beacon of this ClientStatementsQueryBeaconStatus. # noqa: E501
Index number of beacon providing these statements # noqa: E501
:return: The beacon of this ClientStatementsQueryBeaconStatus. # noqa: E501
:rtype: int
"""
return self._beacon
@beacon.setter
def beacon(self, beacon):
"""Sets the beacon of this ClientStatementsQueryBeaconStatus.
Index number of beacon providing these statements # noqa: E501
:param beacon: The beacon of this ClientStatementsQueryBeaconStatus. # noqa: E501
:type: int
"""
self._beacon = beacon
@property
def count(self):
"""Gets the count of this ClientStatementsQueryBeaconStatus. # noqa: E501
When a 200 status code is returned, this integer designates the number of statements matched by the query for the given beacon. # noqa: E501
:return: The count of this ClientStatementsQueryBeaconStatus. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ClientStatementsQueryBeaconStatus.
When a 200 status code is returned, this integer designates the number of statements matched by the query for the given beacon. # noqa: E501
:param count: The count of this ClientStatementsQueryBeaconStatus. # noqa: E501
:type: int
"""
self._count = count
@property
def status(self):
"""Gets the status of this ClientStatementsQueryBeaconStatus. # noqa: E501
Http code status of beacon API - 200 means 'data ready', 102 means 'query in progress', other codes (e.g. 500) are server errors. Once a beacon has a '200' success code, then the /statements/data endpoint may be used to retrieve it. # noqa: E501
:return: The status of this ClientStatementsQueryBeaconStatus. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ClientStatementsQueryBeaconStatus.
Http code status of beacon API - 200 means 'data ready', 102 means 'query in progress', other codes (e.g. 500) are server errors. Once a beacon has a '200' success code, then the /statements/data endpoint may be used to retrieve it. # noqa: E501
:param status: The status of this ClientStatementsQueryBeaconStatus. # noqa: E501
:type: int
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClientStatementsQueryBeaconStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_5770 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Sequence, Union
import nnabla as nn
from nnabla_rl.environments.environment_info import EnvironmentInfo
from nnabla_rl.model_trainers.model_trainer import TrainingVariables, rnn_support
from nnabla_rl.model_trainers.q_value.quantile_distribution_function_trainer import (
QuantileDistributionFunctionTrainer, QuantileDistributionFunctionTrainerConfig)
from nnabla_rl.models import QuantileDistributionFunction
from nnabla_rl.utils.misc import create_variables
@dataclass
class QRDQNQTrainerConfig(QuantileDistributionFunctionTrainerConfig):
pass
class QRDQNQTrainer(QuantileDistributionFunctionTrainer):
# type declarations to type check with mypy
# NOTE: declared variables are instance variable and NOT class variable, unless it is marked with ClassVar
# See https://mypy.readthedocs.io/en/stable/class_basics.html for details
_target_function: QuantileDistributionFunction
_prev_target_rnn_states: Dict[str, Dict[str, nn.Variable]]
def __init__(self,
train_functions: Union[QuantileDistributionFunction, Sequence[QuantileDistributionFunction]],
solvers: Dict[str, nn.solver.Solver],
target_function: QuantileDistributionFunction,
env_info: EnvironmentInfo,
config: QRDQNQTrainerConfig = QRDQNQTrainerConfig()):
self._target_function = target_function
self._prev_target_rnn_states = {}
super(QRDQNQTrainer, self).__init__(train_functions, solvers, env_info, config)
def support_rnn(self) -> bool:
return True
def _compute_target(self, training_variables: TrainingVariables, **kwargs) -> nn.Variable:
gamma = training_variables.gamma
reward = training_variables.reward
non_terminal = training_variables.non_terminal
s_next = training_variables.s_next
prev_rnn_states = self._prev_target_rnn_states
train_rnn_states = training_variables.rnn_states
with rnn_support(self._target_function, prev_rnn_states, train_rnn_states, training_variables, self._config):
theta_j = self._target_function.max_q_quantiles(s_next)
Ttheta_j = reward + non_terminal * gamma * theta_j
return Ttheta_j
def _setup_training_variables(self, batch_size: int) -> TrainingVariables:
training_variables = super()._setup_training_variables(batch_size)
rnn_states = {}
if self._target_function.is_recurrent():
shapes = self._target_function.internal_state_shapes()
rnn_state_variables = create_variables(batch_size, shapes)
rnn_states[self._target_function.scope_name] = rnn_state_variables
training_variables.rnn_states.update(rnn_states)
return training_variables
|
the-stack_0_5771 | """ Contains a class for logic of the Subjects.
"""
import os
import logging
import json
import pkg_resources
import mne
import meggie.utilities.filemanager as filemanager
from meggie.mainwindow.dynamic import find_all_datatype_specs
class Subject:
""" The class for holding subject-specific information
and subject-specific data.
Parameters
----------
experiment : meggie.experiment.Experiment
        The experiment to which the subject belongs.
name : str
Name of the subject.
raw_fname : str
Path to the subject data.
uid : str
A unique identifier to differentiate between subjects that have
same name.
ica_applied : bool
Whether ICA has been applied (at least once) to this data.
rereferenced : bool
Whether the data has been rereferenced (at least once).
"""
def __init__(self, experiment, name, raw_fname, uid,
ica_applied=False, rereferenced=False):
self.name = name
self.raw_fname = raw_fname
self.uid = uid
self._raw = None
self.ica_applied = ica_applied
self.rereferenced = rereferenced
self.path = os.path.join(experiment.path,
name)
datatype_specs = find_all_datatype_specs()
for source, package, datatype_spec in datatype_specs.values():
datatype = datatype_spec['id']
dir_ = datatype_spec['dir']
setattr(self, datatype, dict())
setattr(self, datatype + '_directory',
os.path.join(self.path, dir_))
def add(self, dataobject, datatype):
""" Adds a dataobject of type datatype to the subject.
Parameters
----------
dataobject : instance of a datatype
A data object.
datatype : str
Name of the datatype.
"""
container = getattr(self, datatype)
name = dataobject.name
container[name] = dataobject
def remove(self, name, datatype):
""" Removes a dataobject by name from the subject.
Parameters
----------
name : str
Name of the data object.
datatype : str
Name of the datatype.
"""
container = getattr(self, datatype)
dataobject = container.pop(name, None)
try:
dataobject.delete_content()
except Exception as exc:
logging.getLogger('ui_logger').exception('')
raise IOError('Could not delete ' + str(datatype) +
' from folders')
@property
def raw_path(self):
""" Returns the raw path."""
path = os.path.join(self.path,
self.raw_fname)
return path
def get_raw(self, preload=True, verbose='warning'):
""" Gets the raw object for the subject.
Reads from the file system if not in the memory already.
Parameters
----------
preload : bool
Whether to read the data or only the metadata.
verbose : str
Verbose level of read_raw.
Returns
-------
mne.io.Raw
The raw object.
"""
if self._raw is not None:
if preload:
self._raw.load_data()
return self._raw
else:
try:
raw = filemanager.open_raw(self.raw_path, preload=preload,
verbose=verbose)
except OSError:
raise IOError("Could not find the raw file.")
self._raw = raw
return raw
def save(self):
""" Saves the data to the existing path. """
try:
filemanager.save_raw(self._raw, self.raw_path)
except Exception as exc:
raise Exception("Could not save the raw file. Please ensure "
"that the entire experiment folder has "
"write permissions.")
def release_memory(self):
""" Releases data from the memory.
"""
if self._raw is not None:
self._raw = None
@property
def has_eeg(self):
""" Checks if the raw has eeg data present
"""
raw = self.get_raw(preload=False)
channels = mne.pick_types(raw.info, eeg=True, meg=False)
if len(channels) == 0:
return False
return True
@property
def sss_applied(self):
"""Checks if sss applied.
"""
try:
raw = self.get_raw()
for item in raw.info['proc_history']:
if 'maxfilter' in item.get('creator', []):
return True
except Exception as exc:
return False
return False
def ensure_folders(self):
""" When called, checks that the subject folder with all datatype folders
exist and if not, creates them.
"""
paths = []
datatype_specs = find_all_datatype_specs()
for source, package, datatype_spec in datatype_specs.values():
datatype = datatype_spec['id']
path = getattr(self, datatype + '_directory')
paths.append(path)
try:
filemanager.ensure_folders(
[self.path] + paths)
except OSError:
raise OSError("Couldn't create all the necessary folders. "
"Please ensure that the experiment folder "
"has write permissions everywhere.")
|
the-stack_0_5772 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import wsgi as base_wsgi
import routes
import six
import webob
import webob.exc as webexc
import webtest
import neutron
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import exceptions
from neutron import manager
from neutron.plugins.common import constants
from neutron import quota
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import extension_stubs as ext_stubs
import neutron.tests.unit.extensions
from neutron.tests.unit.extensions import extendedattribute as extattr
from neutron.tests.unit import testlib_api
from neutron import wsgi
LOG = logging.getLogger(__name__)
_uuid = test_base._uuid
_get_path = test_base._get_path
extensions_path = ':'.join(neutron.tests.unit.extensions.__path__)
class ExtensionsTestApp(base_wsgi.Router):
def __init__(self, options=None):
options = options or {}
mapper = routes.Mapper()
controller = ext_stubs.StubBaseAppController()
mapper.resource("dummy_resource", "/dummy_resources",
controller=controller)
super(ExtensionsTestApp, self).__init__(mapper)
class FakePluginWithExtension(object):
"""A fake plugin used only for extension testing in this file."""
supported_extension_aliases = ["FOXNSOX"]
def method_to_support_foxnsox_extension(self, context):
self._log("method_to_support_foxnsox_extension", context)
class ExtensionPathTest(base.BaseTestCase):
def setUp(self):
self.base_path = extensions.get_extensions_path()
super(ExtensionPathTest, self).setUp()
def test_get_extensions_path_with_plugins(self):
path = extensions.get_extensions_path(
{constants.CORE: FakePluginWithExtension()})
self.assertEqual(path,
'%s:neutron/tests/unit/extensions' % self.base_path)
def test_get_extensions_path_no_extensions(self):
# Reset to default value, as it's overridden by base class
cfg.CONF.set_override('api_extensions_path', '')
path = extensions.get_extensions_path()
self.assertEqual(path, self.base_path)
def test_get_extensions_path_single_extension(self):
cfg.CONF.set_override('api_extensions_path', 'path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
def test_get_extensions_path_multiple_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path2')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1:path2' % self.base_path)
def test_get_extensions_path_duplicate_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
class PluginInterfaceTest(base.BaseTestCase):
def test_issubclass_hook(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
self.assertTrue(issubclass(A, B))
def test_issubclass_hook_class_without_abstract_methods(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
def f(self):
pass
self.assertFalse(issubclass(A, B))
def test_issubclass_hook_not_all_methods_implemented(self):
class A(object):
def f(self):
pass
class B(extensions.PluginInterface):
@abc.abstractmethod
def f(self):
pass
@abc.abstractmethod
def g(self):
pass
self.assertFalse(issubclass(A, B))
class ResourceExtensionTest(base.BaseTestCase):
class ResourceExtensionController(wsgi.Controller):
def index(self, request):
return "resource index"
def show(self, request, id):
return {'data': {'id': id}}
def notimplemented_function(self, request, id):
return webob.exc.HTTPNotImplemented()
def custom_member_action(self, request, id):
return {'member_action': 'value'}
def custom_collection_action(self, request, **kwargs):
return {'collection': 'value'}
class DummySvcPlugin(wsgi.Controller):
def get_plugin_type(self):
return constants.DUMMY
def index(self, request, **kwargs):
return "resource index"
def custom_member_action(self, request, **kwargs):
return {'member_action': 'value'}
def collection_action(self, request, **kwargs):
return {'collection': 'value'}
def show(self, request, id):
return {'data': {'id': id}}
def test_exceptions_notimplemented(self):
controller = self.ResourceExtensionController()
member = {'notimplemented_function': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
# Ideally we would check for a 501 code here but webtest doesn't take
# anything that is below 200 or above 400 so we can't actually check
# it. It throws webtest.AppError instead.
try:
test_app.get("/tweedles/some_id/notimplemented_function")
# Shouldn't be reached
self.assertTrue(False)
except webtest.AppError as e:
self.assertIn('501', str(e))
def test_resource_can_be_added_as_extension(self):
res_ext = extensions.ResourceExtension(
'tweedles', self.ResourceExtensionController())
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/tweedles")
self.assertEqual(200, index_response.status_int)
self.assertEqual(b"resource index", index_response.body)
show_response = test_app.get("/tweedles/25266")
self.assertEqual({'data': {'id': "25266"}}, show_response.json)
def test_resource_gets_prefix_of_plugin(self):
class DummySvcPlugin(wsgi.Controller):
def index(self, request):
return ""
def get_plugin_type(self):
return constants.DUMMY
res_ext = extensions.ResourceExtension(
'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc")
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tweedles")
self.assertEqual(200, index_response.status_int)
def test_resource_extension_with_custom_member_action(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self):
controller = self.DummySvcPlugin()
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/dummy_svc/tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_plugin_prefix_with_parent_resource(self):
controller = self.DummySvcPlugin()
parent = dict(member_name="tenant",
collection_name="tenants")
member = {'custom_member_action': "GET"}
collections = {'collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller, parent,
path_prefix="/dummy_svc",
member_actions=member,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
self.assertEqual(200, index_response.status_int)
response = test_app.get("/dummy_svc/tenants/1/"
"tweedles/1/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
response = test_app.get("/dummy_svc/tenants/2/"
"tweedles/collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'],
"value")
def test_resource_extension_for_get_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
LOG.debug(jsonutils.loads(response.body))
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_for_put_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "PUT"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.put("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_post_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "POST"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.post("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_extension_for_delete_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "DELETE"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.delete("/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], 'value')
def test_resource_ext_for_formatted_req_on_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/custom_collection_action.json")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_ext_for_nested_resource_custom_collection_action(self):
controller = self.ResourceExtensionController()
collections = {'custom_collection_action': "GET"}
parent = dict(collection_name='beetles', member_name='beetle')
res_ext = extensions.ResourceExtension('tweedles', controller,
collection_actions=collections,
parent=parent)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/beetles/beetle_id"
"/tweedles/custom_collection_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['collection'], "value")
def test_resource_extension_with_custom_member_action_and_attr_map(self):
controller = self.ResourceExtensionController()
member = {'custom_member_action': "GET"}
params = {
'tweedles': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
}
}
res_ext = extensions.ResourceExtension('tweedles', controller,
member_actions=member,
attr_map=params)
test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))
response = test_app.get("/tweedles/some_id/custom_member_action")
self.assertEqual(200, response.status_int)
self.assertEqual(jsonutils.loads(response.body)['member_action'],
"value")
def test_returns_404_for_non_existent_extension(self):
test_app = _setup_extensions_test_app(SimpleExtensionManager(None))
response = test_app.get("/non_extistant_extension", status='*')
self.assertEqual(404, response.status_int)
class ActionExtensionTest(base.BaseTestCase):
def setUp(self):
super(ActionExtensionTest, self).setUp()
self.extension_app = _setup_extensions_test_app()
def test_extended_action_for_adding_extra_data(self):
action_name = 'FOXNSOX:add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post('/dummy_resources/1/action',
req_body,
content_type='application/json')
self.assertEqual(b"Tweedle Beetle Added.", response.body)
def test_extended_action_for_deleting_extra_data(self):
action_name = 'FOXNSOX:delete_tweedle'
action_params = dict(name='Bailey')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json')
self.assertEqual(b"Tweedle Bailey Deleted.", response.body)
def test_returns_404_for_non_existent_action(self):
non_existent_action = 'blah_action'
action_params = dict(name="test")
req_body = jsonutils.dumps({non_existent_action: action_params})
response = self.extension_app.post("/dummy_resources/1/action",
req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
def test_returns_404_for_non_existent_resource(self):
action_name = 'add_tweedle'
action_params = dict(name='Beetle')
req_body = jsonutils.dumps({action_name: action_params})
response = self.extension_app.post("/asdf/1/action", req_body,
content_type='application/json',
status='*')
self.assertEqual(404, response.status_int)
class RequestExtensionTest(base.BaseTestCase):
def test_headers_can_be_extended(self):
def extend_headers(req, res):
assert req.headers['X-NEW-REQUEST-HEADER'] == "sox"
res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data"
return res
app = self._setup_app_with_request_handler(extend_headers, 'GET')
response = app.get("/dummy_resources/1",
headers={'X-NEW-REQUEST-HEADER': "sox"})
self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'],
"response_header_data")
def test_extend_get_resource_response(self):
def extend_response_data(req, res):
data = jsonutils.loads(res.body)
data['FOXNSOX:extended_key'] = req.GET.get('extended_key')
res.body = jsonutils.dumps(data).encode('utf-8')
return res
app = self._setup_app_with_request_handler(extend_response_data, 'GET')
response = app.get("/dummy_resources/1?extended_key=extended_data")
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('extended_data',
response_data['FOXNSOX:extended_key'])
self.assertEqual('knox', response_data['fort'])
def test_get_resources(self):
app = _setup_extensions_test_app()
response = app.get("/dummy_resources/1?chewing=newblue")
response_data = jsonutils.loads(response.body)
self.assertEqual('newblue', response_data['FOXNSOX:googoose'])
self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands'])
def test_edit_previously_uneditable_field(self):
def _update_handler(req, res):
data = jsonutils.loads(res.body)
data['uneditable'] = req.params['uneditable']
res.body = jsonutils.dumps(data).encode('utf-8')
return res
base_app = webtest.TestApp(setup_base_app(self))
response = base_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(response.json['uneditable'], "original_value")
ext_app = self._setup_app_with_request_handler(_update_handler,
'PUT')
ext_response = ext_app.put("/dummy_resources/1",
{'uneditable': "new_value"})
self.assertEqual(ext_response.json['uneditable'], "new_value")
def _setup_app_with_request_handler(self, handler, verb):
req_ext = extensions.RequestExtension(verb,
'/dummy_resources/:(id)',
handler)
manager = SimpleExtensionManager(None, None, req_ext)
return _setup_extensions_test_app(manager)
class ExtensionManagerTest(base.BaseTestCase):
def test_missing_required_extensions_raise_error(self):
ext_mgr = extensions.ExtensionManager('')
attr_map = {}
ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs('foo_alias'))
self.assertRaises(exceptions.ExtensionsNotFound,
ext_mgr.extend_resources, "2.0", attr_map)
def test_missing_required_extensions_gracefully_error(self):
ext_mgr = extensions.ExtensionManager('')
attr_map = {}
default_ext = list(constants.DEFAULT_SERVICE_PLUGINS.values())[0]
ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs(default_ext))
ext_mgr.extend_resources("2.0", attr_map)
self.assertIn(default_ext, ext_mgr.extensions)
def test_invalid_extensions_are_not_registered(self):
class InvalidExtension(object):
"""Invalid extension.
This Extension doesn't implement extension methods :
get_name, get_description and get_updated
"""
def get_alias(self):
return "invalid_extension"
ext_mgr = extensions.ExtensionManager('')
ext_mgr.add_extension(InvalidExtension())
ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension"))
self.assertIn('valid_extension', ext_mgr.extensions)
self.assertNotIn('invalid_extension', ext_mgr.extensions)
def test_assignment_of_attr_map(self):
"""Unit test for bug 1443342
In this bug, an extension that extended multiple resources with the
same dict would cause future extensions to inadvertently modify the
resources of all of the resources since they were referencing the same
dictionary.
"""
class MultiResourceExtension(ext_stubs.StubExtension):
"""Generated Extended Resources.
This extension's extended resource will assign
to more than one resource.
"""
def get_extended_resources(self, version):
EXTENDED_TIMESTAMP = {
'created_at': {'allow_post': False, 'allow_put': False,
'is_visible': True}}
EXTENDED_RESOURCES = ["ext1", "ext2"]
attrs = {}
for resources in EXTENDED_RESOURCES:
attrs[resources] = EXTENDED_TIMESTAMP
return attrs
class AttrExtension(ext_stubs.StubExtension):
def get_extended_resources(self, version):
attrs = {
self.alias: {
'%s-attr' % self.alias: {'allow_post': False,
'allow_put': False,
'is_visible': True}}}
return attrs
ext_mgr = extensions.ExtensionManager('')
attr_map = {}
ext_mgr.add_extension(MultiResourceExtension('timestamp'))
ext_mgr.extend_resources("2.0", attr_map)
ext_mgr.add_extension(AttrExtension("ext1"))
ext_mgr.add_extension(AttrExtension("ext2"))
ext_mgr.extend_resources("2.0", attr_map)
self.assertIn('created_at', attr_map['ext2'])
self.assertIn('created_at', attr_map['ext1'])
        # now we need to make sure the AttrExtension attributes didn't leak across resources
self.assertNotIn('ext1-attr', attr_map['ext2'])
self.assertNotIn('ext2-attr', attr_map['ext1'])
class PluginAwareExtensionManagerTest(base.BaseTestCase):
def test_unsupported_extensions_are_not_loaded(self):
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1", "e3"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
ext_mgr.add_extension(ext_stubs.StubExtension("e2"))
ext_mgr.add_extension(ext_stubs.StubExtension("e3"))
self.assertIn("e1", ext_mgr.extensions)
self.assertNotIn("e2", ext_mgr.extensions)
self.assertIn("e3", ext_mgr.extensions)
def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self):
class ExtensionUnawarePlugin(object):
"""This plugin does not implement supports_extension method.
Extensions will not be loaded when this plugin is used.
"""
pass
plugin_info = {constants.CORE: ExtensionUnawarePlugin()}
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.StubExtension("e1"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_not_loaded_for_plugin_without_expected_interface(self):
class PluginWithoutExpectedIface(object):
"""Does not implement get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
plugin_info = {constants.CORE: PluginWithoutExpectedIface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertNotIn("e1", ext_mgr.extensions)
def test_extensions_are_loaded_for_plugin_with_expected_interface(self):
class PluginWithExpectedInterface(object):
"""Implements get_foo method as expected by extension."""
supported_extension_aliases = ["supported_extension"]
def get_foo(self, bar=None):
pass
plugin_info = {constants.CORE: PluginWithExpectedInterface()}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface(
"supported_extension"))
self.assertIn("supported_extension", ext_mgr.extensions)
def test_extensions_expecting_neutron_plugin_interface_are_loaded(self):
        class ExtensionForQuantumPluginInterface(ext_stubs.StubExtension):
"""This Extension does not implement get_plugin_interface method.
This will work with any plugin implementing NeutronPluginBase
"""
pass
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
            ext_mgr.add_extension(ExtensionForQuantumPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
    def test_extensions_without_need_for_plugin_interface_are_loaded(self):
class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension):
"""This Extension does not need any plugin interface.
This will work with any plugin implementing NeutronPluginBase
"""
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.CORE: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_extension_loaded_for_non_core_plugin(self):
        class NonCorePluginExtension(ext_stubs.StubExtension):
def get_plugin_interface(self):
return None
stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"])
plugin_info = {constants.DUMMY: stub_plugin}
with mock.patch("neutron.api.extensions.PluginAwareExtensionManager."
"check_if_plugin_extensions_loaded"):
ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info)
            ext_mgr.add_extension(NonCorePluginExtension("e1"))
self.assertIn("e1", ext_mgr.extensions)
def test_unloaded_supported_extensions_raises_exception(self):
stub_plugin = ext_stubs.StubPlugin(
supported_extensions=["unloaded_extension"])
plugin_info = {constants.CORE: stub_plugin}
self.assertRaises(exceptions.ExtensionsNotFound,
extensions.PluginAwareExtensionManager,
'', plugin_info)
class ExtensionControllerTest(testlib_api.WebTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.test_app = _setup_extensions_test_app()
    def test_index_gets_all_registered_extensions(self):
response = self.test_app.get("/extensions." + self.fmt)
res_body = self.deserialize(response)
foxnsox = res_body["extensions"][0]
self.assertEqual(foxnsox["alias"], "FOXNSOX")
def test_extension_can_be_accessed_by_alias(self):
response = self.test_app.get("/extensions/FOXNSOX." + self.fmt)
foxnsox_extension = self.deserialize(response)
foxnsox_extension = foxnsox_extension['extension']
self.assertEqual(foxnsox_extension["alias"], "FOXNSOX")
def test_show_returns_not_found_for_non_existent_extension(self):
response = self.test_app.get("/extensions/non_existent" + self.fmt,
status="*")
self.assertEqual(response.status_int, 404)
def app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return ExtensionsTestApp(conf)
def setup_base_app(test):
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return app
def setup_extensions_middleware(extension_manager=None):
extension_manager = (extension_manager or
extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: FakePluginWithExtension()}))
base.BaseTestCase.config_parse()
app = config.load_paste_app('extensions_test_app')
return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager)
def _setup_extensions_test_app(extension_manager=None):
return webtest.TestApp(setup_extensions_middleware(extension_manager))
class SimpleExtensionManager(object):
def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
self.resource_ext = resource_ext
self.action_ext = action_ext
self.request_ext = request_ext
def get_resources(self):
resource_exts = []
if self.resource_ext:
resource_exts.append(self.resource_ext)
return resource_exts
def get_actions(self):
action_exts = []
if self.action_ext:
action_exts.append(self.action_ext)
return action_exts
def get_request_extensions(self):
request_extensions = []
if self.request_ext:
request_extensions.append(self.request_ext)
return request_extensions
class ExtensionExtendedAttributeTestPlugin(object):
supported_extension_aliases = [
'ext-obj-test', "extended-ext-attr"
]
def __init__(self, configfile=None):
        super(ExtensionExtendedAttributeTestPlugin, self).__init__()
self.objs = []
self.objh = {}
def create_ext_test_resource(self, context, ext_test_resource):
obj = ext_test_resource['ext_test_resource']
id = _uuid()
obj['id'] = id
self.objs.append(obj)
self.objh.update({id: obj})
return obj
def get_ext_test_resources(self, context, filters=None, fields=None):
return self.objs
def get_ext_test_resource(self, context, id, fields=None):
return self.objh[id]
class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
def setUp(self):
super(ExtensionExtendedAttributeTestCase, self).setUp()
plugin = (
"neutron.tests.unit.api.test_extensions."
"ExtensionExtendedAttributeTestPlugin"
)
# point config file to: neutron/tests/etc/neutron.conf
self.config_parse()
self.setup_coreplugin(plugin)
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.CORE: ExtensionExtendedAttributeTestPlugin()}
)
ext_mgr.extend_resources("2.0", {})
extensions.PluginAwareExtensionManager._instance = ext_mgr
app = config.load_paste_app('extensions_test_app')
self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1"
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
self.saved_attr_map[res] = attrs.copy()
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
extattr.EXTENDED_ATTRIBUTES_2_0)
self.agentscheduler_dbMinxin = manager.NeutronManager.get_plugin()
self.addCleanup(self.restore_attribute_map)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def restore_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def _do_request(self, method, path, data=None, params=None, action=None):
content_type = 'application/json'
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
if res.status_code != webexc.HTTPNoContent.code:
return res.json
def _ext_test_resource_create(self, attr=None):
data = {
"ext_test_resource": {
"tenant_id": self._tenant_id,
"name": "test",
extattr.EXTENDED_ATTRIBUTE: attr
}
}
res = self._do_request('POST', _get_path('ext_test_resources'), data)
return res['ext_test_resource']
def test_ext_test_resource_create(self):
ext_test_resource = self._ext_test_resource_create()
attr = _uuid()
ext_test_resource = self._ext_test_resource_create(attr)
self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr)
def test_ext_test_resource_get(self):
attr = _uuid()
obj = self._ext_test_resource_create(attr)
obj_id = obj['id']
res = self._do_request('GET', _get_path(
'ext_test_resources/{0}'.format(obj_id)))
obj2 = res['ext_test_resource']
self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr)
|
the-stack_0_5775 | # encoding: utf-8
"""
Gherkin step implementations for chart features.
"""
from __future__ import absolute_import, print_function
import hashlib
from itertools import islice
from behave import given, then, when
from pptx import Presentation
from pptx.chart.chart import Legend
from pptx.chart.data import (
BubbleChartData, CategoryChartData, ChartData, XyChartData
)
from pptx.enum.chart import XL_CHART_TYPE
from pptx.parts.embeddedpackage import EmbeddedXlsxPart
from pptx.util import Inches
from helpers import count, test_pptx
# given ===================================================
@given('a chart')
def given_a_chart(context):
prs = Presentation(test_pptx('shp-common-props'))
sld = prs.slides[0]
context.chart = sld.shapes[6].chart
@given('a chart having {a_or_no} title')
def given_a_chart_having_a_or_no_title(context, a_or_no):
shape_idx = {'no': 0, 'a': 1}[a_or_no]
prs = Presentation(test_pptx('cht-chart-props'))
context.chart = prs.slides[0].shapes[shape_idx].chart
@given('a chart {having_or_not} a legend')
def given_a_chart_having_or_not_a_legend(context, having_or_not):
slide_idx = {
'having': 0,
'not having': 1,
}[having_or_not]
prs = Presentation(test_pptx('cht-legend'))
context.chart = prs.slides[slide_idx].shapes[0].chart
@given('a chart of size and type {spec}')
def given_a_chart_of_size_and_type_spec(context, spec):
slide_idx = {
'2x2 Clustered Bar': 0,
'2x2 100% Stacked Bar': 1,
'2x2 Clustered Column': 2,
'4x3 Line': 3,
'3x1 Pie': 4,
'3x2 XY': 5,
'3x2 Bubble': 6,
}[spec]
prs = Presentation(test_pptx('cht-replace-data'))
chart = prs.slides[slide_idx].shapes[0].chart
context.chart = chart
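    # Fingerprint the embedded Excel workbook so later steps can verify that
    # replace_data() actually produced new chart data.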
context.xlsx_sha1 = hashlib.sha1(
chart._workbook.xlsx_part.blob
).hexdigest()
@given('a chart of type {chart_type}')
def given_a_chart_of_type_chart_type(context, chart_type):
slide_idx, shape_idx = {
'Area': (0, 0),
'Stacked Area': (0, 1),
'100% Stacked Area': (0, 2),
'3-D Area': (0, 3),
'3-D Stacked Area': (0, 4),
'3-D 100% Stacked Area': (0, 5),
'Clustered Bar': (1, 0),
'Stacked Bar': (1, 1),
'100% Stacked Bar': (1, 2),
'Clustered Column': (1, 3),
'Stacked Column': (1, 4),
'100% Stacked Column': (1, 5),
'Line': (2, 0),
'Stacked Line': (2, 1),
'100% Stacked Line': (2, 2),
'Marked Line': (2, 3),
'Stacked Marked Line': (2, 4),
'100% Stacked Marked Line': (2, 5),
'Pie': (3, 0),
'Exploded Pie': (3, 1),
'XY (Scatter)': (4, 0),
'XY Lines': (4, 1),
'XY Lines No Markers': (4, 2),
'XY Smooth Lines': (4, 3),
'XY Smooth No Markers': (4, 4),
'Bubble': (5, 0),
'3D-Bubble': (5, 1),
'Radar': (6, 0),
'Marked Radar': (6, 1),
'Filled Radar': (6, 2),
'Line (with date categories)': (7, 0),
}[chart_type]
prs = Presentation(test_pptx('cht-chart-type'))
context.chart = prs.slides[slide_idx].shapes[shape_idx].chart
@given('a chart title')
def given_a_chart_title(context):
prs = Presentation(test_pptx('cht-chart-props'))
context.chart_title = prs.slides[0].shapes[1].chart.chart_title
@given('a chart title having {a_or_no} text frame')
def given_a_chart_title_having_a_or_no_text_frame(context, a_or_no):
prs = Presentation(test_pptx('cht-chart-props'))
shape_idx = {'no': 0, 'a': 1}[a_or_no]
context.chart_title = prs.slides[1].shapes[shape_idx].chart.chart_title
# when ====================================================
@when('I add a Clustered bar chart with multi-level categories')
def when_I_add_a_clustered_bar_chart_with_multi_level_categories(context):
chart_type = XL_CHART_TYPE.BAR_CLUSTERED
chart_data = CategoryChartData()
WEST = chart_data.add_category('WEST')
WEST.add_sub_category('SF')
WEST.add_sub_category('LA')
EAST = chart_data.add_category('EAST')
EAST.add_sub_category('NY')
EAST.add_sub_category('NJ')
chart_data.add_series('Series 1', (1, 2, None, 4))
chart_data.add_series('Series 2', (5, None, 7, 8))
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I add a {kind} chart with {cats} categories and {sers} series')
def when_I_add_a_chart_with_categories_and_series(context, kind, cats, sers):
chart_type = {
'Area': XL_CHART_TYPE.AREA,
'Stacked Area': XL_CHART_TYPE.AREA_STACKED,
'100% Stacked Area': XL_CHART_TYPE.AREA_STACKED_100,
'Clustered Bar': XL_CHART_TYPE.BAR_CLUSTERED,
'Stacked Bar': XL_CHART_TYPE.BAR_STACKED,
'100% Stacked Bar': XL_CHART_TYPE.BAR_STACKED_100,
'Clustered Column': XL_CHART_TYPE.COLUMN_CLUSTERED,
'Stacked Column': XL_CHART_TYPE.COLUMN_STACKED,
'100% Stacked Column': XL_CHART_TYPE.COLUMN_STACKED_100,
'Doughnut': XL_CHART_TYPE.DOUGHNUT,
'Exploded Doughnut': XL_CHART_TYPE.DOUGHNUT_EXPLODED,
'Line': XL_CHART_TYPE.LINE,
'Line with Markers': XL_CHART_TYPE.LINE_MARKERS,
'Line Markers Stacked': XL_CHART_TYPE.LINE_MARKERS_STACKED,
'100% Line Markers Stacked': XL_CHART_TYPE.LINE_MARKERS_STACKED_100,
'Line Stacked': XL_CHART_TYPE.LINE_STACKED,
'100% Line Stacked': XL_CHART_TYPE.LINE_STACKED_100,
'Pie': XL_CHART_TYPE.PIE,
'Exploded Pie': XL_CHART_TYPE.PIE_EXPLODED,
'Radar': XL_CHART_TYPE.RADAR,
'Filled Radar': XL_CHART_TYPE.RADAR_FILLED,
'Radar with markers': XL_CHART_TYPE.RADAR_MARKERS,
}[kind]
category_count, series_count = int(cats), int(sers)
category_source = ('Foo', 'Bar', 'Baz', 'Boo', 'Far', 'Faz')
series_value_source = count(1.1, 1.1)
chart_data = CategoryChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = 'Series %d' % (idx+1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I add a {bubble_type} chart having 2 series of 3 points each')
def when_I_add_a_bubble_chart_having_2_series_of_3_pts(context, bubble_type):
chart_type = getattr(XL_CHART_TYPE, bubble_type)
data = (
('Series 1', ((-0.1, 0.5, 1.0), (16.2, 0.0, 2.0), (8.0, -0.2, 3.0))),
('Series 2', ((12.4, 0.8, 4.0), (-7.5, 0.5, 5.0), (5.1, -0.5, 6.0))),
)
chart_data = BubbleChartData()
for series_data in data:
series_label, points = series_data
series = chart_data.add_series(series_label)
for point in points:
x, y, size = point
series.add_data_point(x, y, size)
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I assign {value} to chart.has_legend')
def when_I_assign_value_to_chart_has_legend(context, value):
new_value = {
'True': True,
'False': False,
}[value]
context.chart.has_legend = new_value
@when('I assign {value} to chart.has_title')
def when_I_assign_value_to_chart_has_title(context, value):
context.chart.has_title = {'True': True, 'False': False}[value]
@when('I assign {value} to chart_title.has_text_frame')
def when_I_assign_value_to_chart_title_has_text_frame(context, value):
context.chart_title.has_text_frame = {
'True': True,
'False': False
}[value]
@when('I replace its data with {cats} categories and {sers} series')
def when_I_replace_its_data_with_categories_and_series(context, cats, sers):
category_count, series_count = int(cats), int(sers)
category_source = ('Foo', 'Bar', 'Baz', 'Boo', 'Far', 'Faz')
series_value_source = count(1.1, 1.1)
chart_data = ChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = 'New Series %d' % (idx+1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart.replace_data(chart_data)
@when('I replace its data with 3 series of 3 bubble points each')
def when_I_replace_its_data_with_3_series_of_three_bubble_pts_each(context):
chart_data = BubbleChartData()
for idx in range(3):
series_title = 'New Series %d' % (idx+1)
series = chart_data.add_series(series_title)
for jdx in range(3):
x, y, size = idx * 3 + jdx, idx * 2 + jdx, idx + jdx
series.add_data_point(x, y, size)
context.chart.replace_data(chart_data)
@when('I replace its data with 3 series of 3 points each')
def when_I_replace_its_data_with_3_series_of_three_points_each(context):
chart_data = XyChartData()
x = y = 0
for idx in range(3):
series_title = 'New Series %d' % (idx+1)
series = chart_data.add_series(series_title)
for jdx in range(3):
x, y = idx * 3 + jdx, idx * 2 + jdx
series.add_data_point(x, y)
context.chart.replace_data(chart_data)
# then ====================================================
@then('chart.category_axis is a {cls_name} object')
def then_chart_category_axis_is_a_cls_name_object(context, cls_name):
category_axis = context.chart.category_axis
type_name = type(category_axis).__name__
assert type_name == cls_name, 'got %s' % type_name
@then('chart.chart_title is a ChartTitle object')
def then_chart_chart_title_is_a_ChartTitle_object(context):
class_name = type(context.chart.chart_title).__name__
assert class_name == 'ChartTitle', 'got %s' % class_name
@then('chart.chart_type is {enum_member}')
def then_chart_chart_type_is_value(context, enum_member):
expected_value = getattr(XL_CHART_TYPE, enum_member)
chart = context.chart
assert chart.chart_type is expected_value, 'got %s' % chart.chart_type
@then('chart.has_legend is {value}')
def then_chart_has_legend_is_value(context, value):
expected_value = {
'True': True,
'False': False,
}[value]
chart = context.chart
assert chart.has_legend is expected_value
@then('chart.has_title is {value}')
def then_chart_has_title_is_value(context, value):
chart = context.chart
actual_value = chart.has_title
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('chart.legend is a legend object')
def then_chart_legend_is_a_legend_object(context):
chart = context.chart
assert isinstance(chart.legend, Legend)
@then('chart.series is a SeriesCollection object')
def then_chart_series_is_a_SeriesCollection_object(context):
type_name = type(context.chart.series).__name__
assert type_name == 'SeriesCollection', 'got %s' % type_name
@then('chart.value_axis is a ValueAxis object')
def then_chart_value_axis_is_a_ValueAxis_object(context):
value_axis = context.chart.value_axis
assert type(value_axis).__name__ == 'ValueAxis'
@then('chart_title.format is a ChartFormat object')
def then_chart_title_format_is_a_ChartFormat_object(context):
class_name = type(context.chart_title.format).__name__
assert class_name == 'ChartFormat', 'got %s' % class_name
@then('chart_title.format.fill is a FillFormat object')
def then_chart_title_format_fill_is_a_FillFormat_object(context):
class_name = type(context.chart_title.format.fill).__name__
assert class_name == 'FillFormat', 'got %s' % class_name
@then('chart_title.format.line is a LineFormat object')
def then_chart_title_format_line_is_a_LineFormat_object(context):
class_name = type(context.chart_title.format.line).__name__
assert class_name == 'LineFormat', 'got %s' % class_name
@then('chart_title.has_text_frame is {value}')
def then_chart_title_has_text_frame_is_value(context, value):
actual_value = context.chart_title.has_text_frame
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('chart_title.text_frame is a TextFrame object')
def then_chart_title_text_frame_is_a_TextFrame_object(context):
class_name = type(context.chart_title.text_frame).__name__
assert class_name == 'TextFrame', 'got %s' % class_name
@then('each series has a new name')
def then_each_series_has_a_new_name(context):
for series in context.chart.plots[0].series:
assert series.name.startswith('New ')
@then('each series has {count} values')
def then_each_series_has_count_values(context, count):
expected_count = int(count)
for series in context.chart.plots[0].series:
actual_value_count = len(series.values)
assert actual_value_count == expected_count
@then('len(chart.series) is {count}')
def then_len_chart_series_is_count(context, count):
expected_count = int(count)
assert len(context.chart.series) == expected_count
@then('the chart has an Excel data worksheet')
def then_the_chart_has_an_Excel_data_worksheet(context):
xlsx_part = context.chart._workbook.xlsx_part
assert isinstance(xlsx_part, EmbeddedXlsxPart)
@then('the chart has new chart data')
def then_the_chart_has_new_chart_data(context):
orig_xlsx_sha1 = context.xlsx_sha1
new_xlsx_sha1 = hashlib.sha1(
context.chart._workbook.xlsx_part.blob
).hexdigest()
assert new_xlsx_sha1 != orig_xlsx_sha1
|
the-stack_0_5779 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
bl_info = {
'name': 'Pivot Transform',
#"description": "This is a test version of the addon. Write in the discord channel(link below) about the errors."
"author": "Max Derksen",
'version': (1, 4, 4),
'blender': (2, 81, 0),
'location': 'VIEW 3D > N-Panel > Pivot Point Popover',
#"warning": "This is a test version of the addon. Write in the discord channel(link below) about the errors.",
"support": "COMMUNITY",
'category': 'Object',
}
'''
import bpy
import re
import bmesh
import mathutils
from mathutils import Matrix, Vector
from bpy.types import Operator
from bpy.props import IntProperty, FloatProperty #Bounding Box
#========================================================================PIVOT TRANSFORM TOOL IN EDIT MODE
storeGT = False
storeGR = False
storeGS = False
class PIVOT_OT_transform_on_N(Operator):
bl_idname = "pivot.transform_on_n"
bl_label = "Transform"
bl_description = "Start Pivot Transformation"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
if context.scene.tool_settings.use_transform_data_origin == False:
props_pivot = context.preferences.addons[__name__.split(".")[0]].preferences
global storeGT
global storeGR
global storeGS
storeGT = context.space_data.show_gizmo_object_translate
storeGR = context.space_data.show_gizmo_object_rotate
storeGS = context.space_data.show_gizmo_object_scale
#if props_pivot.gizmo_preselect == True:
#context.space_data.show_gizmo_object_translate = props_pivot.move_giz
#context.space_data.show_gizmo_object_rotate = props_pivot.rotate_giz
#context.space_data.show_gizmo_object_scale = props_pivot.scale_giz
if context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
context.scene.tool_settings.use_transform_data_origin = True
return{'FINISHED'}
else:
return{'CANCELLED'}
class PIVOT_OT_transform_off_N(Operator):
bl_idname = "pivot.transform_off_n"
bl_label = "Apply"
bl_description = "Apply Pivot Transformation"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
def execute(self, context):
if context.scene.tool_settings.use_transform_data_origin == True:
global storeGT
global storeGR
global storeGS
context.space_data.show_gizmo_object_translate = storeGT
context.space_data.show_gizmo_object_rotate = storeGR
context.space_data.show_gizmo_object_scale = storeGS
if context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
context.scene.tool_settings.use_transform_data_origin = False
return{'FINISHED'}
else:
return{'CANCELLED'}
import bgl
import gpu
from gpu_extras.batch import batch_for_shader
def ob_add(self, context, obj):
""" bpy.ops.mesh.primitive_cube_add(enter_editmode=True, align='WORLD', location=(0, 0, 0))
bbox = context.active_object
bbox.matrix_world = self.obj.matrix_world
bm = bmesh.from_edit_mesh(bbox.data)
bm.verts.ensure_lookup_table()
bm.verts.index_update()
for i, vert in enumerate(ob_bbox):
bm.verts[i].co = (vert[0], vert[1], vert[2])
#bm.to_mesh(me)
bpy.ops.mesh.select_all(action='DESELECT') """
ob_bbox = obj.bound_box
me = bpy.data.meshes.new('PivotBBox')
bbox = bpy.data.objects.new('PivotBBox', me)
bbox.matrix_world = obj.matrix_world
context.collection.objects.link(bbox)
bm = bmesh.new()
bm.from_mesh(me)
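    # Build a cube mesh from the object's eight bound_box corners: add the
    # corner verts, then stitch the six quad faces of the box.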
for vert in ob_bbox:
bm.verts.new(vert[:])
vertex=[]
for v in bm.verts:
vertex.append(v)
bm.faces.new((vertex[0], vertex[1], vertex[2], vertex[3]))
bm.faces.new((vertex[3], vertex[2], vertex[6], vertex[7]))
bm.faces.new((vertex[3], vertex[7], vertex[4], vertex[0]))
bm.faces.new((vertex[4], vertex[5], vertex[6], vertex[7]))
bm.faces.new((vertex[2], vertex[1], vertex[5], vertex[6]))
bm.faces.new((vertex[0], vertex[4], vertex[5], vertex[1]))
bm.to_mesh(me)
bpy.ops.object.select_all(action='DESELECT')
bbox.select_set(state=True)
context.view_layer.objects.active = bbox
context.object.display_type = 'WIRE'
bpy.ops.object.mode_set(mode='EDIT')
context.tool_settings.mesh_select_mode = (True, True, True)
obj.select_set(state=True)
class PIVOT_OT_bounding_box_N(Operator):
bl_idname = "pivot.bounding_box_n"
bl_label = "Pivot To Bounding Box"
bl_description = "Apply Transformation"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
@classmethod
def poll(cls, context):
return context.active_object is not None
def __init__(self):
self.obj = None
self.select_mode = tuple()
@staticmethod
def draw_mesh(self, context):
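        # Draws the bounding-box cage over the object: shaded faces, dark edges,
        # and blue snap points at the corners, edge midpoints and face centers.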
shader = gpu.shader.from_builtin('3D_SMOOTH_COLOR')
theme = bpy.context.preferences.themes['Default']
vertex_size = theme.view_3d.vertex_size
bgl.glEnable(bgl.GL_BLEND)
bgl.glLineWidth(3)
bgl.glPointSize(vertex_size + 4)
bgl.glEnable(bgl.GL_DEPTH_TEST)
bgl.glEnable(bgl.GL_LINE_SMOOTH)
bgl.glEnable(bgl.GL_CULL_FACE)
bgl.glCullFace(bgl.GL_BACK)
bgl.glDepthRange(0, 0.9999)
bgl.glDepthMask(False)
shader.bind()
bbox = context.active_object
mesh = bmesh.from_edit_mesh(bbox.data)
vertex_co = [bbox.matrix_world @ v.co for v in mesh.verts]
vertex_all = []
for e in mesh.edges:
v1 = bbox.matrix_world @ e.verts[0].co
v2 = bbox.matrix_world @ e.verts[1].co
vCo = (v1 + v2) / 2
vertex_all.append(vCo)
for f in mesh.faces:
vCo = bbox.matrix_world @ f.calc_center_bounds()
vertex_all.append(vCo)
for v in vertex_co:
vertex_all.append(v)
edge_keys = bbox.data.edge_keys
loop_triangles = mesh.calc_loop_triangles()
faces_indices = [[loop.vert.index for loop in looptris] for looptris in loop_triangles]
face_col = [(0.2, 0.2, 0.2, 0.6) for _ in range(len(vertex_co))]
edge_col = [(0.1, 0.1, 0.1, 1.0) for _ in range(len(vertex_co))]
vert_col = [(0.1, 0.4, 1.0, 1.0) for _ in range(len(vertex_all))]
FACES = batch_for_shader(shader, 'TRIS', {"pos": vertex_co, "color": face_col}, indices=faces_indices)
EDGES = batch_for_shader(shader, 'LINES', {"pos": vertex_co, "color": edge_col}, indices=edge_keys)
VERTS = batch_for_shader(shader, 'POINTS', {"pos": vertex_all, "color": vert_col})
FACES.draw(shader)
EDGES.draw(shader)
VERTS.draw(shader)
bgl.glDepthRange(0, 1)
bgl.glDisable(bgl.GL_LINE_SMOOTH)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glDisable(bgl.GL_CULL_FACE)
bgl.glLineWidth(1)
bgl.glPointSize(vertex_size)
bgl.glDisable(bgl.GL_BLEND)
def modal(self, context, event):
props_pivot = context.preferences.addons[__name__.split(".")[0]].preferences
if context.area:
if context.area.type == 'VIEW_3D':
#context.area.tag_redraw()
# Selected Object(EDIT_MODE)
bbox = context.active_object
me = bmesh.from_edit_mesh(bbox.data)
# select items
verts_sel = []
verts_sel.extend([v for v in me.verts if v.select])
if len(verts_sel) >= 1:
#bpy.ops.pivot.alignface()
cursor_pos = context.scene.cursor.location.copy()
bpy.ops.view3d.snap_cursor_to_selected()
context.tool_settings.mesh_select_mode = self.select_mode
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.origin_set(type='ORIGIN_CURSOR', center='MEDIAN')
bpy.context.collection.objects.unlink(bbox)
bpy.ops.object.delete({"selected_objects": [bbox]})
context.view_layer.objects.active = self.obj
context.scene.cursor.location = cursor_pos
bpy.types.SpaceView3D.draw_handler_remove(self._bb_mesh_draw, 'WINDOW')
#props_pivot.bbox_run = False
return {'FINISHED'}
                if event.type not in {'RIGHTMOUSE', 'MIDDLEMOUSE', 'LEFTMOUSE'} and event.value == 'PRESS':
#props_pivot.bbox_run = False
context.tool_settings.mesh_select_mode = self.select_mode
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.collection.objects.unlink(bbox)
bpy.ops.object.delete({"selected_objects": [bbox]})
context.view_layer.objects.active = self.obj
bpy.types.SpaceView3D.draw_handler_remove(self._bb_mesh_draw, 'WINDOW')
return {'CANCELLED'}
else:
#props_pivot.bbox_run = False
bpy.types.SpaceView3D.draw_handler_remove(self._bb_mesh_draw, 'WINDOW')
return {'FINISHED'}
return {'PASS_THROUGH'}
def invoke(self, context, event):
if context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
props_pivot = context.preferences.addons[__name__.split(".")[0]].preferences
#props_pivot.bbox_run = True
self.select_mode = context.tool_settings.mesh_select_mode[:]
self.obj = context.active_object
ob_add(self, context, self.obj)
if context.area.type == 'VIEW_3D':
args = (self, context)
self._bb_mesh_draw= bpy.types.SpaceView3D.draw_handler_add(self.draw_mesh, args, 'WINDOW', 'POST_VIEW')
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
classes = [
PIVOT_OT_transform_on_N,
PIVOT_OT_transform_off_N,
PIVOT_OT_bounding_box_N,
]
def register():
global storeGT
global storeGR
global storeGS
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls) |
the-stack_0_5781 | ## Alarm Server
## Supporting Envisalink 2DS/3
##
## This code is under the terms of the GPL v3 license.
evl_Commands = {
'KeepAlive' : '000',
'StatusReport' : '001',
'DumpZoneTimers' : '008',
'PartitionKeypress' : '071',
'Disarm' : '040',
'ArmStay' : '031',
'ArmAway' : '030',
'ArmMax' : '032',
'Login' : '005',
'Panic' : '060',
'SendCode' : '200',
'CommandOutput' : '020',
'SetTime' : '010'
}
evl_PanicTypes = {
'Fire' : '1',
'Ambulance' : '2',
'Police' : '3'
}
evl_ArmModes = {
'0' : {'name' : 'Arm Away', 'status':{'armed_away': True, 'armed_zero_entry_delay': False, 'alpha':'Arm Away', 'exit_delay':False, 'entry_delay': False }},
'1' : {'name' : 'Arm Stay', 'status':{'armed_stay': True, 'armed_zero_entry_delay': False, 'alpha':'Arm Stay', 'exit_delay':False, 'entry_delay': False }},
'2' : {'name' : 'Arm Zero Entry Away', 'status':{'armed_away': True, 'armed_zero_entry_delay': True, 'alpha':'Arm Zero Entry Away', 'exit_delay':False, 'entry_delay': False }},
'3' : {'name' : 'Arm Zero Entry Stay', 'status':{'armed_stay': True, 'armed_zero_entry_delay': True, 'alpha':'Arm Zero Entry Stay', 'exit_delay':False, 'entry_delay': False }}
}
evl_ResponseTypes = {
'505' : {'name':'Login Prompt', 'handler':'login'},
'615' : {'name':'Envisalink Zone Timer Dump', 'handler':'zone_timer_dump'},
'500' : {'name':'Poll', 'handler':'poll_response'},
'501' : {'name':'Checksum', 'handler':'command_response_error'},
'900' : {'name':'EnterCode', 'handler':'send_code'},
'912' : {'name':'PGMEnterCode', 'handler':'send_code'},
#ZONE UPDATES
'601' : {'name':'Zone Alarm', 'handler':'zone_state_change', 'status':{'alarm' : True}},
'602' : {'name':'Zone Alarm Restore', 'handler':'zone_state_change', 'status':{'alarm' : False}},
'603' : {'name':'Zone Tamper', 'handler':'zone_state_change', 'status':{'tamper' : True}},
'604' : {'name':'Zone Tamper Restore', 'handler':'zone_state_change', 'status':{'tamper' : False}},
'605' : {'name':'Zone Fault', 'handler':'zone_state_change', 'status':{'fault' : True}},
'606' : {'name':'Zone Fault Restore', 'handler':'zone_state_change', 'status':{'fault' : False}},
'609' : {'name':'Zone Open', 'handler':'zone_state_change', 'status':{'open' : True}},
'610' : {'name':'Zone Restored', 'handler':'zone_state_change', 'status':{'open' : False}},
#PARTITION UPDATES
'650' : {'name':'Ready', 'handler':'partition_state_change', 'status':{'ready' : True, 'alpha' : 'Ready'}},
'651' : {'name':'Not Ready', 'handler':'partition_state_change', 'status':{'ready' : False, 'alpha' : 'Not Ready'}},
'652' : {'name':'Armed', 'handler':'partition_state_change'},
'653' : {'name':'Ready - Force Arming Enabled', 'handler':'partition_state_change', 'status':{'ready': True, 'alpha' : 'Ready - Force Arm'}},
'654' : {'name':'Alarm', 'handler':'partition_state_change', 'status':{'alarm' : True, 'alpha' : 'Alarm'}},
'655' : {'name':'Disarmed', 'handler':'partition_state_change', 'status' : {'alarm' : False, 'armed_stay' : False, 'armed_zero_entry_delay': False, 'armed_away' : False, 'exit_delay' : False, 'entry_delay' : False, 'alpha' : 'Disarmed'}},
'656' : {'name':'Exit Delay in Progress', 'handler':'partition_state_change', 'status':{'exit_delay' : True, 'alpha' : 'Exit Delay In Progress'}},
'657' : {'name':'Entry Delay in Progress', 'handler':'partition_state_change', 'status':{'entry_delay' : True, 'alpha' : 'Entry Delay in Progress'}},
'663' : {'name':'ChimeOn', 'handler':'partition_state_change', 'status': {'chime': True}},
'664' : {'name':'ChimeOff', 'handler':'partition_state_change', 'status': {'chime': False}},
'673' : {'name':'Busy', 'handler':'partition_state_change', 'status': {'alpha': 'Busy'}},
'700' : {'name':'Armed by user', 'handler':'partition_state_change', 'status':{}},
'750' : {'name':'Disarmed by user', 'handler':'partition_state_change', 'status' : {'alarm' : False, 'armed_stay' : False, 'armed_away' : False, 'armed_zero_entry_delay': False, 'exit_delay' : False, 'entry_delay' : False, 'alpha' : 'Disarmed'}},
'751' : {'name':'Disarmed special', 'handler':'partition_state_change', 'status' : {'alarm' : False, 'armed_stay' : False, 'armed_away' : False, 'armed_zero_entry_delay': False, 'exit_delay' : False, 'entry_delay' : False, 'alpha' : 'Disarmed'}},
'840' : {'name':'Trouble LED', 'handler':'partition_state_change', 'status':{'trouble' : True}},
'841' : {'name':'Trouble Clear', 'handler':'partition_state_change', 'status':{'trouble' : False, 'ac_present': True}},
#GENERAL UPDATES
'621' : {'name':'FireAlarmButton', 'handler':'keypad_update', 'status':{'fire' : True, 'alarm': True, 'alpha' : 'Fire Alarm'}},
'622' : {'name':'FireAlarmButtonOff', 'handler':'keypad_update', 'status':{'fire' : False, 'alarm': False, 'alpha' : 'Fire Alarm Cleared'}},
'623' : {'name':'AuxAlarmButton', 'handler':'keypad_update', 'status':{'alarm': True, 'alpha' : 'Aux Alarm'}},
'624' : {'name':'AuxAlarmButtonOff', 'handler':'keypad_update', 'status':{'alarm': False, 'alpha' : 'Aux Alarm Cleared'}},
'625' : {'name':'PanicAlarmButton', 'handler':'keypad_update', 'status':{'alarm': True, 'alpha' : 'Panic Alarm'}},
'626' : {'name':'PanicAlarmButtonOff', 'handler':'keypad_update', 'status':{'alarm': False, 'alpha' : 'Panic Alarm Cleared'}},
'631' : {'name':'SmokeAlarmButton', 'handler':'keypad_update', 'status':{'alarm': True, 'alpha' : 'Smoke Alarm'}},
'632' : {'name':'SmokeAlarmButtonOff', 'handler':'keypad_update', 'status':{'alarm': False, 'alpha' : 'Smoke Alarm Cleared'}},
'800' : {'name':'LowBatTrouble', 'handler':'keypad_update', 'status':{'bat_trouble': True, 'alpha' : 'Low Battery'}},
'801' : {'name':'LowBatTroubleOff', 'handler':'keypad_update', 'status':{'bat_trouble': False, 'alpha' : 'Low Battery Cleared'}},
'802' : {'name':'ACTrouble', 'handler':'keypad_update', 'status':{'ac_present': False, 'alpha' : 'AC Power Lost'}},
'803' : {'name':'ACTroubleOff', 'handler':'keypad_update', 'status':{'ac_present': True, 'alpha' : 'AC Power Restored'}},
'829' : {'name':'SystemTamper', 'handler':'keypad_update', 'status':{'alpha' : 'System tamper'}},
'830' : {'name':'SystemTamperOff', 'handler':'keypad_update', 'status':{'alpha' : 'System tamper Restored'}},
'849' : {'name':'TroubleVerbose', 'handler':'keypad_update', 'status':None}
}
evl_verboseTrouble = {
0 : 'Service is Required',
1 : 'AC Power Lost',
2 : 'Telephone Line Fault',
3 : 'Failure to communicate',
4 : 'Zone/Sensor Fault',
5 : 'Zone/Sensor Tamper',
6 : 'Zone/Sensor Low Battery',
7 : 'Loss of time'
}
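# Illustrative sketch only (not part of the original AlarmServer): it shows how the
# 'handler' names above are intended to be resolved against a client object. The
# 'handle_' prefix and the assumption that a TPI line is a 3-digit code, a data
# payload and a trailing 2-character checksum are assumptions for this example.
def _example_dispatch(client, line):
    code, data = line[:3], line[3:-2]
    event = evl_ResponseTypes.get(code)
    if event is None:
        return None
    handler = getattr(client, 'handle_' + event['handler'], None)
    if handler is None:
        return None
    return handler(code, data, event.get('status'))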
|
the-stack_0_5783 | #!/usr/bin/env python
# coding: utf-8
# Takes as input a csv or tsv file and an e-value cutoff, loads the data into pandas and filters the dataframe by this cutoff.
# Writes a clean csv/tsv file. If imported as a library, it can take a pandas dataframe as input and return a filtered dataframe.
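# Minimal library-use sketch (module name assumed to be 'filterbyevalue'):
#   import filterbyevalue as fbe
#   df = fbe.loadData('myfile.tsv')
#   df = fbe.filterevalue(df, 1e-10)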
import pandas as pd
from sys import argv
def loadData(file):
if file.endswith(".csv"):
df = pd.read_csv(file, sep=',', header=None)
elif file.endswith(".tsv"):
df = pd.read_csv(file, sep='\t', header=None)
df.columns = ["node1","qstart","qend","qlen","qseq","node2","eval","pident","bitscore","sstart","send","slen","length","sseq"]
df = df.drop(columns=["qend","qstart","qlen","qseq","pident","bitscore","sstart","send","slen","length","sseq"])
print(df)
return df
def filterevalue(df, evalue):
    print(f"## FILTER BY EVALUE {evalue}")
    to_drop = []
    for i in range(len(df)):
        evalu = df.iloc[i]['eval']
        # if the e-value of the result is above the indicated cutoff, the row is dropped
        if float(evalu) > float(evalue):
            to_drop.append(i)
    df = df.drop(df.index[to_drop])
    print("Length of the dataframe after FILTER BY EVALUE: " + str(len(df)))
    if len(df) == 0:
        print("ERROR: Length of the dataframe = 0 - I can't generate the gephi/cytoscape network")
        exit()
    print('------------------------------')
    return df
if __name__ == "__main__":
    if len(argv) < 3:
        print("This script takes as input a csv/tsv file and an e-value cutoff.\n"
              "Please run it in this format: python3 filterbyevalue.py [file] [evalue]\n"
              "Example: python3 filterbyevalue.py myfile.csv 1e-10")
        exit()
file = argv[1]
file_name = file.split(".")[0]
evalue = float(argv[2])
df = loadData(file)
result = filterevalue(df,evalue)
    result.to_csv(f'{file_name}_filtered_{str(evalue)}.csv', index=False)
|
the-stack_0_5784 | import os
from flask import Flask, jsonify, request, abort
from components import CardsView, Card, CardHeader
app = Flask(__name__)
@app.route("/", methods=['POST'])
def index():
# process payload from archy
payload = request.json.get('payload', {})
args = payload.get('args', {})
links = [{
'address': '/list'
}]
viewProps = {'links': links}
view = CardsView(viewProps,
Card({},
CardHeader({
'title': 'Card Title 1',
'subtitle': 'card subtitle',
})),
Card({},
CardHeader({
'title': 'Card Title 2',
'subtitle': 'card subtitle',
})),
)
return jsonify(**view.to_dict())
if __name__ == '__main__':
port = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=port)
|
the-stack_0_5785 | #!/usr/bin/env python3
import shutil
import iterm2
async def main(connection):
component = iterm2.StatusBarComponent(
short_description="RootVolume Usage",
detailed_description="Show Root Volume Usage",
knobs=[],
exemplar="[RootVolume Usage]",
update_cadence=30,
identifier="koh-sh.iterm2-statusbar-scripts.rootvolume"
)
@iterm2.StatusBarRPC
async def showrootvolume(knobs):
rootusage = shutil.disk_usage('/')
lst = [round(x / 1024 / 1024 / 1024) for x in rootusage]
return ("RootVolume: {}/{} GB ({}%)".format(lst[1], lst[0], (round(lst[1] / lst[0] * 100))))
await component.async_register(connection, showrootvolume)
iterm2.run_forever(main)
|
the-stack_0_5786 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import codecs
import toolbox_utils
import plankton_core
class ImportSharkWeb(plankton_core.DataImportPreparedBase):
""" Class for parsing sharkweb text files. """
def __init__(self):
""" """
# Initialize parent.
super(ImportSharkWeb, self).__init__()
# Information needed for parsing. List of lists with:
# Column 0: node level.
# Column 1: internal key.
# Column 2: view format.
# Column 3: source file column name. Multiple alternatives should be separated by '<or>'. TODO: '<or>' not implemented.
# Column 4: export column name. None = not used, empty string ('') = same as column 1 (internal key).
self._parsing_info = [
['visit', 'visit_year', 'integer', 'visit_year', ''],
['visit', 'sample_date', 'date', 'sample_date', ''],
['visit', 'visit_month', 'integer', '', ''], # Calculate. Code below.
['visit', 'station_name', 'text', 'station_name', ''],
['visit', 'sample_latitude_dd', 'float', 'sample_latitude_dd', ''],
['visit', 'sample_longitude_dd', 'float', 'sample_longitude_dd', ''],
['visit', 'water_depth_m', 'float', 'water_depth_m', ''],
#
['sample', 'sample_id', 'text', 'sample_id', ''],
['sample', 'sample_min_depth_m', 'float', 'sample_min_depth_m', ''],
['sample', 'sample_max_depth_m', 'float', 'sample_max_depth_m', ''],
#
['variable', 'scientific_name', 'text', 'scientific_name', ''],
['variable', 'species_flag_code', 'text', 'species_flag_code', ''],
['variable', 'size_class', 'text', 'size_class', ''],
['variable', 'trophic_type', 'text', 'trophic_type_code', ''],
#
['variable', 'parameter', 'text', 'parameter', ''],
['variable', 'value', 'float', 'value', ''],
['variable', 'unit', 'text', 'unit', ''],
#
['variable', 'plankton_group', 'text', '', ''], # Calculate. Code below.
['variable', 'taxon_kingdom', 'text', 'taxon_kingdom', ''],
['variable', 'taxon_phylum', 'text', 'taxon_phylum', ''],
['variable', 'taxon_class', 'text', 'taxon_class', ''],
['variable', 'taxon_order', 'text', 'taxon_order', ''],
['variable', 'taxon_family', 'text', 'taxon_family', ''],
['variable', 'taxon_genus', 'text', 'taxon_genus', ''],
['variable', 'taxon_hierarchy', 'text', 'taxon_hierarchy', ''],
#
['variable', 'sampling_laboratory', 'text', 'sampling_laboratory_name_sv', ''],
['variable', 'analytical_laboratory', 'text', 'analytical_laboratory_name_sv', ''],
['variable', 'analysis_date', 'text', 'analysis_date', ''],
['variable', 'analysed_by', 'text', 'analysed_by', ''],
]
# Keys:
self._visit_key_fields = ['sample_date', 'station_name']
self._sample_key_fields = ['sample_date', 'station_name', 'sample_id', 'sample_min_depth_m', 'sample_max_depth_m', 'sample_id']
#
self.clear() #
def clear(self):
""" """
self._header = []
self._rows = []
def read_file(self, file_name = None):
""" """
if file_name == None:
raise UserWarning('File name is missing.')
input_file = None
try:
### txtencode = toolbox_settings.ToolboxSettings().getValue('General:Character encoding, txt-files', 'cp1252')
txtencode = 'cp1252'
input_file = codecs.open(file_name, mode = 'r', encoding = txtencode)
# Read data header. Same header used for data and aggregated data.
            separator = '\t' # Use '\t' as default item separator.
first_row = input_file.readline()
if ';' in first_row:
separator = ';' # Use ';' as item separator.
#
self._header = []
for headeritem in first_row.split(separator):
item = headeritem.strip()
self._header.append(item)
# Read data rows. Continue until empty line occurs.
self._rows = []
for row in input_file.readlines():
rowitems = []
for item in row.split(separator):
rowitems.append(item.strip())
self._rows.append(rowitems)
#
except (IOError, OSError):
raise
finally:
if input_file: input_file.close()
def create_tree_dataset(self, dataset, update_trophic_type):
""" """
try:
# Base class must know header for _asText(), etc.
# self._set_header(self._header)
# Iterate over rows in imported_table.
for row in self._rows:
row_dict = dict(zip(self._header, row))
# Get or create nodes.
currentvisit = None
currentsample = None
currentvariable = None
# Check if visit exists. Create or reuse.
keystring = ''
delimiter = ''
for key_field in self._visit_key_fields:
keystring += delimiter + row_dict.get(key_field, '')
delimiter = '<+>'
#
currentvisit = dataset.get_visit_lookup(keystring)
if not currentvisit:
currentvisit = plankton_core.VisitNode()
dataset.add_child(currentvisit)
currentvisit.set_id_string(keystring)
# Check if sample exists. Create or reuse.
keystring = ''
delimiter = ''
for key_field in self._sample_key_fields:
keystring += delimiter + row_dict.get(key_field, '')
delimiter = '<+>'
#
currentsample = dataset.get_sample_lookup(keystring)
if not currentsample:
currentsample = plankton_core.SampleNode()
currentvisit.add_child(currentsample)
currentsample.set_id_string(keystring)
# Add all variables in row.
currentvariable = plankton_core.VariableNode()
currentsample.add_child(currentvariable)
# === Parse row and add fields on nodes. ===
for parsinginforow in self._parsing_info:
#
value = row_dict.get(parsinginforow[3], '')
# Fix float.
if parsinginforow[2] == 'float':
value = value.replace(',', '.')
# Calculate some values.
if parsinginforow[1] == 'visit_month':
try:
value = row_dict.get('sample_date', '')
value = value[5:7]
except:
pass
if parsinginforow[1] == 'plankton_group':
try:
value = row_dict.get('scientific_name', '')
value = plankton_core.Species().get_plankton_group_from_taxon_name(value)
except:
pass
if parsinginforow[1] == 'analysed_by':
try:
if not value:
value = row_dict.get('taxonomist', '')
except:
pass
if parsinginforow[1] == 'trophic_type':
# Update trophic_type.
if parsinginforow[1] == 'trophic_type':
if update_trophic_type:
scientific_name = row_dict.get('scientific_name', '')
size_class = row_dict.get('size_class', '')
trophic_type = plankton_core.Species().get_bvol_value(scientific_name, size_class, 'trophic_type')
if trophic_type:
value = trophic_type # Use existing if not in local list.
# Replace empty with NS=Not specified.
if not value:
value = 'NS'
# Add at right level.
if parsinginforow[0] == 'visit':
currentvisit.add_data(parsinginforow[1], value)
#
if parsinginforow[0] == 'sample':
currentsample.add_data(parsinginforow[1], value)
#
if parsinginforow[0] == 'variable':
currentvariable.add_data(parsinginforow[1], value)
#
except Exception as e:
toolbox_utils.Logging().warning('Failed to parse dataset: %s' % (e.args[0]))
|
the-stack_0_5789 | # Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import contextlib
import lasagne.layers
import lasagne.nonlinearities
from lasagne.nonlinearities import rectify
import theano.tensor as T
@contextlib.contextmanager
def ignore_sigmoids(layer):
if(hasattr(layer,'nonlinearity') and
layer.nonlinearity in [lasagne.nonlinearities.softmax,
lasagne.nonlinearities.sigmoid]):
print("Removing the sigmoids from output for the explanation approach.")
nonlinearity = layer.nonlinearity
layer.nonlinearity = lambda x: x
try:
yield layer
finally:
layer.nonlinearity = nonlinearity
else:
yield layer
def remove_sigmoids(layer):
if(hasattr(layer,'nonlinearity') and
layer.nonlinearity in [lasagne.nonlinearities.softmax,
lasagne.nonlinearities.sigmoid]):
layer.nonlinearity = lambda x: x
class GuidedReLU(lasagne.layers.MergeLayer):
"""
    A layer with two input streams. The first
    stream is passed through wherever the second
    stream is positive; everywhere else the
    output is zero (guided backpropagation
    through a ReLU).
"""
def __init__(self, input_layer, other_layer):
super(GuidedReLU, self).__init__([input_layer, other_layer])
def get_output_shape_for(self, input_shapes):
return input_shapes[0]
def get_output_for(self, inputs, **kwargs):
in1, in2 = inputs
out = T.switch(T.gt(in2, 0.), in1, 0.)
return out
class OppositeGuidedRelu(lasagne.layers.MergeLayer):
"""
    A layer with two input streams. The first
    stream is passed through wherever the second
    stream is not positive; everywhere else the
    output is zero (the complement of GuidedReLU).
"""
def __init__(self, input_layer, other_layer):
super(OppositeGuidedRelu, self).__init__([input_layer, other_layer])
def get_output_shape_for(self, input_shapes):
return input_shapes[0]
def get_output_for(self, inputs, **kwargs):
in1, in2 = inputs
out = T.switch(T.gt(in2, 0.), 0., in1)
return out
def has_ReLU(layer):
relus = [lasagne.nonlinearities.rectify, T.nnet.relu]
return (hasattr(layer, 'nonlinearity') and
layer.nonlinearity in relus)
def get_rectifier_copy_layer(input_layer, rectifier_layer):
if has_ReLU(rectifier_layer):
return GuidedReLU(input_layer, rectifier_layer)
return input_layer
def get_rectifier_opposite_layer(input_layer, rectifier_layer):
if has_ReLU(rectifier_layer):
return OppositeGuidedRelu(input_layer, rectifier_layer)
return None
def get_rectifier_layer(input_layer, rectifier_layer):
if has_ReLU(rectifier_layer):
return lasagne.layers.NonlinearityLayer(input_layer,
nonlinearity=rectify)
return input_layer
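# A minimal wiring sketch for guided backpropagation (layer names and shapes
# below are illustrative assumptions, not taken from this project):
#
#   l_in = lasagne.layers.InputLayer((None, 16))
#   l_fwd = lasagne.layers.DenseLayer(l_in, num_units=8, nonlinearity=rectify)
#   l_back = lasagne.layers.InputLayer((None, 8))
#   # pass the backward signal only where the forward ReLU was active
#   l_guided = get_rectifier_copy_layer(l_back, l_fwd)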
|
the-stack_0_5790 | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from eoxserver.core import Component, implements
from eoxserver.services.ows.interfaces import (
ServiceHandlerInterface, GetServiceHandlerInterface
)
from eoxserver.services.ows.wms.basehandlers import (
WMSGetCapabilitiesHandlerBase
)
class WMS13GetCapabilitiesHandler(WMSGetCapabilitiesHandlerBase, Component):
implements(ServiceHandlerInterface)
implements(GetServiceHandlerInterface)
versions = ("1.3", "1.3.0",)
|
the-stack_0_5792 | # -*- coding: utf-8 -*-
"""
sentence, word, morph, ...
__author__ = 'Jamie ([email protected])'
__copyright__ = 'Copyright (C) 2019-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
import logging
import re
from typing import List, Tuple
MAX_LEN = 64
#########
# types #
#########
class Sentence:
"""
raw sentence
"""
def __init__(self, raw: str):
"""
:param raw: raw sentence
"""
self.words = raw.split()
class PosMorph:
"""
morpheme
"""
def __init__(self, morph: str, pos_tag: str = 'O', beg: int = -1, end: int = -1):
'''
        Initialize each syllable with the 'O' tag and its begin/end positions.
        Args:
            morph: syllable
Example:
morph 저 pos_tag NP beg 0 end 1
morph 는 pos_tag JX beg 1 end 2
morph 일 pos_tag NNG beg 0 end 1
morph 아 pos_tag NNG beg 0 end 1
morph 을 pos_tag JKO beg 2 end 3
morph 사 pos_tag NNG beg 0 end 1
morph 합 pos_tag XSV beg 2 end 3
morph 니 pos_tag EF beg 3 end 4
morph . pos_tag SF beg 5 end 6
'''
self.morph = morph
self.pos_tag = pos_tag
self.beg = beg
self.end = end
def __str__(self):
return '{}/{}'.format(self.morph, self.pos_tag)
def __len__(self):
return self.end - self.beg
class PosWord:
"""
part-of-speech tagged word
"""
def __init__(self, raw: str):
"""
Args:
raw: raw word
"""
self.raw = raw
self.tags = [] # output tags for each character
        self.res_chrs = raw    # concatenation of the syllables of the restored (original-form) morphemes
def __str__(self):
return '{}\t{}'.format(self.raw, ' '.join([str(x) for x in self.pos_tagged_morphs]))
def for_pretrain(self) -> str:
"""
        Output for pre-training.
        Returns:
            sentence formatted for pre-training
"""
morph_strs = []
morph = ''
prev_tag = ''
for char, iob_tag in zip(self.raw, self.tags):
try:
iob_tag, _ = iob_tag.split(':', 1)
except ValueError:
pass
try:
iob, tag = iob_tag.split('-')
except ValueError as val_err:
logging.error('raw: %s', self.raw)
logging.error('tags: %s', self.tags)
logging.error('iob_tag: %s', iob_tag)
raise val_err
if iob == 'B':
if morph:
morph_strs.append('%s/%s' % (re.sub(r'\d', '0', morph), prev_tag))
morph = char
prev_tag = tag
elif iob == 'I':
if prev_tag == tag:
morph += char
else:
if morph:
morph_strs.append('%s/%s' % (re.sub(r'\d', '0', morph), prev_tag))
morph = char
prev_tag = tag
if morph:
morph_strs.append('%s/%s' % (re.sub(r'\d', '0', morph), prev_tag))
return ' '.join(morph_strs)
def __eq__(self, other: 'PosWord'):
"""
        Words are considered equal when their morphological analysis results match. (Used by the evaluation program to judge word-level agreement.)
Args:
other: other object
"""
return self.res_chrs == other.res_chrs and self.res_tags == other.res_tags
def set_pos_result(self, tags: List[str], restore_dic: dict = None):
"""
        Set the information of an externally created PosWord object on this instance.
        Args:
            tags: morpheme tags read from a file (per syllable)
            restore_dic: original-form restoration dictionary
"""
if not restore_dic:
tags = [x.split(':', 1)[0] for x in tags]
self.tags = tags
# print(self.raw)
# print(len(self.raw))
# print(self.tags)
# print(len(self.tags))
        # assert len(self.raw) == len(self.tags)    # the number of syllables must equal the number of tags
self.pos_tagged_morphs = self._make_pos_morphs(restore_dic)
def _make_pos_morphs(self, restore_dic: dict = None):
"""
        Find the positions in the morpheme tag list where B/I tags are merged into morphemes.
        Args:
            restore_dic: original-form restoration dictionary
        Returns:
            pos_morphs: list of morpheme/tag pairs, e.g. 기억/NNG
"""
if not self.tags:
return []
self._restore(restore_dic)
pos_morphs = []
for beg, (lex, iob_tag) in enumerate(zip(self.res_chrs, self.res_tags)):
try:
iob, pos_tag = iob_tag.rsplit('-', 1)
except ValueError as val_err:
logging.error('invalid char/tag: %s/%s in [%s] %s', lex, iob_tag, self.res_chrs,
self.res_tags)
raise val_err
if iob == 'B' or not pos_morphs or pos_morphs[-1].pos_tag != pos_tag:
pos_morphs.append(PosMorph(lex, pos_tag, beg, beg+1))
elif iob == 'I':
if pos_morphs[-1].pos_tag == pos_tag:
pos_morphs[-1].morph += lex
pos_morphs[-1].end += len(lex)
else:
logging.debug('tag is different between B and I: %s vs %s',
pos_morphs[-1].pos_tag, pos_tag)
pos_morphs.append(PosMorph(lex, pos_tag, beg, beg+1))
else:
raise ValueError('invalid IOB tag: {}/{} in [{}] {}'.format \
(lex, iob_tag, self.res_chrs, self.res_tags))
return pos_morphs
def _restore(self, restore_dic: dict):
"""
        Restore the original form of the morphemes using the restoration dictionary.
        Args:
            restore_dic: original-form restoration dictionary
"""
if not restore_dic:
self.res_chrs = self.raw
self.res_tags = self.tags
return
res_chrs = []
self.res_tags = []
for char, tag in zip(self.raw, self.tags):
if ':' in tag:
key = '{}/{}'.format(char, tag)
if key in restore_dic:
for char_tag in restore_dic[key].split():
res_chr, res_tag = char_tag.rsplit('/', 1)
res_chrs.append(res_chr)
self.res_tags.append(res_tag)
continue
else:
logging.debug('mapping not found: %s/%s', char, tag)
tag, _ = tag.split(':', 1)
res_chrs.append(char)
self.res_tags.append(tag)
self.res_chrs = ''.join(res_chrs)
class PosSentence(Sentence):
"""
part-of-speech tagged sentence
"""
def __init__(self, raw: str):
"""
Args:
raw: raw sentence
"""
super().__init__(raw)
self.pos_tagged_words = [] # list of PosWord
def __str__(self):
return '\n'.join([str(pos_word) for pos_word in self.pos_tagged_words])
def get_beg_end_list(self) -> Tuple[List[int], List[int]]:
"""
        Build a list of the start positions and a list of the end positions of all morphemes.
Returns:
list of begin positions
list of end positions
"""
begs = []
ends = []
for word in self.pos_tagged_words:
for morph in word.pos_tagged_morphs:
begs.append(morph.beg)
ends.append(morph.end)
return begs, ends
def set_raw_by_words(self):
"""
        Fill the 'words' member of the Sentence object using the raw text of each PosWord object.
"""
self.words = [pos_word.raw for pos_word in self.pos_tagged_words]
# Example:
# words: ['저는', '일요일', '아침을', '사랑합니다.']
def init_pos_tags(self):
"""
        Create PosWord objects and set their tags to 'O'.
"""
if self.pos_tagged_words:
raise RuntimeError('PoS tagged words are already initialized')
for word in self.words:
            # pass each word to the PosWord class
self.pos_tagged_words.append(PosWord(word))
print('pos_tagged_words', self.pos_tagged_words)
def set_pos_result(self, tags: List[str], restore_dic: dict = None):
"""
        Set the morpheme tag output labels for the whole sentence and restore the morphemes.
        Args:
            tags: tag output labels for the whole sentence
            restore_dic: original-form restoration dictionary
"""
total_char_num = 0
for pos_word in self.pos_tagged_words:
pos_word.set_pos_result(tags[total_char_num:total_char_num + len(pos_word.raw)],
restore_dic)
total_char_num += len(pos_word.raw)
        # adjust the length by the number of PAD characters
total_char_num = len(tags)
assert total_char_num == len(tags)
def get_sequence(self, morph: bool = True, tag: bool = True, simple: bool = False) -> List[str]:
"""
        Build and return the morpheme strings, optionally including their tags.
        Args:
            morph: include the morpheme
            tag: include the tag
            simple: output only the first character of the tag
        Returns:
            list of strings
"""
sequence = []
for word in self.pos_tagged_words:
for pos_morph in word.pos_tagged_morphs:
morphs = []
if morph:
morphs.append(pos_morph.morph)
if tag:
morphs.append(pos_morph.pos_tag if not simple else pos_morph.pos_tag[0])
sequence.append('/'.join(morphs))
return sequence
def get_all_morphs(self) -> List[str]:
"""
        Return the list of all PosMorph objects that make up the sentence.
        Returns:
            list of all morphemes
"""
return [morph for word in self.pos_tagged_words for morph in word.pos_tagged_morphs]
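# A rough end-to-end sketch, assuming per-syllable IOB tags come from a tagger
# elsewhere (per_syllable_tags below is hypothetical and must hold one tag per
# syllable of the sentence):
#
#   sent = PosSentence('저는 일요일 아침을 사랑합니다.')
#   sent.init_pos_tags()
#   sent.set_pos_result(per_syllable_tags, restore_dic=None)
#   print(' '.join(sent.get_sequence(morph=True, tag=True)))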
|
the-stack_0_5793 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Phase estimation for the spectrum of a Hamiltonian"""
from typing import Optional, Union
from qiskit import QuantumCircuit
from qiskit.utils import QuantumInstance
from qiskit.opflow import (EvolutionBase, PauliTrotterEvolution, OperatorBase,
SummedOp, PauliOp, MatrixOp, PauliSumOp, StateFn)
from qiskit.providers import BaseBackend
from .phase_estimation import PhaseEstimation
from .hamiltonian_phase_estimation_result import HamiltonianPhaseEstimationResult
from .phase_estimation_scale import PhaseEstimationScale
class HamiltonianPhaseEstimation:
r"""Run the Quantum Phase Estimation algorithm to find the eigenvalues of a Hermitian operator.
This class is nearly the same as :class:`~qiskit.algorithms.PhaseEstimation`, differing only
in that the input in that class is a unitary operator, whereas here the input is a Hermitian
operator from which a unitary will be obtained by scaling and exponentiating. The scaling is
performed in order to prevent the phases from wrapping around :math:`2\pi`.
The problem of estimating eigenvalues :math:`\lambda_j` of the Hermitian operator
:math:`H` is solved by running a circuit representing
.. math::
\exp(i b H) |\psi\rangle = \sum_j \exp(i b \lambda_j) c_j |\lambda_j\rangle,
where the input state is
.. math::
|\psi\rangle = \sum_j c_j |\lambda_j\rangle,
and :math:`\lambda_j` are the eigenvalues of :math:`H`.
Here, :math:`b` is a scaling factor sufficiently large to map positive :math:`\lambda` to
:math:`[0,\pi)` and negative :math:`\lambda` to :math:`[\pi,2\pi)`. Each time the circuit is
    run, one measures a phase corresponding to :math:`\lambda_j` with probability :math:`|c_j|^2`.
If :math:`H` is a Pauli sum, the bound :math:`b` is computed from the sum of the absolute
values of the coefficients of the terms. There is no way to reliably recover eigenvalues
from phases very near the endpoints of these intervals. Because of this you should be aware
that for degenerate cases, such as :math:`H=Z`, the eigenvalues :math:`\pm 1` will be
mapped to the same phase, :math:`\pi`, and so cannot be distinguished. In this case, you need
to specify a larger bound as an argument to the method ``estimate``.
This class uses and works together with :class:`~qiskit.algorithms.PhaseEstimationScale` to
manage scaling the Hamiltonian and the phases that are obtained by the QPE algorithm. This
includes setting, or computing, a bound on the eigenvalues of the operator, using this
bound to obtain a scale factor, scaling the operator, and shifting and scaling the measured
phases to recover the eigenvalues.
    Note that, although we speak of "evolving" the state according to the Hamiltonian, in the
present algorithm, we are not actually considering time evolution. Rather, the role of time is
played by the scaling factor, which is chosen to best extract the eigenvalues of the
Hamiltonian.
A few of the ideas in the algorithm may be found in Ref. [1].
**Reference:**
[1]: Quantum phase estimation of multiple eigenvalues for small-scale (noisy) experiments
T.E. O'Brien, B. Tarasinski, B.M. Terhal
`arXiv:1809.09697 <https://arxiv.org/abs/1809.09697>`_
"""
def __init__(self,
num_evaluation_qubits: int,
quantum_instance: Optional[Union[QuantumInstance, BaseBackend]] = None) -> None:
"""
Args:
num_evaluation_qubits: The number of qubits used in estimating the phase. The phase will
be estimated as a binary string with this many bits.
quantum_instance: The quantum instance on which the circuit will be run.
"""
self._phase_estimation = PhaseEstimation(
num_evaluation_qubits=num_evaluation_qubits,
quantum_instance=quantum_instance)
def _get_scale(self, hamiltonian, bound=None) -> None:
if bound is None:
return PhaseEstimationScale.from_pauli_sum(hamiltonian)
return PhaseEstimationScale(bound)
def _get_unitary(self, hamiltonian, pe_scale, evolution) -> QuantumCircuit:
"""Evolve the Hamiltonian to obtain a unitary.
Apply the scaling to the Hamiltonian that has been computed from an eigenvalue bound
and compute the unitary by applying the evolution object.
"""
# scale so that phase does not wrap.
scaled_hamiltonian = -pe_scale.scale * hamiltonian
unitary = evolution.convert(scaled_hamiltonian.exp_i())
if not isinstance(unitary, QuantumCircuit):
unitary_circuit = unitary.to_circuit()
else:
unitary_circuit = unitary
# Decomposing twice allows some 1Q Hamiltonians to give correct results
# when using MatrixEvolution(), that otherwise would give incorrect results.
# It does not break any others that we tested.
return unitary_circuit.decompose().decompose()
# pylint: disable=arguments-differ
def estimate(self, hamiltonian: OperatorBase,
state_preparation: Optional[StateFn] = None,
evolution: Optional[EvolutionBase] = None,
bound: Optional[float] = None) -> HamiltonianPhaseEstimationResult:
"""Run the Hamiltonian phase estimation algorithm.
Args:
hamiltonian: A Hermitian operator.
state_preparation: The ``StateFn`` to be prepared, whose eigenphase will be
measured. If this parameter is omitted, no preparation circuit will be run and
input state will be the all-zero state in the computational basis.
evolution: An evolution converter that generates a unitary from ``hamiltonian``. If
``None``, then the default ``PauliTrotterEvolution`` is used.
bound: An upper bound on the absolute value of the eigenvalues of
``hamiltonian``. If omitted, then ``hamiltonian`` must be a Pauli sum, or a
``PauliOp``, in which case a bound will be computed. If ``hamiltonian``
is a ``MatrixOp``, then ``bound`` may not be ``None``. The tighter the bound,
the higher the resolution of computed phases.
Returns:
HamiltonianPhaseEstimationResult instance containing the result of the estimation
and diagnostic information.
Raises:
ValueError: If ``bound`` is ``None`` and ``hamiltonian`` is not a Pauli sum, i.e. a
``PauliSumOp`` or a ``SummedOp`` whose terms are of type ``PauliOp``.
TypeError: If ``evolution`` is not of type ``EvolutionBase``.
"""
if evolution is None:
evolution = PauliTrotterEvolution()
elif not isinstance(evolution, EvolutionBase):
raise TypeError(f'Expecting type EvolutionBase, got {type(evolution)}')
if isinstance(hamiltonian, PauliSumOp):
hamiltonian = hamiltonian.to_pauli_op()
elif isinstance(hamiltonian, PauliOp):
hamiltonian = SummedOp([hamiltonian])
if isinstance(hamiltonian, SummedOp):
            # remove identity terms
# The term propto the identity is removed from hamiltonian.
# This is done for three reasons:
# 1. Work around an unknown bug that otherwise causes the energies to be wrong in some
# cases.
# 2. Allow working with a simpler Hamiltonian, one with fewer terms.
# 3. Tighten the bound on the eigenvalues so that the spectrum is better resolved, i.e.
# occupies more of the range of values representable by the qubit register.
# The coefficient of this term will be added to the eigenvalues.
id_coefficient, hamiltonian_no_id = _remove_identity(hamiltonian)
# get the rescaling object
pe_scale = self._get_scale(hamiltonian_no_id, bound)
# get the unitary
unitary = self._get_unitary(hamiltonian_no_id, pe_scale, evolution)
elif isinstance(hamiltonian, MatrixOp):
if bound is None:
raise ValueError('bound must be specified if Hermitian operator is MatrixOp')
# Do not subtract an identity term from the matrix, so do not compensate.
id_coefficient = 0.0
pe_scale = self._get_scale(hamiltonian, bound)
unitary = self._get_unitary(hamiltonian, pe_scale, evolution)
else:
raise TypeError(f'Hermitian operator of type {type(hamiltonian)} not supported.')
if state_preparation is not None:
state_preparation = state_preparation.to_circuit_op().to_circuit()
# run phase estimation
phase_estimation_result = self._phase_estimation.estimate(
unitary=unitary, state_preparation=state_preparation)
return HamiltonianPhaseEstimationResult(
phase_estimation_result=phase_estimation_result,
id_coefficient=id_coefficient,
phase_estimation_scale=pe_scale)
def _remove_identity(pauli_sum):
"""Remove any identity operators from `pauli_sum`. Return
the sum of the coefficients of the identities and the new operator.
"""
idcoeff = 0.0
ops = []
for op in pauli_sum:
p = op.primitive
if p.x.any() or p.z.any():
ops.append(op)
else:
idcoeff += op.coeff
return idcoeff, SummedOp(ops)
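# A minimal usage sketch (the operator, quantum instance and cutoff below are
# illustrative assumptions, not part of this module):
#
#   from qiskit.opflow import X, Z
#   hamiltonian = 0.5 * X + Z  # a small Pauli sum
#   hpe = HamiltonianPhaseEstimation(num_evaluation_qubits=6,
#                                    quantum_instance=some_quantum_instance)
#   result = hpe.estimate(hamiltonian)
#   print(result.filter_phases(cutoff=0.1))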
|
the-stack_0_5796 | """
Galaxy Metadata
"""
import copy
import json
import logging
import os
import shutil
import sys
import tempfile
import weakref
from collections import OrderedDict
from collections.abc import Mapping
from os.path import abspath
from typing import Any, Iterator, Optional, TYPE_CHECKING, Union
from sqlalchemy.orm import object_session
from sqlalchemy.orm.attributes import flag_modified
import galaxy.model
from galaxy.model.scoped_session import galaxy_scoped_session
from galaxy.security.object_wrapper import sanitize_lists_to_string
from galaxy.util import (
form_builder,
listify,
string_as_bool,
stringify_dictionary_keys,
unicodify,
)
from galaxy.util.json import safe_dumps
if TYPE_CHECKING:
from galaxy.model import DatasetInstance
from galaxy.model.none_like import NoneDataset
from galaxy.model.store import SessionlessContext
log = logging.getLogger(__name__)
STATEMENTS = "__galaxy_statements__" # this is the name of the property in a Datatype class where new metadata spec element Statements are stored
class Statement:
"""
This class inserts its target into a list in the surrounding
class. the data.Data class has a metaclass which executes these
statements. This is how we shove the metadata element spec into
the class.
"""
def __init__(self, target):
self.target = target
def __call__(self, *args, **kwargs):
# get the locals dictionary of the frame object one down in the call stack (i.e. the Datatype class calling MetadataElement)
class_locals = sys._getframe(1).f_locals
# get and set '__galaxy_statements__' to an empty list if not in locals dict
statements = class_locals.setdefault(STATEMENTS, [])
# add Statement containing info to populate a MetadataElementSpec
statements.append((self, args, kwargs))
@classmethod
def process(cls, element):
for statement, args, kwargs in getattr(element, STATEMENTS, []):
statement.target(element, *args, **kwargs) # statement.target is MetadataElementSpec, element is a Datatype class
class MetadataCollection(Mapping):
"""
MetadataCollection is not a collection at all, but rather a proxy
to the real metadata which is stored as a Dictionary. This class
handles processing the metadata elements when they are set and
retrieved, returning default values in cases when metadata is not set.
"""
def __init__(self, parent: Union["DatasetInstance", "NoneDataset"], session: Optional[Union[galaxy_scoped_session, 'SessionlessContext']] = None) -> None:
self.parent = parent
self._session = session
# initialize dict if needed
if self.parent._metadata is None:
self.parent._metadata = {}
def get_parent(self):
if "_parent" in self.__dict__:
return self.__dict__["_parent"]()
return None
def set_parent(self, parent):
# use weakref to prevent a circular reference interfering with garbage
# collection: hda/lda (parent) <--> MetadataCollection (self) ; needs to be
# hashable, so cannot use proxy.
self.__dict__["_parent"] = weakref.ref(parent)
parent = property(get_parent, set_parent)
@property
def spec(self):
return self.parent.datatype.metadata_spec
def _object_session(self, item):
return self._session if self._session else object_session(item)
def __iter__(self) -> Iterator[Any]:
yield from self.spec.keys()
def __getitem__(self, key):
try:
self.__getattribute__(key)
except AttributeError:
try:
return self.__getattr__(key)
except Exception:
raise KeyError
# `key` is an attribute of this instance, not some metadata: raise
# KeyError to prevent e.g. `'items' in dataset.metadata` from returning
# True
# Not doing this would also break Cheetah's NameMapper._valueForName()
# since dataset.metadata['items'] would be None
raise KeyError
def __len__(self):
return len(self.spec)
def __str__(self):
return dict(self.items()).__str__()
def __bool__(self):
return bool(self.parent._metadata)
__nonzero__ = __bool__
def __getattr__(self, name):
if name in self.spec:
if name in self.parent._metadata:
return self.spec[name].wrap(self.parent._metadata[name], self._object_session(self.parent))
return self.spec[name].wrap(self.spec[name].default, self._object_session(self.parent))
if name in self.parent._metadata:
return self.parent._metadata[name]
# Instead of raising an AttributeError for non-existing metadata, we return None
return None
def __setattr__(self, name, value):
if name == "parent":
return self.set_parent(value)
elif name == '_session':
super().__setattr__(name, value)
else:
if name in self.spec:
self.parent._metadata[name] = self.spec[name].unwrap(value)
else:
self.parent._metadata[name] = value
flag_modified(self.parent, '_metadata')
def remove_key(self, name):
if name in self.parent._metadata:
del self.parent._metadata[name]
else:
log.info(f"Attempted to delete invalid key '{name}' from MetadataCollection")
def element_is_set(self, name) -> bool:
"""
        check if the metadata with the given name is set, i.e.
        - if such a metadata element actually exists and
        - if its value differs from no_value
        :param name: the name of the metadata element
        :returns:   True if the value differs from the no_value
                    False if it is equal or if no metadata with the name is specified
"""
try:
meta_val = self[name]
except KeyError:
log.debug(f"no metadata with name {name} found")
return False
meta_spec = self.parent.metadata.spec[name]
return meta_val != meta_spec.no_value
def get_metadata_parameter(self, name, **kwd):
if name in self.spec:
field = self.spec[name].param.get_field(getattr(self, name), self, None, **kwd)
field.value = getattr(self, name)
return field
def make_dict_copy(self, to_copy):
"""Makes a deep copy of input iterable to_copy according to self.spec"""
rval = {}
for key, value in to_copy.items():
if key in self.spec:
rval[key] = self.spec[key].param.make_copy(value, target_context=self, source_context=to_copy)
return rval
@property
def requires_dataset_id(self):
for key in self.spec:
if isinstance(self.spec[key].param, FileParameter):
return True
return False
def from_JSON_dict(self, filename=None, path_rewriter=None, json_dict=None):
dataset = self.parent
if filename is not None:
log.debug(f'loading metadata from file for: {dataset.__class__.__name__} {dataset.id}')
with open(filename) as fh:
JSONified_dict = json.load(fh)
elif json_dict is not None:
log.debug(f'loading metadata from dict for: {dataset.__class__.__name__} {dataset.id}')
if isinstance(json_dict, str):
JSONified_dict = json.loads(json_dict)
elif isinstance(json_dict, dict):
JSONified_dict = json_dict
else:
raise ValueError(f"json_dict must be either a dictionary or a string, got {type(json_dict)}.")
else:
raise ValueError("You must provide either a filename or a json_dict")
# We build a dictionary for metadata name / value pairs
# because when we copy MetadataTempFile objects we flush the datasets'
# session, but only include the newly created MetadataFile object.
# If we were to set the metadata elements in the first for loop we'd
# lose all previously set metadata elements
metadata_name_value = {}
for name, spec in self.spec.items():
if name in JSONified_dict:
from_ext_kwds = {}
external_value = JSONified_dict[name]
param = spec.param
if isinstance(param, FileParameter):
from_ext_kwds['path_rewriter'] = path_rewriter
value = param.from_external_value(external_value, dataset, **from_ext_kwds)
metadata_name_value[name] = value
elif name in dataset._metadata:
# if the metadata value is not found in our externally set metadata but it has a value in the 'old'
# metadata associated with our dataset, we'll delete it from our dataset's metadata dict
del dataset._metadata[name]
for name, value in metadata_name_value.items():
dataset._metadata[name] = value
if '__extension__' in JSONified_dict:
dataset.extension = JSONified_dict['__extension__']
if '__validated_state__' in JSONified_dict:
dataset.validated_state = JSONified_dict['__validated_state__']
if '__validated_state_message__' in JSONified_dict:
dataset.validated_state_message = JSONified_dict['__validated_state_message__']
flag_modified(dataset, '_metadata')
def to_JSON_dict(self, filename=None):
meta_dict = {}
dataset_meta_dict = self.parent._metadata
for name, spec in self.spec.items():
if name in dataset_meta_dict:
meta_dict[name] = spec.param.to_external_value(dataset_meta_dict[name])
if '__extension__' in dataset_meta_dict:
meta_dict['__extension__'] = dataset_meta_dict['__extension__']
if '__validated_state__' in dataset_meta_dict:
meta_dict['__validated_state__'] = dataset_meta_dict['__validated_state__']
if '__validated_state_message__' in dataset_meta_dict:
meta_dict['__validated_state_message__'] = dataset_meta_dict['__validated_state_message__']
try:
encoded_meta_dict = galaxy.model.custom_types.json_encoder.encode(meta_dict)
except Exception as e:
raise Exception(f"Failed encoding metadata dictionary: {meta_dict}") from e
if filename is None:
return encoded_meta_dict
with open(filename, 'wt+') as fh:
fh.write(encoded_meta_dict)
def __getstate__(self):
# cannot pickle a weakref item (self._parent), when
# data._metadata_collection is None, it will be recreated on demand
return None
class MetadataSpecCollection(OrderedDict):
"""
A simple extension of OrderedDict which allows cleaner access to items
and allows the values to be iterated over directly as if it were a
list. append() is also implemented for simplicity and does not
"append".
"""
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
def append(self, item):
self[item.name] = item
def __getattr__(self, name):
if name not in self:
raise AttributeError
return self.get(name)
def __repr__(self):
# force elements to draw with __str__ for sphinx-apidoc
return ', '.join(item.__str__() for item in self.values())
class MetadataParameter:
def __init__(self, spec):
self.spec = spec
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=value)
def to_string(self, value):
return str(value)
def to_safe_string(self, value):
return sanitize_lists_to_string(self.to_string(value))
def make_copy(self, value, target_context: MetadataCollection, source_context):
return copy.deepcopy(value)
@classmethod
def marshal(cls, value):
"""
This method should/can be overridden to convert the incoming
value to whatever type it is supposed to be.
"""
return value
def validate(self, value):
"""
Throw an exception if the value is invalid.
"""
def unwrap(self, form_value):
"""
Turns a value into its storable form.
"""
value = self.marshal(form_value)
self.validate(value)
return value
def wrap(self, value, session):
"""
Turns a value into its usable form.
"""
return value
def from_external_value(self, value, parent):
"""
Turns a value read from an external dict into its value to be pushed directly into the metadata dict.
"""
return value
def to_external_value(self, value):
"""
Turns a value read from a metadata into its value to be pushed directly into the external dict.
"""
return value
class MetadataElementSpec:
"""
Defines a metadata element and adds it to the metadata_spec (which
is a MetadataSpecCollection) of datatype.
"""
def __init__(self, datatype, name=None, desc=None,
param=MetadataParameter, default=None, no_value=None,
visible=True, set_in_upload=False, **kwargs):
self.name = name
self.desc = desc or name
self.default = default
self.no_value = no_value
self.visible = visible
self.set_in_upload = set_in_upload
# Catch-all, allows for extra attributes to be set
self.__dict__.update(kwargs)
# set up param last, as it uses values set above
self.param = param(self)
# add spec element to the spec
datatype.metadata_spec.append(self)
def get(self, name, default=None):
return self.__dict__.get(name, default)
def wrap(self, value, session):
"""
Turns a stored value into its usable form.
"""
return self.param.wrap(value, session)
def unwrap(self, value):
"""
Turns an incoming value into its storable form.
"""
return self.param.unwrap(value)
def __str__(self):
# TODO??: assuming param is the class of this MetadataElementSpec - add the plain class name for that
spec_dict = dict(param_class=self.param.__class__.__name__)
spec_dict.update(self.__dict__)
return ("{name} ({param_class}): {desc}, defaults to '{default}'".format(**spec_dict))
# create a statement class that, when called,
# will add a new MetadataElementSpec to a class's metadata_spec
MetadataElement = Statement(MetadataElementSpec)
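# Illustrative (hypothetical) use of the statement inside a datatype class; the
# class and element below are a simplified sketch, not a real Galaxy datatype:
#
#   class ExampleTabular(data.Data):
#       MetadataElement(name="columns", default=0, desc="Number of columns",
#                       visible=True, no_value=0)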
"""
MetadataParameter sub-classes.
"""
class SelectParameter(MetadataParameter):
def __init__(self, spec):
MetadataParameter.__init__(self, spec)
self.values = self.spec.get("values")
self.multiple = string_as_bool(self.spec.get("multiple"))
def to_string(self, value):
if value in [None, []]:
return str(self.spec.no_value)
if not isinstance(value, list):
value = [value]
return ",".join(map(str, value))
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
field = form_builder.SelectField(self.spec.name, multiple=self.multiple, display=self.spec.get("display"))
if self.values:
value_list = self.values
elif values:
value_list = values
elif value:
value_list = [(v, v) for v in listify(value)]
else:
value_list = []
for val, label in value_list:
try:
if (self.multiple and val in value) or (not self.multiple and val == value):
field.add_option(label, val, selected=True)
else:
field.add_option(label, val, selected=False)
except TypeError:
field.add_option(val, label, selected=False)
return field
def wrap(self, value, session):
# do we really need this (wasteful)? - yes because we are not sure that
# all existing selects have been stored previously as lists. Also this
# will handle the case where defaults/no_values are specified and are
# single non-list values.
value = self.marshal(value)
if self.multiple:
return value
elif value:
return value[0] # single select, only return the first value
return None
@classmethod
def marshal(cls, value):
# Store select as list, even if single item
if value is None:
return []
if not isinstance(value, list):
return [value]
return value
class DBKeyParameter(SelectParameter):
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
try:
values = kwd['trans'].app.genome_builds.get_genome_build_names(kwd['trans'])
except KeyError:
pass
return super().get_field(value, context, other_values, values, **kwd)
class RangeParameter(SelectParameter):
def __init__(self, spec):
SelectParameter.__init__(self, spec)
# The spec must be set with min and max values
self.min = spec.get("min") or 1
self.max = spec.get("max") or 1
self.step = self.spec.get("step") or 1
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
if values is None:
values = list(zip(range(self.min, self.max, self.step), range(self.min, self.max, self.step)))
return SelectParameter.get_field(self, value=value, context=context, other_values=other_values, values=values, **kwd)
@classmethod
def marshal(cls, value):
value = SelectParameter.marshal(value)
values = [int(x) for x in value]
return values
class ColumnParameter(RangeParameter):
def get_field(self, value=None, context=None, other_values=None, values=None, **kwd):
context = context or {}
other_values = other_values or {}
if values is None and context:
column_range = range(1, (context.columns or 0) + 1, 1)
values = list(zip(column_range, column_range))
return RangeParameter.get_field(self, value=value, context=context, other_values=other_values, values=values, **kwd)
class ColumnTypesParameter(MetadataParameter):
def to_string(self, value):
return ",".join(map(str, value))
class ListParameter(MetadataParameter):
def to_string(self, value):
return ",".join(str(x) for x in value)
class DictParameter(MetadataParameter):
def to_string(self, value):
return json.dumps(value)
def to_safe_string(self, value):
# We do not sanitize json dicts
return safe_dumps(value)
class PythonObjectParameter(MetadataParameter):
def to_string(self, value):
if not value:
return self.spec._to_string(self.spec.no_value)
return self.spec._to_string(value)
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=self._to_string(value))
@classmethod
def marshal(cls, value):
return value
class FileParameter(MetadataParameter):
def to_string(self, value):
if not value:
return str(self.spec.no_value)
return value.file_name
def to_safe_string(self, value):
# We do not sanitize file names
return self.to_string(value)
def get_field(self, value=None, context=None, other_values=None, **kwd):
context = context or {}
other_values = other_values or {}
return form_builder.TextField(self.spec.name, value=str(value.id))
def wrap(self, value, session):
if value is None:
return None
if isinstance(value, galaxy.model.MetadataFile) or isinstance(value, MetadataTempFile):
return value
if isinstance(value, int):
return session.query(galaxy.model.MetadataFile).get(value)
else:
return session.query(galaxy.model.MetadataFile).filter_by(uuid=value).one()
def make_copy(self, value, target_context: MetadataCollection, source_context):
session = target_context._object_session(target_context.parent)
value = self.wrap(value, session=session)
target_dataset = target_context.parent.dataset
if value and target_dataset.object_store.exists(target_dataset):
# Only copy MetadataFile if the target dataset has been created in an object store.
# All current datatypes re-generate MetadataFile objects when setting metadata,
# so this would ultimately get overwritten anyway.
new_value = galaxy.model.MetadataFile(dataset=target_context.parent, name=self.spec.name)
session.add(new_value)
try:
shutil.copy(value.file_name, new_value.file_name)
except AssertionError:
                session.flush()
shutil.copy(value.file_name, new_value.file_name)
return self.unwrap(new_value)
return None
@classmethod
def marshal(cls, value):
if isinstance(value, galaxy.model.MetadataFile):
# We want to push value.id to the database, but need to skip this when no session is available,
# as in extended_metadata mode, so there we just accept MetadataFile.
# We will only serialize MetadataFile in this mode and not push to the database, so this is OK.
value = value.id or value
if not isinstance(value, int) and object_session(value):
value = str(value.uuid)
return value
def from_external_value(self, value, parent, path_rewriter=None):
"""
Turns a value read from a external dict into its value to be pushed directly into the metadata dict.
"""
if MetadataTempFile.is_JSONified_value(value):
value = MetadataTempFile.from_JSON(value)
if isinstance(value, MetadataTempFile):
mf = parent.metadata.get(self.spec.name, None)
if mf is None:
mf = self.new_file(dataset=parent, **value.kwds)
# Ensure the metadata file gets updated with content
file_name = value.file_name
if path_rewriter:
# Job may have run with a different (non-local) tmp/working
# directory. Correct.
file_name = path_rewriter(file_name)
parent.dataset.object_store.update_from_file(mf,
file_name=file_name,
extra_dir='_metadata_files',
extra_dir_at_root=True,
alt_name=os.path.basename(mf.file_name))
os.unlink(file_name)
value = mf.id
return value
def to_external_value(self, value):
"""
Turns a value read from a metadata into its value to be pushed directly into the external dict.
"""
if isinstance(value, galaxy.model.MetadataFile):
value = value.id
elif isinstance(value, MetadataTempFile):
value = MetadataTempFile.to_JSON(value)
return value
def new_file(self, dataset=None, **kwds):
# If there is a place to store the file (i.e. an object_store has been bound to
# Dataset) then use a MetadataFile and assume it is accessible. Otherwise use
# a MetadataTempFile.
if getattr(dataset.dataset, "object_store", False):
mf = galaxy.model.MetadataFile(name=self.spec.name, dataset=dataset, **kwds)
sa_session = object_session(dataset)
if sa_session:
sa_session.add(mf)
sa_session.flush() # flush to assign id
return mf
else:
            # we need to make a tmp file that is accessible to the head node,
# we will be copying its contents into the MetadataFile objects filename after restoring from JSON
# we do not include 'dataset' in the kwds passed, as from_JSON_value() will handle this for us
return MetadataTempFile(**kwds)
# This class is used when a database file connection is not available
class MetadataTempFile:
tmp_dir = 'database/tmp' # this should be overwritten as necessary in calling scripts
def __init__(self, **kwds):
self.kwds = kwds
self._filename = None
@property
def file_name(self):
if self._filename is None:
            # we need to create a tmp file, accessible across all nodes/heads, save the name, and return it
self._filename = abspath(tempfile.NamedTemporaryFile(dir=self.tmp_dir, prefix="metadata_temp_file_").name)
open(self._filename, 'wb+') # create an empty file, so it can't be reused using tempfile
return self._filename
def to_JSON(self):
return {'__class__': self.__class__.__name__,
'filename': self.file_name,
'kwds': self.kwds}
@classmethod
def from_JSON(cls, json_dict):
# need to ensure our keywords are not unicode
rval = cls(**stringify_dictionary_keys(json_dict['kwds']))
rval._filename = json_dict['filename']
return rval
@classmethod
def is_JSONified_value(cls, value):
return (isinstance(value, dict) and value.get('__class__', None) == cls.__name__)
@classmethod
def cleanup_from_JSON_dict_filename(cls, filename):
try:
with open(filename) as fh:
for value in json.load(fh).values():
if cls.is_JSONified_value(value):
value = cls.from_JSON(value)
if isinstance(value, cls) and os.path.exists(value.file_name):
log.debug('Cleaning up abandoned MetadataTempFile file: %s', value.file_name)
os.unlink(value.file_name)
except Exception as e:
log.debug('Failed to cleanup MetadataTempFile temp files from %s: %s', filename, unicodify(e))
__all__ = (
"Statement",
"MetadataElement",
"MetadataCollection",
"MetadataSpecCollection",
"MetadataParameter",
"MetadataElementSpec",
"SelectParameter",
"DBKeyParameter",
"RangeParameter",
"ColumnParameter",
"ColumnTypesParameter",
"ListParameter",
"DictParameter",
"PythonObjectParameter",
"FileParameter",
"MetadataTempFile",
)
|
the-stack_0_5798 | #!/usr/bin/python
from ansible.module_utils.opsmanager import ansible_setup
if __name__ == '__main__':
module, opsmanager = ansible_setup()
group = opsmanager.get_group_by_name(module.params['cluster'])
response = opsmanager.delete_maintenance(group)
module.exit_json(changed=False, meta=response)
|
the-stack_0_5801 | # Test script for opening and closing gate
def main(robot):
# Setup
timestep = int(robot.getBasicTimeStep())
# Main loop, perform simulation steps until Webots is stopping the controller
while robot.step(timestep) != -1:
if not(gate_open := robot.gate.open()):
print(gate_open)
|
the-stack_0_5804 | import re
import numpy as np
import matplotlib.pyplot as plt
def _plot_primitives_boxplots(log_file):
with open(log_file, 'r') as file:
lines = file.readlines()
# read Primitives
p = [re.findall('P=\d+', line) for line in lines]
p = [v[0] for v in p if v]
p = np.array([float(v.split('=')[1]) for v in p])
p = [p[:int(len(p) / 2)], p[int(len(p) / 2):]]
# read metrics
metrics_cls, metrics_att = [], []
for re_str in ['AP=\d+\.\d+', 'FScore=\d+\.\d+']:
m = [re.findall(re_str, line) for line in lines]
m = [v[0] for v in m if v]
m = np.array([float(v.split('=')[1]) for v in m])
m = [m[:int(len(m) / 2)], m[int(len(m) / 2):]]
metrics_cls.append([m[0][p[0] < 200], m[1][p[1] < 200]])
metrics_att.append([m[0][p[0] >= 200], m[1][p[1] >= 200]])
# plot boxplots
for ylabel, cls, att in zip(['Average Precision (Ap)', 'FScore'], metrics_cls, metrics_att):
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.boxplot(cls, labels=['Train', 'Test'], showmeans=True)
ax1.set_title('Objects', fontsize=16)
ax1.tick_params(axis='x', labelsize=12)
ax1.set_ylabel(ylabel, fontsize=12)
ax2.boxplot(att, labels=['Train', 'Test'], showmeans=True)
ax2.set_title('Attributes', fontsize=16)
ax2.tick_params(axis='x', labelsize=12)
f.tight_layout()
plt.show()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description="Script for computing metrics for Neural Algebra of Classifiers models",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('log_file', type=str, help='Paths to log file.')
args = parser.parse_args()
_plot_primitives_boxplots(args.log_file)
|
the-stack_0_5805 | """Facilities for implementing hooks that call shell commands."""
from __future__ import print_function
import logging
import os
from subprocess import Popen, PIPE
from certbot import errors
logger = logging.getLogger(__name__)
def validate_hooks(config):
"""Check hook commands are executable."""
_validate_hook(config.pre_hook, "pre")
_validate_hook(config.post_hook, "post")
_validate_hook(config.renew_hook, "renew")
def _prog(shell_cmd):
"""Extract the program run by a shell command"""
cmd = _which(shell_cmd)
return os.path.basename(cmd) if cmd else None
def _validate_hook(shell_cmd, hook_name):
"""Check that a command provided as a hook is plausibly executable.
:raises .errors.HookCommandNotFound: if the command is not found
"""
if shell_cmd:
cmd = shell_cmd.split(None, 1)[0]
if not _prog(cmd):
path = os.environ["PATH"]
msg = "Unable to find {2}-hook command {0} in the PATH.\n(PATH is {1})".format(
cmd, path, hook_name)
raise errors.HookCommandNotFound(msg)
def pre_hook(config):
"Run pre-hook if it's defined and hasn't been run."
if config.pre_hook and not pre_hook.already:
logger.info("Running pre-hook command: %s", config.pre_hook)
_run_hook(config.pre_hook)
pre_hook.already = True
pre_hook.already = False
def post_hook(config, final=False):
"""Run post hook if defined.
If the verb is renew, we might have more certs to renew, so we wait until
we're called with final=True before actually doing anything.
"""
if config.post_hook:
if not pre_hook.already:
logger.info("No renewals attempted, so not running post-hook")
if config.verb != "renew":
logger.warn("Sanity failure in renewal hooks")
return
if final or config.verb != "renew":
logger.info("Running post-hook command: %s", config.post_hook)
_run_hook(config.post_hook)
def renew_hook(config, domains, lineage_path):
"Run post-renewal hook if defined."
if config.renew_hook:
if not config.dry_run:
os.environ["RENEWED_DOMAINS"] = " ".join(domains)
os.environ["RENEWED_LINEAGE"] = lineage_path
_run_hook(config.renew_hook)
else:
logger.warning("Dry run: skipping renewal hook command: %s", config.renew_hook)
def _run_hook(shell_cmd):
"""Run a hook command.
:returns: stderr if there was any"""
cmd = Popen(shell_cmd, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE)
_out, err = cmd.communicate()
if cmd.returncode != 0:
logger.error('Hook command "%s" returned error code %d', shell_cmd, cmd.returncode)
if err:
logger.error('Error output from %s:\n%s', _prog(shell_cmd), err)
def _is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def _which(program):
"""Test if program is in the path."""
# Borrowed from:
# https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
# XXX May need more porting to handle .exe extensions on Windows
fpath, _fname = os.path.split(program)
if fpath:
if _is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if _is_exe(exe_file):
return exe_file
return None
|
the-stack_0_5806 | from contextlib import AsyncExitStack
from asyncpg import create_pool
from fastapi import FastAPI
from pytest import fixture
from fastapi_pagination import LimitOffsetPage, Page, add_pagination
from fastapi_pagination.ext.asyncpg import paginate
from ..base import BasePaginationTestCase
from ..utils import faker
@fixture(scope="session")
def database_url(postgres_url) -> str:
return postgres_url
@fixture(scope="session")
def pool(database_url):
return create_pool(database_url)
@fixture(scope="session")
def app(pool, model_cls):
app = FastAPI()
stack = AsyncExitStack()
@app.on_event("startup")
async def on_startup() -> None:
await stack.enter_async_context(pool)
@app.on_event("shutdown")
async def on_shutdown() -> None:
await stack.aclose()
@app.get("/default", response_model=Page[model_cls])
@app.get("/limit-offset", response_model=LimitOffsetPage[model_cls])
async def route():
async with pool.acquire() as conn:
return await paginate(conn, "SELECT id, name FROM users")
return add_pagination(app)
class TestAsyncpg(BasePaginationTestCase):
@fixture(scope="class")
async def entities(self, pool):
async with pool.acquire() as conn:
await conn.executemany(f"INSERT INTO users(name) VALUES ($1);", [(faker.name(),) for _ in range(100)])
return [{**user} for user in await conn.fetch("SELECT id, name FROM users;")]
|
the-stack_0_5807 | # -*- coding=utf-8 -*-
#Implmentation of anmm model based on bin sum input of QA matrix
from __future__ import print_function
from __future__ import absolute_import
import keras
import keras.backend as K
from keras.models import Sequential, Model
from keras.layers import *
from keras.activations import softmax
from model import BasicModel
from utils.utility import *
class ANMM(BasicModel):
def __init__(self, config):
super(ANMM, self).__init__(config)
self._name = 'ANMM'
self.check_list = [ 'text1_maxlen', 'bin_num',
'embed', 'embed_size', 'vocab_size',
'num_layers', 'hidden_sizes']
self.setup(config)
self.initializer_fc = keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=11)
self.initializer_gate = keras.initializers.RandomUniform(minval=-0.01, maxval=0.01, seed=11)
if not self.check():
raise TypeError('[ANMM] parameter check wrong')
print('[ANMM] init done', end='\n')
def setup(self, config):
if not isinstance(config, dict):
raise TypeError('parameter config should be dict:', config)
self.set_default('text1_maxlen', 10)
self.set_default('hist_size', 60)
self.set_default('dropout_rate', 0.)
self.config.update(config)
def build(self):
        def tensor_product(x):
            # Batched tensor product helper ((i, j, k) x (i, k, l) -> (i, j, l));
            # currently unused in build() below.
            a = x[0]
            b = x[1]
            return K.batch_dot(a, b, axes=[2, 1])
query = Input(name='query', shape=(self.config['text1_maxlen'],))
show_layer_info('Input', query)
doc = Input(name='doc', shape=(self.config['text1_maxlen'], self.config['bin_num']))
show_layer_info('Input', doc)
embedding = Embedding(self.config['vocab_size'], self.config['embed_size'], weights=[self.config['embed']], trainable = False)
q_embed = embedding(query)
show_layer_info('Embedding', q_embed)
q_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False)(q_embed)
show_layer_info('Dense', q_w)
q_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config['text1_maxlen'], ))(q_w)
show_layer_info('Lambda-softmax', q_w)
z = doc
z = Dropout(rate=self.config['dropout_rate'])(z)
show_layer_info('Dropout', z)
for i in range(self.config['num_layers']-1):
z = Dense(self.config['hidden_sizes'][i], kernel_initializer=self.initializer_fc)(z)
z = Activation('tanh')(z)
show_layer_info('Dense', z)
z = Dense(self.config['hidden_sizes'][self.config['num_layers']-1], kernel_initializer=self.initializer_fc)(z)
show_layer_info('Dense', z)
z = Permute((2, 1))(z)
show_layer_info('Permute', z)
z = Reshape((self.config['text1_maxlen'],))(z)
show_layer_info('Reshape', z)
q_w = Reshape((self.config['text1_maxlen'],))(q_w)
show_layer_info('Reshape', q_w)
        out_ = Dot(axes=[1, 1])([z, q_w])
if self.config['target_mode'] == 'classification':
out_ = Dense(2, activation='softmax')(out_)
show_layer_info('Dense', out_)
model = Model(inputs=[query, doc], outputs=[out_])
return model
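# Illustrative sketch (all values below are assumptions; 'embed' would normally be a
# pretrained embedding matrix) showing the config keys that check_list and build() expect.
# The last hidden size must be 1 so the final Reshape to (text1_maxlen,) is valid.
#
#   import numpy as np
#   config = {
#       'vocab_size': 3000, 'embed_size': 50,
#       'embed': np.random.uniform(-0.2, 0.2, (3000, 50)),
#       'text1_maxlen': 10, 'bin_num': 60,
#       'num_layers': 2, 'hidden_sizes': [100, 1],
#       'dropout_rate': 0.1, 'target_mode': 'ranking',
#   }
#   model = ANMM(config).build()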
|
the-stack_0_5809 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Train a video classification model."""
import numpy as np
import pprint
import torch
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.models import build_model
from slowfast.utils.meters import AVAMeter, TrainMeter, ValMeter
from slowfast.utils.multigrid import MultigridSchedule
logger = logging.get_logger(__name__)
def train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer=None
):
"""
Perform the video training for one epoch.
Args:
train_loader (loader): video training loader.
model (model): the video model to train.
optimizer (optim): the optimizer to perform optimization on the model's
parameters.
train_meter (TrainMeter): training meters to log the training performance.
cur_epoch (int): current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
            to write Tensorboard log.
"""
# Enable train mode.
model.train()
train_meter.iter_tic()
data_size = len(train_loader)
for cur_iter, (inputs, labels, _, meta) in enumerate(train_loader):
# Transfer the data to the current GPU device.
if cfg.NUM_GPUS:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
# Update the learning rate.
lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
optim.set_lr(optimizer, lr)
train_meter.data_toc()
if cfg.DETECTION.ENABLE:
preds = model(inputs, meta["boxes"])
else:
preds = model(inputs)
# Explicitly declare reduction to mean.
loss_fun = losses.get_loss_func(cfg.MODEL.LOSS_FUNC)(reduction="mean")
# Compute the loss.
loss = loss_fun(preds, labels)
# check Nan Loss.
misc.check_nan_losses(loss)
# Perform the backward pass.
optimizer.zero_grad()
loss.backward()
# Update the parameters.
optimizer.step()
if cfg.DETECTION.ENABLE:
if cfg.NUM_GPUS > 1:
loss = du.all_reduce([loss])[0]
loss = loss.item()
# Update and log stats.
train_meter.update_stats(None, None, None, loss, lr)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Train/loss": loss, "Train/lr": lr},
global_step=data_size * cur_epoch + cur_iter,
)
else:
top1_err, top5_err = None, None
if cfg.DATA.MULTI_LABEL:
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
[loss] = du.all_reduce([loss])
loss = loss.item()
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point).
loss, top1_err, top5_err = (
loss.item(),
top1_err.item(),
top5_err.item(),
)
# Update and log stats.
train_meter.update_stats(
top1_err,
top5_err,
loss,
lr,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{
"Train/loss": loss,
"Train/lr": lr,
"Train/Top1_err": top1_err,
"Train/Top5_err": top5_err,
},
global_step=data_size * cur_epoch + cur_iter,
)
train_meter.iter_toc() # measure allreduce for this meter
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats.
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
@torch.no_grad()
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer=None):
"""
Evaluate the model on the val set.
Args:
val_loader (loader): data loader to provide validation data.
model (model): model to evaluate the performance.
val_meter (ValMeter): meter instance to record and calculate the metrics.
cur_epoch (int): number of the current epoch of training.
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
writer (TensorboardWriter, optional): TensorboardWriter object
            to write Tensorboard log.
"""
# Evaluation mode enabled. The running stats would not be updated.
model.eval()
val_meter.iter_tic()
for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
val_meter.data_toc()
if cfg.DETECTION.ENABLE:
# Compute the predictions.
preds = model(inputs, meta["boxes"])
ori_boxes = meta["ori_boxes"]
metadata = meta["metadata"]
if cfg.NUM_GPUS:
preds = preds.cpu()
ori_boxes = ori_boxes.cpu()
metadata = metadata.cpu()
if cfg.NUM_GPUS > 1:
preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)
metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(preds, ori_boxes, metadata)
else:
preds = model(inputs)
if cfg.DATA.MULTI_LABEL:
if cfg.NUM_GPUS > 1:
preds, labels = du.all_gather([preds, labels])
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
# Combine the errors across the GPUs.
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point).
top1_err, top5_err = top1_err.item(), top5_err.item()
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(
top1_err,
top5_err,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
global_step=len(val_loader) * cur_epoch + cur_iter,
)
val_meter.update_predictions(preds, labels)
val_meter.log_iter_stats(cur_epoch, cur_iter)
val_meter.iter_tic()
# Log epoch stats.
val_meter.log_epoch_stats(cur_epoch)
# write to tensorboard format if available.
if writer is not None:
if cfg.DETECTION.ENABLE:
writer.add_scalars(
{"Val/mAP": val_meter.full_map}, global_step=cur_epoch
)
else:
all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
all_labels = [
label.clone().detach() for label in val_meter.all_labels
]
if cfg.NUM_GPUS:
all_preds = [pred.cpu() for pred in all_preds]
all_labels = [label.cpu() for label in all_labels]
writer.plot_eval(
preds=all_preds, labels=all_labels, global_step=cur_epoch
)
val_meter.reset()
def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
"""
    Update the stats in bn layers by calculating the precise stats.
Args:
loader (loader): data loader to provide training data.
model (model): model to update the bn stats.
num_iters (int): number of iterations to compute and update the bn stats.
use_gpu (bool): whether to use GPU or not.
"""
def _gen_loader():
for inputs, *_ in loader:
if use_gpu:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
yield inputs
# Update the bn stats.
update_bn_stats(model, _gen_loader(), num_iters)
def build_trainer(cfg):
"""
Build training model and its associated tools, including optimizer,
dataloaders and meters.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
Returns:
model (nn.Module): training model.
optimizer (Optimizer): optimizer.
train_loader (DataLoader): training data loader.
        val_loader (DataLoader): validation data loader.
precise_bn_loader (DataLoader): training data loader for computing
precise BN.
train_meter (TrainMeter): tool for measuring training stats.
val_meter (ValMeter): tool for measuring validation stats.
"""
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = loader.construct_loader(
cfg, "train", is_precise_bn=True
)
# Create meters.
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
return (
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
)
def train(cfg):
"""
Train a video model for many epochs on train set and evaluate it on val set.
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
# Set up environment.
du.init_distributed_training(cfg)
# Set random seed from configs.
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
# Setup logging format.
logging.setup_logging(cfg.OUTPUT_DIR)
# Init multigrid.
multigrid = None
if cfg.MULTIGRID.LONG_CYCLE or cfg.MULTIGRID.SHORT_CYCLE:
multigrid = MultigridSchedule()
cfg = multigrid.init_multigrid(cfg)
if cfg.MULTIGRID.LONG_CYCLE:
cfg, _ = multigrid.update_long_cycle(cfg, cur_epoch=0)
# Print config.
logger.info("Train with config:")
logger.info(pprint.pformat(cfg))
# Build the video model and print model statistics.
model = build_model(cfg)
if du.is_master_proc() and cfg.LOG_MODEL_INFO:
misc.log_model_info(model, cfg, use_train_input=True)
# Construct the optimizer.
optimizer = optim.construct_optimizer(model, cfg)
# Load a checkpoint to resume training if applicable.
start_epoch = cu.load_train_checkpoint(cfg, model, optimizer)
# Create the video train and val loaders.
train_loader = loader.construct_loader(cfg, "train")
val_loader = loader.construct_loader(cfg, "val")
precise_bn_loader = (
loader.construct_loader(cfg, "train", is_precise_bn=True)
if cfg.BN.USE_PRECISE_STATS
else None
)
# Create meters.
if cfg.DETECTION.ENABLE:
train_meter = AVAMeter(len(train_loader), cfg, mode="train")
val_meter = AVAMeter(len(val_loader), cfg, mode="val")
else:
train_meter = TrainMeter(len(train_loader), cfg)
val_meter = ValMeter(len(val_loader), cfg)
# set up writer for logging to Tensorboard format.
if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
cfg.NUM_GPUS * cfg.NUM_SHARDS
):
writer = tb.TensorboardWriter(cfg)
else:
writer = None
# Perform the training loop.
logger.info("Start epoch: {}".format(start_epoch + 1))
for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
if cfg.MULTIGRID.LONG_CYCLE:
cfg, changed = multigrid.update_long_cycle(cfg, cur_epoch)
if changed:
(
model,
optimizer,
train_loader,
val_loader,
precise_bn_loader,
train_meter,
val_meter,
) = build_trainer(cfg)
# Load checkpoint.
if cu.has_checkpoint(cfg.OUTPUT_DIR):
last_checkpoint = cu.get_last_checkpoint(cfg.OUTPUT_DIR)
assert "{:05d}.pyth".format(cur_epoch) in last_checkpoint
else:
last_checkpoint = cfg.TRAIN.CHECKPOINT_FILE_PATH
logger.info("Load from {}".format(last_checkpoint))
cu.load_checkpoint(
last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer
)
# Shuffle the dataset.
loader.shuffle_dataset(train_loader, cur_epoch)
# Train for one epoch.
train_epoch(
train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer
)
is_checkp_epoch = cu.is_checkpoint_epoch(
cfg,
cur_epoch,
None if multigrid is None else multigrid.schedule,
)
is_eval_epoch = misc.is_eval_epoch(
cfg, cur_epoch, None if multigrid is None else multigrid.schedule
)
# Compute precise BN stats.
if (
(is_checkp_epoch or is_eval_epoch)
and cfg.BN.USE_PRECISE_STATS
and len(get_bn_modules(model)) > 0
):
calculate_and_update_precise_bn(
precise_bn_loader,
model,
min(cfg.BN.NUM_BATCHES_PRECISE, len(precise_bn_loader)),
cfg.NUM_GPUS > 0,
)
_ = misc.aggregate_sub_bn_stats(model)
# Save a checkpoint.
if is_checkp_epoch:
cu.save_checkpoint(cfg.OUTPUT_DIR, model, optimizer, cur_epoch, cfg)
# Evaluate the model on validation set.
if is_eval_epoch:
eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer)
if writer is not None:
writer.close()
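# Illustrative sketch (assumes the standard SlowFast entry point and config utilities,
# which are not defined in this file):
#
#   from slowfast.utils.parser import load_config, parse_args
#   args = parse_args()
#   cfg = load_config(args)
#   train(cfg)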
|
the-stack_0_5810 | #------------------------------------------------------------------------------
# test_try.py
#------------------------------------------------------------------------------
# BSD 3-Clause License
#
# Copyright (c) 2018, Affirm
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
import re
import unittest
from functools import partial
from pyfnz.tri import *
#------------------------------------------------------------------------------
# test classes
#------------------------------------------------------------------------------
class TryTest(unittest.TestCase):
#--------------------------------------------------------------------------
# tests
#--------------------------------------------------------------------------
def test_init(self):
"""Test initilizing a Try.
"""
failure = lambda: 1 / 0
failure_args = lambda x, y=1 : x / y
success = lambda: 1 + 1
success_args = lambda x, y=1: x + y
self.assertTrue(Try(failure).is_failure)
self.assertTrue(Try(failure, 1, y=0).is_failure)
self.assertTrue(Try(success).is_success)
self.assertTrue(Try(success_args, 1, y=2).is_success)
#--------------------------------------------------------------------------
def test_slots(self):
"""Test slots directive is correctly working.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(failure.is_failure)
with self.assertRaises(AttributeError):
failure.a = 1
self.assertTrue(success.is_success)
with self.assertRaises(AttributeError):
success.a = 1
#--------------------------------------------------------------------------
def test_repr(self):
"""Test string representation.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(re.match("Failure\(.+\)", repr(failure)) is not None)
self.assertEqual("Success(2)", repr(success))
#--------------------------------------------------------------------------
def test_do(self):
"""Test do notation.
"""
failure = Try(lambda: 1 / 0)
success1 = Try(lambda: 1 + 1)
success2 = Try(lambda: 2 * 2)
failure_result = Try.do(f * s
for f in failure
for s in success1)
success_result = Try.do(s1 * s2
for s1 in success1
for s2 in success2)
self.assertEqual(0, failure_result | 0)
self.assertEqual(8, success_result | 0)
#--------------------------------------------------------------------------
def test_is_failure(self):
"""Test checking if try is a failure.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(failure.is_failure())
self.assertFalse(success.is_failure())
#--------------------------------------------------------------------------
def test_is_success(self):
"""Test checking if try is a success.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
self.assertTrue(success.is_success())
self.assertFalse(failure.is_success())
#--------------------------------------------------------------------------
def test_foreach(self):
"""Test running a function with a side-effects on a success.
"""
cache = []
cache_elem = lambda x: cache.append(x)
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure.foreach(cache_elem)
success.foreach(cache_elem)
self.assertEqual(1, len(cache))
self.assertEqual(2, cache[0])
#--------------------------------------------------------------------------
def test_to_either(self):
"""Test converting to an either.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure_either = failure.to_either()
success_either = success.to_either()
self.assertTrue(failure_either.is_left())
self.assertTrue(success_either.is_right())
#--------------------------------------------------------------------------
def test_get_success(self):
"""Test retrieving value contained in a successful Try.
"""
success = Try(lambda: 1 + 1)
failure = Try(lambda: 1 / 0)
success_result = success.get()
self.assertEqual(2, success_result)
with self.assertRaises(ZeroDivisionError):
failure.get()
#--------------------------------------------------------------------------
def test_get_or_else(self):
"""Test retrieving a value from a success else return default for
failure.
"""
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.get_or_else(0)
success1_result = success.get_or_else(0)
failure2_result = failure | 0
success2_result = success | 0
self.assertEqual(0, failure1_result)
self.assertEqual(2, success1_result)
self.assertEqual(0, failure2_result)
self.assertEqual(2, success2_result)
#--------------------------------------------------------------------------
def test_or_else(self):
"""Test retrieving self or other either if failure.
"""
default_4 = Try(lambda: 2 + 2)
more_fail = Try(lambda: [][0])
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.or_else(more_fail)
success1_result = success.or_else(more_fail)
failure2_result = failure.or_else(default_4)
success2_result = success.or_else(default_4)
self.assertEqual(0, failure1_result | 0)
self.assertEqual(2, success1_result | 0)
self.assertEqual(4, failure2_result | 0)
self.assertEqual(2, success2_result | 0)
#--------------------------------------------------------------------------
def test_recover(self):
"""Test recovering from a failure.
"""
default_9 = lambda e: 9 if isinstance(e, ZeroDivisionError) else None
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure_result = failure.recover(default_9)
success_result = success.recover(default_9)
self.assertEqual(9, failure_result | 0)
self.assertEqual(2, success_result | 0)
#--------------------------------------------------------------------------
def test_recover_with(self):
"""Test recovering from a failure.
"""
default_9 = lambda e: Try(lambda: 9 if isinstance(e, ZeroDivisionError) else None)
more_fail = lambda e: Try(lambda: [][0])
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.recover_with(default_9)
success1_result = success.recover_with(default_9)
failure2_result = failure.recover_with(more_fail)
success2_result = success.recover_with(more_fail)
self.assertEqual(9, failure1_result | 0)
self.assertEqual(2, success1_result | 0)
self.assertEqual(0, failure2_result | 0)
self.assertEqual(2, success2_result | 0)
#--------------------------------------------------------------------------
def test_map(self):
"""Test running a function on a success.
"""
plus_5 = lambda x: x + 5
fail = lambda x: x[0]
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_plus = failure.map(plus_5)
success1_plus = success.map(plus_5)
failure2_fail = failure.map(fail)
success2_fail = success.map(fail)
self.assertEqual(0, failure1_plus | 0)
self.assertEqual(7, success1_plus | 0)
self.assertEqual(0, failure2_fail | 0)
self.assertEqual(0, success2_fail | 0)
#--------------------------------------------------------------------------
def test_pure(self):
"""Test turning a value into an Either.
"""
success1 = Try.pure(4)
success2 = Try.pure('a')
self.assertEqual(4, success1 | 0)
self.assertEqual('a', success2 | 'b')
#--------------------------------------------------------------------------
def test_flatmap(self):
"""Test binding through a success.
"""
plus_5_maybe = lambda x: Try(lambda: x + 5)
fail_maybe = lambda x: Try(lambda: [][0])
failure = Try(lambda: 1 / 0)
success = Try(lambda: 1 + 1)
failure1_result = failure.flatmap(plus_5_maybe)
success1_result = success.flatmap(plus_5_maybe)
failure2_result = failure.flatmap(fail_maybe)
success2_result = success.flatmap(fail_maybe)
self.assertEqual(0, failure1_result | 0)
self.assertEqual(7, success1_result | 0)
self.assertEqual(0, failure2_result | 0)
self.assertEqual(0, success2_result | 0)
#--------------------------------------------------------------------------
def test_monad_laws(self):
"""Test the monad laws holds for Try.
"""
sub2 = lambda b: Try(lambda: b - 2)
div2 = lambda b: Try(lambda: b / 2)
# left unit | (unit >>= a) == a
self.assertEqual(Try.pure(6).flatmap(sub2), sub2(6))
# right unit | (a >>= unit) == a
self.assertEqual(sub2(6).flatmap(Try.pure), sub2(6))
# associative | ((a >>= b) >>= c) == (a >>= (b >>= c))
self.assertEqual(Try.pure(6).flatmap(lambda b: sub2(b).flatmap(div2)),
Try.pure(6).flatmap(sub2).flatmap(div2))
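#------------------------------------------------------------------------------
# Illustrative addition: allow running this module directly with the standard
# unittest runner (normally these tests would be driven by an external runner).
if __name__ == '__main__':
    unittest.main()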
|
the-stack_0_5813 | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron-2 Config object."""
from tensorflow_tts.configs import BaseConfig
from tensorflow_tts.processor.ljspeech import LJSPEECH_SYMBOLS as lj_symbols
from tensorflow_tts.processor.kss import KSS_SYMBOLS as kss_symbols
from tensorflow_tts.processor.baker import BAKER_SYMBOLS as bk_symbols
from tensorflow_tts.processor.libritts import LIBRITTS_SYMBOLS as lbri_symbols
from tensorflow_tts.processor.synpaflex import SYNPAFLEX_SYMBOLS as synpaflex_symbols
class Tacotron2Config(BaseConfig):
"""Initialize Tacotron-2 Config."""
def __init__(
self,
dataset="ljspeech",
vocab_size=len(lj_symbols),
embedding_hidden_size=512,
initializer_range=0.02,
layer_norm_eps=1e-6,
embedding_dropout_prob=0.1,
n_speakers=5,
n_conv_encoder=3,
encoder_conv_filters=512,
encoder_conv_kernel_sizes=5,
encoder_conv_activation="mish",
encoder_conv_dropout_rate=0.5,
encoder_lstm_units=256,
reduction_factor=5,
n_prenet_layers=2,
prenet_units=256,
prenet_activation="mish",
prenet_dropout_rate=0.5,
n_lstm_decoder=1,
decoder_lstm_units=1024,
attention_type="lsa",
attention_dim=128,
attention_filters=32,
attention_kernel=31,
n_mels=80,
n_conv_postnet=5,
postnet_conv_filters=512,
postnet_conv_kernel_sizes=5,
postnet_dropout_rate=0.1,
):
"""Init parameters for Tacotron-2 model."""
if dataset == "ljspeech":
self.vocab_size = vocab_size
elif dataset == "kss":
self.vocab_size = len(kss_symbols)
elif dataset == "baker":
self.vocab_size = len(bk_symbols)
elif dataset == "libritts":
self.vocab_size = len(lbri_symbols)
elif dataset == "synpaflex":
self.vocab_size = len(synpaflex_symbols)
else:
raise ValueError("No such dataset: {}".format(dataset))
self.embedding_hidden_size = embedding_hidden_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_dropout_prob = embedding_dropout_prob
self.n_speakers = n_speakers
self.n_conv_encoder = n_conv_encoder
self.encoder_conv_filters = encoder_conv_filters
self.encoder_conv_kernel_sizes = encoder_conv_kernel_sizes
self.encoder_conv_activation = encoder_conv_activation
self.encoder_conv_dropout_rate = encoder_conv_dropout_rate
self.encoder_lstm_units = encoder_lstm_units
# decoder param
self.reduction_factor = reduction_factor
self.n_prenet_layers = n_prenet_layers
self.prenet_units = prenet_units
self.prenet_activation = prenet_activation
self.prenet_dropout_rate = prenet_dropout_rate
self.n_lstm_decoder = n_lstm_decoder
self.decoder_lstm_units = decoder_lstm_units
self.attention_type = attention_type
self.attention_dim = attention_dim
self.attention_filters = attention_filters
self.attention_kernel = attention_kernel
self.n_mels = n_mels
# postnet
self.n_conv_postnet = n_conv_postnet
self.postnet_conv_filters = postnet_conv_filters
self.postnet_conv_kernel_sizes = postnet_conv_kernel_sizes
self.postnet_dropout_rate = postnet_dropout_rate
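# Illustrative sketch (argument values are assumptions, not a released recipe): the
# config is a plain container, so it can be constructed and inspected directly.
if __name__ == "__main__":
    cfg = Tacotron2Config(dataset="ljspeech", reduction_factor=1)
    print(cfg.vocab_size, cfg.n_mels, cfg.reduction_factor)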
|
the-stack_0_5814 | ## Main
"""
OpenAI gym execution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import importlib
import json
import logging
import os
import time
import sys
from tensorforce import TensorForceError
from tensorforce.agents import Agent
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
# python examples/openai_gym.py Pong-ram-v0 -a examples/configs/vpg.json -n examples/configs/mlp2_network.json -e 50000 -m 2000
# python examples/openai_gym.py CartPole-v0 -a examples/configs/vpg.json -n examples/configs/mlp2_network.json -e 2000 -m 200
def main():
parser = argparse.ArgumentParser()
parser.add_argument('gym_id', help="Id of the Gym environment")
parser.add_argument('-i', '--import-modules', help="Import module(s) required for environment")
parser.add_argument('-a', '--agent', help="Agent configuration file")
parser.add_argument('-n', '--network', default=None, help="Network specification file")
parser.add_argument('-e', '--episodes', type=int, default=None, help="Number of episodes")
parser.add_argument('-t', '--timesteps', type=int, default=None, help="Number of timesteps")
parser.add_argument('-m', '--max-episode-timesteps', type=int, default=None, help="Maximum number of timesteps per episode")
parser.add_argument('-d', '--deterministic', action='store_true', default=False, help="Choose actions deterministically")
parser.add_argument('-s', '--save', help="Save agent to this dir")
parser.add_argument('-se', '--save-episodes', type=int, default=100, help="Save agent every x episodes")
parser.add_argument('-l', '--load', help="Load agent from this dir")
parser.add_argument('--monitor', help="Save results to this directory")
parser.add_argument('--monitor-safe', action='store_true', default=False, help="Do not overwrite previous results")
parser.add_argument('--monitor-video', type=int, default=0, help="Save video every x steps (0 = disabled)")
parser.add_argument('--visualize', action='store_true', default=False, help="Enable OpenAI Gym's visualization")
parser.add_argument('-D', '--debug', action='store_true', default=False, help="Show debug outputs")
parser.add_argument('-te', '--test', action='store_true', default=False, help="Test agent without learning.")
parser.add_argument('-sl', '--sleep', type=float, default=None, help="Slow down simulation by sleeping for x seconds (fractions allowed).")
parser.add_argument('--job', type=str, default=None, help="For distributed mode: The job type of this agent.")
parser.add_argument('--task', type=int, default=0, help="For distributed mode: The task index of this agent.")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if args.import_modules is not None:
for module in args.import_modules.split(','):
importlib.import_module(name=module)
environment = OpenAIGym(
gym_id=args.gym_id,
monitor=args.monitor,
monitor_safe=args.monitor_safe,
monitor_video=args.monitor_video,
visualize=args.visualize
)
if args.agent is not None:
with open(args.agent, 'r') as fp:
agent = json.load(fp=fp)
else:
raise TensorForceError("No agent configuration provided.")
if args.network is not None:
with open(args.network, 'r') as fp:
network = json.load(fp=fp)
agent = Agent.from_spec(
spec=agent,
kwargs=dict(
states=environment.states,
actions=environment.actions,
network=network
)
)
else:
logger.info("No network configuration provided.")
agent = Agent.from_spec(
spec=agent,
kwargs=dict(
states=environment.states,
actions=environment.actions
)
)
if args.load:
load_dir = os.path.dirname(args.load)
if not os.path.isdir(load_dir):
raise OSError("Could not load agent from {}: No such directory.".format(load_dir))
agent.restore_model(args.load)
if args.save:
save_dir = os.path.dirname(args.save)
if not os.path.isdir(save_dir):
try:
os.mkdir(save_dir, 0o755)
except OSError:
raise OSError("Cannot save agent to dir {} ()".format(save_dir))
if args.debug:
logger.info("-" * 16)
logger.info("Configuration:")
logger.info(agent)
runner = Runner(
agent=agent,
environment=environment,
repeat_actions=1
)
if args.debug: # TODO: Timestep-based reporting
report_episodes = 1
else:
report_episodes = 100
logger.info("Starting {agent} for Environment '{env}'".format(agent=agent, env=environment))
def episode_finished(r, id_):
if r.episode % report_episodes == 0:
steps_per_second = r.timestep / (time.time() - r.start_time)
logger.info("Finished episode {:d} after {:d} timesteps. Steps Per Second {:0.2f}".format(
r.agent.episode, r.episode_timestep, steps_per_second
))
logger.info("Episode reward: {}".format(r.episode_rewards[-1]))
logger.info("Average of last 500 rewards: {:0.2f}".
format(sum(r.episode_rewards[-500:]) / min(500, len(r.episode_rewards))))
logger.info("Average of last 100 rewards: {:0.2f}".
format(sum(r.episode_rewards[-100:]) / min(100, len(r.episode_rewards))))
if args.save and args.save_episodes is not None and not r.episode % args.save_episodes:
logger.info("Saving agent to {}".format(args.save))
r.agent.save_model(args.save)
return True
runner.run(
num_timesteps=args.timesteps,
num_episodes=args.episodes,
max_episode_timesteps=args.max_episode_timesteps,
deterministic=args.deterministic,
episode_finished=episode_finished,
testing=args.test,
sleep=args.sleep
)
runner.close()
logger.info("Learning finished. Total episodes: {ep}".format(ep=runner.agent.episode))
if __name__ == '__main__':
main()
|
the-stack_0_5819 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.rpg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import tensorflow as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import policy_gradient
from open_spiel.python.algorithms.losses import rl_losses
import pyspiel
class PolicyGradientTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
itertools.product(("rpg", "qpg", "rm", "a2c"),
("kuhn_poker", "leduc_poker")))
def test_run_game(self, loss_str, game_name):
env = rl_environment.Environment(game_name)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
sess,
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str=loss_str,
hidden_layers_sizes=[8, 8],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in [0, 1]
]
sess.run(tf.global_variables_initializer())
for _ in range(2):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
def test_run_hanabi(self):
# Hanabi is an optional game, so check we have it before running the test.
game = "hanabi"
if game not in pyspiel.registered_names():
return
num_players = 3
env_configs = {
"players": num_players,
"max_life_tokens": 1,
"colors": 2,
"ranks": 3,
"hand_size": 2,
"max_information_tokens": 3,
"discount": 0.
}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
sess,
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
hidden_layers_sizes=[8, 8],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in range(num_players)
]
sess.run(tf.global_variables_initializer())
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
agent_output = [agent.step(time_step) for agent in agents]
time_step = env.step([agent_output[current_player].action])
for agent in agents:
agent.step(time_step)
def test_loss_modes(self):
loss_dict = {
"qpg": rl_losses.BatchQPGLoss,
"rpg": rl_losses.BatchRPGLoss,
"rm": rl_losses.BatchRMLoss,
"a2c": rl_losses.BatchA2CLoss,
}
with self.session() as sess:
for loss_str, loss_class in loss_dict.items():
agent_by_str = policy_gradient.PolicyGradient(
sess,
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=loss_str,
loss_class=None)
agent_by_class = policy_gradient.PolicyGradient(
sess,
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=None,
loss_class=loss_class)
self.assertEqual(agent_by_str._pi_loss.shape,
agent_by_class._pi_loss.shape)
self.assertEqual(agent_by_str._pi_loss.dtype,
agent_by_class._pi_loss.dtype)
self.assertEqual(agent_by_str._pi_loss.op.type,
agent_by_class._pi_loss.op.type)
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_5821 | # 1080
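# Greedy approach: scan block anchors in row-major order; when cell (i, j) with
# i <= n-3 and j <= m-3 differs from the target, the 3x3 block anchored at (i, j)
# is the only remaining flip that can change that cell, so flip it and count one
# operation. A final pass prints -1 if any mismatch is left.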
n, m = map(int, input().split())
cnt = 0
normal = [list(map(int, list(input()))) for _ in range(n)]
comp = [list(map(int, list(input()))) for _ in range(n)]
def change(a, b):
for i in range(a, a+3):
for j in range(b, b+3):
if normal[i][j] == 1:
normal[i][j] = 0
else:
normal[i][j] = 1
for i in range(0, n-2):
for j in range(0, m-2):
if normal[i][j] != comp[i][j]:
change(i, j)
cnt += 1
for i in range(0, n):
for j in range(0, m):
if normal[i][j] != comp[i][j]:
cnt = -1
break
print(cnt)
|
the-stack_0_5822 | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast, Any, Dict
import streamlit
import json
from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as PydeckProto
class PydeckMixin:
def pydeck_chart(self, pydeck_obj=None, use_container_width=False):
"""Draw a chart using the PyDeck library.
This supports 3D maps, point clouds, and more! More info about PyDeck
at https://deckgl.readthedocs.io/en/latest/.
These docs are also quite useful:
- DeckGL docs: https://github.com/uber/deck.gl/tree/master/docs
- DeckGL JSON docs: https://github.com/uber/deck.gl/tree/master/modules/json
When using this command, we advise all users to use a personal Mapbox
token. This ensures the map tiles used in this chart are more
robust. You can do this with the mapbox.token config option.
To get a token for yourself, create an account at
https://mapbox.com. It's free! (for moderate usage levels). For more info
on how to set config options, see
https://docs.streamlit.io/library/advanced-features/configuration#set-configuration-options
Parameters
----------
        pydeck_obj: pydeck.Deck or None
Object specifying the PyDeck chart to draw.
Example
-------
Here's a chart using a HexagonLayer and a ScatterplotLayer on top of
the light map style:
>>> df = pd.DataFrame(
... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
... columns=['lat', 'lon'])
>>>
>>> st.pydeck_chart(pdk.Deck(
... map_style='mapbox://styles/mapbox/light-v9',
... initial_view_state=pdk.ViewState(
... latitude=37.76,
... longitude=-122.4,
... zoom=11,
... pitch=50,
... ),
... layers=[
... pdk.Layer(
... 'HexagonLayer',
... data=df,
... get_position='[lon, lat]',
... radius=200,
... elevation_scale=4,
... elevation_range=[0, 1000],
... pickable=True,
... extruded=True,
... ),
... pdk.Layer(
... 'ScatterplotLayer',
... data=df,
... get_position='[lon, lat]',
... get_color='[200, 30, 0, 160]',
... get_radius=200,
... ),
... ],
... ))
.. output::
https://static.streamlit.io/0.25.0-2JkNY/index.html?id=ASTdExBpJ1WxbGceneKN1i
height: 530px
"""
pydeck_proto = PydeckProto()
marshall(pydeck_proto, pydeck_obj, use_container_width)
return self.dg._enqueue("deck_gl_json_chart", pydeck_proto)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
# Map used when no data is passed.
EMPTY_MAP: Dict[str, Any] = {
"initialViewState": {"latitude": 0, "longitude": 0, "pitch": 0, "zoom": 1}
}
def marshall(pydeck_proto, pydeck_obj, use_container_width):
if pydeck_obj is None:
spec = json.dumps(EMPTY_MAP)
else:
spec = pydeck_obj.to_json()
pydeck_proto.json = spec
pydeck_proto.use_container_width = use_container_width
if pydeck_obj is not None and isinstance(pydeck_obj.deck_widget.tooltip, dict):
pydeck_proto.tooltip = json.dumps(pydeck_obj.deck_widget.tooltip)
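# Illustrative sketch (assumes pydeck is installed; the view-state values are made up)
# showing what marshall() serializes for a real Deck:
#
#   import pydeck as pdk
#   proto = PydeckProto()
#   deck = pdk.Deck(initial_view_state=pdk.ViewState(latitude=0, longitude=0, zoom=1))
#   marshall(proto, deck, use_container_width=False)
#   print(proto.json)  # the Deck serialized via Deck.to_json()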
|
the-stack_0_5823 | #!env python
from importlib import import_module
import sys
import logging
logger = logging.getLogger('root')
import canmatrix
import os
if sys.version_info > (3, 0):
import io
else:
import StringIO
moduleList = ["arxml", "cmcsv", "dbc", "dbf", "cmjson",
"kcd", "fibex", "sym", "xls", "xlsx", "yaml"]
loadedFormats = []
supportedFormats = {}
extensionMapping = {}
for module in moduleList:
try:
import_module("canmatrix." + module)
loadedFormats.append(module)
except ImportError:
logger.info("%s is not supported", module)
for loadedModule in loadedFormats:
supportedFormats[loadedModule] = []
moduleInstance = sys.modules["canmatrix." + loadedModule]
if "load" in dir(moduleInstance):
supportedFormats[loadedModule].append("load")
if "dump" in dir(moduleInstance):
supportedFormats[loadedModule].append("dump")
if "clusterImporter" in dir(moduleInstance):
supportedFormats[loadedModule].append("clusterImporter")
if "clusterExporter" in dir(moduleInstance):
supportedFormats[loadedModule].append("clusterExporter")
if "extension" in dir(moduleInstance):
supportedFormats[loadedModule].append("extension")
extensionMapping[loadedModule] = moduleInstance.extension
else:
extensionMapping[loadedModule] = loadedModule
def loads(string, importType=None, key="", flatImport=None, encoding="utf-8", **options):
if sys.version_info > (3, 0):
if type(string) == str:
string = bytes(string, encoding)
fileObject = io.BytesIO(string)
else:
fileObject = StringIO.StringIO(string)
return load(fileObject, importType, key, flatImport, **options)
def loadp(path, importType=None, key="", flatImport=None, **options):
with open(path, "rb") as fileObject:
if not importType:
for supportedImportType, extension in extensionMapping.items():
if path.endswith(extension) and "load" in supportedFormats[supportedImportType]:
importType = supportedImportType
break
if importType:
return load(fileObject, importType, key, flatImport, **options)
else:
logger.error("This file format is not supported for reading")
return None
return None
def load(fileObject, importType, key="", flatImport=None, **options):
dbs = {}
moduleInstance = sys.modules["canmatrix." + importType]
if "clusterImporter" in supportedFormats[importType]:
dbs = moduleInstance.load(fileObject, **options)
else:
dbs[key] = moduleInstance.load(fileObject, **options)
if flatImport:
for key in dbs:
return dbs[key]
else:
return dbs
def dump(canMatrixOrCluster, fileObject, exportType, **options):
moduleInstance = sys.modules["canmatrix." + exportType]
if (sys.version_info > (3, 0) and type(canmatrix.canmatrix.CanMatrix()) == type(canMatrixOrCluster)) or \
(sys.version_info < (3, 0) and type(canmatrix.CanMatrix()) == type(canMatrixOrCluster)):
moduleInstance.dump(canMatrixOrCluster, fileObject, **options)
elif "clusterExporter" in supportedFormats[exportType]:
moduleInstance.dump(canMatrixOrCluster, fileObject, **options)
def dumpp(canCluster, path, exportType=None, **options):
if not exportType:
for key, extension in extensionMapping.items():
if path.endswith("." + extension) and "dump" in supportedFormats[key]:
exportType = key
break
if exportType:
if "clusterExporter" in supportedFormats[exportType]:
fileObject = open(path, "wb")
dump(canCluster, fileObject, exportType, **options)
else:
for name in canCluster:
if len(name) > 0:
(filepath, ext) = os.path.splitext(path)
outfile = filepath + "_" + name + ext
else:
outfile = path
db = canCluster[name]
fileObject = open(outfile, "wb")
dump(db, fileObject, exportType, **options)
fileObject.close()
else:
logger.error("This file format is not supported for writing")
return None
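# Illustrative sketch (file names are placeholders): typical use of the
# extension-based dispatch defined above.
#
#   dbs = loadp("example.dbc")                 # importType inferred from ".dbc"
#   db = loadp("example.dbc", flatImport=True) # single CanMatrix instead of a dict
#   dumpp(dbs, "example.xlsx")                 # exportType inferred from ".xlsx"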
|
the-stack_0_5824 | from functools import partial
from urllib.parse import urlunsplit, urlencode
from strava.base import RequestHandler
from strava.constants import APPROVAL_PROMPT, SCOPE, DEFAULT_VERIFY_TOKEN
from strava.helpers import BatchIterator, from_datetime_to_epoch
class StravaApiClientV3(RequestHandler):
api_path = 'api/v3/'
def __init__(self, access_token=None):
self.access_token = access_token
@classmethod
def authorization_url(cls, client_id, redirect_uri, approval_prompt=None, scope=None, state=None, mobile=False):
"""
Returns the Strava authorization URL.
See docs: https://developers.strava.com/docs/authentication/
:param client_id [str]: Strava Client ID.
        :param redirect_uri [str]: URI that the user will be redirected to after authentication.
        :param approval_prompt [str]: indicates if Strava should show the authorization prompt to the user
        :param scope [Sequence[str]]: list/tuple of the requested scope.
        :param state [str]: A value to be returned in the redirect URI.
        :param mobile [bool]: Indicates if the user should be redirected to the mobile page or not.
"""
oauth_path = 'oauth/authorize/'
mobile_oauth_path = 'oauth/mobile/authorize/'
approval_prompt = approval_prompt or APPROVAL_PROMPT.AUTO
assert approval_prompt in APPROVAL_PROMPT, (
"Invalid value for 'approval_prompt': '{}'".format(approval_prompt),
"Valid values are: {}".format([items for items in APPROVAL_PROMPT.values()])
)
scope = scope or [SCOPE.READ, SCOPE.ACTIVITY_READ_ALL]
invalid_scope = set(scope) - set(SCOPE.values())
assert not invalid_scope, (
"Invalid value for 'scope': {}".format(invalid_scope),
"Valid values are: {}".format(SCOPE.values())
)
qs = {
'client_id': client_id,
'redirect_uri': redirect_uri,
'response_type': 'code',
'approval_prompt': approval_prompt,
'scope': ','.join(scope)
}
if state:
assert isinstance(state, str), "Invalid value for 'state'. This value must be str."
qs['state'] = state
path = mobile_oauth_path if mobile else oauth_path
return urlunsplit(('https', cls.api_domain, path, urlencode(qs), ''))
def subscribe_webhook(self, client_id, client_secret, callback_url, verify_token=DEFAULT_VERIFY_TOKEN):
path = 'push_subscriptions/'
params = {
'client_id': client_id,
'client_secret': client_secret,
'callback_url': callback_url,
'verify_token': verify_token
}
return self._dispatcher('post', path, is_webhook=True, **params)
def validate_webhook_subscription(self, hub_mode, hub_challenge, verify_token=None):
assert hub_mode == 'subscribe', "Invalid 'hub_mode'."
if verify_token:
assert verify_token == DEFAULT_VERIFY_TOKEN, "Invalid 'verify token'."
return {"hub.challenge": hub_challenge}
def check_webhook_subscription(self, client_id, client_secret):
path = 'push_subscriptions/'
params = {'client_id': client_id, 'client_secret': client_secret}
return self._dispatcher('get', path, is_webhook=True, **params)
def delete_webhook_subscription(self, subscription_id, client_id, client_secret):
path = 'push_subscriptions/'
params = {'id': subscription_id, 'client_id': client_id, 'client_secret': client_secret}
return self._dispatcher('delete', path, is_webhook=True, **params)
def exchange_token(self, client_id, client_secret, code):
"""
Exchange the authorization code (received from Strava) for the token.
See docs: https://developers.strava.com/docs/authentication/
:param client_id [str]: Strava Client ID
:param client_secret [str]: Strava Client Secret
:param code [str]: Temporary authorization code received by Strava.
"""
path = 'oauth/token/'
params = {
'client_id': client_id,
'client_secret': client_secret,
'code': code,
'grant_type': 'authorization_code'
}
data = self._dispatcher('post', path, **params)
self.access_token = data['access_token']
return data
def refresh_token(self, client_id, client_secret, refresh_token):
"""
Get the new access token and refresh token from Strava given a refresh token.
See docs: https://developers.strava.com/docs/authentication/
:param client_id [str]: Strava Client ID
:param client_secret [str]: Strava Client Secret
:param refresh_token [str]: Refresh token received by Strava.
"""
path = 'oauth/token/'
params = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
'refresh_token': refresh_token
}
data = self._dispatcher('post', path, **params)
self.access_token = data['access_token']
return data
def deauthorize(self, access_token):
"""
Deauthorize the application.
See docs: https://developers.strava.com/docs/authentication/
"""
path = 'oauth/deauthorize/'
self._dispatcher('post', path, access_token=access_token)
def get_athlete_profile(self):
"""
Return the profile of the authenticated user (access_token owner).
See docs: http://developers.strava.com/docs/reference/#api-Athletes-getLoggedInAthlete
"""
path = 'athlete/'
return self._dispatcher('get', path)
def get_activities(self, before=None, after=None, per_page=50, limit=None):
"""
        Get the athlete's activities
See docs: http://developers.strava.com/docs/reference/#api-Activities-getLoggedInAthleteActivities
:param before [datetime]: datetime to use for filtering activities that have taken place before a certain time
:param after [datetime]: datetime to use for filtering activities that have taken place after a certain time
:param per_page [int]: page size
:param limit [int]: maximum number of activities to fetch
Note: 'before' and 'after' will be considered in UTC.
"""
path = 'athlete/activities/'
params = {}
if before:
params['before'] = from_datetime_to_epoch(before)
if after:
            params['after'] = from_datetime_to_epoch(after)
fetcher = partial(self._dispatcher, 'get', path, **params)
return BatchIterator(fetcher, per_page=per_page, limit=limit)
def get_activity(self, activity_id, include_all_efforts=True):
"""
Get an athlete activity by id
See docs: http://developers.strava.com/docs/reference/#api-Activities-getActivityById
:param activity_id [int]: activity's id
:param include_all_efforts [bool]: include segment efforts in the response
"""
path = f'activities/{activity_id}/'
return self._dispatcher('get', path, include_all_efforts=include_all_efforts)
def explore_segments(self, bounds, activity_type=None, min_cat=None, max_cat=None):
"""
Returns the top 10 segments matching a specified query.
See docs: http://developers.strava.com/docs/reference/#api-Segments-exploreSegments
:param bounds [Sequence[float]]: The latitude and longitude for two points describing a rectangular
boundary for the search: [southwest corner latitutde, southwest corner longitude, northeast corner
latitude, northeast corner longitude]. Bounds should be a sequence of points sequence:
Example: [[lat, long], [lat, long]]
:param activity_type [str]: Desired activity type. Can be 'running' or 'riding'.
:param min_cat [int]: the minimum climbing category.
:param max_cat [int]: the maximum climbing category.
"""
path = 'segments/explore/'
assert len(bounds) == 4, "Invalid bounds. Must be '[southwest_corner_latitude, southwest_corner_longitude, northeast_corner_latitude, northeast_corner_longitude]'"
params = {'bounds': ','.join(str(bound) for bound in bounds)}
if activity_type:
assert activity_type in ('running', 'riding'), "Invalid 'activity_type'. Must be 'running' or 'riding'"
params['activity_type'] = activity_type
if min_cat:
params['min_cat'] = min_cat
if max_cat:
params['max_cat'] = max_cat
return self._dispatcher('get', path, **params)
def get_segment(self, segment_id):
"""
Return the specified segment by id.
See docs: http://developers.strava.com/docs/reference/#api-Segments-getSegmentById
:param segment_id [int]: Segment id.
"""
path = f'segments/{segment_id}/'
return self._dispatcher('get', path)
def get_segment_efforts(self, segment_id, per_page=50, limit=None):
"""
Return all segment's efforts from activities of the authenticated user.
See docs: http://developers.strava.com/docs/reference/#api-SegmentEfforts-getEffortsBySegmentId
:param segment_id [int]: Segment id.
:param per_page [int]: page size.
:param limit [int]: maximum number of activities to fetch.
"""
path = f'segments/{segment_id}/all_efforts/'
fetcher = partial(self._dispatcher, 'get', path)
return BatchIterator(fetcher, per_page=per_page, limit=limit)
def get_segment_effort(self, effort_id):
"""
Returns a segment effort from an activity that is owned by the authenticated athlete.
See docs: http://developers.strava.com/docs/reference/#api-SegmentEfforts-getSegmentEffortById
:param effort_id [id]: segment effort id
"""
path = f'segment_efforts/{effort_id}/'
return self._dispatcher('get', path)
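# Illustrative sketch (client credentials, redirect URI and authorization code are
# placeholders; activity field names follow the Strava activity schema): the intended
# OAuth flow using the methods above.
#
#   url = StravaApiClientV3.authorization_url("CLIENT_ID", "https://example.com/callback")
#   # ...the user authorizes in the browser and is redirected back with ?code=...
#   client = StravaApiClientV3()
#   client.exchange_token("CLIENT_ID", "CLIENT_SECRET", "CODE_FROM_REDIRECT")
#   for activity in client.get_activities(limit=10):
#       print(activity["name"])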
|
the-stack_0_5825 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
def convert_params_for_cell(np_cell, paddle_cell):
state = np_cell.parameters
for k, v in paddle_cell.named_parameters():
v.set_value(state[k])
def convert_params_for_cell_static(np_cell, paddle_cell, place):
state = np_cell.parameters
for k, v in paddle_cell.named_parameters():
scope = paddle.static.global_scope()
tensor = scope.find_var(v.name).get_tensor()
tensor.set(state[k], place)
def convert_params_for_net(np_net, paddle_net):
for np_layer, paddle_layer in zip(np_net, paddle_net):
if hasattr(np_layer, "cell"):
convert_params_for_cell(np_layer.cell, paddle_layer.cell)
else:
convert_params_for_cell(np_layer.cell_fw, paddle_layer.cell_fw)
convert_params_for_cell(np_layer.cell_bw, paddle_layer.cell_bw)
def convert_params_for_net_static(np_net, paddle_net, place):
for np_layer, paddle_layer in zip(np_net, paddle_net):
if hasattr(np_layer, "cell"):
convert_params_for_cell_static(np_layer.cell, paddle_layer.cell,
place)
else:
convert_params_for_cell_static(np_layer.cell_fw,
paddle_layer.cell_fw, place)
convert_params_for_cell_static(np_layer.cell_bw,
paddle_layer.cell_bw, place)
def get_params_for_cell(np_cell, num_layers, idx):
state = np_cell.parameters
weight_list = [('{}.weight_{}'.format(num_layers, idx), state['weight_ih']),
('{}.weight_{}'.format(num_layers,
idx + 1), state['weight_hh'])]
bias_list = [('{}.bias_{}'.format(num_layers, idx), state['bias_ih']),
('{}.bias_{}'.format(num_layers, idx + 1), state['bias_hh'])]
return weight_list, bias_list
def get_params_for_net(np_net):
weight_list = []
bias_list = []
for layer_idx, np_layer in enumerate(np_net):
if hasattr(np_layer, "cell"):
weight, bias = get_params_for_cell(np_layer.cell, layer_idx, 0)
for w, b in zip(weight, bias):
weight_list.append(w)
bias_list.append(b)
else:
for count, cell in enumerate([np_layer.cell_fw, np_layer.cell_bw]):
weight, bias = get_params_for_cell(cell, layer_idx, count * 2)
for w, b in zip(weight, bias):
weight_list.append(w)
bias_list.append(b)
weight_list.extend(bias_list)
return weight_list
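# Illustrative note (np_net and paddle_net are assumed to be parallel lists of RNN
# layers built elsewhere in the test suite; they are not defined in this module):
#
#   convert_params_for_net(np_net, paddle_net)   # dygraph: copy numpy weights in place
#   weight_list = get_params_for_net(np_net)     # static graph: named (param, value) pairs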
|
the-stack_0_5828 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django middleware helper to capture and trace a request."""
import logging
from opencensus.trace.ext import utils
from opencensus.trace.ext.django.config import (settings, convert_to_import)
from opencensus.trace import attributes_helper
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
from opencensus.trace import tracer as tracer_module
from opencensus.trace.samplers import probability
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # pragma: NO COVER
MiddlewareMixin = object
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
REQUEST_THREAD_LOCAL_KEY = 'django_request'
SPAN_THREAD_LOCAL_KEY = 'django_span'
BLACKLIST_PATHS = 'BLACKLIST_PATHS'
GCP_EXPORTER_PROJECT = 'GCP_EXPORTER_PROJECT'
SAMPLING_RATE = 'SAMPLING_RATE'
TRANSPORT = 'TRANSPORT'
SERVICE_NAME = 'SERVICE_NAME'
ZIPKIN_EXPORTER_SERVICE_NAME = 'ZIPKIN_EXPORTER_SERVICE_NAME'
ZIPKIN_EXPORTER_HOST_NAME = 'ZIPKIN_EXPORTER_HOST_NAME'
ZIPKIN_EXPORTER_PORT = 'ZIPKIN_EXPORTER_PORT'
ZIPKIN_EXPORTER_PROTOCOL = 'ZIPKIN_EXPORTER_PROTOCOL'
OCAGENT_TRACE_EXPORTER_ENDPOINT = 'OCAGENT_TRACE_EXPORTER_ENDPOINT'
BLACKLIST_HOSTNAMES = 'BLACKLIST_HOSTNAMES'
log = logging.getLogger(__name__)
class _DjangoMetaWrapper(object):
"""
    Wrapper class which takes an HTTP header name and retrieves the value from
    Django request.META
"""
def __init__(self, meta=None):
self.meta = meta or _get_django_request().META
def get(self, key):
return self.meta.get('HTTP_' + key.upper().replace('-', '_'))
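# Example (illustrative): with request.META containing
# {'HTTP_X_CLOUD_TRACE_CONTEXT': 'abc/123'}, the call
# _DjangoMetaWrapper(request.META).get('X-Cloud-Trace-Context') returns 'abc/123'.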
def _get_django_request():
"""Get Django request from thread local.
:rtype: str
:returns: Django request.
"""
return execution_context.get_opencensus_attr(REQUEST_THREAD_LOCAL_KEY)
def _get_django_span():
"""Get Django span from thread local.
:rtype: str
    :returns: Django span.
"""
return execution_context.get_opencensus_attr(SPAN_THREAD_LOCAL_KEY)
def _get_current_tracer():
"""Get the current request tracer."""
return execution_context.get_opencensus_tracer()
def _set_django_attributes(span, request):
"""Set the django related attributes."""
django_user = getattr(request, 'user', None)
if django_user is None:
return
user_id = django_user.pk
user_name = django_user.get_username()
# User id is the django autofield for User model as the primary key
if user_id is not None:
span.add_attribute('django.user.id', str(user_id))
if user_name is not None:
span.add_attribute('django.user.name', str(user_name))
class OpencensusMiddleware(MiddlewareMixin):
"""Saves the request in thread local"""
def __init__(self, get_response=None):
# One-time configuration and initialization.
self.get_response = get_response
self._sampler = settings.SAMPLER
self._exporter = settings.EXPORTER
self._propagator = settings.PROPAGATOR
self._blacklist_paths = settings.params.get(BLACKLIST_PATHS)
# Initialize the sampler
if self._sampler.__name__ == 'ProbabilitySampler':
_rate = settings.params.get(
SAMPLING_RATE, probability.DEFAULT_SAMPLING_RATE)
self.sampler = self._sampler(_rate)
else:
self.sampler = self._sampler()
# Initialize the exporter
transport = convert_to_import(settings.params.get(TRANSPORT))
if self._exporter.__name__ == 'GoogleCloudExporter':
_project_id = settings.params.get(GCP_EXPORTER_PROJECT, None)
self.exporter = self._exporter(
project_id=_project_id,
transport=transport)
elif self._exporter.__name__ == 'ZipkinExporter':
_service_name = self._get_service_name(settings.params)
_zipkin_host_name = settings.params.get(
ZIPKIN_EXPORTER_HOST_NAME, 'localhost')
_zipkin_port = settings.params.get(
ZIPKIN_EXPORTER_PORT, 9411)
_zipkin_protocol = settings.params.get(
ZIPKIN_EXPORTER_PROTOCOL, 'http')
self.exporter = self._exporter(
service_name=_service_name,
host_name=_zipkin_host_name,
port=_zipkin_port,
protocol=_zipkin_protocol,
transport=transport)
elif self._exporter.__name__ == 'TraceExporter':
_service_name = self._get_service_name(settings.params)
_endpoint = settings.params.get(
OCAGENT_TRACE_EXPORTER_ENDPOINT, None)
self.exporter = self._exporter(
service_name=_service_name,
endpoint=_endpoint,
transport=transport)
elif self._exporter.__name__ == 'JaegerExporter':
_service_name = self._get_service_name(settings.params)
self.exporter = self._exporter(
service_name=_service_name,
transport=transport)
else:
self.exporter = self._exporter(transport=transport)
self.blacklist_hostnames = settings.params.get(
BLACKLIST_HOSTNAMES, None)
# Initialize the propagator
self.propagator = self._propagator()
def process_request(self, request):
"""Called on each request, before Django decides which view to execute.
:type request: :class:`~django.http.request.HttpRequest`
:param request: Django http request.
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return
# Add the request to thread local
execution_context.set_opencensus_attr(
REQUEST_THREAD_LOCAL_KEY,
request)
execution_context.set_opencensus_attr(
'blacklist_hostnames',
self.blacklist_hostnames)
try:
# Start tracing this request
span_context = self.propagator.from_headers(
_DjangoMetaWrapper(_get_django_request().META))
# Reload the tracer with the new span context
tracer = tracer_module.Tracer(
span_context=span_context,
sampler=self.sampler,
exporter=self.exporter,
propagator=self.propagator)
# Span name is being set at process_view
span = tracer.start_span()
span.span_kind = span_module.SpanKind.SERVER
tracer.add_attribute_to_current_span(
attribute_key=HTTP_METHOD,
attribute_value=request.method)
tracer.add_attribute_to_current_span(
attribute_key=HTTP_URL,
attribute_value=str(request.path))
# Add the span to thread local
# in some cases (exceptions, timeouts) currentspan in
# response event will be one of a child spans.
# let's keep reference to 'django' span and
# use it in response event
execution_context.set_opencensus_attr(
SPAN_THREAD_LOCAL_KEY,
span)
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
def process_view(self, request, view_func, *args, **kwargs):
"""Process view is executed before the view function, here we get the
function name add set it as the span name.
"""
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return
try:
# Get the current span and set the span name to the current
# function name of the request.
tracer = _get_current_tracer()
span = tracer.current_span()
span.name = utils.get_func_name(view_func)
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
def process_response(self, request, response):
# Do not trace if the url is blacklisted
if utils.disable_tracing_url(request.path, self._blacklist_paths):
return response
try:
span = _get_django_span()
span.add_attribute(
attribute_key=HTTP_STATUS_CODE,
attribute_value=str(response.status_code))
_set_django_attributes(span, request)
tracer = _get_current_tracer()
tracer.end_span()
tracer.finish()
except Exception: # pragma: NO COVER
log.error('Failed to trace request', exc_info=True)
finally:
return response
def _get_service_name(self, params):
_service_name = params.get(
SERVICE_NAME, None)
if _service_name is None:
_service_name = params.get(
ZIPKIN_EXPORTER_SERVICE_NAME, 'my_service')
return _service_name
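# --- Hedged configuration sketch (not part of this module) ------------------
# The middleware reads `settings.SAMPLER`, `settings.EXPORTER`,
# `settings.PROPAGATOR` and the option keys defined above (SAMPLING_RATE,
# BLACKLIST_PATHS, ...). With the Django integration of this opencensus
# generation those values typically come from the project settings; the
# setting names and dotted paths below are assumptions -- check
# opencensus.trace.ext.django.config for the exact keys your version expects:
#
#     OPENCENSUS_TRACE = {
#         'SAMPLER': 'opencensus.trace.samplers.probability.ProbabilitySampler',
#         'EXPORTER': 'opencensus.trace.print_exporter.PrintExporter',
#         'PROPAGATOR': 'opencensus.trace.propagation.google_cloud_format.'
#                       'GoogleCloudFormatPropagator',
#     }
#     OPENCENSUS_TRACE_PARAMS = {
#         'SAMPLING_RATE': 0.5,
#         'BLACKLIST_PATHS': ['_ah/health'],
#     }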
|
the-stack_0_5829 | import os
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from meross_iot.controller.device import HubDevice
from meross_iot.manager import MerossManager
from meross_iot.model.enums import OnlineStatus
from tests import async_get_client
import asyncio
if os.name == 'nt':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
class TestHub(AioHTTPTestCase):
async def get_application(self):
return web.Application()
async def setUpAsync(self):
# Wait some time before next test-burst
await asyncio.sleep(10)
self.meross_client, self.requires_logout = await async_get_client()
# Look for a device to be used for this test
self.meross_manager = MerossManager(http_client=self.meross_client)
await self.meross_manager.async_init()
await self.meross_manager.async_device_discovery()
self.test_devices = self.meross_manager.find_devices(device_class=HubDevice,
online_status=OnlineStatus.ONLINE)
@unittest_run_loop
async def test_update(self):
if len(self.test_devices) < 1:
self.skipTest("No HUB device has been found to run this test.")
return
dev = self.test_devices[0]
await dev.async_update()
async def tearDownAsync(self):
if self.requires_logout:
await self.meross_client.async_logout()
|
the-stack_0_5830 | import qiskit.quantum_info
from qiskit.quantum_info.synthesis.xx_decompose import XXDecomposer
import numpy as np
from scipy.stats import unitary_group
from monodromy.coverage import *
from monodromy.static.examples import *
from monodromy.haar import expected_cost
import monodromy.render
def default_zx_operation_cost(
strength: Fraction,
# note: Isaac reports this value in percent per degree
scale_factor: float = (64 * 90) / (10000 * 100),
# first component: 2Q invocation cost; second component: local cost
offset: float = 909 / (10000 * 100) + 1 / 1000,
):
"""
A sample fidelity cost model, extracted from experiment, for ZX operations.
"""
return strength * scale_factor + offset
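# With the defaults above, a full CX (strength = Fraction(1)) costs about
# 1 * 5760/1e6 + 909/1e6 + 1/1000 ~= 0.00767, and a half-CX about 0.00479.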
def get_zx_operations(strengths: Dict[Fraction, float]) \
-> List[CircuitPolytope]:
"""
Converts a dictionary mapping fractional CX `strengths` to fidelities to the
corresponding list of `OperationPolytope`s.
"""
operations = []
for strength, fidelity in strengths.items():
operations.append(CircuitPolytope(
operations=[f"rzx(pi/2 * {strength})"],
cost=fidelity,
convex_subpolytopes=exactly(
strength / 4, strength / 4, -strength / 4,
).convex_subpolytopes,
))
return operations
operations = get_zx_operations({
frac: default_zx_operation_cost(frac)
for frac in [Fraction(1), Fraction(1, 2), Fraction(1, 3)]
})
# build the set of covering polytopes
print("==== Working to build a set of covering polytopes ====")
coverage_set = build_coverage_set(operations, chatty=True)
# print it out for user inspection
print("==== Done. Here's what we found: ====")
print_coverage_set(coverage_set)
print("==== Haar volumes ====")
print(f"Haar-expectation cost: {expected_cost(coverage_set, chatty=True)}")
# flex the rendering code
print("==== Render these in Mathematica: =====")
print(monodromy.render.polytopes_to_mathematica(coverage_set))
# perform a gate decomposition
print("==== Compiling a single Haar-random gate into CX, CX/2, CX/3 ====")
# generate a random special unitary
u = unitary_group.rvs(4)
u /= np.linalg.det(u) ** (1 / 4)
# decompose into CX, CX/2, and CX/3
monodromy_decomposer = XXDecomposer(euler_basis="PSX")
circuit = monodromy_decomposer(u, approximate=False)
with np.printoptions(precision=4, suppress=True):
print(u)
print(qiskit.quantum_info.Operator(circuit).data)
print(f"=== {(abs(u - qiskit.quantum_info.Operator(circuit).data) < 1e-1).all()} ===")
print(circuit)
|
the-stack_0_5833 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Category(Model):
"""An object describing identified category.
:param name: Name of the category.
:type name: str
:param score: Scoring of the category.
:type score: float
:param detail: Additional category detail if available.
:type detail:
~azure.cognitiveservices.vision.computervision.models.CategoryDetail
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'score': {'key': 'score', 'type': 'float'},
'detail': {'key': 'detail', 'type': 'CategoryDetail'},
}
def __init__(self, name=None, score=None, detail=None):
super(Category, self).__init__()
self.name = name
self.score = score
self.detail = detail
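# Example (illustrative values only): a deserialized detection result might
# look like Category(name='people_portrait', score=0.91, detail=None), with
# `detail` populated by a CategoryDetail when the service returns one.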
|
the-stack_0_5835 | from collections import deque
males = [int(x) for x in input().split()]
females = deque([int(x) for x in input().split()])
matches = 0
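# Rules implemented by the loop below: non-positive values are discarded;
# equal values form a match; a female value divisible by 25 removes her and
# the next female; a male value divisible by 25 removes him and the next
# male; otherwise the female leaves and the male's value drops by 2.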
while males and females:
current_female = females[0]
current_male = males[-1]
if current_female <= 0:
females.popleft()
elif current_male <= 0:
males.pop()
elif current_male == current_female:
females.popleft()
males.pop()
matches += 1
elif current_female % 25 == 0:
females.popleft()
if females:
females.popleft()
elif current_male % 25 == 0:
males.pop()
if males:
males.pop()
else:
females.popleft()
        males[-1] -= 2
print(f"Matches: {matches}")
if males:
print(f"Males left: {', '.join(reversed([str(x) for x in males]))}")
else:
print(f"Males left: none")
if females:
print(f"Females left: {', '.join([str(x) for x in females])}")
else:
print(f"Females left: none") |
the-stack_0_5837 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import time
import re
import datetime
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy import units as u
from astropy import _erfa as erfa
from .utils import day_frac, quantity_day_frac, two_sum, two_product
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
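# For example, the strptime-style subformat ('date', '%Y-%m-%d', ...) becomes
# ('date', re.compile(r'(?P<year>\d\d\d\d)-(?P<mon>\d{1,2})-(?P<mday>\d{1,2})$'), ...)
# once every % code has been substituted.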
class TimeFormatMeta(type):
"""
Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
`TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
"""
_registry = TIME_FORMATS
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Register time formats that have a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in members and cls.name != 'astropy_time':
mcls._registry[cls.name] = cls
if 'subfmts' in members:
cls.subfmts = _regexify_subfmts(members['subfmts'])
return cls
class TimeFormat(metaclass=TimeFormatMeta):
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = 'utc' # As of astropy 0.4
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if 'mask' not in self.cache:
self.cache['mask'] = np.isnan(self.jd2)
if self.cache['mask'].shape:
self.cache['mask'].flags.writeable = False
return self.cache['mask']
@property
def masked(self):
if 'masked' not in self.cache:
self.cache['masked'] = bool(np.any(self.mask))
return self.cache['masked']
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# val1 cannot contain nan, but val2 can contain nan
ok1 = (val1.dtype == np.double and np.all(np.isfinite(val1)) or
val1.size == 0)
ok2 = val2 is None or (val2.dtype == np.double and
not np.any(np.isinf(val2))) or val2.size == 0
if not (ok1 and ok2):
raise TypeError('Input values for {} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
            # seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances.")
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1. / getattr(self, 'unit', 1.)
if factor != 1.:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
required time scale for this format. In this case if a scale value was
provided that needs to match the class default, otherwise return
the class default.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None):
"""
Return time representation from internal jd1 and jd2. This is
the base method that ignores ``parent`` and requires that
subclasses implement the ``value`` property. Subclasses that
require ``parent`` or have other optional args for ``to_value``
should compute and return the value directly.
"""
return self.mask_if_needed(self.value)
@property
def value(self):
raise NotImplementedError
class TimeJD(TimeFormat):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
@property
def value(self):
return self.jd1 + self.jd2
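# Example, via the Time class imported at the bottom of this module:
#     >>> Time(2451544.5, format='jd', scale='utc').iso
#     '2000-01-01 00:00:00.000'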
class TimeMJD(TimeFormat):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
# TODO - this routine and vals should be Cythonized to follow the ERFA
# convention of preserving precision by adding to the larger of the two
# values in a vectorized operation. But in most practical cases the
# first one is probably biggest.
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
return (self.jd1 - erfa.DJM0) + self.jd2
class TimeDecimalYear(TimeFormat):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2_filled)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return decimalyear
class TimeFromEpoch(TimeFormat):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale
# Initialize the reference epoch (a single time defined in subclasses)
epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
format=self.epoch_format)
self.epoch = epoch
# Now create the TimeFormat object as normal
super().__init__(val1, val2, scale, precision, in_subfmt, out_subfmt,
from_jd)
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For an TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'"
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err))
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'"
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err))
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
time_from_epoch = ((jd1 - self.epoch.jd1) +
(jd2 - self.epoch.jd2)) / self.unit
return self.mask_if_needed(time_from_epoch)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time: seconds from 1970-01-01 00:00:00 UTC.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
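# Example: Time(946684800.0, format='unix').iso gives '2000-01-01 00:00:00.000',
# matching the epoch arithmetic described in the docstring above.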
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
=====
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None} (optional)
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2_filled)
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=7*[None] + [object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0*u.day, dst=0*u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity` (optional)
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity` (optional)
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : string, `None` (optional)
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
    This is a reference implementation that can be made much faster with effort.
"""
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for these classes
if val1.dtype.kind not in ('S', 'U') and val1.size:
raise TypeError('Input values for {} class must be strings'
.format(self.name))
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val, subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
scale = self.scale.upper().encode('ascii'),
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2_filled)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
if '{yday:' in str_fmt:
has_yday = True
else:
has_yday = False
yday = None
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs],
flags=['zerosize_ok']):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
def _select_subfmts(self, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
"""
fnmatchcase = fnmatch.fnmatchcase
subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
raise ValueError(f'No subformats match {pattern}')
return subfmts
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
raise ValueError("Time input terminating in 'Z' must have "
"scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
class TimeDatetime64(TimeISOT):
name = 'datetime64'
def _check_val_type(self, val1, val2):
        # Note: don't care about val2 for this class
if not val1.dtype.kind == 'M':
if val1.size > 0:
raise TypeError('Input values for {} class must be '
'datetime64 objects'.format(self.name))
else:
val1 = np.array([], 'datetime64[D]')
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = '2000'
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']:
val1 = val1.astype('datetime64[D]')
val1 = val1.astype('S')
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype('datetime64')
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
ISOT but can give signed five-digit year (mostly for negative years);
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present"""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm['scale'] is not None:
warnings.warn("FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {!r} is not in the allowed scales {}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError("Input strings for {} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if 'long' not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = 'long' + self.out_subfmt
return super().value
class TimeEpochDate(TimeFormat):
"""
Base class for support floating point Besselian and Julian epoch dates
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
return jd_to_epoch(self.jd1, self.jd2)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit'):
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
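# Example: Time(2000.0, format='jyear') is the epoch J2000.0, i.e.
# 2000-01-01 12:00:00 TT (the default scale set by TimeEpochDate).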
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double],
flags=['zerosize_ok'])
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError('Time {} does not match {} format'
.format(time_str, self.name))
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
class TimeDeltaFormatMeta(TimeFormatMeta):
_registry = TIME_DELTA_FORMATS
class TimeDeltaFormat(TimeFormat, metaclass=TimeDeltaFormatMeta):
"""Base class for time delta representations"""
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_DELTA_SCALES))
return scale
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1./self.unit)
@property
def value(self):
return (self.jd1 + self.jd2) / self.unit
class TimeDeltaSec(TimeDeltaFormat):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaFormat):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
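# Example, using astropy.time.TimeDelta: TimeDelta(1.0, format='jd') and
# TimeDelta(86400.0, format='sec') describe the same interval, and .sec on
# the former returns 86400.0.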
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta"""
name = 'datetime'
def _check_val_type(self, val1, val2):
# Note: don't care about val2 for this class
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime.timedelta objects'.format(self.name))
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer([val1, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + [np.double])
for val, sec in iterator:
sec[...] = val.item().total_seconds()
self.jd1, self.jd2 = day_frac(iterator.operands[-1], 0.0,
divisor=erfa.DAYSEC)
@property
def value(self):
iterator = np.nditer([self.jd1 + self.jd2, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + [object])
for jd, out in iterator:
out[...] = datetime.timedelta(days=jd.item())
return self.mask_if_needed(iterator.operands[-1])
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError
|
the-stack_0_5841 | from __future__ import print_function
import collections
import logging
from itertools import chain, product
import math
import random
_logger = logging.getLogger(__name__)
EvaluationConfig = collections.namedtuple('EvaluationConfig',
['num_samples', 'sample_size'])
FORMAT_STRINGS = {
'default': """Filename : {name}
Num samples: {samplesize_count}
Sample size: {samplesize_avg}
F-score : {fscore_avg:.3}
Precision : {precision_avg:.3}
Recall : {recall_avg:.3}""",
'table': "{name:10} {precision_avg:6.3} {recall_avg:6.3} {fscore_avg:6.3}",
'latex': "{name} & {precision_avg:.3} &"
" {recall_avg:.3} & {fscore_avg:.3} \\\\"}
def _sample(compound_list, size, seed):
"""Create a specific size sample from the compound list using a specific
seed"""
return random.Random(seed).sample(compound_list, size)
class MorfessorEvaluationResult(object):
"""A MorfessorEvaluationResult is returned by a MorfessorEvaluation
    object. Its purpose is to store the evaluation data and provide nice
formatting options.
Each MorfessorEvaluationResult contains the data of 1 evaluation
(which can have multiple samples).
"""
print_functions = {'avg': lambda x: sum(x) / len(x),
'min': min,
'max': max,
'values': list,
'count': len}
    #TODO: maybe add std as a print function?
def __init__(self, meta_data=None):
self.meta_data = meta_data
self.precision = []
self.recall = []
self.fscore = []
self.samplesize = []
self._cache = None
def __getitem__(self, item):
"""Provide dict style interface for all values (standard values and
metadata)"""
if self._cache is None:
self._fill_cache()
return self._cache[item]
def add_data_point(self, precision, recall, f_score, sample_size):
"""Method used by MorfessorEvaluation to add the results of a single
sample to the object"""
self.precision.append(precision)
self.recall.append(recall)
self.fscore.append(f_score)
self.samplesize.append(sample_size)
#clear cache
self._cache = None
def __str__(self):
"""Method for default visualization"""
return self.format(FORMAT_STRINGS['default'])
def _fill_cache(self):
""" Pre calculate all variable / function combinations and put them in
cache"""
self._cache = {'{}_{}'.format(val, func_name): func(getattr(self, val))
for val in ('precision', 'recall', 'fscore',
'samplesize')
for func_name, func in self.print_functions.items()}
self._cache.update(self.meta_data)
def _get_cache(self):
""" Fill the cache (if necessary) and return it"""
if self._cache is None:
self._fill_cache()
return self._cache
def format(self, format_string):
""" Format this object. The format string can contain all variables,
e.g. fscore_avg, precision_values or any item from metadata"""
return format_string.format(**self._get_cache())
class MorfessorEvaluation(object):
""" Do the evaluation of one model, on one testset. The basic procedure is
to create, in a stable manner, a number of samples and evaluate them
independently. The stable selection of samples makes it possible to use
the resulting values for Pair-wise statistical significance testing.
reference_annotations is a standard annotation dictionary:
{compound => ([annoation1],.. ) }
"""
def __init__(self, reference_annotations):
self.reference = {}
for compound, analyses in reference_annotations.items():
self.reference[compound] = list(
tuple(self._segmentation_indices(a)) for a in analyses)
self._samples = {}
def _create_samples(self, configuration=EvaluationConfig(10, 1000)):
"""Create, in a stable manner, n testsets of size x as defined in
test_configuration
"""
#TODO: What is a reasonable limit to warn about a too small testset?
if len(self.reference) < (configuration.num_samples *
configuration.sample_size):
            _logger.warning("The test set is too small for this sample size")
compound_list = sorted(self.reference.keys())
self._samples[configuration] = [
_sample(compound_list, configuration.sample_size, i) for i in
range(configuration.num_samples)]
def get_samples(self, configuration=EvaluationConfig(10, 1000)):
"""Get a list of samples. A sample is a list of compounds.
This method is stable, so each time it is called with a specific
test_set and configuration it will return the same samples. Also this
method caches the samples in the _samples variable.
"""
if not configuration in self._samples:
self._create_samples(configuration)
return self._samples[configuration]
def _evaluate(self, prediction):
"""Helper method to get the precision and recall of 1 sample"""
def calc_prop_distance(ref, pred):
if len(ref) == 0:
return 1.0
diff = len(set(ref) - set(pred))
return (len(ref) - diff) / float(len(ref))
wordlist = sorted(set(prediction.keys()) & set(self.reference.keys()))
recall_sum = 0.0
precis_sum = 0.0
for word in wordlist:
if len(word) < 2:
continue
recall_sum += max(calc_prop_distance(r, p)
for p, r in product(prediction[word],
self.reference[word]))
precis_sum += max(calc_prop_distance(p, r)
for p, r in product(prediction[word],
self.reference[word]))
precision = precis_sum / len(wordlist)
recall = recall_sum / len(wordlist)
f_score = 2.0 / (1.0 / precision + 1.0 / recall)
return precision, recall, f_score, len(wordlist)
@staticmethod
def _segmentation_indices(annotation):
"""Method to transform a annotation into a tuple of split indices"""
cur_len = 0
for a in annotation[:-1]:
cur_len += len(a)
yield cur_len
def evaluate_model(self, model, configuration=EvaluationConfig(10, 1000),
meta_data=None):
"""Get the prediction of the test samples from the model and do the
evaluation
        The meta_data object should preferably contain at least the key 'name'.
"""
if meta_data is None:
meta_data = {'name': 'UNKNOWN'}
mer = MorfessorEvaluationResult(meta_data)
for i, sample in enumerate(self.get_samples(configuration)):
_logger.debug("Evaluating sample {}".format(i))
prediction = {}
for compound in sample:
prediction[compound] = [tuple(self._segmentation_indices(
model.viterbi_segment(compound)[0]))]
mer.add_data_point(*self._evaluate(prediction))
return mer
def evaluate_segmentation(self, segmentation,
configuration=EvaluationConfig(10, 1000),
meta_data=None):
"""Method for evaluating an existing segmentation"""
def merge_constructions(constructions):
compound = constructions[0]
for i in range(1, len(constructions)):
compound = compound + constructions[i]
return compound
segmentation = {merge_constructions(x[1]):
[tuple(self._segmentation_indices(x[1]))]
for x in segmentation}
if meta_data is None:
meta_data = {'name': 'UNKNOWN'}
mer = MorfessorEvaluationResult(meta_data)
for i, sample in enumerate(self.get_samples(configuration)):
_logger.debug("Evaluating sample {}".format(i))
prediction = {k: v for k, v in segmentation.items() if k in sample}
mer.add_data_point(*self._evaluate(prediction))
return mer
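# Hedged usage sketch (not part of the original module): shows how the class
# above is typically driven. The `model` object is a hypothetical placeholder;
# it only needs to provide the viterbi_segment() method used by evaluate_model.
def _example_evaluate_model(reference_annotations, model):
    evaluation = MorfessorEvaluation(reference_annotations)
    result = evaluation.evaluate_model(model,
                                       configuration=EvaluationConfig(10, 1000),
                                       meta_data={'name': 'example-model'})
    return result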
class WilcoxonSignedRank(object):
"""Class for doing statistical signficance testing with the Wilcoxon
Signed-Rank test
It implements the Pratt method for handling zero-differences and
applies a 0.5 continuity correction for the z-statistic.
"""
@staticmethod
def _wilcoxon(d, method='pratt', correction=True):
if method not in ('wilcox', 'pratt'):
raise ValueError
if method == 'wilcox':
d = list(filter(lambda a: a != 0, d))
count = len(d)
ranks = WilcoxonSignedRank._rankdata([abs(v) for v in d])
rank_sum_pos = sum(r for r, v in zip(ranks, d) if v > 0)
rank_sum_neg = sum(r for r, v in zip(ranks, d) if v < 0)
test = min(rank_sum_neg, rank_sum_pos)
mean = count * (count + 1) * 0.25
stdev = (count*(count + 1) * (2 * count + 1))
# compensate for duplicate ranks
no_zero_ranks = [r for i, r in enumerate(ranks) if d[i] != 0]
stdev -= 0.5 * sum(x * (x*x-1) for x in
collections.Counter(no_zero_ranks).values())
stdev = math.sqrt(stdev / 24.0)
if correction:
correction = +0.5 if test > mean else -0.5
else:
correction = 0
z = (test - mean - correction) / stdev
return 2 * WilcoxonSignedRank._norm_cum_pdf(abs(z))
@staticmethod
def _rankdata(d):
od = collections.Counter()
for v in d:
od[v] += 1
rank_dict = {}
cur_rank = 1
for val, count in sorted(od.items(), key=lambda x: x[0]):
rank_dict[val] = (cur_rank + (cur_rank + count - 1)) / 2
cur_rank += count
return [rank_dict[v] for v in d]
@staticmethod
def _norm_cum_pdf(z):
"""Pure python implementation of the normal cumulative pdf function"""
return 0.5 - 0.5 * math.erf(z / math.sqrt(2))
def significance_test(self, evaluations, val_property='fscore_values',
name_property='name'):
"""Takes a set of evaluations (which should have the same
test-configuration) and calculates the p-value for the Wilcoxon signed
rank test
Returns a dictionary with (name1,name2) keys and p-values as values.
"""
results = {r[name_property]: r[val_property] for r in evaluations}
if any(len(x) < 10 for x in results.values()):
_logger.error("Too small number of samples for the Wilcoxon test")
return {}
p = {}
for r1, r2 in product(results.keys(), results.keys()):
p[(r1, r2)] = self._wilcoxon([v1-v2
for v1, v2 in zip(results[r1],
results[r2])])
return p
@staticmethod
def print_table(results):
"""Nicely format a results table as returned by significance_test"""
names = sorted(set(r[0] for r in results.keys()))
col_width = max(max(len(n) for n in names), 5)
for h in chain([""], names):
print('{:{width}}'.format(h, width=col_width), end='|')
print()
for name in names:
print('{:{width}}'.format(name, width=col_width), end='|')
for name2 in names:
print('{:{width}.5}'.format(results[(name, name2)],
width=col_width), end='|')
print()
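# Hedged usage sketch (not part of the original module): illustrates how the
# p-value matrix from significance_test() is meant to be consumed. The two
# result objects are hypothetical MorfessorEvaluationResult instances; they
# only need to support item access for the 'name' and 'fscore_values' keys.
def _example_significance_test(result_a, result_b):
    wsr = WilcoxonSignedRank()
    p_values = wsr.significance_test([result_a, result_b])
    WilcoxonSignedRank.print_table(p_values)
    return p_values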
|
the-stack_0_5842 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import random
import time
import jax
import jax.numpy as jnp
import numpy as np
import networkx as nx
import src.sign_recovery as sign_recovery
from src.global_vars import *
from src.hyperplane_normal import get_ratios_lstsq
from src.tracker import Logger, Tracker
from src.utils import AcceptableFailure, GatherMoreData, matmul, KnownT, cheat_get_inner_layers, which_is_zero
logger = Logger()
@jax.jit
def process_block(ratios, other_ratios):
"""
Let jax efficiently compute pairwise similarity by blocking things.
"""
differences = jnp.abs(ratios[:, jnp.newaxis, :] - other_ratios[jnp.newaxis, :, :])
differences = differences / jnp.abs(ratios[:, jnp.newaxis, :]) + differences / jnp.abs(
other_ratios[jnp.newaxis, :, :])
close = differences < BLOCK_ERROR_TOL * jnp.log(ratios.shape[1])
pairings = jnp.sum(close, axis=2) >= max(MIN_SAME_SIZE, BLOCK_MULTIPLY_FACTOR * (np.log(ratios.shape[1]) - 2))
return pairings
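# Hedged usage sketch (not part of the original file): process_block expects
# two 2-D arrays of per-critical-point ratio vectors that have already been
# normalized along one coordinate and returns an (N, M) boolean pairing
# matrix. The normalization axis below is illustrative; the tolerance
# constants come from src.global_vars via the star import above.
def _example_process_block(ratios, all_ratios, dim=0):
    scaled = ratios / ratios[:, dim:dim + 1]
    scaled_all = all_ratios / all_ratios[:, dim:dim + 1]
    pairings = process_block(scaled, scaled_all)
    return np.nonzero(np.array(pairings))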
def graph_solve(all_ratios, all_criticals, expected_neurons, LAYER, debug=False):
# 1. Load the critical points and ratios we precomputed
all_ratios = np.array(all_ratios, dtype=np.float64)
all_ratios_f32 = np.array(all_ratios, dtype=np.float32)
all_criticals = np.array(all_criticals, dtype=np.float64)
# Batch them to be sensibly sized
ratios_group = [all_ratios_f32[i:i + 1000] for i in range(0, len(all_ratios), 1000)]
criticals_group = [all_criticals[i:i + 1000] for i in range(0, len(all_criticals), 1000)]
# 2. Compute the similarity pairwise between the ratios we've computed
logger.log("Go up to", len(criticals_group), level=Logger.INFO)
now = time.time()
all_pairings = [[] for _ in range(sum(map(len, ratios_group)))]
for batch_index, (criticals, ratios) in enumerate(zip(criticals_group, ratios_group)):
logger.log(batch_index, level=Logger.INFO)
# Compute the all-pairs similarity
axis = list(range(all_ratios.shape[1]))
random.shuffle(axis)
axis = axis[:20]
for dim in axis:
# We may have an error on one of the directions, so let's try all of them
scaled_all_ratios = all_ratios_f32 / all_ratios_f32[:, dim:dim + 1]
scaled_ratios = ratios / ratios[:, dim:dim + 1]
batch_pairings = process_block(scaled_ratios, scaled_all_ratios)
# To get the offset, Compute the cumsum of the length up to batch_index
batch_offset = sum(map(len, ratios_group[:batch_index]))
# And now create the graph matching ratios that are similar
for this_batch_i, global_j in zip(*np.nonzero(np.array(batch_pairings))):
all_pairings[this_batch_i + batch_offset].append(global_j)
print(time.time() - now)
graph = nx.Graph()
# Add the edges to the graph, removing self-loops
graph.add_edges_from([(i, j) for i, js in enumerate(all_pairings) for j in js if abs(i - j) > 1])
components = list(nx.connected_components(graph))
sorted_components = sorted(components, key=lambda x: -len(x))
if CHEATING:
logger.log('Total (unmatched) examples found:',
sorted(collections.Counter(which_is_zero(LAYER, cheat_get_inner_layers(all_criticals))).items()),
level=Logger.INFO)
if len(components) == 0:
logger.log('No components found', level=Logger.ERROR)
raise AcceptableFailure()
logger.log("Graph search found", len(components), "different components with the following counts",
list(map(len, sorted_components)), level=Logger.INFO)
if CHEATING:
which_neurons = [
collections.Counter(which_is_zero(LAYER, cheat_get_inner_layers(all_criticals[list(orig_component)]))) for
orig_component in sorted_components]
first_index_of = [-1] * expected_neurons
for i, items in enumerate(which_neurons):
for item in items.keys():
if first_index_of[item] == -1:
first_index_of[item] = i
logger.log('These components correspond to', which_neurons, level=Logger.INFO)
logger.log('With the corresponding index in the list:', first_index_of, level=Logger.INFO)
previous_num_components = np.inf
while previous_num_components > len(sorted_components):
previous_num_components = len(sorted_components)
candidate_rows = []
candidate_components = []
datas = [all_ratios[list(component)] for component in sorted_components]
results = pool[0].map(ratio_normalize, datas)
candidate_rows = [x[0] for x in results]
candidate_components = sorted_components
candidate_rows = np.array(candidate_rows)
new_pairings = [[] for _ in range(len(candidate_rows))]
# Re-do the pairings
for dim in range(all_ratios.shape[1]):
scaled_ratios = candidate_rows / candidate_rows[:, dim:dim + 1]
batch_pairings = process_block(scaled_ratios, scaled_ratios)
# And now create the graph matching ratios that are similar
for this_batch_i, global_j in zip(*np.nonzero(np.array(batch_pairings))):
new_pairings[this_batch_i].append(global_j)
graph = nx.Graph()
# Add the edges to the graph, ALLOWING self-loops this time
graph.add_edges_from([(i, j) for i, js in enumerate(new_pairings) for j in js])
components = list(nx.connected_components(graph))
components = [sum([list(candidate_components[y]) for y in comp], []) for comp in components]
sorted_components = sorted(components, key=lambda x: -len(x))
logger.log("After re-doing the graph, the component counts is", len(components), "with items",
list(map(len, sorted_components)), level=Logger.INFO)
if CHEATING:
which_neurons = [
collections.Counter(which_is_zero(LAYER, cheat_get_inner_layers(all_criticals[list(orig_component)])))
for orig_component in sorted_components]
first_index_of = [-1] * expected_neurons
for i, items in enumerate(which_neurons):
for item in items.keys():
if first_index_of[item] == -1:
first_index_of[item] = i
logger.log('Corresponding to', which_neurons, level=Logger.INFO)
logger.log("First index:", first_index_of, level=Logger.INFO)
logger.log("Expected neurons", expected_neurons, level=Logger.INFO)
logger.log("Processing each connected component in turn.", level=Logger.INFO)
resulting_examples = []
resulting_rows = []
skips_because_of_nan = 0
failure = None
for c_count, component in enumerate(sorted_components):
if debug:
logger.log("\n", level=Logger.DEBUG)
if c_count >= expected_neurons:
logger.log("WARNING: This one might be a duplicate!", level=Logger.DEBUG)
logger.log("On component", c_count, "with indexs", component, level=Logger.INFO)
if debug and CHEATING:
inner = cheat_get_inner_layers(all_criticals[list(component)])
logger.log('Corresponding to (cheating) ', which_is_zero(LAYER, inner), level=Logger.DEBUG)
possible_matrix_rows = all_ratios[list(component)]
guessed_row, normalize_axis, normalize_error = ratio_normalize(possible_matrix_rows)
logger.log('The guessed error in the computation is', normalize_error, 'with', len(component), 'witnesses',
level=Logger.INFO)
if normalize_error > .01 and len(component) <= 5:
logger.log("Component size less than 5 with high error; this isn't enough to be sure",
level=Logger.INFO)
continue
logger.log("Normalize on axis", normalize_axis, level=Logger.INFO)
if len(resulting_rows):
scaled_resulting_rows = np.array(resulting_rows)
# print(scaled_resulting_rows.shape)
scaled_resulting_rows /= scaled_resulting_rows[:, normalize_axis:normalize_axis + 1]
delta = np.abs(scaled_resulting_rows - guessed_row[np.newaxis, :])
if min(np.nanmax(delta, axis=1)) < 1e-2:
logger.log("Likely have found this node before", level=Logger.ERROR)
raise AcceptableFailure()
if CHEATING:
# Check our work against the ground truth entries in the corresponding matrix
layers = cheat_get_inner_layers(all_criticals[list(component)[0]])
layer_vals = [np.min(np.abs(x)) for x in layers]
which_layer = np.argmin(layer_vals)
M = A[which_layer]
which_neuron = which_is_zero(which_layer, layers)
logger.log("Neuron corresponds to", which_neuron, level=Logger.INFO)
if which_layer != LAYER:
which_neuron = 0
normalize_axis = 0
actual_row = M[:, which_neuron] / M[normalize_axis, which_neuron]
actual_row = actual_row[:guessed_row.shape[0]]
do_print_err = np.any(np.isnan(guessed_row))
if which_layer == LAYER:
error = np.max(np.abs(np.abs(guessed_row) - np.abs(actual_row)))
else:
error = 1e6
logger.log('max error', "%0.8f" % error, len(component), level=Logger.INFO)
if (error > 1e-4 * len(guessed_row) and debug) or do_print_err:
logger.log('real ', " ".join("%2.3f" % x for x in actual_row), level=Logger.INFO)
logger.log('guess', " ".join("%2.3f" % x for x in guessed_row), level=Logger.INFO)
logger.log('gap', " ".join("%2.3f" % (np.abs(x - y)) for x, y in zip(guessed_row, actual_row)),
level=Logger.INFO)
logger.log("--", level=Logger.INFO)
for row in possible_matrix_rows:
logger.log('posbl', " ".join("%2.3f" % x for x in row / row[normalize_axis]), level=Logger.INFO)
logger.log("--", level=Logger.INFO)
scale = 10 ** int(np.round(np.log(np.nanmedian(np.abs(possible_matrix_rows))) / np.log(10)))
possible_matrix_rows /= scale
for row in possible_matrix_rows:
logger.log('posbl', " ".join("%2.3f" % x for x in row), level=Logger.INFO)
if np.any(np.isnan(guessed_row)) and c_count < expected_neurons:
logger.log("Got NaN, need more data", len(component) / sum(map(len, components)), 1 / sizes[LAYER + 1],
level=Logger.INFO)
if len(component) >= 3:
if c_count < expected_neurons:
failure = GatherMoreData([all_criticals[x] for x in component])
skips_because_of_nan += 1
continue
guessed_row[np.isnan(guessed_row)] = 0
if c_count < expected_neurons and len(component) >= 3:
resulting_rows.append(guessed_row)
resulting_examples.append([all_criticals[x] for x in component])
else:
logger.log("Don't add it to the set", level=Logger.INFO)
# We set failure when something went wrong but we want to defer crashing
# (so that we can use the partial solution)
if len(resulting_rows) + skips_because_of_nan < expected_neurons and len(all_ratios) < DEAD_NEURON_THRESHOLD:
logger.log("We have not explored all neurons. Do more random search", len(resulting_rows), skips_because_of_nan,
expected_neurons, level=Logger.INFO)
raise AcceptableFailure(partial_solution=(np.array(resulting_rows), np.array(resulting_examples)))
else:
logger.log("At this point, we just assume the neuron must be dead", level=Logger.INFO)
while len(resulting_rows) < expected_neurons:
resulting_rows.append(np.zeros_like((resulting_rows[0])))
resulting_examples.append([np.zeros_like(resulting_examples[0][0])])
# Here we know it's a GatherMoreData failure, but we want to only do this
# if there was enough data for everything else
if failure is not None:
logger.log("Need to raise a previously generated failure.", level=Logger.INFO)
raise failure
logger.log("Successfully returning a solution attempt.\n", level=Logger.INFO)
return resulting_examples, resulting_rows
def ratio_normalize(possible_matrix_rows):
# We get a set of a bunch of numbers
# a1 b1 c1 d1 e1 f1 g1
# a2 b2 c2 d2 e2 f2 g2
# such that some of them are nan
# We want to compute the pairwise ratios ignoring the nans
now = time.time()
ratio_evidence = [[[] for _ in range(possible_matrix_rows.shape[1])] for _ in range(possible_matrix_rows.shape[1])]
for row in possible_matrix_rows:
for i in range(len(row)):
for j in range(len(row)):
ratio_evidence[i][j].append(row[i] / row[j])
if len(ratio_evidence) > 100:
ratio_evidence = np.array(ratio_evidence, dtype=np.float32)
else:
ratio_evidence = np.array(ratio_evidence, dtype=np.float64)
medians = np.nanmedian(ratio_evidence, axis=2)
errors = np.nanstd(ratio_evidence, axis=2) / np.sum(~np.isnan(ratio_evidence), axis=2) ** .5
errors += 1e-2 * (np.sum(~np.isnan(ratio_evidence), axis=2) == 1)
errors /= np.abs(medians)
errors[np.isnan(errors)] = 1e6
ratio_evidence = medians
last_nan_count = 1e8
last_total_cost = 1e8
while (np.sum(np.isnan(ratio_evidence)) < last_nan_count or last_total_cost < np.sum(errors) * .9) and False:
last_nan_count = np.sum(np.isnan(ratio_evidence))
last_total_cost = np.sum(errors)
logger.log(".", level=Logger.INFO)
logger.log("Takenc", time.time() - now, level=Logger.INFO)
logger.log('nan count', last_nan_count, level=Logger.INFO)
logger.log('total cost', last_total_cost, level=Logger.INFO)
cost_i_over_j = ratio_evidence[:, :, np.newaxis]
cost_j_over_k = ratio_evidence
cost_i_over_k = cost_i_over_j * cost_j_over_k
del cost_i_over_j, cost_j_over_k
logger.log(cost_i_over_k.shape, cost_i_over_k.dtype, level=Logger.INFO)
error_i_over_j = errors[:, :, np.newaxis]
error_j_over_k = errors
error_i_over_k = error_i_over_j + error_j_over_k
best_indexs = np.nanargmin(error_i_over_k, axis=1)
best_errors = np.nanmin(error_i_over_k, axis=1)
del error_i_over_j, error_j_over_k, error_i_over_k
cost_i_over_k_new = []
for i in range(len(best_indexs)):
cost_i_over_k_new.append(cost_i_over_k[i].T[np.arange(len(best_indexs)), best_indexs[i]])
cost_i_over_k = np.array(cost_i_over_k_new)
which = best_errors < errors
ratio_evidence = cost_i_over_k * which + ratio_evidence * (1 - which)
errors = best_errors
# Choose the column with the fewest nans to return
nancount = np.sum(np.isnan(ratio_evidence), axis=0)
# print("Column nan count", nancount)
column_ok = np.min(nancount) == nancount
best = (None, np.inf)
cost_i_over_j = ratio_evidence[:, :, np.newaxis]
cost_j_over_k = ratio_evidence
cost_i_over_k = cost_i_over_j * cost_j_over_k
cost_i_j_k = cost_i_over_k
# cost from i through j to k
for column in range(len(column_ok)):
if not column_ok[column]:
continue
quality = np.nansum(np.abs(cost_i_j_k[:, column, :] - ratio_evidence))
# print('q',quality)
if quality < best[1]:
best = (column, quality)
column, best_error = best
return ratio_evidence[:, column], column, best_error
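# Hedged usage sketch (not part of the original file): ratio_normalize takes a
# (num_witnesses, dim) matrix whose rows are noisy, partially-NaN multiples of
# the same underlying weight row and returns the merged row, the column it was
# normalized against, and an error estimate. The toy numbers are made up.
def _example_ratio_normalize():
    rows = np.array([[1.0, 2.0, np.nan],
                     [2.0, 4.0, 8.0],
                     [0.5, np.nan, 2.0]])
    guessed_row, column, error = ratio_normalize(rows)
    return guessed_row, column, error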
def gather_ratios(critical_points, known_T, check_fn, LAYER, COUNT):
this_layer_critical_points = []
logger.log("Gathering", COUNT, "critical points", level=Logger.INFO)
for point in critical_points:
if LAYER > 0:
if any(np.any(np.abs(x) < 1e-5) for x in known_T.get_hidden_layers(point)):
continue
if CHEATING:
if np.any(np.abs(cheat_get_inner_layers(point)[0]) < 1e-10):
logger.log(cheat_get_inner_layers(point), level=Logger.INFO)
logger.log("Looking at one I don't need to", level=Logger.INFO)
if LAYER > 0 and np.sum(known_T.forward(point) != 0) <= 1:
logger.log("Not enough hidden values are active to get meaningful data", level=Logger.INFO)
continue
if not check_fn(point):
# print("Check function rejected it")
continue
if CHEATING:
logger.log("What layer is this neuron on (by cheating)?",
[(np.min(np.abs(x)), np.argmin(np.abs(x))) for x in cheat_get_inner_layers(point)],
level=Logger.INFO)
tmp = Tracker().query_count
for EPS in [GRAD_EPS, GRAD_EPS / 10, GRAD_EPS / 100]:
try:
normal = get_ratios_lstsq(LAYER, [point], [range(DIM)], known_T, eps=EPS)[0].flatten()
# normal = get_ratios([point], [range(DIM)], eps=EPS)[0].flatten()
break
except AcceptableFailure:
logger.log("Try again with smaller eps", level=Logger.INFO)
pass
# print("LSTSQ Delta queries", query_count-tmp)
this_layer_critical_points.append((normal, point))
# coupon collector: we need nlogn points.
logger.log("Up to", len(this_layer_critical_points), 'of', COUNT, level=Logger.INFO)
if len(this_layer_critical_points) >= COUNT:
break
return this_layer_critical_points
def compute_layer_values(critical_points, known_T, LAYER):
if LAYER == 0:
COUNT = neuron_count[LAYER + 1] * 3
else:
COUNT = neuron_count[LAYER + 1] * np.log(sizes[LAYER + 1]) * 3
# type: [(ratios, critical_point)]
this_layer_critical_points = []
partial_weights = None
partial_biases = None
def check_fn(point):
if partial_weights is None:
return True
hidden = matmul(known_T.forward(point, with_relu=True), partial_weights.T, partial_biases)
if np.any(np.abs(hidden) < 1e-4):
return False
return True
logger.log("", level=Logger.INFO)
logger.log("Start running critical point search to find neurons on layer", LAYER, level=Logger.INFO)
while True:
logger.log("At this iteration I have", len(this_layer_critical_points), "critical points", level=Logger.INFO)
def reuse_critical_points():
for witness in critical_points:
yield witness
this_layer_critical_points.extend(gather_ratios(reuse_critical_points(), known_T, check_fn,
LAYER, COUNT))
logger.log("Query count after that search:", Tracker().query_count, level=Logger.INFO)
logger.log("And now up to ", len(this_layer_critical_points), "critical points", level=Logger.INFO)
## filter out duplicates
filtered_points = []
        # Let's not add points that are identical to ones we've already done.
for i, (ratio1, point1) in enumerate(this_layer_critical_points):
for ratio2, point2 in this_layer_critical_points[i + 1:]:
if np.sum((point1 - point2) ** 2) ** .5 < 1e-10:
break
else:
filtered_points.append((ratio1, point1))
this_layer_critical_points = filtered_points
logger.log("After filtering duplicates we're down to ", len(this_layer_critical_points), "critical points",
level=Logger.INFO)
logger.log("Start trying to do the graph solving", level=Logger.INFO)
try:
critical_groups, extracted_normals = graph_solve([x[0] for x in this_layer_critical_points],
[x[1] for x in this_layer_critical_points],
neuron_count[LAYER + 1],
LAYER=LAYER,
debug=True)
break
except GatherMoreData as e:
logger.log("Graph solving failed because we didn't explore all sides of at least one neuron",
level=Logger.INFO)
logger.log("Fall back to the hyperplane following algorithm in order to get more data", level=Logger.INFO)
def mine(r):
while len(r) > 0:
logger.log("Yielding a point", level=Logger.INFO)
yield r[0]
r = r[1:]
logger.log("No more to give!", level=Logger.INFO)
prev_T = KnownT(known_T.A[:-1], known_T.B[:-1])
_, more_critical_points = sign_recovery.solve_layer_sign(prev_T, known_T.A[-1], known_T.B[-1], mine(e.data),
LAYER - 1, already_checked_critical_points=True,
only_need_positive=True)
logger.log("Add more", len(more_critical_points), level=Logger.INFO)
this_layer_critical_points.extend(gather_ratios(more_critical_points, known_T, check_fn,
LAYER, 1e6))
logger.log("Done adding", level=Logger.INFO)
COUNT = neuron_count[LAYER + 1]
except AcceptableFailure as e:
logger.log("Graph solving failed; get more points", level=Logger.INFO)
COUNT = neuron_count[LAYER + 1]
if 'partial_solution' in dir(e):
if len(e.partial_solution[0]) > 0:
partial_weights, corresponding_examples = e.partial_solution
logger.log("Got partial solution with shape", partial_weights.shape, level=Logger.INFO)
if CHEATING:
logger.log("Corresponding to",
np.argmin(
np.abs(cheat_get_inner_layers([x[0] for x in corresponding_examples])[LAYER]),
axis=1), level=Logger.INFO)
partial_biases = []
for weight, examples in zip(partial_weights, corresponding_examples):
hidden = known_T.forward(examples, with_relu=True)
logger.log("hidden", np.array(hidden).shape, level=Logger.INFO)
bias = -np.median(np.dot(hidden, weight))
partial_biases.append(bias)
partial_biases = np.array(partial_biases)
logger.log("Number of critical points per cluster", [len(x) for x in critical_groups], level=Logger.INFO)
point_per_class = [x[0] for x in critical_groups]
extracted_normals = np.array(extracted_normals).T
# Compute the bias because we know wx+b=0
extracted_bias = [matmul(known_T.forward(point_per_class[i], with_relu=True), extracted_normals[:, i], c=None) for i
in range(neuron_count[LAYER + 1])]
# Don't forget to negate it.
# That's important.
# No, I definitely didn't forget this line the first time around.
extracted_bias = -np.array(extracted_bias)
# For the failed-to-identify neurons, set the bias to zero
extracted_bias *= np.any(extracted_normals != 0, axis=0)[:, np.newaxis]
if CHEATING:
# Compute how far we off from the true matrix
real_scaled = A[LAYER] / A[LAYER][0]
extracted_scaled = extracted_normals / extracted_normals[0]
mask = []
reorder_rows = []
for i in range(len(extracted_bias)):
which_idx = np.argmin(np.sum(np.abs(real_scaled - extracted_scaled[:, [i]]), axis=0))
reorder_rows.append(which_idx)
mask.append((A[LAYER][0, which_idx]))
logger.log('matrix norm difference', np.sum(np.abs(extracted_normals * mask - A[LAYER][:, reorder_rows])),
level=Logger.INFO)
else:
mask = [1] * len(extracted_bias)
return extracted_normals, extracted_bias, mask
|
the-stack_0_5843 | from PIL import Image
import pytesseract
import argparse
import os
import time
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", default="post",
help="path of folder with images to be OCR'd, folder should not include another forlders")
args = vars(ap.parse_args())
path=args["path"]+"/"
print(path)
postcoden=["3705"]
brievenAlsText=[]
brievenAlsFoto=os.listdir(path)
print(brievenAlsFoto)
counter=0
for im in brievenAlsFoto:
img = Image.open(path+im)
size = img.size
BottomL=(size[0]*3132)/3732
BottomH=(size[1]*1616)/2616
TopL=(size[0]*300)/3732
TopH=(size[1]*500)/2616
area = (TopL,TopH,BottomL,BottomH)
# img = img.crop(area)
if counter > 2:
img.show()
brievenAlsText.append(pytesseract.image_to_string(img, lang='eng'))
counter+=1
a=0
found=0
postcode=""
adres=[]
print(brievenAlsText[-2])
print(brievenAlsText[-1])
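# Comment added for clarity (not in the original script): each OCR'd letter is
# scanned character by character for a 4-digit sequence matching one of the
# known postcodes (eval() is used only as a crude digit test, with non-digits
# caught by the except branch). A few trailing characters are kept to complete
# the postcode, and the word immediately before it is taken as the house number.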
for text in brievenAlsText:
voriglen=len(adres)
dublicate=0
for letter in text:
try:
if type(eval(letter))==int:
a+=1
postcode+=letter
if a == 4 and postcode in postcoden:
found=1
if dublicate == 2:
found=0
adres.pop(-1)
adres.append("skip")
break
dublicate+=1
continue
except:
a=0
if found==0:
postcode=""
if found <=3 and found!=0:
postcode+=letter
found+=1
if found==4:
text = text.split(postcode)
text[0] = text[0].strip(" ")
text[0] = text[0].split(" ")
huisnummer = text[0][-1]
adres.append(postcode + " " + huisnummer)
text=text[1]
postcode=""
found = 0
dublicate+=1
continue
if len(adres)==voriglen:
adres.append("skip")
for i in range(len(adres)):
adres[i]=adres[i].strip("\n")
if "\n" in adres[i]:
adres[i]=adres[i].split("\n")
adres[i]=adres[i][0]
print(adres)
print(round(time.perf_counter(),2))
|
the-stack_0_5845 | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.piratesgui.BoardingPermissionPanel
from pandac.PandaModules import *
from direct.gui.DirectGui import DGG
from pirates.piratesgui.BorderFrame import BorderFrame
from pirates.piratesgui.GuiPanel import GuiPanel
from pirates.piratesgui.GuiButton import GuiButton
from pirates.piratesgui.DialogButton import DialogButton
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui.CheckButton import CheckButton
from pirates.piratesbase import PiratesGlobals
class BoardingPermissionPanel(GuiPanel):
__module__ = __name__
def __init__(self, parent, *args, **kw):
self.guiSetup = False
optiondefs = (('parent', parent, None), ('pos', (-0.58, 0, -0.09), None), ('command', None, None), ('extraArgs', [], None), ('ownShip', 0, None))
self.defineoptions(kw, optiondefs)
GuiPanel.__init__(self, title=PLocalizer.BoardPermTitle, h=0.8, w=0.5, titleSize=1.5, showClose=False, **kw)
self.initialiseoptions(BoardingPermissionPanel)
self.titleLabel['text_align'] = TextNode.ACenter
self.titleLabel.setPos(0.23, 0, 0.72)
self.setupGui()
return
def destroy(self):
self.button = None
self.background = None
self.friendsButton = None
self.crewButton = None
self.guildButton = None
self.publicButton = None
GuiPanel.destroy(self)
return
def setupGui(self):
self.destroyGui()
if not self.guiSetup:
self.button = DialogButton(parent=self, buttonStyle=DialogButton.NO, pos=(0.25,
0,
0.08), text=PLocalizer.lClose, helpPos=(-0.4, 0, 0.03), helpDelay=0.3, command=self['command'], extraArgs=self['extraArgs'])
self.background = BorderFrame(parent=self, pos=(0.05, 0, 0.05), frameSize=[0.0, 0.4, 0.1, 0.6], bgColorScale=VBase4(0, 0, 0, 0.75), bgTransparency=1, flatten=0)
if self['ownShip']:
state = DGG.NORMAL
else:
state = DGG.DISABLED
ship = localAvatar.getShip()
if ship:
friendState = ship.getAllowFriendState()
crewState = ship.getAllowCrewState()
guildState = ship.getAllowGuildState()
publicState = ship.getAllowPublicState()
else:
friendState = 0
crewState = 0
guildState = 0
publicState = 0
buttonOptions = {'parent': self.background, 'state': state, 'relief': None, 'pos': (0.06, 0, 0.53), 'scale': 0.3, 'text': PLocalizer.CrewBoardingAccessAllowFriends, 'value': friendState, 'text_pos': (0.167, -0.06, 0), 'text0_fg': PiratesGuiGlobals.TextFG1, 'text1_fg': PiratesGuiGlobals.TextFG1, 'text2_fg': PiratesGuiGlobals.TextFG1, 'text3_fg': PiratesGuiGlobals.TextFG9, 'text_font': PiratesGlobals.getInterfaceFont(), 'text_scale': 0.15, 'text_shadow': (0, 0, 0, 1), 'text_align': TextNode.ALeft, 'command': self.allowFriends}
self.friendsButton = CheckButton(**buttonOptions)
buttonOptions['text'] = PLocalizer.CrewBoardingAccessAllowCrew
buttonOptions['pos'] = (buttonOptions['pos'][0], buttonOptions['pos'][1], buttonOptions['pos'][2] - 0.12)
buttonOptions['command'] = self.allowCrew
buttonOptions['value'] = crewState
self.crewButton = CheckButton(**buttonOptions)
buttonOptions['text'] = PLocalizer.CrewBoardingAccessAllowGuild
buttonOptions['pos'] = (buttonOptions['pos'][0], buttonOptions['pos'][1], buttonOptions['pos'][2] - 0.12)
buttonOptions['command'] = self.allowGuild
buttonOptions['value'] = guildState
self.guildButton = CheckButton(**buttonOptions)
buttonOptions['text'] = PLocalizer.CrewBoardingAccessAllowPublic
buttonOptions['pos'] = (buttonOptions['pos'][0], buttonOptions['pos'][1], buttonOptions['pos'][2] - 0.12)
buttonOptions['command'] = self.allowPublic
buttonOptions['value'] = publicState
self.publicButton = CheckButton(**buttonOptions)
self.guiSetup = True
return
def destroyGui(self):
if self.guiSetup:
self.background.destroy()
self.background = None
self.friendsButton.destroy()
self.friendsButton = None
self.crewButton.destroy()
self.crewButton = None
self.guildButton.destroy()
self.guildButton = None
self.publicButton.destroy()
self.publicButton = None
self.button.destroy()
self.button = None
self.guiSetup = False
return
def allowFriends(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowFriendState(allow)
def allowCrew(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowCrewState(allow)
def allowGuild(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowGuildState(allow)
def allowPublic(self, allow):
if self['ownShip']:
ship = localAvatar.getShip()
if ship:
ship.b_setAllowPublicState(allow)
def setAllowFriends(self, allow):
self.friendsButton['value'] = allow
def setAllowCrew(self, allow):
self.crewButton['value'] = allow
def setAllowGuild(self, allow):
self.guildButton['value'] = allow
def setAllowPublic(self, allow):
self.publicButton['value'] = allow |
the-stack_0_5847 | #{{{ Import
import numpy as np
pi = np.pi
#}}}
#{{{ Snell's Law
def deflection_angle(theta, n1, n2, deg=True):
"""Calculate deflection angle according to Snell's law.
Parameters
----------
theta : float
Angle of incidence.
n1 : float
Refractive index of the first medium.
n2 : float
        Refractive index of the second medium.
deg : boolean, optional
True if theta is specified in degrees.
"""
if deg:
factor = pi/180.0
else:
factor = 1.0
return np.arcsin(n1*np.sin(theta*factor)/n2)/factor
#}}}
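#{{{ Example
# Hedged example (not part of the original module): a quick numerical check of
# Snell's law for an air-to-glass interface. The indices 1.0 and 1.5 are
# illustrative values only.
def _example_deflection_angle():
    theta_out = deflection_angle(30.0, n1=1.0, n2=1.5)
    # arcsin(sin(30 deg) / 1.5) is roughly 19.47 deg
    return theta_out
#}}}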
#{{{ Geometry utilities
#{{{ line_plane_intersection
def line_plane_intersection(pos,
dirVect,
plane_center,
normalVector,
diameter):
'''
Compute the intersection point between a line
and a plane
Parameters
----------
pos : array
The position of the end point of the line.
dirVert : array
The directional vector specifying the line.
plane_center : array
The position of the center of the plane.
normalVector: array
The normal vector of the plane.
diameter: float
The diameter of the plane.
Returns
-------
dict
The returned value is a dictionary with the following keys:
"Intersection Point": numpy array of the coordinates of the intersection point.
"isHit": A boolean value of whether the line intersects with the plane or not.
"distance": Distance between the origin of the line and the intersection point.
"distance from center": Distance between the center of the plane and the intersection point.
'''
#Make sure the inputs are ndarrays
pos = np.array(pos, dtype=np.float64)
dirVect = np.array(dirVect, dtype=np.float64)
plane_center = np.array(plane_center, dtype=np.float64)
normalVector = np.array(normalVector, dtype=np.float64)
diameter = float(diameter)
#Get a normalized vector along the plane
plVect = np.array([-normalVector[1], normalVector[0]])
plVect = plVect/np.linalg.norm(plVect)
#Normalize
dirVect = dirVect/np.linalg.norm(dirVect)
#Make sure that the plVect and dirVect are not parallel
if np.abs(np.dot(dirVect, plVect)) > 1 - 1e-10:
return {'Intersection Point': np.array((0.,0.)), 'isHit': False,
'distance': 0.0,
'distance from center': 0.0}
#Solve line equations to get the intersection point
M = np.vstack((dirVect, -plVect)).T
ans = np.linalg.solve(M, plane_center - pos)
intersection_point = pos + ans[0]*dirVect
#How far the intersection point is from the center
#of the plane
dist_from_center = np.abs(ans[1])
if dist_from_center > diameter/2.0\
or ans[0] < 0.\
or np.dot(dirVect, normalVector) > 0.:
hit = False
else:
hit = True
return {'Intersection Point': intersection_point, 'isHit': hit,
'distance': np.abs(ans[0]),
'distance from center': ans[1]}
#}}}
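#{{{ Example
# Hedged usage sketch (not part of the original module): a ray fired along +x
# from the origin against a plane of diameter 2 centred at (1, 0) whose normal
# faces the ray. All numbers are illustrative.
def _example_line_plane_intersection():
    hit = line_plane_intersection(pos=[0.0, 0.0],
                                  dirVect=[1.0, 0.0],
                                  plane_center=[1.0, 0.0],
                                  normalVector=[-1.0, 0.0],
                                  diameter=2.0)
    # hit['isHit'] is True and hit['Intersection Point'] is approximately [1, 0]
    return hit
#}}}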
#{{{ line_arc_intersection
def line_arc_intersection(pos,
dirVect,
chord_center,
chordNormVect,
invROC,
diameter,
verbose=False):
'''
Compute the intersection point between a line
and an arc.
Parameters
----------
pos : array
Origin of the line.
dirVect : array
Direction of the line.
chord_center : array
The center of the chord made by the arc.
chordNormVect : array
Normal vector of the chord.
invROC : float
Inverse of the ROC of the arc. Positive for concave surface.
diameter : float
Length of the chord.
verbose : boolean, optional
Prints useful information.
Returns
-------
dict
The returned value is a dictionary with the following keys:
"Intersection Point": numpy array of the coordinates of the intersection point.
"isHit": A boolean value of whether the line intersects with the plane or not.
"distance": Distance between the origin of the line and the intersection point.
"localNormVect": localNormVect,
"localNormAngle": localNormAngle.
'''
#Make sure the inputs are ndarrays
pos = np.array(pos, dtype=np.float64)
dirVect = np.array(dirVect, dtype=np.float64)
chord_center = np.array(chord_center, dtype=np.float64)
chordNormVect = np.array(chordNormVect, dtype=np.float64)
invROC = float(invROC)
diameter = float(diameter)
#Normalize
dirVect = dirVect/np.linalg.norm(dirVect)
chordNormVect = chordNormVect/np.linalg.norm(chordNormVect)
#Check if the ROC is too large.
if np.abs(invROC) < 1e-5:
#It is almost a plane
ans = line_plane_intersection(pos, dirVect, chord_center, chordNormVect, diameter)
localNormVect = chordNormVect
localNormAngle = np.mod(np.arctan2(localNormVect[1],
localNormVect[0]), 2*pi)
ans['localNormVect'] = localNormVect
ans['localNormAngle'] = localNormAngle
return ans
ROC = 1/invROC
#Compute the center of the arc
theta = np.arcsin(diameter/(2*ROC))
l = ROC*np.cos(theta)
arc_center = chord_center + chordNormVect*l
#For convex surface, pos has to be outside the circle.
if ROC < 0 and np.linalg.norm(pos - arc_center) < np.abs(ROC):
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
#First, decompose the vector connecting from the arc_center
#to pos into the components parallel to the line and orthogonal to it.
# s is the component in the orthogonal direction and t is the one along
#the line.
#A vector orthogonal to the line
k = np.array([-dirVect[1], dirVect[0]])
#Solve the equation to decompose the vector pos-arc_center
M = np.vstack((k, -dirVect)).T
ans = np.linalg.solve(M, pos - arc_center)
s = ans[0]
t = ans[1]
if np.abs(s) > np.abs(ROC):
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
#Compute two cross points
#Work with the chord formed by the line and the circle.
#d is half the length of the chord.
d = np.sqrt(ROC**2 - s**2)
if ROC > 0:
intersection_point = k*s+arc_center + d*dirVect
localNormVect = arc_center - intersection_point
else:
intersection_point = k*s+arc_center - d*dirVect
localNormVect = intersection_point - arc_center
#Check if dirVect and the vector connecting from pos to intersection_point
#are pointing the same direction.
if np.dot(dirVect, intersection_point - pos) < 0:
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
#Normalize
localNormVect = localNormVect/np.linalg.norm(localNormVect)
localNormAngle = np.mod(np.arctan2(localNormVect[1],
localNormVect[0]), 2*pi)
#Check if the intersection point is within the
#diameter
v0 = - np.sign(ROC) * chordNormVect*(1-1e-16) #(1-1e-16) is necessary to avoid rounding error
v1 = intersection_point - arc_center
v1 = v1/np.linalg.norm(v1)*(1-1e-16)
if np.arccos(np.dot(v0,v1)) > np.abs(theta):
if verbose:
print('The line does not hit the arc.')
return {'isHit': False}
distance = np.linalg.norm(intersection_point - pos)
return {'Intersection Point': intersection_point, 'isHit': True,
'distance': distance, 'localNormVect': localNormVect,
'localNormAngle': localNormAngle}
#}}}
#{{{ vector_rotation_2D
def vector_rotation_2D(vect, angle):
"""Rotate a 2D vector by an angle.
Parameters
----------
vect : array
A 2D vector.
angle : float
Angle of rotation in radians.
Returns
-------
array
The rotated vector.
"""
vect = np.array(vect)
angle = float(angle)
M = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle),np.cos(angle)]])
return np.dot(M, vect)
#}}}
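#{{{ Example
# Hedged example (not part of the original module): rotating the unit x-vector
# by 90 degrees should give (approximately) the unit y-vector.
def _example_vector_rotation_2D():
    rotated = vector_rotation_2D([1.0, 0.0], pi / 2)
    # rotated is approximately [0.0, 1.0]
    return rotated
#}}}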
def vector_normalize(vect):
'''
Normalize a vector
Parameters
----------
vect : array
The vector to be normalized
Returns
-------
array
The normalized vector.
'''
return vect/np.linalg.norm(vect)
#{{{ normSpheric
def normSpheric(normAngle, invROC, dist_from_center):
'''
Returns the local normal angle of a spheric mirror
at a distance from the center.
Parameters
----------
normAngle : float
The angle formed by the normal vector of the mirror
at the center and the x-axis.
invROC : float
1/R, where R is the ROC of the mirror.
dist_from_center: float
The distance from the center of the point where
the local normal is requested.
This is a signed value.
For a mirror facing +x (the normal vector points
towards positive x direction), this distance
is positive for points with positive y coordinate,
and negative for points with negative y coordinate.
Returns
-------
float
The local normal angle of a spheric mirror
at a distance from the center.
'''
normAngle = np.mod(normAngle, 2*pi)
return np.mod(np.arcsin(- dist_from_center * invROC) + normAngle, 2*pi)
#}}}
#{{{ reflection and deflection angle
def refl_defl_angle(beamAngle, normAngle, n1, n2, invROC=None):
'''
Returns a tuples of reflection and deflection angles.
Parameters
----------
beamAngle : float
The angle formed by the propagation direction vector
of the incident beam and the x-axis.
normAngle : float
The angle formed by the normal vector of the surface
and the x-axis.
n1 : float
Index of refraction of the incident side medium.
n2 : float
Index of refraction of the transmission side medium.
invROC : float or None, optional
Inverse of the radius of curvature.
Returns
-------
6-tuple or 2-tuple
(reflAngle, deflAngle, Mrx, Mry, Mtx, Mty) or (reflAngle, deflAngle)
'''
beamAngle = np.mod(beamAngle, 2*pi)
normAngle = np.mod(normAngle, 2*pi)
incidentAngle = np.mod(beamAngle - normAngle, 2*pi) - pi
reflAngle = np.mod(normAngle - incidentAngle, 2*pi)
deflAngle = np.arcsin(n1*np.sin(incidentAngle)/n2)
deflAngle = np.mod(deflAngle + pi + normAngle, 2*pi)
    if invROC is not None:
#Calculate ABCD matrices
#Absolute value of the incident angle
theta1 = np.abs(incidentAngle)
#For reflection
Mrx = np.array([[1., 0.], [-2*n1*invROC/np.cos(theta1), 1.]])
Mry = np.array([[1., 0.], [-2*n1*invROC*np.cos(theta1), 1.]])
#For transmission
theta2 = np.arcsin(n1*np.sin(theta1)/n2)
nex = (n2*np.cos(theta2)-n1*np.cos(theta1))/(np.cos(theta1)*np.cos(theta2))
Mtx = np.array([[np.cos(theta2)/np.cos(theta1), 0.],
[nex*invROC, np.cos(theta1)/np.cos(theta2)]])
ney = n2*np.cos(theta2)-n1*np.cos(theta1)
Mty = np.array([[1., 0.],[ney*invROC, 1.]])
return (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty)
else:
return (reflAngle, deflAngle)
#}}}
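#{{{ Example
# Hedged usage sketch (not part of the original module): a beam travelling in
# the +x direction (angle 0) hitting a flat air-glass interface whose normal
# points along -x (angle pi). The indices are illustrative only.
def _example_refl_defl_angle():
    refl, defl = refl_defl_angle(beamAngle=0.0, normAngle=pi, n1=1.0, n2=1.5)
    # At normal incidence the reflection goes back along -x (refl ~ pi) and the
    # transmitted beam keeps travelling along +x (defl ~ 0, modulo 2*pi).
    return refl, defl
#}}}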
#{{{ reflection and deflection angle for cylindrical surface
def cyl_refl_defl_angle(beamAngle, normAngle, n1, n2, invROC=None, curve_direction='h'):
'''
Returns a tuples of reflection and deflection angles for incidence of a beam into a cylindrical surface.
Parameters
----------
beamAngle : float
The angle formed by the propagation direction vector
of the incident beam and the x-axis.
normAngle : float
The angle formed by the normal vector of the surface
and the x-axis.
n1 : float
Index of refraction of the incident side medium.
n2 : float
Index of refraction of the transmission side medium.
invROC : float or None, optional
Inverse of the radius of curvature.
curve_direction : str, optional
Direction of curvature. Either 'h' or 'v'.
'''
beamAngle = np.mod(beamAngle, 2*pi)
normAngle = np.mod(normAngle, 2*pi)
incidentAngle = np.mod(beamAngle - normAngle, 2*pi) - pi
reflAngle = np.mod(normAngle - incidentAngle, 2*pi)
deflAngle = np.arcsin(n1*np.sin(incidentAngle)/n2)
deflAngle = np.mod(deflAngle + pi + normAngle, 2*pi)
    if invROC is not None:
#Calculate ABCD matrices
#Absolute value of the incident angle
theta1 = np.abs(incidentAngle)
#For reflection
if curve_direction == 'h':
Mrx = np.array([[1., 0.], [-2*n1*invROC/np.cos(theta1), 1.]])
Mry = np.array([[1., 0.], [0., 1.]])
else:
Mrx = np.array([[1., 0.], [0., 1.]])
Mry = np.array([[1., 0.], [-2*n1*invROC*np.cos(theta1), 1.]])
#For transmission
theta2 = np.arcsin(n1*np.sin(theta1)/n2)
nex = (n2*np.cos(theta2)-n1*np.cos(theta1))/(np.cos(theta1)*np.cos(theta2))
Mtx = np.array([[np.cos(theta2)/np.cos(theta1), 0.],
[nex*invROC, np.cos(theta1)/np.cos(theta2)]])
ney = n2*np.cos(theta2)-n1*np.cos(theta1)
Mty = np.array([[1., 0.],[ney*invROC, 1.]])
return (reflAngle, deflAngle, Mrx, Mry, Mtx, Mty)
else:
return (reflAngle, deflAngle)
#}}}
#}}}
#{{{ VariCAD utility functions
def vc_deflect(theta, theta1, n1, n2):
'''
Deflection angle helper function for VariCAD.
Parameters
----------
theta : float
Angle of the surface measured from right.
theta1 : float
Angle of the incident beam measured from right.
n1 : float
Index of refraction of the incident side medium.
n2 : float
Index of refraction of the transmission side medium.
Returns
-------
phi2 : float
Angle of the deflected beam measured from right.
'''
#Combert theta and theta1 to 0-360 format
if theta < 0:
theta = 360.0 + theta
if theta > 180:
theta = theta -180.0
if theta1 < 0:
theta1 = 360.0 + theta1
#Determine the incident angle
phi = abs(theta - theta1)
phi1 = 90.0-np.arcsin(np.abs(np.sin(pi*phi/180.0)))*180.0/pi
#Calculate deflection angle
phi2 = deflection_angle(phi1, n1, n2)
#Convert to the 0-360 angle
s1 = np.sign(np.sin(pi*(theta1 - theta)/180.0))
s2 = -np.sign(np.cos(pi*(theta1 - theta)/180.0))
phi2 = theta + s1*90 + s1*s2*phi2
return phi2
def vc_reflect(theta, theta1):
"""Convert theta and theta1 to 0-360 format.
Parameters
----------
theta : float
Angle of the surface measured from right.
theta1 : float
Angle of the incident beam measured from right.
Returns
-------
    float
        Angle of the reflected beam measured from right.
"""
    #Convert theta and theta1 to 0-360 format
if theta < 0:
theta = 360.0 + theta
if theta > 180:
theta = theta -180.0
if theta1 < 0:
theta1 = 360.0 + theta1
return theta - (theta1 - theta)
#}}}
|
the-stack_0_5849 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 16 14:52:14 2019
@author: Artemis
"""
"""
import numpy as np
import matplotlib.pyplot as plt
line = np.linspace(-5, 5, 200)
plt.plot(line, np.tanh(line), label='tanh')
plt.plot(line, np.maximum(line, 0), label='relu')
plt.legend(loc='best')
plt.xlabel('x')
plt.ylabel('relu(x) and tanh(x)')
plt.show()
"""
"""
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
wine = load_wine()
X = wine.data[:,:2]
y = wine.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=[10,10],
activation='tanh', alpha=1)
mlp.fit(X_train, y_train)
cmap_light = ListedColormap(['#FFAAAA','#AAFFAA','#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000','#00FF00','#0000FF'])
x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .02),
np.arange(y_min, y_max, .02))
Z = mlp.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolor='k', s=60)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("MLPClassifier: solver=lbfgs")
plt.show()
"""
from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from PIL import Image
import numpy as np
mnist = fetch_mldata('mnist-original')
X = mnist.data/255.
y = mnist.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=5000, test_size=1000, random_state=62)
mlp_hw = MLPClassifier(solver='lbfgs', hidden_layer_sizes=[100,100],
activation='relu', alpha=1e-5, random_state=62)
mlp_hw.fit(X_train, y_train)
print("Score:{:.2f}%".format(mlp_hw.score(X_test, y_test)*100))
image = Image.open('data/4.jpg').convert('F')
image = image.resize((28,28))
arr = []
for i in range(28):
for j in range(28):
pixel = 1.0 - float(image.getpixel((j,i))) / 255.
arr.append(pixel)
arr_1 = np.array(arr).reshape(1, -1)
print('The number in the image: {:.0f}'.format(mlp_hw.predict(arr_1)[0]))
pass |
the-stack_0_5851 | from vdb.lib.npm import NpmSource
from depscan.lib import config as config
from depscan.lib.pkg_query import npm_metadata, pypi_metadata
# Dict mapping project type to the audit source
type_audit_map = {"nodejs": NpmSource(), "js": NpmSource()}
# Dict mapping project type to risk audit
risk_audit_map = {
"nodejs": npm_metadata,
"js": npm_metadata,
"python": pypi_metadata,
"py": pypi_metadata,
}
def audit(project_type, pkg_list, report_file):
"""
Method to audit packages using remote source such as npm advisory
:param project_type: Project type
:param pkg_list: List of packages
:param report_file: Report file
"""
results = type_audit_map[project_type].bulk_search(
app_info=config.npm_app_info, pkg_list=pkg_list
)
return results
def risk_audit(project_type, scoped_pkgs, private_ns, pkg_list, report_file):
"""
Method to perform risk audit for packages using package managers api
:param project_type: Project type
:param private_ns: Private namespace
:param pkg_list: List of packages
:param report_file: Report file
"""
audit_fn = risk_audit_map[project_type]
results = audit_fn(scoped_pkgs, pkg_list, private_ns)
return results
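# Hedged usage sketch (not part of the original module): both helpers expect a
# project-type key from the maps above and a package list in depscan's own
# format; the package dict below is a hypothetical placeholder.
def _example_audit():
    pkg_list = [{"name": "lodash", "version": "4.17.20"}]
    return audit("nodejs", pkg_list, report_file="report.json")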
|
the-stack_0_5853 | import logging
import datetime
import xml.etree.cElementTree as ET
import core
from core.helpers import Url
logging = logging.getLogger(__name__)
'''
Does not supply rss feed -- backlog searches only.
'''
def search(imdbid, term):
proxy_enabled = core.CONFIG['Server']['Proxy']['enabled']
logging.info('Searching Zooqle for {}.'.format(term))
url = 'https://zooqle.com/search?q={}&fmt=rss'.format(term)
try:
if proxy_enabled and core.proxy.whitelist('https://www.zooqle.com') is True:
response = Url.open(url, proxy_bypass=True).text
else:
response = Url.open(url).text
if response:
return _parse(response, imdbid)
else:
return []
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Zooqle search failed.', exc_info=True)
return []
def get_rss():
return []
def _parse(xml, imdbid):
logging.info('Parsing Zooqle results.')
tree = ET.fromstring(xml)
items = tree[0].findall('item')
results = []
for i in items:
result = {}
try:
result['score'] = 0
size, suffix = i.find('description').text.strip().split(', ')[-1].split(' ')
m = (1024 ** 2) if suffix == 'MB' else (1024 ** 3)
result['size'] = int(float(size.replace(',', '')) * m)
result['status'] = 'Available'
pd = i.find('pubDate').text
result['pubdate'] = datetime.datetime.strftime(datetime.datetime.strptime(pd, '%a, %d %b %Y %H:%M:%S %z'), '%d %b %Y')
result['title'] = i.find('title').text
result['imdbid'] = imdbid
result['indexer'] = 'Zooqle'
result['info_link'] = i.find('guid').text
result['torrentfile'] = i[8].text
result['guid'] = i[7].text.lower()
result['type'] = 'magnet'
result['downloadid'] = None
result['freeleech'] = 0
result['download_client'] = None
result['seeders'] = int(i[9].text)
results.append(result)
except Exception as e:
logging.error('Error parsing Zooqle XML.', exc_info=True)
continue
logging.info('Found {} results from Zooqle.'.format(len(results)))
return results
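# Hedged usage sketch (not part of the original module): search() only needs an
# IMDB id and a free-text term; the values below are placeholders.
def _example_search():
    results = search('tt0133093', 'The Matrix 1999')
    return [(r['title'], r['seeders']) for r in results]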
|
the-stack_0_5854 | import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec, LeafSpec
from torch.utils._pytree import _broadcast_to_and_flatten
class TestPytree(TestCase):
def test_treespec_equality(self):
self.assertTrue(LeafSpec() == LeafSpec())
self.assertTrue(TreeSpec(list, None, []) == TreeSpec(list, None, []))
self.assertTrue(TreeSpec(list, None, [LeafSpec()]) == TreeSpec(list, None, [LeafSpec()]))
self.assertFalse(TreeSpec(tuple, None, []) == TreeSpec(list, None, []))
self.assertTrue(TreeSpec(tuple, None, []) != TreeSpec(list, None, []))
def test_flatten_unflatten_leaf(self):
def run_test_with_leaf(leaf):
values, treespec = tree_flatten(leaf)
self.assertEqual(values, [leaf])
self.assertEqual(treespec, LeafSpec())
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, leaf)
run_test_with_leaf(1)
run_test_with_leaf(1.)
run_test_with_leaf(None)
run_test_with_leaf(bool)
run_test_with_leaf(torch.randn(3, 3))
def test_flatten_unflatten_list(self):
def run_test(lst):
expected_spec = TreeSpec(list, None, [LeafSpec() for _ in lst])
values, treespec = tree_flatten(lst)
self.assertTrue(isinstance(values, list))
self.assertEqual(values, lst)
self.assertEqual(treespec, expected_spec)
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, lst)
self.assertTrue(isinstance(unflattened, list))
run_test([])
run_test([1., 2])
run_test([torch.tensor([1., 2]), 2, 10, 9, 11])
def test_flatten_unflatten_tuple(self):
def run_test(tup):
expected_spec = TreeSpec(tuple, None, [LeafSpec() for _ in tup])
values, treespec = tree_flatten(tup)
self.assertTrue(isinstance(values, list))
self.assertEqual(values, list(tup))
self.assertEqual(treespec, expected_spec)
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, tup)
self.assertTrue(isinstance(unflattened, tuple))
run_test(())
run_test((1.,))
run_test((1., 2))
run_test((torch.tensor([1., 2]), 2, 10, 9, 11))
def test_flatten_unflatten_dict(self):
def run_test(tup):
expected_spec = TreeSpec(dict, list(tup.keys()),
[LeafSpec() for _ in tup.values()])
values, treespec = tree_flatten(tup)
self.assertTrue(isinstance(values, list))
self.assertEqual(values, list(tup.values()))
self.assertEqual(treespec, expected_spec)
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, tup)
self.assertTrue(isinstance(unflattened, dict))
run_test({})
run_test({'a': 1})
run_test({'abcdefg': torch.randn(2, 3)})
run_test({1: torch.randn(2, 3)})
run_test({'a': 1, 'b': 2, 'c': torch.randn(2, 3)})
def test_flatten_unflatten_nested(self):
def run_test(pytree):
values, treespec = tree_flatten(pytree)
self.assertTrue(isinstance(values, list))
self.assertEqual(len(values), treespec.num_leaves)
# NB: python basic data structures (dict list tuple) all have
# contents equality defined on them, so the following works for them.
unflattened = tree_unflatten(values, treespec)
self.assertEqual(unflattened, pytree)
cases = [
[()],
([],),
{'a': ()},
{'a': 0, 'b': [{'c': 1}]},
{'a': 0, 'b': [1, {'c': 2}, torch.randn(3)], 'c': (torch.randn(2, 3), 1)},
]
for case in cases:
run_test(case)
def test_treespec_repr(self):
# Check that it looks sane
pytree = (0, [0, 0, 0])
_, spec = tree_flatten(pytree)
self.assertEqual(
repr(spec), 'TreeSpec(tuple, None, [*, TreeSpec(list, None, [*, *, *])])')
def test_broadcast_to_and_flatten(self):
cases = [
(1, (), []),
# Same (flat) structures
((1,), (0,), [1]),
([1], [0], [1]),
((1, 2, 3), (0, 0, 0), [1, 2, 3]),
({'a': 1, 'b': 2}, {'a': 0, 'b': 0}, [1, 2]),
# Mismatched (flat) structures
([1], (0,), None),
([1], (0,), None),
((1,), [0], None),
((1, 2, 3), (0, 0), None),
({'a': 1, 'b': 2}, {'a': 0}, None),
({'a': 1, 'b': 2}, {'a': 0, 'c': 0}, None),
({'a': 1, 'b': 2}, {'a': 0, 'b': 0, 'c': 0}, None),
# Same (nested) structures
((1, [2, 3]), (0, [0, 0]), [1, 2, 3]),
((1, [(2, 3), 4]), (0, [(0, 0), 0]), [1, 2, 3, 4]),
# Mismatched (nested) structures
((1, [2, 3]), (0, (0, 0)), None),
((1, [2, 3]), (0, [0, 0, 0]), None),
# Broadcasting single value
(1, (0, 0, 0), [1, 1, 1]),
(1, [0, 0, 0], [1, 1, 1]),
(1, {'a': 0, 'b': 0}, [1, 1]),
(1, (0, [0, [0]], 0), [1, 1, 1, 1]),
(1, (0, [0, [0, [], [[[0]]]]], 0), [1, 1, 1, 1, 1]),
# Broadcast multiple things
((1, 2), ([0, 0, 0], [0, 0]), [1, 1, 1, 2, 2]),
((1, 2), ([0, [0, 0], 0], [0, 0]), [1, 1, 1, 1, 2, 2]),
(([1, 2, 3], 4), ([0, [0, 0], 0], [0, 0]), [1, 2, 2, 3, 4, 4]),
]
for pytree, to_pytree, expected in cases:
_, to_spec = tree_flatten(to_pytree)
result = _broadcast_to_and_flatten(pytree, to_spec)
self.assertEqual(result, expected, msg=str([pytree, to_spec, expected]))
if __name__ == '__main__':
run_tests()
|
the-stack_0_5855 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..core.target import Target
from ..utility.notification import Notification
import logging
from struct import unpack
from time import time
from binascii import crc32
# Number of bytes in a page to read to quickly determine if the page has the same data
PAGE_ESTIMATE_SIZE = 32
PAGE_READ_WEIGHT = 0.3
DATA_TRANSFER_B_PER_S = 40 * 1000 # ~40KB/s, depends on clock speed, theoretical limit for HID is 56,000 B/s
## @brief Exception raised when flashing fails outright.
class FlashFailure(RuntimeError):
pass
class ProgrammingInfo(object):
def __init__(self):
self.program_type = None # Type of programming performed - FLASH_PAGE_ERASE or FLASH_CHIP_ERASE
self.program_time = None # Total programming time
self.analyze_type = None # Type of flash analysis performed - FLASH_ANALYSIS_CRC32 or FLASH_ANALYSIS_PARTIAL_PAGE_READ
self.analyze_time = None # Time to analyze flash contents
def _same(d1, d2):
assert len(d1) == len(d2)
for i in range(len(d1)):
if d1[i] != d2[i]:
return False
return True
def _erased(d):
for i in range(len(d)):
if d[i] != 0xFF:
return False
return True
def _stub_progress(percent):
pass
class FlashPage(object):
def __init__(self, addr, size, data, erase_weight, program_weight):
self.addr = addr
self.size = size
self.data = data
self.erase_weight = erase_weight
self.program_weight = program_weight
self.erased = None
self.same = None
def get_program_weight(self):
"""
Get time to program a page including the data transfer
"""
return self.program_weight + \
float(len(self.data)) / float(DATA_TRANSFER_B_PER_S)
def get_erase_program_weight(self):
"""
Get time to erase and program a page including data transfer time
"""
return self.erase_weight + self.program_weight + \
float(len(self.data)) / float(DATA_TRANSFER_B_PER_S)
def get_verify_weight(self):
"""
Get time to verify a page
"""
return float(self.size) / float(DATA_TRANSFER_B_PER_S)
class FlashOperation(object):
def __init__(self, addr, data):
self.addr = addr
self.data = data
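# Hedged usage sketch (not part of the original module): FlashBuilder (defined
# just below) is normally created from a Flash object's page-programming API;
# the `flash` argument here is assumed to be such an object and the
# addresses/data are illustrative only.
def _example_flash_builder(flash):
    builder = FlashBuilder(flash, base_addr=0x0)
    builder.add_data(0x0, [0xFF] * 1024)    # added blocks must not overlap
    builder.add_data(0x400, [0x00] * 256)
    return builder.program(chip_erase=None, smart_flash=True)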
class FlashBuilder(object):
# Type of flash operation
FLASH_PAGE_ERASE = 1
FLASH_CHIP_ERASE = 2
# Type of flash analysis
FLASH_ANALYSIS_CRC32 = "CRC32"
FLASH_ANALYSIS_PARTIAL_PAGE_READ = "PAGE_READ"
def __init__(self, flash, base_addr=0):
self.flash = flash
self.flash_start = base_addr
self.flash_operation_list = []
self.page_list = []
self.perf = ProgrammingInfo()
self.enable_double_buffering = True
self.max_errors = 10
def enable_double_buffer(self, enable):
self.enable_double_buffering = enable
def set_max_errors(self, count):
self.max_errors = count
def add_data(self, addr, data):
"""
Add a block of data to be programmed
Note - programming does not start until the method
program is called.
"""
# Sanity check
if addr < self.flash_start:
raise Exception("Invalid flash address 0x%x is before flash start 0x%x" % (addr, self.flash_start))
# Add operation to list
self.flash_operation_list.append(FlashOperation(addr, data))
# Keep list sorted
self.flash_operation_list = sorted(self.flash_operation_list, key=lambda operation: operation.addr)
# Verify this does not overlap
prev_flash_operation = None
for operation in self.flash_operation_list:
            if prev_flash_operation is not None:
if prev_flash_operation.addr + len(prev_flash_operation.data) > operation.addr:
raise ValueError("Error adding data - Data at 0x%x..0x%x overlaps with 0x%x..0x%x"
% (prev_flash_operation.addr, prev_flash_operation.addr + len(prev_flash_operation.data),
operation.addr, operation.addr + len(operation.data)))
prev_flash_operation = operation
def program(self, chip_erase=None, progress_cb=None, smart_flash=True, fast_verify=False):
"""
Determine fastest method of flashing and then run flash programming.
Data must have already been added with add_data
"""
# Send notification that we're about to program flash.
self.flash.target.notify(Notification(event=Target.EVENT_PRE_FLASH_PROGRAM, source=self))
# Assumptions
# 1. Page erases must be on page boundaries ( page_erase_addr % page_size == 0 )
# 2. Page erase can have a different size depending on location
# 3. It is safe to program a page with less than a page of data
# Examples
# - lpc4330 -Non 0 base address
# - nRF51 -UICR location far from flash (address 0x10001000)
# - LPC1768 -Different sized pages
program_start = time()
if progress_cb is None:
progress_cb = _stub_progress
# There must be at least 1 flash operation
if len(self.flash_operation_list) == 0:
logging.warning("No pages were programmed")
return
# Convert the list of flash operations into flash pages
program_byte_count = 0
flash_addr = self.flash_operation_list[0].addr
info = self.flash.get_page_info(flash_addr)
if info is None:
raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)
page_addr = flash_addr - (flash_addr % info.size)
current_page = FlashPage(page_addr, info.size, [], info.erase_weight, info.program_weight)
self.page_list.append(current_page)
for flash_operation in self.flash_operation_list:
pos = 0
while pos < len(flash_operation.data):
# Check if operation is in next page
flash_addr = flash_operation.addr + pos
if flash_addr >= current_page.addr + current_page.size:
info = self.flash.get_page_info(flash_addr)
if info is None:
raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)
page_addr = flash_addr - (flash_addr % info.size)
current_page = FlashPage(page_addr, info.size, [], info.erase_weight, info.program_weight)
self.page_list.append(current_page)
# Fill the page gap if there is one
page_data_end = current_page.addr + len(current_page.data)
if flash_addr != page_data_end:
old_data = self.flash.target.read_memory_block8(page_data_end, flash_addr - page_data_end)
current_page.data.extend(old_data)
# Copy data to page and increment pos
space_left_in_page = info.size - len(current_page.data)
space_left_in_data = len(flash_operation.data) - pos
amount = min(space_left_in_page, space_left_in_data)
current_page.data.extend(flash_operation.data[pos:pos + amount])
program_byte_count += amount
#increment position
pos += amount
# If smart flash was set to false then mark all pages
# as requiring programming
if not smart_flash:
self._mark_all_pages_for_programming()
# If the first page being programmed is not the first page
# in ROM then don't use a chip erase
if self.page_list[0].addr > self.flash_start:
if chip_erase is None:
chip_erase = False
elif chip_erase is True:
logging.warning('Chip erase used when flash address 0x%x is not the same as flash start 0x%x', self.page_list[0].addr, self.flash_start)
self.flash.init()
chip_erase_count, chip_erase_program_time = self._compute_chip_erase_pages_and_weight()
page_erase_min_program_time = self._compute_page_erase_pages_weight_min()
# If chip_erase hasn't been specified determine if chip erase is faster
# than page erase regardless of contents
if (chip_erase is None) and (chip_erase_program_time < page_erase_min_program_time):
chip_erase = True
# If chip erase isn't True then analyze the flash
if chip_erase != True:
analyze_start = time()
if self.flash.get_flash_info().crc_supported:
sector_erase_count, page_program_time = self._compute_page_erase_pages_and_weight_crc32(fast_verify)
self.perf.analyze_type = FlashBuilder.FLASH_ANALYSIS_CRC32
else:
sector_erase_count, page_program_time = self._compute_page_erase_pages_and_weight_sector_read()
self.perf.analyze_type = FlashBuilder.FLASH_ANALYSIS_PARTIAL_PAGE_READ
analyze_finish = time()
self.perf.analyze_time = analyze_finish - analyze_start
logging.debug("Analyze time: %f" % (analyze_finish - analyze_start))
# If chip erase hasn't been set then determine fastest method to program
if chip_erase is None:
logging.debug("Chip erase count %i, Page erase est count %i" % (chip_erase_count, sector_erase_count))
logging.debug("Chip erase weight %f, Page erase weight %f" % (chip_erase_program_time, page_program_time))
chip_erase = chip_erase_program_time < page_program_time
if chip_erase:
if self.flash.is_double_buffering_supported and self.enable_double_buffering:
logging.debug("Using double buffer chip erase program")
flash_operation = self._chip_erase_program_double_buffer(progress_cb)
else:
flash_operation = self._chip_erase_program(progress_cb)
else:
if self.flash.is_double_buffering_supported and self.enable_double_buffering:
logging.debug("Using double buffer page erase program")
flash_operation = self._page_erase_program_double_buffer(progress_cb)
else:
flash_operation = self._page_erase_program(progress_cb)
self.flash.target.reset_stop_on_reset()
program_finish = time()
self.perf.program_time = program_finish - program_start
self.perf.program_type = flash_operation
logging.info("Programmed %d bytes (%d pages) at %.02f kB/s", program_byte_count, len(self.page_list), ((program_byte_count/1024) / self.perf.program_time))
# Send notification that we're done programming flash.
self.flash.target.notify(Notification(event=Target.EVENT_POST_FLASH_PROGRAM, source=self))
return self.perf
def get_performance(self):
return self.perf
def _mark_all_pages_for_programming(self):
for page in self.page_list:
page.erased = False
page.same = False
def _compute_chip_erase_pages_and_weight(self):
"""
Compute the number of erased pages.
Determine how many pages in the new data are already erased.
"""
chip_erase_count = 0
chip_erase_weight = 0
chip_erase_weight += self.flash.get_flash_info().erase_weight
for page in self.page_list:
if page.erased is None:
page.erased = _erased(page.data)
if not page.erased:
chip_erase_count += 1
chip_erase_weight += page.get_program_weight()
self.chip_erase_count = chip_erase_count
self.chip_erase_weight = chip_erase_weight
return chip_erase_count, chip_erase_weight
def _compute_page_erase_pages_weight_min(self):
page_erase_min_weight = 0
for page in self.page_list:
page_erase_min_weight += page.get_verify_weight()
return page_erase_min_weight
def _compute_page_erase_pages_and_weight_sector_read(self):
"""
Estimate how many pages are the same.
Quickly estimate how many pages are the same. These estimates are used
        by page_erase_program so it is recommended to call this before beginning programming.
This is done automatically by smart_program.
"""
# Quickly estimate how many pages are the same
page_erase_count = 0
page_erase_weight = 0
for page in self.page_list:
# Analyze pages that haven't been analyzed yet
if page.same is None:
size = min(PAGE_ESTIMATE_SIZE, len(page.data))
data = self.flash.target.read_memory_block8(page.addr, size)
page_same = _same(data, page.data[0:size])
if page_same is False:
page.same = False
# Put together page and time estimate
for page in self.page_list:
if page.same is False:
page_erase_count += 1
page_erase_weight += page.get_erase_program_weight()
elif page.same is None:
# Page is probably the same but must be read to confirm
page_erase_weight += page.get_verify_weight()
elif page.same is True:
# Page is confirmed to be the same so no programming weight
pass
self.page_erase_count = page_erase_count
self.page_erase_weight = page_erase_weight
return page_erase_count, page_erase_weight
def _compute_page_erase_pages_and_weight_crc32(self, assume_estimate_correct=False):
"""
Estimate how many pages are the same.
Quickly estimate how many pages are the same. These estimates are used
        by page_erase_program so it is recommended to call this before beginning programming.
This is done automatically by smart_program.
If assume_estimate_correct is set to True, then pages with matching CRCs
will be marked as the same. There is a small chance that the CRCs match even though the
        data is different, but the odds of this happening are low: ~1/(2^32) = ~2.33*10^-8%.
"""
# Build list of all the pages that need to be analyzed
sector_list = []
page_list = []
for page in self.page_list:
if page.same is None:
# Add sector to compute_crcs
sector_list.append((page.addr, page.size))
page_list.append(page)
# Compute CRC of data (Padded with 0xFF)
data = list(page.data)
pad_size = page.size - len(page.data)
if pad_size > 0:
data.extend([0xFF] * pad_size)
page.crc = crc32(bytearray(data)) & 0xFFFFFFFF
# Analyze pages
page_erase_count = 0
page_erase_weight = 0
if len(page_list) > 0:
crc_list = self.flash.compute_crcs(sector_list)
for page, crc in zip(page_list, crc_list):
page_same = page.crc == crc
if assume_estimate_correct:
page.same = page_same
elif page_same is False:
page.same = False
# Put together page and time estimate
for page in self.page_list:
if page.same is False:
page_erase_count += 1
page_erase_weight += page.get_erase_program_weight()
elif page.same is None:
# Page is probably the same but must be read to confirm
page_erase_weight += page.get_verify_weight()
elif page.same is True:
# Page is confirmed to be the same so no programming weight
pass
self.page_erase_count = page_erase_count
self.page_erase_weight = page_erase_weight
return page_erase_count, page_erase_weight
def _chip_erase_program(self, progress_cb=_stub_progress):
"""
Program by first performing a chip erase.
"""
logging.debug("Smart chip erase")
logging.debug("%i of %i pages already erased", len(self.page_list) - self.chip_erase_count, len(self.page_list))
progress_cb(0.0)
progress = 0
self.flash.erase_all()
progress += self.flash.get_flash_info().erase_weight
for page in self.page_list:
if not page.erased:
self.flash.program_page(page.addr, page.data)
progress += page.get_program_weight()
progress_cb(float(progress) / float(self.chip_erase_weight))
progress_cb(1.0)
return FlashBuilder.FLASH_CHIP_ERASE
def _next_unerased_page(self, i):
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
while page.erased:
i += 1
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
return page, i + 1
def _chip_erase_program_double_buffer(self, progress_cb=_stub_progress):
"""
Program by first performing a chip erase.
"""
logging.debug("Smart chip erase")
logging.debug("%i of %i pages already erased", len(self.page_list) - self.chip_erase_count, len(self.page_list))
progress_cb(0.0)
progress = 0
self.flash.erase_all()
progress += self.flash.get_flash_info().erase_weight
# Set up page and buffer info.
error_count = 0
current_buf = 0
next_buf = 1
page, i = self._next_unerased_page(0)
assert page is not None
# Load first page buffer
self.flash.load_page_buffer(current_buf, page.addr, page.data)
while page is not None:
# Kick off this page program.
current_addr = page.addr
current_weight = page.get_program_weight()
self.flash.start_program_page_with_buffer(current_buf, current_addr)
# Get next page and load it.
page, i = self._next_unerased_page(i)
if page is not None:
self.flash.load_page_buffer(next_buf, page.addr, page.data)
# Wait for the program to complete.
result = self.flash.wait_for_completion()
# check the return code
if result != 0:
logging.error('program_page(0x%x) error: %i', current_addr, result)
error_count += 1
if error_count > self.max_errors:
logging.error("Too many page programming errors, aborting program operation")
break
# Swap buffers.
temp = current_buf
current_buf = next_buf
next_buf = temp
# Update progress.
progress += current_weight
progress_cb(float(progress) / float(self.chip_erase_weight))
progress_cb(1.0)
return FlashBuilder.FLASH_CHIP_ERASE
def _page_erase_program(self, progress_cb=_stub_progress):
"""
Program by performing sector erases.
"""
actual_page_erase_count = 0
actual_page_erase_weight = 0
progress = 0
progress_cb(0.0)
for page in self.page_list:
# If the page is not the same
if page.same is False:
progress += page.get_erase_program_weight()
# Read page data if unknown - after this page.same will be True or False
if page.same is None:
data = self.flash.target.read_memory_block8(page.addr, len(page.data))
page.same = _same(page.data, data)
progress += page.get_verify_weight()
# Program page if not the same
if page.same is False:
self.flash.erase_page(page.addr)
self.flash.program_page(page.addr, page.data)
actual_page_erase_count += 1
actual_page_erase_weight += page.get_erase_program_weight()
# Update progress
if self.page_erase_weight > 0:
progress_cb(float(progress) / float(self.page_erase_weight))
progress_cb(1.0)
logging.debug("Estimated page erase count: %i", self.page_erase_count)
logging.debug("Actual page erase count: %i", actual_page_erase_count)
return FlashBuilder.FLASH_PAGE_ERASE
def _scan_pages_for_same(self, progress_cb=_stub_progress):
"""
        Read flash to determine which pages already hold the data to be programmed.
"""
progress = 0
count = 0
same_count = 0
for page in self.page_list:
# Read page data if unknown - after this page.same will be True or False
if page.same is None:
data = self.flash.target.read_memory_block8(page.addr, len(page.data))
page.same = _same(page.data, data)
progress += page.get_verify_weight()
count += 1
if page.same:
same_count += 1
# Update progress
progress_cb(float(progress) / float(self.page_erase_weight))
return progress
def _next_nonsame_page(self, i):
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
while page.same:
i += 1
if i >= len(self.page_list):
return None, i
page = self.page_list[i]
return page, i + 1
def _page_erase_program_double_buffer(self, progress_cb=_stub_progress):
"""
Program by performing sector erases.
"""
actual_page_erase_count = 0
actual_page_erase_weight = 0
progress = 0
progress_cb(0.0)
# Fill in same flag for all pages. This is done up front so we're not trying
# to read from flash while simultaneously programming it.
progress = self._scan_pages_for_same(progress_cb)
# Set up page and buffer info.
error_count = 0
current_buf = 0
next_buf = 1
page, i = self._next_nonsame_page(0)
# Make sure there are actually pages to program differently from current flash contents.
if page is not None:
# Load first page buffer
self.flash.load_page_buffer(current_buf, page.addr, page.data)
while page is not None:
assert page.same is not None
# Kick off this page program.
current_addr = page.addr
current_weight = page.get_erase_program_weight()
self.flash.erase_page(current_addr)
self.flash.start_program_page_with_buffer(current_buf, current_addr) #, erase_page=True)
actual_page_erase_count += 1
actual_page_erase_weight += page.get_erase_program_weight()
# Get next page and load it.
page, i = self._next_nonsame_page(i)
if page is not None:
self.flash.load_page_buffer(next_buf, page.addr, page.data)
# Wait for the program to complete.
result = self.flash.wait_for_completion()
# check the return code
if result != 0:
logging.error('program_page(0x%x) error: %i', current_addr, result)
error_count += 1
if error_count > self.max_errors:
logging.error("Too many page programming errors, aborting program operation")
break
# Swap buffers.
temp = current_buf
current_buf = next_buf
next_buf = temp
# Update progress
progress += current_weight
if self.page_erase_weight > 0:
progress_cb(float(progress) / float(self.page_erase_weight))
progress_cb(1.0)
logging.debug("Estimated page erase count: %i", self.page_erase_count)
logging.debug("Actual page erase count: %i", actual_page_erase_count)
return FlashBuilder.FLASH_PAGE_ERASE
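# Minimal usage sketch (not part of the original module): `flash` is assumed to
# be an initialized pyOCD flash object for a connected target, and the address
# and data below are placeholders.
def _example_flash_builder_usage(flash):
    builder = FlashBuilder(flash, base_addr=0x00000000)
    builder.add_data(0x00000000, [0xFF] * 1024)  # hypothetical firmware image
    perf = builder.program(smart_flash=True)     # picks chip vs. page erase automatically
    return perf.program_time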
|
the-stack_0_5856 | # Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Off-Policy Evaluation Class to Streamline OPE."""
from dataclasses import dataclass
from logging import getLogger
from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
from pandas import DataFrame
import seaborn as sns
from sklearn.utils import check_scalar
from ..types import BanditFeedback
from ..utils import check_array
from ..utils import check_confidence_interval_arguments
from .estimators_continuous import BaseContinuousOffPolicyEstimator
from .estimators_continuous import KernelizedDoublyRobust as KDR
logger = getLogger(__name__)
@dataclass
class ContinuousOffPolicyEvaluation:
"""Class to conduct OPE using multiple estimators simultaneously.
Parameters
-----------
bandit_feedback: BanditFeedback
Logged bandit feedback data with continuous actions used to conduct OPE.
ope_estimators: List[BaseOffPolicyEstimator]
List of OPE estimators used to evaluate the policy value of evaluation policy.
Estimators must follow the interface of `obp.ope.BaseContinuousOffPolicyEstimator`.
Examples
----------
.. code-block:: python
# a case of implementing OPE (with continuous actions) of an synthetic evaluation policy
>>> from obp.dataset import (
SyntheticContinuousBanditDataset,
linear_reward_funcion_continuous,
linear_behavior_policy_continuous,
linear_synthetic_policy_continuous
)
>>> from obp.ope import (
ContinuousOffPolicyEvaluation,
KernelizedInverseProbabilityWeighting as KernelizedIPW
)
# (1) Synthetic Data Generation
>>> dataset = SyntheticContinuousBanditDataset(
dim_context=5,
reward_function=linear_reward_funcion_continuous,
behavior_policy_function=linear_behavior_policy_continuous,
random_state=12345,
)
>>> bandit_feedback = dataset.obtain_batch_bandit_feedback(
n_rounds=10000, min_action_value=-10, max_action_value=10,
)
# (2) Synthetic Evaluation Policy
>>> action_by_evaluation_policy = linear_synthetic_policy_continuous(
context=bandit_feedback["context"]
)
# (3) Off-Policy Evaluation
>>> ope = ContinuousOffPolicyEvaluation(
bandit_feedback=bandit_feedback,
ope_estimators=[KernelizedIPW(kernel="epanechnikov", bandwidth=0.02)]
)
>>> estimated_policy_value = ope.estimate_policy_values(
action_by_evaluation_policy=action_by_evaluation_policy,
)
>>> estimated_policy_value
{'kernelized_ipw': 2.2858905015106723}
# (4) Ground-truth Policy Value of the Synthetic Evaluation Policy
>>> dataset.calc_ground_truth_policy_value(
context=bandit_feedback["context"], action=action_by_evaluation_policy
)
2.2893029243895215
"""
bandit_feedback: BanditFeedback
ope_estimators: List[BaseContinuousOffPolicyEstimator]
def __post_init__(self) -> None:
"""Initialize class."""
for key_ in ["action", "reward", "pscore"]:
if key_ not in self.bandit_feedback:
raise RuntimeError(f"Missing key of {key_} in 'bandit_feedback'.")
self.bandit_feedback["action_by_behavior_policy"] = self.bandit_feedback[
"action"
]
self.ope_estimators_ = dict()
self.is_model_dependent = False
for estimator in self.ope_estimators:
self.ope_estimators_[estimator.estimator_name] = estimator
if isinstance(estimator, KDR):
self.is_model_dependent = True
def _create_estimator_inputs(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
) -> Dict[str, Dict[str, np.ndarray]]:
"""Create input dictionary to estimate policy value by subclasses of `BaseOffPolicyEstimator`"""
check_array(
array=action_by_evaluation_policy,
name="action_by_evaluation_policy",
expected_dim=1,
)
if estimated_rewards_by_reg_model is None:
pass
elif isinstance(estimated_rewards_by_reg_model, dict):
for estimator_name, value in estimated_rewards_by_reg_model.items():
check_array(
array=value,
name=f"estimated_rewards_by_reg_model[{estimator_name}]",
expected_dim=1,
)
if value.shape != action_by_evaluation_policy.shape:
raise ValueError(
f"Expected `estimated_rewards_by_reg_model[{estimator_name}].shape == action_by_evaluation_policy.shape`, but found it False"
)
elif estimated_rewards_by_reg_model.shape != action_by_evaluation_policy.shape:
raise ValueError(
"Expected `estimated_rewards_by_reg_model.shape == action_by_evaluation_policy.shape`, but found it False"
)
estimator_inputs = {
estimator_name: {
input_: self.bandit_feedback[input_]
for input_ in ["reward", "action_by_behavior_policy", "pscore"]
}
for estimator_name in self.ope_estimators_
}
for estimator_name in self.ope_estimators_:
estimator_inputs[estimator_name][
"action_by_evaluation_policy"
] = action_by_evaluation_policy
if isinstance(estimated_rewards_by_reg_model, dict):
if estimator_name in estimated_rewards_by_reg_model:
estimator_inputs[estimator_name][
"estimated_rewards_by_reg_model"
] = estimated_rewards_by_reg_model[estimator_name]
else:
estimator_inputs[estimator_name][
"estimated_rewards_by_reg_model"
] = None
else:
estimator_inputs[estimator_name][
"estimated_rewards_by_reg_model"
] = estimated_rewards_by_reg_model
return estimator_inputs
def estimate_policy_values(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
) -> Dict[str, float]:
"""Estimate policy value of evaluation policy.
Parameters
------------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by evaluation policy, i.e., :math:`\\pi_e(x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds,) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
Returns
----------
policy_value_dict: Dict[str, float]
Dictionary containing estimated policy values by OPE estimators.
"""
if self.is_model_dependent:
if estimated_rewards_by_reg_model is None:
raise ValueError(
"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given"
)
policy_value_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
policy_value_dict[estimator_name] = estimator.estimate_policy_value(
**estimator_inputs[estimator_name]
)
return policy_value_dict
def estimate_intervals(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
) -> Dict[str, Dict[str, float]]:
"""Estimate confidence intervals of policy values by nonparametric bootstrap procedure.
Parameters
------------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
        estimated_rewards_by_reg_model: array-like, shape (n_rounds,) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
Returns
----------
policy_value_interval_dict: Dict[str, Dict[str, float]]
Dictionary containing confidence intervals of estimated policy value estimated
using nonparametric bootstrap procedure.
"""
if self.is_model_dependent:
if estimated_rewards_by_reg_model is None:
raise ValueError(
"When model dependent estimators such as DM or DR are used, `estimated_rewards_by_reg_model` must be given"
)
check_confidence_interval_arguments(
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
policy_value_interval_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
policy_value_interval_dict[estimator_name] = estimator.estimate_interval(
**estimator_inputs[estimator_name],
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
return policy_value_interval_dict
def summarize_off_policy_estimates(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
) -> Tuple[DataFrame, DataFrame]:
"""Summarize policy values and their confidence intervals estimated by OPE estimators.
Parameters
------------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
        estimated_rewards_by_reg_model: array-like, shape (n_rounds,) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
Returns
----------
(policy_value_df, policy_value_interval_df): Tuple[DataFrame, DataFrame]
Policy values and their confidence intervals estimated by OPE estimators.
"""
policy_value_df = DataFrame(
self.estimate_policy_values(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
),
index=["estimated_policy_value"],
)
policy_value_interval_df = DataFrame(
self.estimate_intervals(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
)
policy_value_of_behavior_policy = self.bandit_feedback["reward"].mean()
policy_value_df = policy_value_df.T
if policy_value_of_behavior_policy <= 0:
logger.warning(
f"Policy value of the behavior policy is {policy_value_of_behavior_policy} (<=0); relative estimated policy value is set to np.nan"
)
policy_value_df["relative_estimated_policy_value"] = np.nan
else:
policy_value_df["relative_estimated_policy_value"] = (
policy_value_df.estimated_policy_value / policy_value_of_behavior_policy
)
return policy_value_df, policy_value_interval_df.T
def visualize_off_policy_estimates(
self,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
is_relative: bool = False,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
fig_dir: Optional[Path] = None,
fig_name: str = "estimated_policy_value.png",
) -> None:
"""Visualize policy values estimated by OPE estimators.
Parameters
----------
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
        estimated_rewards_by_reg_model: array-like, shape (n_rounds,) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
is_relative: bool, default=False,
If True, the method visualizes the estimated policy values of evaluation policy
relative to the ground-truth policy value of behavior policy.
fig_dir: Path, default=None
Path to store the bar figure.
If 'None' is given, the figure will not be saved.
fig_name: str, default="estimated_policy_value.png"
Name of the bar figure.
"""
if fig_dir is not None:
assert isinstance(fig_dir, Path), "fig_dir must be a Path"
if fig_name is not None:
            assert isinstance(fig_name, str), "fig_name must be a string"
estimated_round_rewards_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
estimated_round_rewards_dict[
estimator_name
] = estimator._estimate_round_rewards(**estimator_inputs[estimator_name])
estimated_round_rewards_df = DataFrame(estimated_round_rewards_dict)
estimated_round_rewards_df.rename(
columns={key: key.upper() for key in estimated_round_rewards_dict.keys()},
inplace=True,
)
if is_relative:
estimated_round_rewards_df /= self.bandit_feedback["reward"].mean()
plt.style.use("ggplot")
fig, ax = plt.subplots(figsize=(8, 6))
sns.barplot(
data=estimated_round_rewards_df,
ax=ax,
ci=100 * (1 - alpha),
n_boot=n_bootstrap_samples,
seed=random_state,
)
plt.xlabel("OPE Estimators", fontsize=25)
plt.ylabel(
f"Estimated Policy Value (± {np.int32(100*(1 - alpha))}% CI)", fontsize=20
)
plt.yticks(fontsize=15)
plt.xticks(fontsize=25 - 2 * len(self.ope_estimators))
if fig_dir:
fig.savefig(str(fig_dir / fig_name))
def evaluate_performance_of_estimators(
self,
ground_truth_policy_value: float,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
metric: str = "relative-ee",
) -> Dict[str, float]:
"""Evaluate estimation performance of OPE estimators.
Note
------
Evaluate the estimation performance of OPE estimators by relative estimation error (relative-EE) or squared error (SE):
.. math ::
\\text{Relative-EE} (\\hat{V}; \\mathcal{D}) = \\left| \\frac{\\hat{V}(\\pi; \\mathcal{D}) - V(\\pi)}{V(\\pi)} \\right|,
.. math ::
\\text{SE} (\\hat{V}; \\mathcal{D}) = \\left(\\hat{V}(\\pi; \\mathcal{D}) - V(\\pi) \\right)^2,
        where :math:`V({\\pi})` is the ground-truth policy value of the evaluation policy :math:`\\pi_e` (often estimated using on-policy estimation).
:math:`\\hat{V}(\\pi; \\mathcal{D})` is an estimated policy value by an OPE estimator :math:`\\hat{V}` and logged bandit feedback :math:`\\mathcal{D}`.
Parameters
----------
        ground_truth_policy_value: float
            Ground-truth policy value of the evaluation policy, i.e., :math:`V(\\pi)`.
With Open Bandit Dataset, we use an on-policy estimate of the policy value as its ground-truth.
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
        estimated_rewards_by_reg_model: array-like, shape (n_rounds,) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
            When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
metric: str, default="relative-ee"
Evaluation metric to evaluate and compare the estimation performance of OPE estimators.
Must be "relative-ee" or "se".
Returns
----------
eval_metric_ope_dict: Dict[str, float]
Dictionary containing evaluation metric for evaluating the estimation performance of OPE estimators.
"""
check_scalar(
ground_truth_policy_value,
"ground_truth_policy_value",
float,
)
if metric not in ["relative-ee", "se"]:
raise ValueError(
f"metric must be either 'relative-ee' or 'se', but {metric} is given"
)
if metric == "relative-ee" and ground_truth_policy_value == 0.0:
raise ValueError(
"ground_truth_policy_value must be non-zero when metric is relative-ee"
)
eval_metric_ope_dict = dict()
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
estimated_policy_value = estimator.estimate_policy_value(
**estimator_inputs[estimator_name]
)
if metric == "relative-ee":
relative_ee_ = estimated_policy_value - ground_truth_policy_value
relative_ee_ /= ground_truth_policy_value
eval_metric_ope_dict[estimator_name] = np.abs(relative_ee_)
elif metric == "se":
se_ = (estimated_policy_value - ground_truth_policy_value) ** 2
eval_metric_ope_dict[estimator_name] = se_
return eval_metric_ope_dict
def summarize_estimators_comparison(
self,
ground_truth_policy_value: float,
action_by_evaluation_policy: np.ndarray,
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
metric: str = "relative-ee",
) -> DataFrame:
"""Summarize performance comparison of OPE estimators.
Parameters
----------
        ground_truth_policy_value: float
            Ground-truth policy value of the evaluation policy, i.e., :math:`V(\\pi)`.
With Open Bandit Dataset, we use an on-policy estimate of the policy value as ground-truth.
action_by_evaluation_policy: array-like, shape (n_rounds,)
Continuous action values given by the (deterministic) evaluation policy, i.e., :math:`\\pi_e(x_t)`.
        estimated_rewards_by_reg_model: array-like, shape (n_rounds,), default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
metric: str, default="relative-ee"
Evaluation metric to evaluate and compare the estimation performance of OPE estimators.
Must be either "relative-ee" or "se".
Returns
----------
eval_metric_ope_df: DataFrame
Evaluation metric to evaluate and compare the estimation performance of OPE estimators.
"""
eval_metric_ope_df = DataFrame(
self.evaluate_performance_of_estimators(
ground_truth_policy_value=ground_truth_policy_value,
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
metric=metric,
),
index=[metric],
)
return eval_metric_ope_df.T
def visualize_off_policy_estimates_of_multiple_policies(
self,
policy_name_list: List[str],
action_by_evaluation_policy_list: List[np.ndarray],
estimated_rewards_by_reg_model: Optional[
Union[np.ndarray, Dict[str, np.ndarray]]
] = None,
alpha: float = 0.05,
is_relative: bool = False,
n_bootstrap_samples: int = 100,
random_state: Optional[int] = None,
fig_dir: Optional[Path] = None,
fig_name: str = "estimated_policy_value.png",
) -> None:
"""Visualize policy values estimated by OPE estimators.
Parameters
----------
policy_name_list: List[str]
List of the names of evaluation policies.
        action_by_evaluation_policy_list: List[array-like, shape (n_rounds,)]
List of action values given by the (deterministic) evaluation policies, i.e., :math:`\\pi_e(x_t)`.
        estimated_rewards_by_reg_model: array-like, shape (n_rounds,) or Dict[str, array-like], default=None
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
When an array-like is given, all OPE estimators use it.
When a dict is given, if the dict has the name of an estimator as a key, the corresponding value is used.
When it is not given, model-dependent estimators such as DM and DR cannot be used.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=100
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
is_relative: bool, default=False,
If True, the method visualizes the estimated policy values of evaluation policy
relative to the ground-truth policy value of behavior policy.
fig_dir: Path, default=None
Path to store the bar figure.
If 'None' is given, the figure will not be saved.
fig_name: str, default="estimated_policy_value.png"
Name of the bar figure.
"""
if len(policy_name_list) != len(action_by_evaluation_policy_list):
raise ValueError(
"the length of policy_name_list must be the same as action_by_evaluation_policy_list"
)
if fig_dir is not None:
assert isinstance(fig_dir, Path), "fig_dir must be a Path"
if fig_name is not None:
            assert isinstance(fig_name, str), "fig_name must be a string"
estimated_round_rewards_dict = {
estimator_name: {} for estimator_name in self.ope_estimators_
}
for policy_name, action_by_evaluation_policy in zip(
policy_name_list, action_by_evaluation_policy_list
):
estimator_inputs = self._create_estimator_inputs(
action_by_evaluation_policy=action_by_evaluation_policy,
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
for estimator_name, estimator in self.ope_estimators_.items():
estimated_round_rewards_dict[estimator_name][
policy_name
] = estimator._estimate_round_rewards(
**estimator_inputs[estimator_name]
)
plt.style.use("ggplot")
fig = plt.figure(figsize=(8, 6.2 * len(self.ope_estimators_)))
for i, estimator_name in enumerate(self.ope_estimators_):
estimated_round_rewards_df = DataFrame(
estimated_round_rewards_dict[estimator_name]
)
if is_relative:
estimated_round_rewards_df /= self.bandit_feedback["reward"].mean()
            ax = fig.add_subplot(len(self.ope_estimators_), 1, i + 1)
sns.barplot(
data=estimated_round_rewards_df,
ax=ax,
ci=100 * (1 - alpha),
n_boot=n_bootstrap_samples,
seed=random_state,
)
ax.set_title(estimator_name.upper(), fontsize=20)
ax.set_ylabel(
f"Estimated Policy Value (± {np.int32(100*(1 - alpha))}% CI)",
fontsize=20,
)
plt.yticks(fontsize=15)
plt.xticks(fontsize=25 - 2 * len(policy_name_list))
if fig_dir:
fig.savefig(str(fig_dir / fig_name))
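# Hedged sketch (not part of the original obp module) continuing the synthetic
# example in the class docstring above; `ope`, `dataset`, `bandit_feedback`, and
# `action_by_evaluation_policy` are assumed to be built exactly as shown there.
def _example_estimator_comparison(ope, dataset, bandit_feedback, action_by_evaluation_policy):
    ground_truth = dataset.calc_ground_truth_policy_value(
        context=bandit_feedback["context"], action=action_by_evaluation_policy
    )
    # Rank the configured estimators by relative estimation error.
    return ope.summarize_estimators_comparison(
        ground_truth_policy_value=float(ground_truth),
        action_by_evaluation_policy=action_by_evaluation_policy,
        metric="relative-ee",
    )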
|
the-stack_0_5859 | from __future__ import print_function
from builtins import range
import json
class selectionParser(object):
def __init__(self,selectStr):
self.__result={}
self.__strresult={}
strresult=json.loads(selectStr)
for k,v in strresult.items():
expandedvalues=[]
for w in v:
if len(w)==0:
self.__result[int(k)]=expandedvalues
self.__strresult[k]=[]
continue
###weed out [10]-like stuff just in case they exist
elif len(w)==1:
expandedvalues.append(w[0])
##weed out [10,10]-like stuff
elif len(w)==2 and w[0]==w[1]:
expandedvalues.append(w[0])
else:
for i in range(w[0],w[1]+1):
expandedvalues.append(i)
self.__result[int(k)]=expandedvalues
self.__strresult[k]=[str(x) for x in expandedvalues]
def runs(self):
return self.__result.keys()
def runsandls(self):
'''return expanded {run:lslist}
'''
return self.__result
def runsandlsStr(self):
'''return expanded {'run':lslist}
'''
return self.__strresult
def numruns(self):
return len(self.__result)
def numls(self,run):
return len(self.__result[run])
if __name__ == "__main__":
s=selectionParser('{"1":[[3,45]],"2":[[4,8],[10,10]],"3":[[]]}')
print('runs : ',s.runs())
print('full result : ',s.runsandls())
print('str result : ',s.runsandlsStr())
print('num runs : ',s.numruns())
print('numls in run : ',s.numls(1))
|
the-stack_0_5863 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import os.path as osp
import os
from PIL import Image
import numpy as np
import json
from transform import *
class CityScapes(Dataset):
def __init__(self, rootpth, cropsize=(640, 480), mode='train',
randomscale=(0.125, 0.25, 0.375, 0.5, 0.675, 0.75, 0.875, 1.0, 1.25, 1.5), *args, **kwargs):
super(CityScapes, self).__init__(*args, **kwargs)
assert mode in ('train', 'val', 'test', 'trainval')
self.mode = mode
print('self.mode', self.mode)
self.ignore_lb = 255
with open('./cityscapes_info.json', 'r') as fr:
labels_info = json.load(fr)
        self.lb_map = {el['id']: el['trainId'] for el in labels_info}  # Cityscapes annotates 35 label ids, but it seems only the 19 training classes are used for evaluation.
## parse img directory
self.imgs = {}
imgnames = []
impth = osp.join(rootpth, 'leftImg8bit', mode)
folders = os.listdir(impth)
for fd in folders:
fdpth = osp.join(impth, fd)
im_names = os.listdir(fdpth)
names = [el.replace('_leftImg8bit.png', '') for el in im_names]
impths = [osp.join(fdpth, el) for el in im_names]
imgnames.extend(names)
self.imgs.update(dict(zip(names, impths)))
## parse gt directory
self.labels = {}
gtnames = []
gtpth = osp.join(rootpth, 'gtFine', mode)
folders = os.listdir(gtpth)
for fd in folders:
fdpth = osp.join(gtpth, fd)
lbnames = os.listdir(fdpth)
lbnames = [el for el in lbnames if 'labelIds' in el]
names = [el.replace('_gtFine_labelIds.png', '') for el in lbnames]
lbpths = [osp.join(fdpth, el) for el in lbnames]
gtnames.extend(names)
self.labels.update(dict(zip(names, lbpths)))
self.imnames = imgnames
self.len = len(self.imnames)
print('self.len', self.mode, self.len)
assert set(imgnames) == set(gtnames)
assert set(self.imnames) == set(self.imgs.keys())
assert set(self.imnames) == set(self.labels.keys())
## pre-processing
self.to_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
self.trans_train = Compose([
ColorJitter(
brightness = 0.5,
contrast = 0.5,
saturation = 0.5),
HorizontalFlip(),
# RandomScale((0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0)),
RandomScale(randomscale),
# RandomScale((0.125, 1)),
# RandomScale((0.125, 0.25, 0.375, 0.5, 0.675, 0.75, 0.875, 1.0)),
# RandomScale((0.125, 0.25, 0.375, 0.5, 0.675, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5)),
RandomCrop(cropsize)
])
def __getitem__(self, idx):
fn = self.imnames[idx]
impth = self.imgs[fn]
lbpth = self.labels[fn]
img = Image.open(impth).convert('RGB')
label = Image.open(lbpth)
if self.mode == 'train' or self.mode == 'trainval':
im_lb = dict(im = img, lb = label)
im_lb = self.trans_train(im_lb)
img, label = im_lb['im'], im_lb['lb']
img = self.to_tensor(img)
label = np.array(label).astype(np.int64)[np.newaxis, :]
label = self.convert_labels(label)
return img, label
def __len__(self):
return self.len
def convert_labels(self, label):
for k, v in self.lb_map.items():
label[label == k] = v
return label
if __name__ == "__main__":
from tqdm import tqdm
ds = CityScapes('./data/', n_classes=19, mode='val')
uni = []
for im, lb in tqdm(ds):
lb_uni = np.unique(lb).tolist()
uni.extend(lb_uni)
print(uni)
print(set(uni))
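    # For training, the dataset is typically wrapped in a DataLoader, e.g. (batch
    # size and worker count here are placeholders, not project settings):
    #   dl = torch.utils.data.DataLoader(ds, batch_size=4, shuffle=True,
    #                                    num_workers=2, drop_last=True)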
|
the-stack_0_5864 | import pytest
import json
from bitarray import bitarray
from bigsi.tests.base import CONFIGS
from bigsi import BIGSI
from bigsi.storage import get_storage
from bigsi.utils import seq_to_kmers
import pytest
def test_create():
for config in CONFIGS:
get_storage(config).delete_all()
bloomfilters = [BIGSI.bloom(config, ["ATC", "ATA"])]
samples = ["1"]
bigsi = BIGSI.build(config, bloomfilters, samples)
assert bigsi.kmer_size == 3
assert bigsi.bloomfilter_size == 1000
assert bigsi.num_hashes == 3
assert bigsi.num_samples == 1
assert bigsi.lookup("ATC") == {"ATC": bitarray("1")}
assert bigsi.colour_to_sample(0) == "1"
assert bigsi.sample_to_colour("1") == 0
bigsi.delete()
def test_insert():
for config in CONFIGS:
get_storage(config).delete_all()
bloomfilters = [BIGSI.bloom(config, ["ATC", "ATA"])]
samples = ["1"]
bigsi = BIGSI.build(config, bloomfilters, samples)
bloomfilter_2 = BIGSI.bloom(config, ["ATC", "ATT"])
bigsi.insert(bloomfilter_2, "2")
assert bigsi.kmer_size == 3
assert bigsi.bloomfilter_size == 1000
assert bigsi.num_hashes == 3
assert bigsi.num_samples == 2
assert bigsi.lookup(["ATC", "ATA", "ATT"]) == {
"ATC": bitarray("11"),
"ATA": bitarray("10"),
"ATT": bitarray("01"),
}
assert bigsi.colour_to_sample(0) == "1"
assert bigsi.sample_to_colour("1") == 0
assert bigsi.colour_to_sample(1) == "2"
assert bigsi.sample_to_colour("2") == 1
bigsi.delete()
def test_unique_sample_names():
for config in CONFIGS:
get_storage(config).delete_all()
bloom = BIGSI.bloom(config, ["ATC", "ATA"])
bigsi = BIGSI.build(config, [bloom], ["1"])
with pytest.raises(ValueError):
bigsi.insert(bloom, "1")
assert bigsi.num_samples == 1
assert bigsi.lookup(["ATC", "ATA", "ATT"]) == {
"ATC": bitarray("1"),
"ATA": bitarray("1"),
"ATT": bitarray("0"),
}
bigsi.delete()
def test_exact_search():
config = CONFIGS[0]
kmers_1 = seq_to_kmers("ATACACAAT", config["k"])
kmers_2 = seq_to_kmers("ACAGAGAAC", config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
for config in CONFIGS:
get_storage(config).delete_all()
bigsi = BIGSI.build(config, [bloom1, bloom2], ["a", "b"])
assert bigsi.search("ATACACAAT")[0] == {
"percent_kmers_found": 100,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "a",
}
assert bigsi.search("ACAGAGAAC")[0] == {
"percent_kmers_found": 100,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "b",
}
assert bigsi.search("ACAGTTAAC") == []
bigsi.delete()
@pytest.mark.skip(
reason="Passes in isolation, but fails when run with the rest of the tests"
)
def test_inexact_search():
for config in CONFIGS:
get_storage(config).delete_all()
config = CONFIGS[0]
kmers_1 = seq_to_kmers("ATACACAAT", config["k"])
kmers_2 = seq_to_kmers("ATACACAAC", config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
for config in CONFIGS:
get_storage(config).delete_all()
with pytest.raises(BaseException):
BIGSI(config)
bigsi = BIGSI.build(config, [bloom1, bloom2], ["a", "b"])
assert bigsi.search("ACAGTTAAC", 0.5) == []
assert bigsi.lookup("AAT") == {"AAT": bitarray("10")}
results = bigsi.search("ATACACAAT", 0.5)
assert results[0] == {
"percent_kmers_found": 100.0,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "a",
}
assert (
json.dumps(results[0])
== '{"percent_kmers_found": 100.0, "num_kmers": 6, "num_kmers_found": 6, "sample_name": "a"}'
)
assert results[1] == {
"percent_kmers_found": 83.33,
"num_kmers": 6,
"num_kmers_found": 5,
"sample_name": "b",
}
bigsi.delete()
def test_search_concordance():
config = CONFIGS[0]
seq_a = "ATACACAAT"
seq_b = "ATACACAAC"
kmers_1 = seq_to_kmers(seq_a, config["k"])
kmers_2 = seq_to_kmers(seq_b, config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
for config in CONFIGS:
get_storage(config).delete_all()
bigsi = BIGSI.build(config, [bloom1, bloom2], ["a", "b"])
exp_result_a = {
"percent_kmers_found": 100.0,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "a"
}
inexact_results_a = sorted(bigsi.search(seq_a, 0.5),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(inexact_results_a) == 2
assert inexact_results_a[0] == exp_result_a
exact_results_a = sorted(bigsi.search(seq_a, 1.0),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(exact_results_a) == 1
assert exact_results_a[0] == exp_result_a
exp_result_b = {
"percent_kmers_found": 100.0,
"num_kmers": 6,
"num_kmers_found": 6,
"sample_name": "b"
}
inexact_results_b = sorted(bigsi.search(seq_b, 0.5),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(inexact_results_b) == 2
assert inexact_results_b[0] == exp_result_b
exact_results_b = sorted(bigsi.search(seq_b, 1.0),
key=lambda x: x["num_kmers_found"],
reverse=True)
assert len(exact_results_b) == 1
assert exact_results_b[0] == exp_result_b
bigsi.delete()
##
@pytest.mark.skip(reason="TODO, fix test to work on single config")
def test_merge():
for config in CONFIGS:
get_storage(config).delete_all()
config = CONFIGS[0]
kmers_1 = seq_to_kmers("ATACACAAT", config["k"])
kmers_2 = seq_to_kmers("ATACACAAC", config["k"])
bloom1 = BIGSI.bloom(config, kmers_1)
bloom2 = BIGSI.bloom(config, kmers_2)
bigsi1 = BIGSI.build(CONFIGS[0], [bloom1], ["a"])
bigsi2 = BIGSI.build(CONFIGS[1], [bloom2], ["b"])
bigsic = BIGSI.build(CONFIGS[2], [bloom1, bloom2], ["a", "b"])
bigsi1.merge(bigsi2)
assert bigsi1.search("ATACACAAT", 0.5) == bigsic.search("ATACACAAT", 0.5)
bigsi1.delete()
bigsi2.delete()
bigsic.delete()
|
the-stack_0_5866 | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
from projects.SimCSE.modeling.model_utils import MLPLayer, cosine_similarity
from projects.SimCSE.utils.load_huggingface_weight import load_huggingface_bert
from .bert_for_simcse import BertForSimCSE
class Simcse_sup(nn.Module):
def __init__(self, cfg):
super().__init__()
self.bert = BertForSimCSE(cfg)
self.mlp = MLPLayer(cfg)
self.pooler_type = cfg.pooler_type
if cfg.pretrained_model_weight is not None:
load_huggingface_bert(
self.bert,
cfg.pretrained_model_weight,
cfg["hidden_size"],
cfg["num_attention_heads"],
cfg["hidden_layers"],
)
def pooler(self, inputs, attention_mask):
if self.pooler_type == "cls":
return inputs[0][:, 0]
elif self.pooler_type == "pooled":
return inputs[1]
elif self.pooler_type == "last-avg":
last_hidden = inputs[0]
return (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(
-1
).unsqueeze(-1)
elif self.pooler_type == "first-last-avg":
first_hidden = inputs[2][1]
last_hidden = inputs[0]
res = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
1
) / attention_mask.sum(-1).unsqueeze(-1)
return res
def create_use_row(self, labels):
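        # Keeps the anchor and positive rows of each (anchor, positive, hard-negative)
        # triplet and drops every third row, so the hard negative only contributes as a
        # column (in-batch negative) in the similarity matrix rather than as a query row.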
count = 0
use_row = []
for row in range(labels.size(0)):
if count % 2 == 0 and count != 0:
count = 0
continue
use_row.append(row)
count += 1
return flow.tensor(use_row, sbp=labels.sbp, placement=labels.placement)
def forward(self, input_ids, attention_mask, token_type_ids=None, labels=None):
if self.training:
bs = input_ids.size(0)
input_ids = input_ids.view(bs * 3, -1)
attention_mask = attention_mask.view(bs * 3, -1)
out = self.bert(input_ids, attention_mask)
out = self.pooler(out, attention_mask)
out = self.mlp(out)
labels = flow.arange(
out.size(0),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=out.placement,
)
use_row = self.create_use_row(labels)
labels = (use_row - use_row % 3 * 2) + 1
sim = cosine_similarity(out.unsqueeze(1), out.unsqueeze(0))
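            # Mask the diagonal with a large negative value so a sentence is never
            # treated as its own positive in the softmax cross-entropy below.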
sim = (
sim
- flow.eye(
out.size(0),
sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
placement=out.placement,
)
* 1e12
)
sim = flow.index_select(sim, dim=0, index=use_row)
sim = sim / 0.05
loss = nn.CrossEntropyLoss()(sim, labels)
return {"loss": loss}
else:
bs = input_ids.size(0)
input_ids = input_ids.view(bs * 2, -1)
attention_mask = attention_mask.view(bs * 2, -1)
out = self.bert(input_ids, attention_mask)
out = self.pooler(out, attention_mask)
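            # The MLP output is discarded here; SimCSE commonly applies the MLP head
            # only during training and uses the raw pooler output at evaluation time.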
self.mlp(out)
out = out.view(bs, 2, -1)
sent1 = out[:, 0]
sent2 = out[:, 1]
sim = cosine_similarity(sent1, sent2)
sim = sim.to_global(sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]))
return {"sim": sim.unsqueeze(1), "labels": labels}
|
the-stack_0_5870 | import pandas as pd
import pytest
import torch
from deepdow.benchmarks import OneOverN
from deepdow.callbacks import Callback
from deepdow.experiments import History, Run
from deepdow.losses import MeanReturns, StandardDeviation
from deepdow.nn import DummyNet
def test_basic():
n_channels = 2
x = torch.rand(10, n_channels, 4, 5)
network = DummyNet(n_channels=n_channels)
y = network(x)
print(y)
def test_history():
history = History()
history.add_entry(model='whatever', epoch=1)
history.add_entry(model='whatever_2', epoch=1, value=3)
history.add_entry(model='1111', epoch=2)
metrics_1 = history.metrics_per_epoch(1)
metrics_2 = history.metrics_per_epoch(2)
metrics_all = history.metrics
assert isinstance(metrics_1, pd.DataFrame)
assert isinstance(metrics_2, pd.DataFrame)
assert isinstance(metrics_all, pd.DataFrame)
assert len(metrics_1) == 2
assert len(metrics_2) == 1
assert len(metrics_all) == 3
with pytest.raises(KeyError):
history.metrics_per_epoch(3)
history.pretty_print(epoch=1)
history.pretty_print(epoch=None)
class TestRun:
def test_wrong_construction_1(self, dataloader_dummy):
"""Wrong positional arguments."""
with pytest.raises(TypeError):
Run('this_is_fake', MeanReturns(), dataloader_dummy)
with pytest.raises(TypeError):
Run(DummyNet(), 'this_is_fake', dataloader_dummy)
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), 'this_is_fake')
def test_wrong_construction_2(self, dataloader_dummy):
"""Wrong keyword arguments."""
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics='this_is_fake')
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'a': 'this_is_fake'})
with pytest.raises(ValueError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'loss': MeanReturns()})
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders='this_is_fake')
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders={'val': 'this_is_fake'})
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks='this_is_fake')
with pytest.raises(TypeError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'uniform': 'this_is_fake'})
with pytest.raises(ValueError):
Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'main': OneOverN()})
@pytest.mark.parametrize('additional_kwargs', [True, False])
def test_attributes_after_construction(self, dataloader_dummy, additional_kwargs):
network = DummyNet()
loss = MeanReturns()
kwargs = {}
if additional_kwargs:
kwargs.update({'metrics': {'std': StandardDeviation()},
'val_dataloaders': {'val': dataloader_dummy},
'benchmarks': {'whatever': OneOverN()}})
run = Run(network, loss, dataloader_dummy, **kwargs)
assert network is run.network
assert loss is run.loss
assert dataloader_dummy is run.train_dataloader
assert isinstance(run.metrics, dict)
assert isinstance(run.val_dataloaders, dict)
assert isinstance(run.hparams, dict)
def test_launch(self, dataloader_dummy):
network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
loss = MeanReturns()
run = Run(network, loss, dataloader_dummy)
run.launch(n_epochs=1)
def test_launch_interrupt(self, dataloader_dummy, monkeypatch):
network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
loss = MeanReturns()
class TempCallback(Callback):
def on_train_begin(self, metadata):
raise KeyboardInterrupt()
monkeypatch.setattr('time.sleep', lambda x: None)
run = Run(network, loss, dataloader_dummy, callbacks=[TempCallback()])
run.launch(n_epochs=1)
|
the-stack_0_5871 | import re
from pyspark import SparkConf, SparkContext
def normalizeWords(text):
return re.compile(r'\W+', re.UNICODE).split(text.lower())
conf = SparkConf().setMaster("local").setAppName("WordCount")
sc = SparkContext(conf = conf)
input = sc.textFile("file:///sparkcourse/book.txt")
words = input.flatMap(normalizeWords)
wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
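# Flip each pair to (count, word) so sortByKey() orders results by frequency.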
wordCountsSorted = wordCounts.map(lambda x: (x[1], x[0])).sortByKey()
results = wordCountsSorted.collect()
for result in results:
count = str(result[0])
word = result[1].encode('ascii', 'ignore')
if (word):
print(word.decode() + ":\t\t" + count)
|
the-stack_0_5873 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for converting parsed doc content into markdown pages.
The adjacent `parser` module creates `PageInfo` objects, containing all data
necessary to document an element of the TensorFlow API.
This module contains one public function, which handles the conversion of these
`PageInfo` objects into a markdown string:
md_page = build_md_page(page_info)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
def build_md_page(page_info):
"""Given a PageInfo object, return markdown for the page.
Args:
page_info: must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
`parser.ModulePageInfo`
Returns:
Markdown for the page
Raises:
ValueError: if `page_info` is an instance of an unrecognized class
"""
if page_info.for_function():
return _build_function_page(page_info)
if page_info.for_class():
return _build_class_page(page_info)
if page_info.for_module():
return _build_module_page(page_info)
raise ValueError('Unknown Page Info Type: %s' % type(page_info))
def _build_function_page(page_info):
"""Given a FunctionPageInfo object Return the page as an md string."""
parts = ['# %s\n\n' % page_info.full_name]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.signature is not None:
parts.append(_build_signature(page_info))
if page_info.defined_in:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
return ''.join(parts)
def _build_class_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# {page_info.full_name}\n\n'.format(page_info=page_info)]
parts.append('## Class `%s`\n\n' % page_info.full_name.split('.')[-1])
if page_info.bases:
parts.append('Inherits From: ')
link_template = '[`{short_name}`]({url})'
parts.append(', '.join(
link_template.format(**base._asdict()) for base in page_info.bases))
parts.append('\n\n')
# Sort the methods list, but make sure constructors come first.
constructor_names = ['__init__', '__new__']
constructors = sorted(
method for method in page_info.methods
if method.short_name in constructor_names)
other_methods = sorted(
method for method in page_info.methods
if method.short_name not in constructor_names)
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* Class `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
parts.append('\n\n')
if constructors:
for method_info in constructors:
parts.append(_build_method_section(method_info, heading_level=2))
parts.append('\n\n')
if page_info.classes:
parts.append('## Child Classes\n')
link_template = ('[`class {class_info.short_name}`]'
'({class_info.url})\n\n')
class_links = sorted(
link_template.format(class_info=class_info)
for class_info in page_info.classes)
parts.extend(class_links)
if page_info.properties:
parts.append('## Properties\n\n')
for prop_info in page_info.properties:
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
parts.append(h3.format(short_name=prop_info.short_name))
parts.append(prop_info.doc.docstring)
parts.append(_build_function_details(prop_info.doc.function_details))
parts.append(_build_compatibility(prop_info.doc.compatibility))
parts.append('\n\n')
parts.append('\n\n')
if other_methods:
parts.append('## Methods\n\n')
for method_info in other_methods:
parts.append(_build_method_section(method_info))
parts.append('\n\n')
if page_info.other_members:
parts.append('## Class Members\n\n')
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
others_member_headings = (h3.format(short_name=info.short_name)
for info in sorted(page_info.other_members))
parts.extend(others_member_headings)
return ''.join(parts)
def _build_method_section(method_info, heading_level=3):
"""Generates a markdown section for a method.
Args:
method_info: A `MethodInfo` object.
heading_level: An Int, which HTML heading level to use.
Returns:
A markdown string.
"""
parts = []
heading = ('<h{heading_level} id="{short_name}">'
'<code>{short_name}</code>'
'</h{heading_level}>\n\n')
parts.append(heading.format(heading_level=heading_level,
**method_info._asdict()))
if method_info.signature is not None:
parts.append(_build_signature(method_info, use_full_name=False))
parts.append(method_info.doc.docstring)
parts.append(_build_function_details(method_info.doc.function_details))
parts.append(_build_compatibility(method_info.doc.compatibility))
parts.append('\n\n')
return ''.join(parts)
def _build_module_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# Module: {full_name}\n\n'.format(full_name=page_info.full_name)]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* Module `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.doc.docstring)
parts.append(_build_compatibility(page_info.doc.compatibility))
parts.append('\n\n')
if page_info.modules:
parts.append('## Modules\n\n')
template = '[`{short_name}`]({url}) module'
for item in page_info.modules:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.classes:
parts.append('## Classes\n\n')
template = '[`class {short_name}`]({url})'
for item in page_info.classes:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.functions:
parts.append('## Functions\n\n')
template = '[`{short_name}(...)`]({url})'
for item in page_info.functions:
parts.append(template.format(**item._asdict()))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.other_members:
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
parts.append('## Other Members\n\n')
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
for item in page_info.other_members:
parts.append(h3.format(**item._asdict()))
return ''.join(parts)
def _build_signature(obj_info, use_full_name=True):
"""Returns a md code block showing the function signature."""
# Special case tf.range, since it has an optional first argument
if obj_info.full_name == 'tf.range':
return (
'``` python\n'
"tf.range(limit, delta=1, dtype=None, name='range')\n"
"tf.range(start, limit, delta=1, dtype=None, name='range')\n"
'```\n\n')
parts = ['``` python']
parts.extend(['@' + dec for dec in obj_info.decorators])
signature_template = '{name}({sig})'
if not obj_info.signature:
sig = ''
elif len(obj_info.signature) == 1:
sig = obj_info.signature[0]
else:
sig = ',\n'.join(' %s' % sig_item for sig_item in obj_info.signature)
sig = '\n'+sig+'\n'
if use_full_name:
obj_name = obj_info.full_name
else:
obj_name = obj_info.short_name
parts.append(signature_template.format(name=obj_name, sig=sig))
parts.append('```\n\n')
return '\n'.join(parts)
def _build_compatibility(compatibility):
"""Return the compatibility section as an md string."""
parts = []
sorted_keys = sorted(compatibility.keys())
for key in sorted_keys:
value = compatibility[key]
# Dedent so that it does not trigger markdown code formatting.
value = textwrap.dedent(value)
parts.append('\n\n#### %s Compatibility\n%s\n' % (key.title(), value))
return ''.join(parts)
def _build_function_details(function_details):
"""Return the function details section as an md string."""
parts = []
for detail in function_details:
sub = []
sub.append('#### ' + detail.keyword + ':\n\n')
sub.append(textwrap.dedent(detail.header))
for key, value in detail.items:
sub.append('* <b>`%s`</b>: %s' % (key, value))
parts.append(''.join(sub))
return '\n'.join(parts)
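if __name__ == '__main__':
    # Minimal sketch of one of the helpers above in isolation; the input dict is
    # a made-up example, not output from the real parser module.
    _sample_compatibility = {
        'eager': 'Works the same in eager mode.',
        'numpy': 'Returns an ndarray instead of a Tensor.',
    }
    # Prints one "#### Eager Compatibility" and one "#### Numpy Compatibility"
    # section, sorted by key, with each value dedented.
    print(_build_compatibility(_sample_compatibility))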
|
the-stack_0_5875 | import socket
import time
import pychromecast
from gtts import gTTS
def get_speaker(ip_addr=None, name=None):
if ip_addr:
return pychromecast.Chromecast(str(ip_addr))
speakers = pychromecast.get_chromecasts()
if len(speakers) == 0:
print("No devices are found")
raise Exception
if name:
return next(s for s in speakers if s.device.friendly_name == name)
    return next(iter(speakers))  # a plain list is not an iterator, so wrap it before calling next()
def speak(text, speaker, lang="en"):
try:
tts = gTTS(text=text, lang=lang)
urls = tts.get_urls()
if not speaker.is_idle:
print("Killing current running app")
speaker.quit_app()
time.sleep(5)
speaker.wait()
speaker.media_controller.play_media(urls[0], "audit/mp3")
speaker.media_controller.block_until_active()
except Exception as error:
print(str(error))
raise Exception
def check_speaker(speaker, lang):
try:
speak(text="OK", speaker=speaker, lang=lang)
print("You are ready to speak!")
return True
except Exception as error:
print("Try an another ip or name: %s" % (str(error)))
return False
def prepare_speaker():
print("Enter language (English: en or Japanese: ja): ", end="")
lang = input()
print("Enter Google Home name or IP: ", end="")
name_or_ip = input()
try:
socket.inet_aton(name_or_ip)
speaker = get_speaker(ip_addr=name_or_ip)
except socket.error:
speaker = get_speaker(name=name_or_ip)
except Exception as error:
print("Error: %s" % (str(error)))
raise Exception
return speaker, lang
def main():
while True:
try:
speaker, lang = prepare_speaker()
except Exception:
continue
if check_speaker(speaker, lang):
break
print("Failed to setup. Try again!")
print("Start typing ...")
text = ""
while text != "bye":
print(">> ", end="")
text = input()
if text:
speak(text, speaker, lang)
if __name__ == "__main__":
main()
|
the-stack_0_5879 | # model settings
model = dict(
type='CascadeRCNN',
num_stages=3,
pretrained='open-mmlab://msra/hrnetv2_w32',
backbone=dict(
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256)))),
neck=dict(type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 19])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 20
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/cascade_rcnn_hrnetv2p_w32'
load_from = None
resume_from = None
workflow = [('train', 1)]
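# How a config file like this is typically consumed (hedged sketch; the config
# path below is an assumption and the exact API depends on the mmcv/mmdetection
# version in use):
#
#   from mmcv import Config
#   cfg = Config.fromfile('configs/cascade_rcnn_hrnetv2p_w32_20e.py')
#   print(cfg.model.backbone.type)  # -> 'HRNet'
#   print(cfg.total_epochs)         # -> 20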
|
the-stack_0_5880 | # Copyright (c) 2019 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Provide data iterator for CIFAR10 examples.
'''
from contextlib import contextmanager
import numpy as np
import struct
import tarfile
import zlib
import time
import os
import errno
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download, get_data_home
class Cifar10DataSource(DataSource):
'''
    Get data directly from the CIFAR-10 dataset on the Internet (www.cs.toronto.edu).
'''
def _get_data(self, position):
image = self._images[self._indexes[position]]
label = self._labels[self._indexes[position]]
return (image, label)
def __init__(self, train=True, shuffle=False, rng=None):
super(Cifar10DataSource, self).__init__(shuffle=shuffle, rng=rng)
self._train = train
data_uri = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
logger.info('Getting labeled data from {}.'.format(data_uri))
r = download(data_uri) # file object returned
with tarfile.open(fileobj=r, mode="r:gz") as fpin:
# Training data
if train:
images = []
labels = []
for member in fpin.getmembers():
if "data_batch" not in member.name:
continue
fp = fpin.extractfile(member)
data = np.load(fp, encoding="bytes", allow_pickle=True)
images.append(data[b"data"])
labels.append(data[b"labels"])
self._size = 50000
self._images = np.concatenate(
images).reshape(self._size, 3, 32, 32)
self._labels = np.concatenate(labels).reshape(-1, 1)
# Validation data
else:
for member in fpin.getmembers():
if "test_batch" not in member.name:
continue
fp = fpin.extractfile(member)
data = np.load(fp, encoding="bytes", allow_pickle=True)
images = data[b"data"]
labels = data[b"labels"]
self._size = 10000
self._images = images.reshape(self._size, 3, 32, 32)
self._labels = np.array(labels).reshape(-1, 1)
r.close()
        logger.info('Getting labeled data from {} done.'.format(data_uri))
self._size = self._labels.size
self._variables = ('x', 'y')
if rng is None:
rng = np.random.RandomState(313)
self.rng = rng
self.reset()
def reset(self):
if self._shuffle:
self._indexes = self.rng.permutation(self._size)
else:
self._indexes = np.arange(self._size)
super(Cifar10DataSource, self).reset()
@property
def images(self):
"""Get copy of whole data with a shape of (N, 1, H, W)."""
return self._images.copy()
@property
def labels(self):
"""Get copy of whole label with a shape of (N, 1)."""
return self._labels.copy()
def data_iterator_cifar10(batch_size,
train=True,
rng=None,
shuffle=True,
with_memory_cache=False,
with_file_cache=False):
'''
Provide DataIterator with :py:class:`Cifar10DataSource`
with_memory_cache and with_file_cache option's default value is all False,
because :py:class:`Cifar10DataSource` is able to store all data into memory.
'''
return data_iterator(Cifar10DataSource(train=train, shuffle=shuffle, rng=rng),
batch_size,
rng,
with_memory_cache,
with_file_cache)
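if __name__ == '__main__':
    # Minimal usage sketch (assumes network access to download CIFAR-10 on the
    # first call; the batch size is illustrative).
    di = data_iterator_cifar10(batch_size=64, train=True, shuffle=True)
    images, labels = di.next()
    print(images.shape)  # expected: (64, 3, 32, 32)
    print(labels.shape)  # expected: (64, 1)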
|
the-stack_0_5882 | import pandas as pd
from ..utils.messages import msg_warning, msg_info
def _drop(df: pd.DataFrame, *cols) -> pd.DataFrame:
try:
index = df.columns.values
for col in cols:
if col not in index:
msg_warning("Column", col, "not found. Aborting")
                return df
df = df.drop(col, axis=1)
except Exception as e:
raise ("Can not drop column", e)
return df
def _rename(df: pd.DataFrame, source_col: str, dest_col: str) -> pd.DataFrame:
try:
df = df.rename(columns={source_col: dest_col})
except Exception as e:
raise ("Can not rename column", e)
msg_info("Column", source_col, "renamed")
return df
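# Illustrative usage of the helpers above (comment-only sketch, since this module
# relies on relative imports and is meant to be imported as part of the package):
#
#   frame = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
#   frame = _drop(frame, "c")             # -> columns ['a', 'b']
#   frame = _rename(frame, "a", "alpha")  # -> columns ['alpha', 'b']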
|
the-stack_0_5884 | """
Summarize results for the train/valid/test splits.
# PROGRAM : metrics.py
# PURPOSE : compute model metrics on the test dataset
# AUTHOR : Caio Eadi Stringari
# EMAIL : [email protected]
# V1.0 : 05/05/2020 [Caio Stringari]
"""
import argparse
import numpy as np
import tensorflow as tf
import pandas as pd
import pathlib
try:
import efficientnet.tfkeras as efn
except Exception:
print(ImportError("\nWarning: run pip install -U --pre efficientnet"))
from tensorflow.keras.preprocessing.image import ImageDataGenerator
if __name__ == '__main__':
print("\nClassifiying wave breaking data, please wait...\n")
# Argument parser
parser = argparse.ArgumentParser()
# input model and history
parser.add_argument("--model", "-M",
nargs=1,
action="store",
dest="model",
required=True,
help="Input model in .h5 format.",)
parser.add_argument("--history", "-hist",
nargs=1,
action="store",
dest="history",
required=True,
help="Input model history in csv format.",)
# input test data
parser.add_argument("--data", "-data",
nargs=1,
action="store",
dest="data",
required=True,
help="Input path with image data.",)
parser.add_argument("--threshold", "-trx",
nargs=1,
action="store",
dest="TRX",
default=[0.5],
required=False,
help="Probability threshold for classification.")
parser.add_argument("--epoch", "-epch",
nargs=1,
action="store",
dest="epoch",
default=[-1],
required=False,
help="Which epoch to use. Default is last epoch.")
# output data
parser.add_argument("--output", "-o",
nargs=1,
action="store",
dest="output",
required=True,
help="Output file.",)
args = parser.parse_args()
# --- test data input ---
test_dir = args.data[0]
test_dir = pathlib.Path(test_dir)
image_count = len(list(test_dir.glob('*/*')))
epoch = int(args.epoch[0])
BATCH_SIZE = int(image_count/10)
class_names = np.array([item.name for item in test_dir.glob('*')])
try:
nclasses = len(class_names)
print(" Found image data, proceeding.\n")
print(" - Classes are {}".format(class_names))
except Exception:
raise IOError("Check your data!")
# --- pre-trained model ---
model = tf.keras.models.load_model(args.model[0])
history = pd.read_csv(args.history[0])
# train data
accuracy = history.iloc[epoch]["Binary_Accuracy"]
tp = history.iloc[epoch]["True_Positives"]
fp = history.iloc[epoch]["False_Positives"]
tn = history.iloc[epoch]["True_Negatives"]
fn = history.iloc[epoch]["False_Negatives"]
precision = history.iloc[epoch]["Precision"]
recall = history.iloc[epoch]["Recall"]
auc = history.iloc[epoch]["AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_train = pd.DataFrame([X], columns=cols)
df_train.index = ["Train"]
print(df_train)
# validation data
accuracy = history.iloc[epoch]["val_Binary_Accuracy"]
tp = history.iloc[epoch]["val_True_Positives"]
fp = history.iloc[epoch]["val_False_Positives"]
tn = history.iloc[epoch]["val_True_Negatives"]
fn = history.iloc[epoch]["val_False_Negatives"]
precision = history.iloc[epoch]["val_Precision"]
recall = history.iloc[epoch]["val_Recall"]
auc = history.iloc[epoch]["val_AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_val = pd.DataFrame([X], columns=cols)
df_val.index = ["Validation"]
print(df_val)
# evaluate the model on test data
inp_shape = model.input_shape
img_height = inp_shape[1] # image height for all images
img_width = inp_shape[2] # image width for all images
datagen = ImageDataGenerator(rescale=1./255.)
print("\n Fitting the teset data generator:\n")
data_gen_test = datagen.flow_from_directory(
directory=str(test_dir), batch_size=BATCH_SIZE, shuffle=False,
target_size=(img_height, img_width), classes=["0", "1"],
class_mode="binary")
result = model.evaluate(data_gen_test)
metrics = dict(zip(model.metrics_names, result))
    # test data
accuracy = metrics["Binary_Accuracy"]
tp = metrics["True_Positives"]
fp = metrics["False_Positives"]
tn = metrics["True_Negatives"]
fn = metrics["False_Negatives"]
precision = metrics["Precision"]
recall = metrics["Recall"]
auc = metrics["AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_test = pd.DataFrame([X], columns=cols)
df_test.index = ["Test"]
# merge results
df = pd.concat([df_train, df_val, df_test])
print(df)
df.to_excel(args.output[0], float_format="%.3f", index=True)
print("\nMy work is done!\n")
|
the-stack_0_5885 | """Eclect.us view"""
__docformat__ = "numpy"
from gamestonk_terminal.stocks.fundamental_analysis import eclect_us_model
from gamestonk_terminal.rich_config import console
def display_analysis(
ticker: str,
) -> None:
"""Display analysis of SEC filings based on NLP model. [Source: https://eclect.us]
Parameters
----------
ticker: str
Ticker to do SEC filings analysis from
"""
analysis = eclect_us_model.get_filings_analysis(ticker)
if analysis:
console.print(analysis)
else:
console.print("Filings not found from eclect.us")
console.print("")
|
the-stack_0_5886 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
import pytest
import uamqp
from uamqp import authentication, errors, c_uamqp
from azure.eventhub import (
EventData,
EventHubSharedKeyCredential,
EventHubProducerClient,
EventHubConsumerClient
)
from azure.eventhub.exceptions import OperationTimeoutError
@pytest.mark.liveTest
def test_send_with_long_interval_sync(live_eventhub, sleep):
sender = EventHubProducerClient(live_eventhub['hostname'], live_eventhub['event_hub'],
EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']))
with sender:
batch = sender.create_batch()
batch.add(EventData(b"A single event"))
sender.send_batch(batch)
for _ in range(1):
if sleep:
time.sleep(300)
else:
sender._producers[-1]._handler._connection._conn.destroy()
batch = sender.create_batch()
batch.add(EventData(b"A single event"))
sender.send_batch(batch)
partition_ids = sender.get_partition_ids()
received = []
for p in partition_ids:
uri = "sb://{}/{}".format(live_eventhub['hostname'], live_eventhub['event_hub'])
sas_auth = authentication.SASTokenAuth.from_shared_access_key(
uri, live_eventhub['key_name'], live_eventhub['access_key'])
source = "amqps://{}/{}/ConsumerGroups/{}/Partitions/{}".format(
live_eventhub['hostname'],
live_eventhub['event_hub'],
live_eventhub['consumer_group'],
p)
receiver = uamqp.ReceiveClient(source, auth=sas_auth, debug=False, timeout=5000, prefetch=500)
try:
receiver.open()
received.extend([EventData._from_message(x) for x in receiver.receive_message_batch(timeout=5000)])
finally:
receiver.close()
assert len(received) == 2
assert list(received[0].body)[0] == b"A single event"
@pytest.mark.liveTest
def test_send_connection_idle_timeout_and_reconnect_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubProducerClient.from_connection_string(conn_str=connection_str, idle_timeout=10)
with client:
ed = EventData('data')
sender = client._create_producer(partition_id='0')
with sender:
sender._open_with_retry()
time.sleep(11)
sender._unsent_events = [ed.message]
ed.message.on_send_complete = sender._on_outcome
with pytest.raises((uamqp.errors.ConnectionClose,
uamqp.errors.MessageHandlerError, OperationTimeoutError)):
# Mac may raise OperationTimeoutError or MessageHandlerError
sender._send_event_data()
sender._send_event_data_with_retry()
messages = receivers[0].receive_message_batch(max_batch_size=10, timeout=10000)
received_ed1 = EventData._from_message(messages[0])
assert received_ed1.body_as_str() == 'data'
@pytest.mark.liveTest
def test_receive_connection_idle_timeout_and_reconnect_sync(connstr_senders):
connection_str, senders = connstr_senders
client = EventHubConsumerClient.from_connection_string(
conn_str=connection_str,
consumer_group='$default',
idle_timeout=10
)
def on_event_received(event):
on_event_received.event = event
with client:
consumer = client._create_consumer("$default", "0", "-1", on_event_received)
with consumer:
consumer._open()
time.sleep(11)
ed = EventData("Event")
senders[0].send(ed)
consumer._handler.do_work()
assert consumer._handler._connection._state == c_uamqp.ConnectionState.DISCARDING
duration = 10
now_time = time.time()
end_time = now_time + duration
while now_time < end_time:
consumer.receive()
time.sleep(0.01)
now_time = time.time()
assert on_event_received.event.body_as_str() == "Event"
|
the-stack_0_5887 | import discord
from discord.ext import commands
from random import randint
class Bottlespin:
"""Spins a bottle and lands on a random user."""
def __init__(self, bot):
self.bot = bot
    @commands.command(pass_context=True, no_pm=True, aliases=["bottlespin"])
async def spin(self, ctx, role):
"""Spin the bottle"""
roles = ctx.message.server.roles
if "@" in role:
await self.bot.say("Please do noy use @ infront of the role. Thank you")
return
rolename = [role.name for role in roles]
rolename = str(rolename).lower()
role = role.lower()
author = ctx.message.author
server = ctx.message.server
if len(server.members) < 2:
await self.bot.say("`Not enough people are around to spin the bottle`")
return
if role in rolename:
roleexist = True
else:
await self.bot.say("`{} is not a exising role`".format(role))
return
if roleexist:
target = [m for m in server.members if m != author and role in [
s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
else:
target = [m for m in server.members if m != author and str(
m.status) == "online" or str(m.status) == "idle"]
if not target:
if role:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at with the role {}`".format(role))
else:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at`")
return
else:
target = target[randint(0, len(target)-1)]
await self.bot.say("`{0.display_name}#{0.discriminator} spinned the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))
def setup(bot):
n = Bottlespin(bot)
bot.add_cog(n)
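# Usage sketch for a legacy discord.py cog like this one (the "[p]" prefix is a
# placeholder for whatever command prefix the bot is configured with):
#
#   bot.add_cog(Bottlespin(bot))   # what setup() above does when the cog loads
#   # in chat: [p]spin <rolename>  -> points the bottle at a random online/idle
#   #                                 member holding that role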
|
the-stack_0_5888 | """ Periodic maintenance tasks """
import time
import typing
class Maintenance:
""" Container for periodic maintenance tasks """
def __init__(self, app):
self.app = app
self.tasks: typing.Dict[typing.Callable[[], None],
typing.Dict[str, float]] = {}
def register(self, func: typing.Callable[[], None], interval: float):
""" Registers a task to run periodically """
self.tasks[func] = {'interval': interval}
def run(self, force: bool = False):
""" Run all pending tasks; 'force' will run all tasks whether they're
pending or not. """
with self.app.app_context():
now = time.time()
for func, spec in self.tasks.items():
if force or now >= spec.get('next_run', 0):
func()
spec['next_run'] = now + spec['interval']
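# Usage sketch (the `app` object is assumed to expose an `app_context()` context
# manager, e.g. a Flask application, and `prune_expired_sessions` is a
# hypothetical task function):
#
#   maint = Maintenance(app)
#   maint.register(prune_expired_sessions, interval=300)  # every 5 minutes
#   maint.run()            # runs only tasks whose next_run time has passed
#   maint.run(force=True)  # runs every registered task immediately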
|
the-stack_0_5892 | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import tags
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.compute.common.clients.ping import PingClient
from cloudroast.compute.fixtures import ComputeFixture
class ConfigDriveFilesTest(ComputeFixture):
@classmethod
def setUpClass(cls):
"""
Perform actions that setup the necessary resources for testing
The following resources are created during this set up:
- A keypair with a random name starting with 'key'
- A server with the following settings:
- config_drive set to True
- The keypair previously created
- Remaining values required for creating a server will come
from test configuration.
"""
super(ConfigDriveFilesTest, cls).setUpClass()
cls.key = cls.keypairs_client.create_keypair(rand_name("key")).entity
cls.resources.add(cls.key.name,
cls.keypairs_client.delete_keypair)
cls.server = cls.server_behaviors.create_active_server(
config_drive=True,
key_name=cls.key.name).entity
cls.resources.add(cls.server.id,
cls.servers_client.delete_server)
cls.config_drive_behaviors.mount_config_drive(
server=cls.server, servers_config=cls.servers_config,
key=cls.key.private_key,
source_path=cls.config_drive_config.mount_source_path,
destination_path=cls.config_drive_config.base_path_to_mount)
cls.vendor_meta = cls.config_drive_behaviors.get_vendor_metadata(
cls.server, cls.servers_config, key=cls.key.private_key,
filepath=cls.config_drive_config.vendor_meta_filepath)
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_dns_services(self):
"""
Verify Services of vendor networking metadata on config drive
Validate that there is at least one network information service in the
vendor metadata. Attempt to ping every service IP address in the network
information service(s). Validate that none of the ping attempts failed.
The following assertions occur:
- The number of network information services on the server is
greater than or equal to 1
- The list of failed ping attempts is empty.
"""
self.assertGreaterEqual(len(self.vendor_meta.network_info.services), 1,
msg='Expected config drive to have at least 1'
' network dns service configured')
service_ips = [service.address for service in
self.vendor_meta.network_info.services]
failed_pings = []
for service_ip in service_ips:
try:
PingClient.ping_until_reachable(
service_ip, timeout=60, interval_time=5)
            except Exception:
failed_pings.append(service_ip)
self.assertFalse(failed_pings, msg="Unable to reach the following "
"IP addresses: {0}".format(failed_pings))
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_networks(self):
"""
Vendor networking metadata should match the server's addresses
Validate that every IP address on the server is found in the network
information in the vendor metadata for the server created during test
set up.
The following assertions occur:
- The list of ips that are found on the server but not found in the
vendor metadata networks information is empty.
"""
expected_addresses = []
addresses = self.server.addresses
for name, ip_addresses in self.expected_networks.iteritems():
network = addresses.get_by_name(name)
if ip_addresses.get('v4'):
expected_addresses.append(network.ipv4)
if ip_addresses.get('v6'):
expected_addresses.append(network.ipv6)
config_drive_instance_ips = [network.ip_address for network in
self.vendor_meta.network_info.networks]
missing_ips = [ip for ip in expected_addresses if ip not in
config_drive_instance_ips]
self.assertFalse(missing_ips, msg="Missing IPs found: {0}".format(
missing_ips))
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_file_links_structure(self):
"""
Verify File structure of vendor metadata on config drive
Validate that there is at least one network information link. Validate
        that every link in the list has values for the attributes 'mtu',
        'id', and 'vif_id'.
The following assertions occur:
- The number of network information links on the server is
greater than or equal to 1
            - Every link in the list of links in vendor metadata has values
            for the attributes 'mtu', 'id', and 'vif_id'
"""
self.assertGreaterEqual(len(self.vendor_meta.network_info.links), 1,
msg='Expected config drive to have at least 1'
' hardware link configured')
for link in self.vendor_meta.network_info.links:
bad_attrs = [attr for attr in ['mtu', 'id', 'vif_id']
if getattr(link, attr, None) is None]
self.assertFalse(bad_attrs, msg="{0} not set in response".format(
" ".join(bad_attrs)))
@tags(type='smoke', net='yes')
def test_config_drive_network_metadata_file_network_structure(self):
"""
Verify File structure of vendor metadata on config drive
        Validate that every network in the network list from the network
        information in vendor metadata on the server created during test set up
        has values for 'type', 'netmask', 'link', 'routes', and 'id'.
The following assertions occur:
            - Every network in the network information in the vendor metadata
has values for the attributes 'type', 'netmask', 'link', 'routes',
and 'id'
"""
for network in self.vendor_meta.network_info.networks:
bad_attrs = [attr for attr in ['type',
'netmask',
'link',
'routes',
'id']
if getattr(network, attr, None) is None]
self.assertFalse(bad_attrs, msg="{0} not set in response".format(
" ".join(bad_attrs)))
def test_config_drive_vendor_metadata_ip_whitelist(self):
"""
The vendor metadata in config drive should have an ip whitelist
Validate that there is a value for the IP whitelist in the vendor
metadata.
The following assertions occur:
- The ip whitelist vendor metadata is not None
"""
self.assertIsNotNone(self.vendor_meta.ip_whitelist,
msg="ip_whitelist was not set in the response")
|
the-stack_0_5894 | import collections.abc
import copy
import inspect
import warnings
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
)
import prefect
import prefect.engine.cache_validators
from prefect.engine.results import ResultHandlerResult
import prefect.engine.signals
import prefect.triggers
from prefect.utilities import logging
from prefect.utilities.notifications import callback_factory
from prefect.utilities.tasks import unmapped
if TYPE_CHECKING:
from prefect.core.flow import Flow # pylint: disable=W0611
from prefect.engine.result import Result # pylint: disable=W0611
from prefect.engine.result_handlers import ResultHandler # pylint: disable=W0611
from prefect.engine.state import State # pylint: disable=W0611
from prefect.core import Edge # pylint: disable=W0611
VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
def _validate_run_signature(run: Callable) -> None:
func = getattr(run, "__wrapped__", run)
try:
run_sig = inspect.getfullargspec(func)
except TypeError as exc:
if str(exc) == "unsupported callable":
raise ValueError(
"This function can not be inspected (this is common "
"with `builtin` and `numpy` functions). In order to "
"use it as a task, please wrap it in a standard "
"Python function. For more detail, see "
"https://docs.prefect.io/core/advanced_tutorials/task-guide.html#the-task-decorator"
)
raise
if run_sig.varargs:
raise ValueError(
"Tasks with variable positional arguments (*args) are not "
"supported, because all Prefect arguments are stored as "
"keywords. As a workaround, consider modifying the run() "
"method to accept **kwargs and feeding the values "
"to *args."
)
reserved_kwargs = ["upstream_tasks", "mapped", "task_args", "flow"]
violations = [kw for kw in reserved_kwargs if kw in run_sig.args]
if violations:
msg = "Tasks cannot have the following argument names: {}.".format(
", ".join(violations)
)
msg += " These are reserved keyword arguments."
raise ValueError(msg)
class SignatureValidator(type):
def __new__(cls, name: str, parents: tuple, methods: dict) -> "SignatureValidator":
run = methods.get("run", lambda: None)
_validate_run_signature(run)
# necessary to ensure classes that inherit from parent class
# also get passed through __new__
return type.__new__(cls, name, parents, methods) # type: ignore
class Task(metaclass=SignatureValidator):
"""
    The Task class is the full representation of a unit of work.
    This Task class can be used directly as a first-class object, in which case it must
    be subclassed by a class that implements the `run` method. For a more
    functional way of generating Tasks, see [the task decorator](../utilities/tasks.html).
Inheritance example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
```
*Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
    the following keywords are reserved: `upstream_tasks`, `task_args`, `flow` and `mapped`.
An instance of a `Task` can be used functionally to generate other task instances
with the same attributes but with different values bound to their `run` methods.
Example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
a = AddTask()
with Flow("My Flow") as f:
t1 = a(1, 2) # t1 != a
t2 = a(5, 7) # t2 != a
```
To bind values to a Task's run method imperatively (and without making a copy), see `Task.bind`.
Args:
- name (str, optional): The name of this task
- slug (str, optional): The slug for this task. Slugs provide a stable ID for tasks so that
the Prefect API can identify task run states. If a slug is not provided, one will be generated
automatically once the task is added to a Flow.
- tags ([str], optional): A list of tags for this task
- max_retries (int, optional): The maximum amount of times this task can be retried
- retry_delay (timedelta, optional): The amount of time to wait until task is retried
- timeout (int, optional): The amount of time (in seconds) to wait while
running this task before a timeout occurs; note that sub-second
resolution is not supported
- trigger (callable, optional): a function that determines whether the
task should run, based on the states of any upstream tasks.
- skip_on_upstream_skip (bool, optional): if `True`, if any immediately
upstream tasks are skipped, this task will automatically be skipped as
well, regardless of trigger. By default, this prevents tasks from
attempting to use either state or data from tasks that didn't run. If
`False`, the task's trigger will be called as normal, with skips
considered successes. Defaults to `True`.
- cache_for (timedelta, optional, DEPRECATED): The amount of time to maintain a cache
of the outputs of this task. Useful for situations where the containing Flow
will be rerun multiple times, but this task doesn't need to be.
- cache_validator (Callable, optional, DEPRECATED): Validator that will determine
whether the cache for this task is still valid (only required if `cache_for`
is provided; defaults to `prefect.engine.cache_validators.duration_only`)
- cache_key (str, optional, DEPRECATED): if provided, a `cache_key`
serves as a unique identifier for this Task's cache, and can be shared
across both Tasks _and_ Flows; if not provided, the Task's _name_ will
be used if running locally, or the Task's database ID if running in
Cloud
- checkpoint (bool, optional): if this Task is successful, whether to
store its result using the `result_handler` available during the run;
Also note that checkpointing will only occur locally if
`prefect.config.flows.checkpointing` is set to `True`
- result_handler (ResultHandler, optional, DEPRECATED): the handler to
use for retrieving and storing state results during execution; if not
provided, will default to the one attached to the Flow
- result (Result, optional): the result instance used to retrieve and
store task results during execution
- target (Union[str, Callable], optional): location to check for task Result. If a result
exists at that location then the task run will enter a cached state.
`target` strings can be templated formatting strings which will be
formatted at runtime with values from `prefect.context`. If a callable function
is provided, it should have signature `callable(**kwargs) -> str` and at write
time all formatting kwargs will be passed and a fully formatted location is
expected as the return value. Can be used for string formatting logic that
`.format(**kwargs)` doesn't support
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the task changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the task instance, the old (prior) state, and the new
(current) state, with the following signature:
`state_handler(task: Task, old_state: State, new_state: State) -> Optional[State]`
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
- on_failure (Callable, optional): A function with signature
`fn(task: Task, state: State) -> None` that will be called anytime this
Task enters a failure state
- log_stdout (bool, optional): Toggle whether or not to send stdout messages to
the Prefect logger. Defaults to `False`.
Raises:
- TypeError: if `tags` is of type `str`
- TypeError: if `timeout` is not of type `int`
"""
# Tasks are not iterable, though they do have a __getitem__ method
__iter__ = None
def __init__(
self,
name: str = None,
slug: str = None,
tags: Iterable[str] = None,
max_retries: int = None,
retry_delay: timedelta = None,
timeout: int = None,
trigger: Callable[[Dict["Edge", "State"]], bool] = None,
skip_on_upstream_skip: bool = True,
cache_for: timedelta = None,
cache_validator: Callable = None,
cache_key: str = None,
checkpoint: bool = None,
result_handler: "ResultHandler" = None,
state_handlers: List[Callable] = None,
on_failure: Callable = None,
log_stdout: bool = False,
result: "Result" = None,
target: str = None,
):
self.name = name or type(self).__name__
self.slug = slug
self.logger = logging.get_logger(self.name)
# avoid silently iterating over a string
if isinstance(tags, str):
raise TypeError("Tags should be a set of tags, not a string.")
current_tags = set(prefect.context.get("tags", set()))
self.tags = (set(tags) if tags is not None else set()) | current_tags
max_retries = (
max_retries
if max_retries is not None
else prefect.config.tasks.defaults.max_retries
)
retry_delay = (
retry_delay
if retry_delay is not None
else prefect.config.tasks.defaults.retry_delay
)
timeout = (
timeout if timeout is not None else prefect.config.tasks.defaults.timeout
)
if max_retries > 0 and retry_delay is None:
raise ValueError(
"A datetime.timedelta `retry_delay` must be provided if max_retries > 0"
)
        # check "not max_retries" here because the default max_retries value is 0, which is falsy
if retry_delay is not None and not max_retries:
raise ValueError(
"A `max_retries` argument greater than 0 must be provided if specifying a retry delay."
)
if timeout is not None and not isinstance(timeout, int):
raise TypeError(
"Only integer timeouts (representing seconds) are supported."
)
self.max_retries = max_retries
self.retry_delay = retry_delay
self.timeout = timeout
self.trigger = trigger or prefect.triggers.all_successful
self.skip_on_upstream_skip = skip_on_upstream_skip
if cache_for is None and (
cache_validator is not None
and cache_validator is not prefect.engine.cache_validators.never_use
):
warnings.warn(
"cache_validator provided without specifying cache expiration (cache_for); this Task will not be cached."
)
self.cache_for = cache_for
self.cache_key = cache_key
default_validator = (
prefect.engine.cache_validators.never_use
if cache_for is None
else prefect.engine.cache_validators.duration_only
)
self.cache_validator = cache_validator or default_validator
self.checkpoint = checkpoint
if result_handler:
warnings.warn(
"Result Handlers are deprecated; please use the new style Result classes instead."
)
self.result = ResultHandlerResult.from_result_handler(
result_handler
) # type: Optional[Result]
else:
self.result = result
self.target = target
# if both a target and a result were provided, update the result location
# to point at the target
if self.target and self.result:
if (
getattr(self.result, "location", None)
and self.result.location != self.target
):
warnings.warn(
"Both `result.location` and `target` were provided. "
"The `target` value will be used."
)
self.result = self.result.copy()
self.result.location = self.target
if state_handlers and not isinstance(state_handlers, collections.abc.Sequence):
raise TypeError("state_handlers should be iterable.")
self.state_handlers = state_handlers or []
if on_failure is not None:
self.state_handlers.append(
callback_factory(on_failure, check=lambda s: s.is_failed())
)
self.auto_generated = False
self.log_stdout = log_stdout
# if new task creations are being tracked, add this task
# this makes it possible to give guidance to users that forget
# to add tasks to a flow
if "_unused_task_tracker" in prefect.context:
if not isinstance(self, prefect.tasks.core.constants.Constant):
prefect.context._unused_task_tracker.add(self)
def __repr__(self) -> str:
return "<Task: {self.name}>".format(self=self)
# reimplement __hash__ because we override __eq__
def __hash__(self) -> int:
return id(self)
# Run --------------------------------------------------------------------
def run(self) -> None:
"""
The `run()` method is called (with arguments, if appropriate) to run a task.
*Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
        the following keywords are reserved: `upstream_tasks`, `task_args`, `flow` and `mapped`.
If a task has arguments in its `run()` method, these can be bound either by using the functional
API and _calling_ the task instance, or by using `self.bind` directly.
In addition to running arbitrary functions, tasks can interact with Prefect in a few ways:
<ul><li> Return an optional result. When this function runs successfully,
the task is considered successful and the result (if any) can be
made available to downstream tasks. </li>
<li> Raise an error. Errors are interpreted as failure. </li>
<li> Raise a [signal](../engine/signals.html). Signals can include `FAIL`, `SUCCESS`, `RETRY`, `SKIP`, etc.
and indicate that the task should be put in the indicated state.
<ul>
<li> `FAIL` will lead to retries if appropriate </li>
<li> `SUCCESS` will cause the task to be marked successful </li>
<li> `RETRY` will cause the task to be marked for retry, even if `max_retries`
has been exceeded </li>
                <li> `SKIP` will skip the task and possibly propagate the skip state through the
flow, depending on whether downstream tasks have `skip_on_upstream_skip=True`. </li></ul>
</li></ul>
"""
# Dependencies -------------------------------------------------------------
def copy(self, **task_args: Any) -> "Task":
"""
Creates and returns a copy of the current Task.
Args:
- **task_args (dict, optional): a dictionary of task attribute keyword arguments, these attributes
will be set on the new copy
Raises:
- AttributeError: if any passed `task_args` are not attributes of the original
Returns:
- Task: a copy of the current Task, with any attributes updated from `task_args`
"""
flow = prefect.context.get("flow", None)
if (
flow
and self in flow.tasks
and (flow.edges_to(self) or flow.edges_from(self))
):
warnings.warn(
"You are making a copy of a task that has dependencies on or to other tasks "
"in the active flow context. The copy will not retain those dependencies."
)
new = copy.copy(self)
if new.slug and "slug" not in task_args:
task_args["slug"] = new.slug + "-copy"
# check task_args
for attr, val in task_args.items():
if not hasattr(new, attr):
raise AttributeError(
"{0} does not have {1} as an attribute".format(self, attr)
)
else:
setattr(new, attr, val)
# if both a target and a result were provided, update the result location
# to point at the target
if new.target and new.result:
if (
getattr(new.result, "location", None)
and new.result.location != new.target
):
warnings.warn(
"Both `result.location` and `target` were provided. "
"The `target` value will be used."
)
new.result = new.result.copy()
new.result.location = new.target
new.tags = copy.deepcopy(self.tags).union(set(new.tags))
tags = set(prefect.context.get("tags", set()))
new.tags.update(tags)
# if new task creations are being tracked, add this task
# this makes it possible to give guidance to users that forget
# to add tasks to a flow. We also remove the original task,
# as it has been "interacted" with and don't want spurious
# warnings
if "_unused_task_tracker" in prefect.context:
if self in prefect.context._unused_task_tracker:
prefect.context._unused_task_tracker.remove(self)
if not isinstance(new, prefect.tasks.core.constants.Constant):
prefect.context._unused_task_tracker.add(new)
return new
@property
def __signature__(self) -> inspect.Signature:
"""Dynamically generate the signature, replacing ``*args``/``**kwargs``
with parameters from ``run``"""
if not hasattr(self, "_cached_signature"):
sig = inspect.Signature.from_callable(self.run)
parameters = list(sig.parameters.values())
parameters.extend(EXTRA_CALL_PARAMETERS)
self._cached_signature = inspect.Signature(
parameters=parameters, return_annotation="Task"
)
return self._cached_signature
def __call__(
self,
*args: Any,
mapped: bool = False,
task_args: dict = None,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any
) -> "Task":
"""
Calling a Task instance will first create a _copy_ of the instance, and then
bind any passed `args` / `kwargs` to the run method of the copy. This new task
is then returned.
Args:
- *args: arguments to bind to the new Task's `run` method
- **kwargs: keyword arguments to bind to the new Task's `run` method
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments; defaults to `False`.
If `True`, any arguments contained within a `prefect.utilities.tasks.unmapped`
container will _not_ be mapped over.
- task_args (dict, optional): a dictionary of task attribute keyword arguments, these attributes
will be set on the new copy
- upstream_tasks ([Task], optional): a list of upstream dependencies
for the new task. This kwarg can be used to functionally specify
dependencies without binding their result to `run()`
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
Returns:
- Task: a new Task instance
"""
new = self.copy(**(task_args or {}))
new.bind(
*args, mapped=mapped, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
return new
def bind(
self,
*args: Any,
mapped: bool = False,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any
) -> "Task":
"""
Binding a task to (keyword) arguments creates a _keyed_ edge in the active Flow
that will pass data from the arguments (whether Tasks or constants) to the
Task's `run` method under the appropriate key. Once a Task is bound in this
manner, the same task instance cannot be bound a second time in the same Flow.
To bind arguments to a _copy_ of this Task instance, see `__call__`.
Additionally, non-keyed edges can be created by passing any upstream
dependencies through `upstream_tasks`.
Args:
- *args: arguments to bind to the current Task's `run` method
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments; defaults to `False`.
If `True`, any arguments contained within a `prefect.utilities.tasks.unmapped`
container will _not_ be mapped over.
- upstream_tasks ([Task], optional): a list of upstream dependencies for the
current task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- **kwargs: keyword arguments to bind to the current Task's `run` method
Returns:
- Task: the current Task instance
"""
# this will raise an error if callargs weren't all provided
signature = inspect.signature(self.run)
callargs = dict(signature.bind(*args, **kwargs).arguments) # type: Dict
# bind() compresses all variable keyword arguments under the ** argument name,
# so we expand them explicitly
var_kw_arg = next(
(p for p in signature.parameters.values() if p.kind == VAR_KEYWORD), None
)
if var_kw_arg:
callargs.update(callargs.pop(var_kw_arg.name, {}))
flow = flow or prefect.context.get("flow", None)
if not flow:
raise ValueError("Could not infer an active Flow context.")
self.set_dependencies(
flow=flow,
upstream_tasks=upstream_tasks,
keyword_tasks=callargs,
mapped=mapped,
)
tags = set(prefect.context.get("tags", set()))
self.tags.update(tags)
return self
def map(
self,
*args: Any,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
task_args: dict = None,
**kwargs: Any
) -> "Task":
"""
Map the Task elementwise across one or more Tasks. Arguments that should _not_ be mapped over
should be placed in the `prefect.utilities.tasks.unmapped` container.
For example:
```
task.map(x=X, y=unmapped(Y))
```
will map over the values of `X`, but not over the values of `Y`
Args:
- *args: arguments to map over, which will elementwise be bound to the Task's `run` method
- upstream_tasks ([Task], optional): a list of upstream dependencies
to map over
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
            - task_args (dict, optional): a dictionary of task attribute keyword arguments;
these attributes will be set on the new copy
- **kwargs: keyword arguments to map over, which will elementwise be bound to the Task's `run` method
Raises:
- AttributeError: if any passed `task_args` are not attributes of the original
Returns:
- Task: a new Task instance
"""
for arg in args:
if not hasattr(arg, "__getitem__") and not isinstance(arg, unmapped):
raise TypeError(
"Cannot map over unsubscriptable object of type {t}: {preview}...".format(
t=type(arg), preview=repr(arg)[:10]
)
)
new = self.copy(**(task_args or {}))
return new.bind(
*args, mapped=True, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
def set_dependencies(
self,
flow: "Flow" = None,
upstream_tasks: Iterable[object] = None,
downstream_tasks: Iterable[object] = None,
keyword_tasks: Mapping[str, object] = None,
mapped: bool = False,
validate: bool = None,
) -> None:
"""
        Set dependencies for this task on a flow, either the flow specified or the one found in the current context
Args:
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- upstream_tasks ([object], optional): A list of upstream tasks for this task
            - downstream_tasks ([object], optional): A list of downstream tasks for this task
            - keyword_tasks ({str: object}, optional): The results of these tasks will be provided
to this task under the specified keyword arguments.
- mapped (bool, optional): Whether the results of the _upstream_ tasks should be mapped over
with the specified keyword arguments
- validate (bool, optional): Whether or not to check the validity of the flow. If not
provided, defaults to the value of `eager_edge_validation` in your Prefect
configuration file.
Returns:
- None
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
flow = flow or prefect.context.get("flow", None)
if not flow:
raise ValueError(
"No Flow was passed, and could not infer an active Flow context."
)
flow.set_dependencies(
task=self,
upstream_tasks=upstream_tasks,
downstream_tasks=downstream_tasks,
keyword_tasks=keyword_tasks,
validate=validate,
mapped=mapped,
)
def set_upstream(
self, task: object, flow: "Flow" = None, key: str = None, mapped: bool = False
) -> None:
"""
Sets the provided task as an upstream dependency of this task.
Args:
            - task (object): A task or object that will be converted to a task and set
                as an upstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- key (str, optional): The key to be set for the new edge; the result of the upstream task
will be passed to this task's `run()` method under this keyword argument.
- mapped (bool, optional): Whether this dependency is mapped; defaults to `False`
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
if key is not None:
keyword_tasks = {key: task}
self.set_dependencies(flow=flow, keyword_tasks=keyword_tasks, mapped=mapped)
else:
self.set_dependencies(flow=flow, upstream_tasks=[task], mapped=mapped)
def set_downstream(
self, task: "Task", flow: "Flow" = None, key: str = None, mapped: bool = False
) -> None:
"""
Sets the provided task as a downstream dependency of this task.
Args:
- task (Task): A task that will be set as a downstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- key (str, optional): The key to be set for the new edge; the result of this task
will be passed to the downstream task's `run()` method under this keyword argument.
- mapped (bool, optional): Whether this dependency is mapped; defaults to `False`
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
if key is not None:
keyword_tasks = {key: self}
task.set_dependencies( # type: ignore
flow=flow, keyword_tasks=keyword_tasks, mapped=mapped
) # type: ignore
else:
task.set_dependencies(flow=flow, upstream_tasks=[self], mapped=mapped)
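    # Hedged usage sketch (not from the original source; `extract`, `load` and
    # `notify` are hypothetical tasks):
    #
    #     with prefect.Flow("etl") as flow:
    #         extract.set_downstream(load, key="data")  # load.run(data=<extract result>)
    #         load.set_downstream(notify)               # no key: plain state dependency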
def inputs(self) -> Dict[str, Dict]:
"""
Describe the inputs for this task. The result is a dictionary that maps each input to
a `type`, `required`, and `default`. All values are inferred from the `run()`
signature; this method can be overloaded for more precise control.
Returns:
- dict
"""
inputs = {}
for name, parameter in inspect.signature(self.run).parameters.items():
input_type = parameter.annotation
if input_type is inspect._empty: # type: ignore
input_type = Any
input_default = parameter.default
input_required = False
if input_default is inspect._empty: # type: ignore
input_required = True
input_default = None
inputs[name] = dict(
type=input_type, default=input_default, required=input_required
)
return inputs
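    # Hedged example (not from the original source): for a task whose run method is
    # `def run(self, x: int, y: int = 1)`, this returns
    #     {"x": {"type": int, "default": None, "required": True},
    #      "y": {"type": int, "default": 1, "required": False}}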
def outputs(self) -> Any:
"""
Get the output types for this task.
Returns:
- Any
"""
return_annotation = inspect.signature(self.run).return_annotation
if return_annotation is inspect._empty: # type: ignore
return_annotation = Any
return return_annotation
# Serialization ------------------------------------------------------------
def serialize(self) -> Dict[str, Any]:
"""
Creates a serialized representation of this task
Returns:
- dict representing this task
"""
return prefect.serialization.task.TaskSchema().dump(self)
# Operators ----------------------------------------------------------------
def is_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self == other`
This can't be implemented as the __eq__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Equal().bind(self, other)
def is_not_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self != other`
        This can't be implemented as the __ne__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.NotEqual().bind(self, other)
def not_(self) -> "Task":
"""
Produces a Task that evaluates `not self`
Returns:
- Task
"""
return prefect.tasks.core.operators.Not().bind(self)
def or_(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self or other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Or().bind(self, other)
# Magic Method Interactions ----------------------------------------------------
def __getitem__(self, key: Any) -> "Task":
"""
Produces a Task that evaluates `self[key]`
Args:
            - key (object): the object to use as an index for this task. It will be converted
to a Task if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GetItem().bind(self, key)
def __or__(self, other: object) -> object:
"""
Creates a state dependency between `self` and `other`
`self | other --> self.set_dependencies(downstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task (if it isn't one already)
and set as a downstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(downstream_tasks=[other])
return other
def __mifflin__(self) -> None: # coverage: ignore
"Calls Dunder Mifflin"
import webbrowser
webbrowser.open("https://cicdw.github.io/welcome.html")
def __ror__(self, other: object) -> "Task":
"""
Creates a state dependency between `self` and `other`:
`other | self --> self.set_dependencies(upstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task and set as an
upstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(upstream_tasks=[other])
return self
    # Magic Method Operators -----------------------------------------------------
def __add__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self + other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(self, other)
def __sub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self - other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(self, other)
def __mul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self * other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(self, other)
def __truediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self / other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(self, other)
def __floordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self // other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(self, other)
def __mod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self % other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(self, other)
def __pow__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self ** other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Pow().bind(self, other)
def __and__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self & other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.And().bind(self, other)
def __radd__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other + self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(other, self)
def __rsub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other - self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(other, self)
def __rmul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other * self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(other, self)
def __rtruediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other / self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(other, self)
def __rfloordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other // self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(other, self)
def __rmod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other % self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(other, self)
def __rpow__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other ** self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Pow().bind(other, self)
def __rand__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other & self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.And().bind(other, self)
def __gt__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self > other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GreaterThan().bind(self, other)
def __ge__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self >= other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GreaterThanOrEqual().bind(self, other)
def __lt__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self < other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.LessThan().bind(self, other)
def __le__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self <= other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.LessThanOrEqual().bind(self, other)
# All keyword-only arguments to Task.__call__, used for dynamically generating
# Signature objects for Task objects
EXTRA_CALL_PARAMETERS = [
p
for p in inspect.Signature.from_callable(Task.__call__).parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY
]
# DEPRECATED - this is to allow backwards-compatible access to Parameters
# https://github.com/PrefectHQ/prefect/pull/2758
from .parameter import Parameter as _Parameter
class Parameter(_Parameter):
def __new__(cls, *args, **kwargs): # type: ignore
warnings.warn("`Parameter` has moved, please import as `prefect.Parameter`")
return super().__new__(cls)
|
the-stack_0_5897 |
import numpy as np
import tensornetwork as tn
import itertools as itt
#from scipy.sparse import linalg as la
#import matrixproductstates as mp
import scipy as SP
import pymps as mp
def kdelta(i,j):
"""
Parameters
----------
i : int
State index i.
j : int
State index j.
Returns
-------
int
Kronecker_Delta(i,j).
"""
return int(i==j)
# Construct MPS for 4 sites
bond_dim=2
n_sites = 4
mps = mp.init_wavefunction(n_sites)
#
# Creating the things we need for the Hamiltonian
#============================
from scipy import linalg as LA
from scipy import special as sp
#FUNDAMENTAL CONSTANTS
hbar = 6.582119569e-16 #eV*s
m_e = 0.51099895000e6 #eV/c^2
m_eff = 0.067
c_light = 299792458 #m/s
bohr = 5.7883818060e-2 #meV/T
lande = 0.52
rydberg=5.93 #meV
#=======
def vmatrix(n1l,m1l,n2l,m2l,n2r,m2r,n1r,m1r):
"""
Computes Coulomb matrix elements for a parabolic quantum dot. Analytic formula
derived by Marek Korkusinski can be found here: https://mysite.science.uottawa.ca/phawrylak/member_pages/korkusinski/thesis/thesis.html.
Computes <n1l m1l, n2l m2l|V|n2r m2r, n1r m1r>
Parameters
----------
n1l : int
index of Landau level of electron 1 for initial state.
m1l : int
index of quasidegenerate orbital of electron 1 for initial state.
n2l : int
index of Landau level of electron 2 for initial state.
m2l : int
index of quasidegenerate orbital of electron 1 for initial state.
n2r : int
index of Landau level of electron 2 for final state.
m2r : int
index of quasidegenerate orbital of electron 2 for final state.
n1r : int
index of Landau level of electron 1 for final state.
m1r : int
index of quasidegenerate orbital of electron 1 for final state.
Returns
-------
vmatel : float
<n1l m1l, n2l m2l|V|n2r m2r, n1r m1r>
"""
delta_rl_rr = kdelta((m1l+m2l)-(n1l+n2l),(m1r+m2r)-(n1r+n2r))
fac_denom = np.sqrt(sp.factorial(n1l)*sp.factorial(m1l)*\
sp.factorial(n1r)*sp.factorial(m1r)*\
sp.factorial(n2l)*sp.factorial(m2l)*\
sp.factorial(n2r)*sp.factorial(m2r))
phase = (-1)**(n2l+m2l+n2r+m2r)
total = 0.
for p1 in range(min(n1l,n1r)+1):
for p2 in range(min(m1l,m1r)+1):
for p3 in range(min(n2l,n2r)+1):
for p4 in range(min(m2l,m2r)+1):
power = n1l + n2l + m1r + m2r - (p1+p2+p3+p4)
p1fac=sp.factorial(p1)*sp.binom(n1l,p1)*sp.binom(n1r,p1)
p2fac=sp.factorial(p2)*sp.binom(m1l,p2)*sp.binom(m1r,p2)
p3fac=sp.factorial(p3)*sp.binom(n2l,p3)*sp.binom(n2r,p3)
p4fac=sp.factorial(p4)*sp.binom(m2l,p4)*sp.binom(m2r,p4)
gammafac=(-0.5)**power*sp.gamma(power+0.5)
total+=p1fac*p2fac*p3fac*p4fac*gammafac
vmatel = delta_rl_rr*phase*total/(fac_denom*np.sqrt(np.pi))
return vmatel
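# Hedged usage note (not part of the original script): the antisymmetrized matrix
# elements built below combine a direct and an exchange term, e.g. for the m=0 and
# m=1 orbitals used here:
#     direct   = vmatrix(1, 0, 1, 1, 1, 1, 1, 0)
#     exchange = vmatrix(1, 0, 1, 1, 1, 0, 1, 1)
#     v12 = (direct - exchange) * E_0   # E_0 rescales to physical units (meV)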
def sp_energies(n,m,B,spin,hbar,m_e,m_eff,c_light,bohr,lande,rydberg,omega_0,omega_c):
"""
Parameters
----------
n : int
Landau level.
m : int
Sub orbital in Landau level n.
B : float
Magnetic field in T.
spin : float
Spin of electron.
hbar : float
Planck's constant.
m_e : flaot
Mass of electron.
m_eff : float
Effective mass of electron.
c_light : float
Speed of light.
bohr : float
Bohr radius.
lande : float
g-factor.
rydberg : float
Rydberg energy.
omega_0 : float
Characteristic frequency of harmonic oscillator.
omega_c : float
Cyclotron frequency.
Returns
-------
energy : float
Single particle energy.
"""
omega_p=np.sqrt(omega_0**2+0.25*omega_c**2)+0.5*omega_c
omega_m=np.sqrt(omega_0**2+0.25*omega_c**2)-0.5*omega_c
energy = omega_p*(n+0.5)+omega_m*(m+0.5)-lande*bohr*B*spin
return energy
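# Hedged example (not part of the original script): with the parameters set below
# (B = 10 T, omega_0 = 3.31 meV), the lowest single-particle level would be
#     sp_energies(0, 0, B, 0.5, hbar, m_e, m_eff, c_light, bohr, lande,
#                 rydberg, omega_0, omega_c)
# i.e. 0.5*(omega_p + omega_m) minus the Zeeman term for a spin-up electron.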
B=10
omega_0 = 3.31 #meV
omega_c = 1e3*hbar*B*c_light**2/(m_e*m_eff)
OMEGA_H = np.sqrt(omega_0**2+0.25*omega_c**2)/rydberg
E_0=np.sqrt(np.pi*OMEGA_H)*rydberg
epsilon = []
for m in range(4):
epsilon.append(sp_energies(1,m,B,0.5,hbar,m_e,m_eff,c_light,bohr,lande,rydberg,omega_0,omega_c))
v12=(vmatrix(1, 0, 1, 1, 1, 1, 1, 0)-vmatrix(1, 0, 1, 1, 1, 0, 1, 1))*E_0
v13=(vmatrix(1, 0, 1, 2, 1, 2, 1, 0)-vmatrix(1, 0, 1, 2, 1, 0, 1, 2))*E_0
v14=(vmatrix(1, 0, 1, 3, 1, 3, 1, 0)-vmatrix(1, 0, 1, 3, 1, 0, 1, 3))*E_0
v23=(vmatrix(1, 1, 1, 2, 1, 2, 1, 1)-vmatrix(1, 1, 1, 2, 1, 1, 1, 2))*E_0
v24=(vmatrix(1, 1, 1, 3, 1, 3, 1, 1)-vmatrix(1, 1, 1, 3, 1, 1, 1, 3))*E_0
v34=(vmatrix(1, 2, 1, 3, 1, 3, 1, 2)-vmatrix(1, 2, 1, 3, 1, 2, 1, 3))*E_0
w=(vmatrix(1, 0, 1, 3, 1, 1, 1, 2)-vmatrix(1, 0, 1, 3, 1, 2, 1, 1))*E_0
for m in range(4):
epsilon.append(sp_energies(1,m,B,0.5,hbar,m_e,m_eff,c_light,bohr,lande,rydberg,omega_0,omega_c))
# Create H MPO
G0 = np.array([[[0.]*4]*2]*2)
G1 = np.array([[[[0.]*6]*4]*2]*2)
G2 = np.array([[[[0.]*4]*6]*2]*2)
G3 = np.array([[[0.]*4]*2]*2)
for n0p in range(2):
for n0 in range(2):
G0[n0p,n0]=np.array([n0*kdelta(n0p,n0),kdelta(n0p,n0),kdelta(n0p-1,n0),kdelta(n0p,n0-1)])
for n1p in range(2):
for n1 in range(2):
G1[n1p,n1]=np.array([[v14*kdelta(n1p,n1),0,epsilon[0]*kdelta(n1p,n1)+v12*n1*kdelta(n1p,n1),v13*kdelta(n1p,n1),0,0]\
,[epsilon[3]*kdelta(n1p,n1)+v24*n1*kdelta(n1p,n1),v34*kdelta(n1p,n1),epsilon[1]*n1*kdelta(n1p,n1),epsilon[2]*\
kdelta(n1p,n1)+v23*n1*kdelta(n1p,n1),0,0],\
[0,0,0,0,-w*kdelta(n1p,n1-1),0],[0,0,0,0,0,-w*kdelta(n1p-1,n1)]])
for n2p in range(2):
for n2 in range(2):
G2[n2p,n2]=np.array([[kdelta(n2p,n2),0,0,0],[n2*kdelta(n2p,n2),0,0,0],[0,kdelta(n2p,n2),0,0],[0,n2*kdelta(n2p,n2),0,0],\
[0,0,kdelta(n2p,n2-1),0],[0,0,0,kdelta(n2p-1,n2)]])
for n3p in range(2):
for n3 in range(2):
G3[n3p,n3]=np.array([n3*kdelta(n3p,n3),kdelta(n3p,n3),kdelta(n3p-1,n3),kdelta(n3p,n3-1)])
#Create the chemical potential MPO
W0 = np.array([[[0.]*2]*2]*2)
W1 = np.array([[[[0.]*2]*2]*2]*2)
W2 = np.array([[[[0.]*2]*2]*2]*2)
W3 = np.array([[[0.]*2]*2]*2)
chem_pot=-35
for n0p in range(2):
for n0 in range(2):
W0[n0p,n0]=np.array([n0*kdelta(n0p,n0),kdelta(n0p,n0)])*chem_pot
for n1p in range(2):
for n1 in range(2):
W1[n1p,n1]=np.array([[kdelta(n1p,n1),0.],[n1*kdelta(n1p,n1),kdelta(n1p,n1)]])
for n2p in range(2):
for n2 in range(2):
W2[n2p,n2]=np.array([[kdelta(n2p,n2),0.],[n2*kdelta(n2p,n2),kdelta(n2p,n2)]])
for n3p in range(2):
for n3 in range(2):
W3[n3p,n3]=np.array([kdelta(n3p,n3),n3*kdelta(n3p,n3)])
O0 = np.array([[[0.]*6]*2]*2)
O1 = np.array([[[[0.]*8]*6]*2]*2)
O2 = np.array([[[[0.]*6]*8]*2]*2)
O3 = np.array([[[0.]*6]*2]*2)
for n0p in range(2):
for n0 in range(2):
O0[n0p,n0]=np.hstack((G0[n0p,n0],W0[n0p,n0]))
for n1p in range(2):
for n1 in range(2):
O1[n1p,n1]=SP.linalg.block_diag(G1[n1p,n1],W1[n1p,n1])
for n2p in range(2):
for n2 in range(2):
O2[n2p,n2]=SP.linalg.block_diag(G2[n2p,n2],W2[n2p,n2])
for n3p in range(2):
for n3 in range(2):
O3[n3p,n3]=np.hstack((G3[n3p,n3],W3[n3p,n3]))
#Creating MPO as a tensornetwork
hmpo = [ \
tn.Node(O0,axis_names=["n_0p","n_0","i_0"] )] + \
[tn.Node(O1,axis_names=["n_1p","n_1","i_0","i_1"])] + \
[tn.Node(O2,axis_names=["n_2p","n_2","i_1","i_2"])] + \
[tn.Node(O3,axis_names=["n_3p","n_3","i_2"])]
# Connect edges to build MPO
connected_edges2=[]
conn2=hmpo[0]["i_0"]^hmpo[1]["i_0"]
connected_edges2.append(conn2)
conn2=hmpo[1]["i_1"]^hmpo[2]["i_1"]
connected_edges2.append(conn2)
conn2=hmpo[2]["i_2"]^hmpo[3]["i_2"]
connected_edges2.append(conn2)
#Run DMRG algorithm
energy,energies,MPS=mp.DMRG(4,hmpo,10,mps)
MPS[0]["i_0"]^MPS[1]["i_0"]
MPS[1]["i_1"]^MPS[2]["i_1"]
MPS[2]["i_2"]^MPS[3]["i_2"]
test=MPS[0]@MPS[1]@MPS[2]@MPS[3]
np.transpose(np.where(np.abs(test.tensor)>=1e-10))[0]
number_e=np.count_nonzero(np.transpose(np.where(np.abs(test.tensor)>=1e-10))[0])
print('Corrected Energy = {}'.format(energy-number_e*chem_pot))
|
the-stack_0_5898 | '''Improve challenge 028: the computer
will 'think' of a number between 1 and 10,
and now the player keeps guessing until
getting it right, showing at the end how
many guesses were needed to win.'''
from random import randint
secret = randint(1, 10)
count = 0
guessed = False
while not guessed:
    num = int(input('Guess the integer between 1 and 10: '))
    count += 1
    if num == secret:
        guessed = True
        print('You needed {} guesses to get it right.'.format(count))
    else:
        print('Wrong, try again!') |
the-stack_0_5900 | #!/usr/bin/env python
import csv
import dataset
import json
import locale
import logging
import re
import sys
from collections import OrderedDict
from itertools import groupby
SECTION_BREAK = 'CLEARANCE RATE DATA FOR INDEX OFFENSES'
END_BREAK = ' READ'
FIELDNAMES = ['year', 'state', 'ori7', 'lea_name', 'population', 'mos', 'agg_assault_cleared', 'agg_assault_cleared_pct', 'agg_assault_count', 'arson_cleared', 'arson_cleared_pct', 'arson_count', 'burglary_cleared', 'burglary_cleared_pct', 'burglary_count', 'forcible_rape_cleared', 'forcible_rape_cleared_pct', 'forcible_rape_count', 'larceny_theft_cleared', 'larceny_theft_cleared_pct', 'larceny_theft_count', 'murder_cleared', 'murder_cleared_pct', 'murder_count', 'mvt_cleared', 'mvt_cleared_pct', 'mvt_count', 'property_cleared', 'property_cleared_pct', 'property_count', 'robbery_cleared', 'robbery_cleared_pct', 'robbery_count', 'violent_cleared', 'violent_cleared_pct', 'violent_count']
CRIME_TYPES = [
'violent',
'property',
'murder',
#'forcible_rape',
'robbery',
'agg_assault',
'burglary',
'larceny_theft',
'mvt',
'arson',
]
IMPORT_FILES = [
('2011', '2011-clearance-rates.txt'),
('2012', '2012-clearance-rates.txt'),
('2013', '2013-clearance-rates.txt'),
('2014', '2014-clearance-rates.txt'),
]
POPULATION_BUCKETS = [
{
'name': '1,000,000 and above',
'low': 1000000,
'high': None,
},
{
'name': '500,000 to 999,999',
'low': 500000,
'high': 999999,
},
{
'name': '250,000 to 499,999',
'low': 250000,
'high': 499999,
},
{
'name': '100,000 to 249,999',
'low': 100000,
'high': 249999,
},
{
'name': '50,000 to 99,999',
'low': 50000,
'high': 99999,
},
{
'name': '25,000 to 49,999',
'low': 25000,
'high': 49999,
},
{
'name': '10,000 to 24,999',
'low': 10000,
'high': 24999,
},
{
'name': 'Under 10,000',
'low': 1, # Population should never be 0
'high': 9999,
},
]
locale.setlocale(locale.LC_ALL, 'en_US')
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('ucr-parser')
db = dataset.connect('postgresql:///ucr_clearance')
def parse(file_path, year):
output = []
f = open(file_path)
line = skip_to_start(f)
while True:
row = {
'year': year
}
for i in range(0, 4):
if SECTION_BREAK in line:
line = skip_section_break(f)
# We're done!
if END_BREAK in line or line == '':
return output
line_parts = split_line(line)
if i == 0:
row['ori7'] = line_parts[0]
if row['ori7'].startswith('0'):
row['ori7'] = row['ori7'][1:]
row['lea_name'] = ' '.join(line_parts[1:])
row['state'] = parse_state(row['ori7'])
if i == 1:
row['mos'] = parse_int(line_parts[0])
row['violent_count'] = parse_int(line_parts[3])
row['property_count'] = parse_int(line_parts[4])
row['murder_count'] = parse_int(line_parts[5])
row['forcible_rape_count'] = parse_int(line_parts[6])
row['robbery_count'] = parse_int(line_parts[7])
row['agg_assault_count'] = parse_int(line_parts[8])
row['burglary_count'] = parse_int(line_parts[9])
row['larceny_theft_count'] = parse_int(line_parts[10])
row['mvt_count'] = parse_int(line_parts[11])
row['arson_count'] = parse_int(line_parts[12])
if i == 2:
row['population'] = parse_int(line_parts[0])
row['violent_cleared'] = parse_int(line_parts[3])
row['property_cleared'] = parse_int(line_parts[4])
row['murder_cleared'] = parse_int(line_parts[5])
row['forcible_rape_cleared'] = parse_int(line_parts[6])
row['robbery_cleared'] = parse_int(line_parts[7])
row['agg_assault_cleared'] = parse_int(line_parts[8])
row['burglary_cleared'] = parse_int(line_parts[9])
row['larceny_theft_cleared'] = parse_int(line_parts[10])
row['mvt_cleared'] = parse_int(line_parts[11])
row['arson_cleared'] = parse_int(line_parts[12])
if i == 3:
row['violent_cleared_pct'] = parse_pct(line_parts[1])
row['property_cleared_pct'] = parse_pct(line_parts[2])
row['murder_cleared_pct'] = parse_pct(line_parts[3])
row['forcible_rape_cleared_pct'] = parse_pct(line_parts[4])
row['robbery_cleared_pct'] = parse_pct(line_parts[5])
row['agg_assault_cleared_pct'] = parse_pct(line_parts[6])
row['burglary_cleared_pct'] = parse_pct(line_parts[7])
row['larceny_theft_cleared_pct'] = parse_pct(line_parts[8])
row['mvt_cleared_pct'] = parse_pct(line_parts[9])
row['arson_cleared_pct'] = parse_pct(line_parts[10])
line = f.readline()
logger.debug('Writing row for %s (%s), %s' % (row['ori7'], row['lea_name'], year))
output.append(row)
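# Hedged sketch of one parsed record (field values below are invented placeholders,
# not real data):
#     {'year': '2013', 'ori7': 'OH00102', 'lea_name': 'EXAMPLE CITY PD', 'state': 'OH',
#      'mos': 12, 'population': 48210, 'violent_count': 151, 'violent_cleared': 67,
#      'violent_cleared_pct': 0.44, ...}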
def skip_to_start(f):
"""
Skip to start of data
"""
while True:
line = f.readline()
if SECTION_BREAK in line:
break
return line
def skip_section_break(f):
"""
Read four lines after section break
"""
f.readline()
f.readline()
f.readline()
return f.readline()
def split_line(line):
return re.sub(' +', ' ', line).strip().split(' ')
def parse_pct(value):
"""
Parse percentage
"""
return float(value)/100
def parse_int(value):
"""
Parse integer
"""
return locale.atoi(value)
def parse_state(value):
"""
Parse state from LEA code.
"""
return value[0:2]
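# Hedged examples (not part of the original script):
#     parse_pct('45.2')      -> 0.452
#     parse_int('1,234')     -> 1234     (en_US locale, commas as thousands separators)
#     parse_state('OH00102') -> 'OH'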
def get_data():
"""
Get and parse raw data
"""
all_data = []
for year, file in IMPORT_FILES:
data_file = 'data/%s' % file
data = parse(data_file, year)
all_data = all_data + data
return all_data
def get_agencies():
"""
Get agency data
"""
agencies = {}
with open('data/agency-crosswalk.csv') as f:
reader = csv.DictReader(f)
for row in reader:
agencies[row['ORI7']] = row
return agencies
def write_agency_lookup():
"""
Write agency lookup
"""
result = db.query("""
select
a.ori7, a.agency, a.agentype, a.state
from agencies as a join clearance_rates as c on a.ori7 = c.ori7
group by a.ori7, a.agency, a.agentype, a.state
order by a.ori7
""")
dataset.freeze(result, format='csv', filename='output/agency_names.csv')
def write_clearance_json():
"""
Write json data
"""
result = db.query("""
select
a.ori7, a.agency, a.state, a.agentype,
c.year, c.population, c.mos,
c.violent_count, c.violent_cleared, c.violent_cleared_pct,
c.property_count, c.property_cleared, c.property_cleared_pct,
c.murder_count, c.murder_cleared, c.murder_cleared_pct,
c.forcible_rape_count, c.forcible_rape_cleared, c.forcible_rape_cleared_pct,
c.robbery_count, c.robbery_cleared, c.robbery_cleared_pct,
c.agg_assault_count, c.agg_assault_cleared, c.agg_assault_cleared_pct,
c.burglary_count, c.burglary_cleared, c.burglary_cleared_pct,
c.mvt_count, c.mvt_cleared, c.mvt_cleared_pct,
c.larceny_theft_count, c.larceny_theft_cleared, c.larceny_theft_cleared_pct,
c.arson_count, c.arson_cleared, c.arson_cleared_pct
from clearance_rates as c join agencies as a on a.ori7 = c.ori7
order by c.ori7, c.year
""")
medians = analyze_medians()
data = []
for row in result:
data.append(row)
for ori7, yearly_data in groupby(data, lambda x: x['ori7']):
output = {
'ori7': ori7,
'crimes': OrderedDict(),
}
for row in yearly_data:
year = row['year']
has_median = False
if row['agentype'] == 'Municipal police':
has_median = True
bucket = get_population_bucket(row['population'])
if bucket and not output.get('medians'):
output['medians'] = OrderedDict()
if not output.get('agency'):
output['agency'] = row['agency']
output['state'] = row['state']
output['agency_type'] = row['agentype']
if year == '2013' and has_median and bucket:
output['population_bucket'] = bucket
output['population'] = row['population']
for field in CRIME_TYPES:
if not output['crimes'].get(field):
output['crimes'][field] = {}
if has_median and bucket:
if not output['medians'].get(field):
output['medians'][field] = {}
output['medians'][field][year] = {}
output['crimes'][field][year] = {}
output['crimes'][field][year]['mos'] = row['mos']
for measure in ['count', 'cleared', 'cleared_pct']:
if row['mos'] < 12:
output['crimes'][field][year][measure] = None
else:
row_value = row['%s_%s' % (field, measure)]
if measure == 'cleared' and row_value == 0 and row['%s_%s' % (field, 'count')] > 0:
output['data_warning'] = True
output['crimes'][field][year][measure] = row_value
if output.get('medians') and bucket:
median_key = 'median_%s_%s' % (field, measure)
median_value = medians[year][bucket][median_key]
if row['mos'] < 12:
output['medians'][field][year][measure] = None
else:
output['medians'][field][year][measure] = median_value
with open('output/%s.json' % ori7, 'w') as outfile:
logger.debug('Writing output/%s.json' % ori7)
json.dump(output, outfile)
def get_population_bucket(population):
"""
Get population bucket
"""
for bucket in POPULATION_BUCKETS:
if bucket['high']:
if population >= bucket['low'] and population <= bucket['high']:
return bucket['name']
else:
if population >= bucket['low']:
return bucket['name']
return None
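# Hedged examples (not part of the original script):
#     get_population_bucket(1250000) -> '1,000,000 and above'
#     get_population_bucket(62000)   -> '50,000 to 99,999'
#     get_population_bucket(0)       -> None  (below the lowest bucket)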
def analyze_medians():
"""
Analyze medians
"""
# Output is per-year, per-bucket, per-crime-type
output = {}
# Loop over years
for year, filename in IMPORT_FILES:
output[year] = {}
for bucket in POPULATION_BUCKETS:
where = 'population >= %d' % bucket['low']
if bucket['high']:
where = '%s and population <= %d' % (where, bucket['high'])
result = db.query("""
select
median(violent_count) as median_violent_count,
median(violent_cleared) as median_violent_cleared,
median(violent_cleared_pct) as median_violent_cleared_pct,
median(property_count) as median_property_count,
median(property_cleared) as median_property_cleared,
median(property_cleared_pct) as median_property_cleared_pct,
median(murder_count) as median_murder_count,
median(murder_cleared) as median_murder_cleared,
median(murder_cleared_pct) as median_murder_cleared_pct,
median(robbery_count) as median_robbery_count,
median(robbery_cleared) as median_robbery_cleared,
median(robbery_cleared_pct) as median_robbery_cleared_pct,
median(agg_assault_count) as median_agg_assault_count,
median(agg_assault_cleared) as median_agg_assault_cleared,
median(agg_assault_cleared_pct) as median_agg_assault_cleared_pct,
median(burglary_count) as median_burglary_count,
median(burglary_cleared) as median_burglary_cleared,
median(burglary_cleared_pct) as median_burglary_cleared_pct,
median(mvt_count) as median_mvt_count,
median(mvt_cleared) as median_mvt_cleared,
median(mvt_cleared_pct) as median_mvt_cleared_pct,
median(larceny_theft_count) as median_larceny_theft_count,
median(larceny_theft_cleared) as median_larceny_theft_cleared,
median(larceny_theft_cleared_pct) as median_larceny_theft_cleared_pct,
median(arson_count) as median_arson_count,
median(arson_cleared) as median_arson_cleared,
median(arson_cleared_pct) as median_arson_cleared_pct
from clearance_rates as c join agencies as a on a.ori7 = c.ori7
where mos=12 and year='%s'
and a.agentype='Municipal police'
and %s
""" % (year, where))
data = []
for row in result:
data.append(row)
output[year][bucket['name']] = data[0]
return output
def write_rates_to_db(data):
"""
Write clearance rate data to db
"""
logger.info('writing rates')
table = db['clearance_rates']
table.insert_many(data)
def write_agencies_to_db(agencies):
"""
Write agency data to db
"""
logger.info('writing agencies')
table = db['agencies']
process_agencies = []
for agency in agencies.values():
if not agency.get('ORI7'):
continue
processed_agency = {}
for key, value in agency.items():
# Skip the empty column whose meaning is not known
if key != '':
processed_agency[key.lower()] = value
process_agencies.append(processed_agency)
table.insert_many(process_agencies)
if __name__ == '__main__':
logger.info('Parsing agency data')
agencies = get_agencies()
logger.info('Writing agency data to db')
write_agencies_to_db(agencies)
logger.info('Parsing clearance data')
data = get_data()
logger.info('Writing clearance data to db')
write_rates_to_db(data)
logger.info('Writing agency lookup')
write_agency_lookup()
logger.info('Writing individual JSON files')
write_clearance_json()
|
the-stack_0_5901 | import torch.nn as nn
import torch.nn.functional as F
# from ws.wsRLInterfaces.PARAM_KEY_NAMES import STATE_DIMENSIONS, ACTION_DIMENSIONS
class ActorCritic(nn.Module):
def __init__(self, app_info):
super(ActorCritic, self).__init__()
env = app_info.ENV
action_size = env.fn_get_action_size()
state_size = env.fn_get_state_size()
hidden_layer_size = 256
self._app_info = app_info
self.state_to_hidden = nn.Linear(state_size, hidden_layer_size)
self.action_layer = nn.Linear(hidden_layer_size, action_size)
self.value_layer = nn.Linear(hidden_layer_size, 1)
self.state_value = None
# self.state_values = []
    def forward(self, state):
        # shared hidden representation for both the actor and critic heads
        hidden = self.state_to_hidden(state)
        state = F.relu(hidden)
        # critic: cache the state-value estimate for later retrieval
        self.state_value = self.value_layer(state)
        # actor: action logits -> probability distribution over actions
        action_info = self.action_layer(state)
        policy = F.softmax(action_info, dim=-1)
        return policy
def get_state_value(self):
return self.state_value
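# Hedged usage sketch (not part of the original file): `_MockEnv`/`_MockAppInfo`
# below are stand-ins for the real app_info object, which only needs an ENV
# exposing fn_get_action_size() and fn_get_state_size().
if __name__ == "__main__":
    import torch

    class _MockEnv:
        def fn_get_action_size(self):
            return 4

        def fn_get_state_size(self):
            return 8

    class _MockAppInfo:
        ENV = _MockEnv()

    model = ActorCritic(_MockAppInfo())
    policy = model(torch.randn(1, 8))   # action probabilities, shape (1, 4)
    value = model.get_state_value()     # critic value estimate, shape (1, 1)
    print(policy.shape, value.shape)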
|
the-stack_0_5903 | #!/usr/bin/env python
# -.- coding: UTF-8 -.-
# Created by Jordan Newman 10th June 2017
import os, sys, socket, struct
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = '\033[30m', '\033[31m', '\033[32m', '\033[33m', '\033[34m', '\033[1;35m', '\033[36m', '\033[37m'
if not sys.platform.startswith('linux'):
raise SystemExit("{0}This program only works on {1}linux{2} machines{3}".format(RED, YELLOW, RED, WHITE))
try:
import netifaces
from netifaces import AF_INET
netifacesInstalled = True
except:
print("{0}Please install the {1}\'netifaces\'{2} python library to enable all features of this command{3}".format(RED, GREEN, RED, WHITE))
netifacesInstalled = False
def displayInterfaces(interfaces):
print("""{0}.__ ______________________________________________ _______ __________ ________
| |\ \__ ___/_ ____/_ _ \_ ____/ _ \ \ __ \\\\_ _____// _____/
| |/ | \| | | __)_ | _/| __)/ /_\ \/ / \/ | __)_ \____ \
| / | \ | | \| | \| \/ | \ \____| \/ \\
|__\___|_ /___| /______ /|___|_ /\__ /\____|__ /\____ /______ /______ /
\/ \/ \/ \/ \/ \/ \/ \/{1}""").format(GREEN, WHITE)
for i in interfaces:
print(u'{0}\n\u250c[{1}{2}{3}]\n\u251c\u2500\u2500[{4}MAC{5}]\u257a\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2578[{6}{7}{8}]').format(RED, BLACK, i['name'], RED, YELLOW, RED, GREEN, i['mac'], RED)
print(u'\u251c\u2500\u2500[{0}IP{1}]\u257a\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2578[{2}{3}{4}]').format(YELLOW, RED, GREEN, i['ip'], RED)
print(u'\u251c\u2500\u2500[{0}Gateway{1}]\u257a\u2500\u2500\u2500\u2500\u2500\u2578[{2}{3}{4}]').format(YELLOW, RED, GREEN, i['gateway'], RED)
print(u'\u2514\u2500\u2500[{0}Gateway MAC{1}]\u257a\u2500\u2578[{2}{3}{4}]{5}').format(YELLOW, RED, GREEN, i['gatewayMac'], RED, WHITE)
def getInterfaces():
interfaces = os.listdir("/sys/class/net")
interfacesList = []
for interface in interfaces:
mac = getMAC(interface)
ip = getIP(interface)
gw = getGateway()
gwMac = getGatewayMAC(interface)
interfacesList.append({"name": interface, "ip": ip, "mac": mac, "gateway": gw, "gatewayMac": gwMac})
return interfacesList
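# Illustrative shape of the return value (hedged example, not from the original
# script; actual values depend on the host machine):
#     [{'name': 'eth0', 'ip': '192.168.1.23', 'mac': 'aa:bb:cc:dd:ee:ff',
#       'gateway': '192.168.1.1', 'gatewayMac': '11:22:33:44:55:66'},
#      {'name': 'lo', 'ip': '127.0.0.1', 'mac': '00:00:00:00:00:00',
#       'gateway': '192.168.1.1', 'gatewayMac': 'unknown'}]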
def getGateway():
with open('/proc/net/route') as r:
for line in r:
fields = line.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
def getMAC(iFace = None):
if iFace != None:
try:
conn = open('/sys/class/net/'+iFace+'/address')
mac = conn.read().strip()
conn.close()
return mac
except:
pass # /sys/class/net/iFace/address probably didnt exist
else:
return 'unknown'
def getGatewayMAC(iFace = None):
	entries = {}
	with open('/proc/net/arp') as arpFile:
		for line in arpFile:
			fields = line.strip().split()
			if iFace == None:
				return fields[3]
			entries[fields[5]] = fields[3]
	if iFace == None or iFace not in entries:
		entriesKeys = list(entries.keys())
		if len(entriesKeys) >= 2:
			return entries[entriesKeys[1]]
		else:
			return "unknown"
	else:
		return entries[iFace]
def getIP(iFace = None):
if netifacesInstalled == True and iFace != None:
internetBroadcastInfo = netifaces.ifaddresses(iFace)[AF_INET]
return internetBroadcastInfo[0]['addr']
iFaceIP = socket.gethostbyname(socket.gethostname())
if iFaceIP[0:6] == "127.0." and iFace != "lo":
return "unknown"
return iFaceIP
def resizeTerminal():
sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=26, cols=96))
if __name__ == "__main__":
#columns = os.popen('stty size', 'r').read().split()[1]
#if int(columns) < 95:
# resizeTerminal()
# I made the banner thinner, so there is no longer any need to resize terminal window :)
iFaces = getInterfaces()
displayInterfaces(iFaces)
|